Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dbcabad1 authored by David Chinner's avatar David Chinner Committed by Tim Shimmin
Browse files

[XFS] Fix block reservation mechanism.



The block reservation mechanism has been broken since the per-cpu
superblock counters were introduced. Make the block reservation code work
with the per-cpu counters by syncing the counters, snapshotting the amount
of available space and then doing a modification of the counter state
according to the result. Continue in a loop until we either have no space
available or we reserve some space.

SGI-PV: 956323
SGI-Modid: xfs-linux-melb:xfs-kern:27895a

Signed-off-by: default avatarDavid Chinner <dgc@sgi.com>
Signed-off-by: default avatarChristoph Hellwig <hch@infradead.org>
Signed-off-by: default avatarTim Shimmin <tes@sgi.com>
parent 20f4ebf2
Loading
Loading
Loading
Loading
+49 −5
Original line number Diff line number Diff line
@@ -460,7 +460,7 @@ xfs_fs_counts(
{
	unsigned long	s;

	xfs_icsb_sync_counters_lazy(mp);
	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
	s = XFS_SB_LOCK(mp);
	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	cnt->freertx = mp->m_sb.sb_frextents;
@@ -491,7 +491,7 @@ xfs_reserve_blocks(
	__uint64_t              *inval,
	xfs_fsop_resblks_t      *outval)
{
	__int64_t		lcounter, delta;
	__int64_t		lcounter, delta, fdblks_delta;
	__uint64_t		request;
	unsigned long		s;

@@ -504,17 +504,35 @@ xfs_reserve_blocks(
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting
	 * problem. We need to work out if we are freeing or allocating
	 * blocks first, then we can do the modification as necessary.
	 *
	 * We do this under the XFS_SB_LOCK so that if we are near
	 * ENOSPC, we will hold out any changes while we work out
	 * what to do. This means that the amount of free space can
	 * change while we do this, so we need to retry if we end up
	 * trying to reserve more space than is available.
	 *
	 * We also use the xfs_mod_incore_sb() interface so that we
	 * don't have to care about whether per cpu counters are
	 * enabled, disabled or even compiled in....
	 */
retry:
	s = XFS_SB_LOCK(mp);
	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool.
	 */

	fdblks_delta = 0;
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter  > 0) {		/* release unused blocks */
			mp->m_sb.sb_fdblocks += lcounter;
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
@@ -522,24 +540,50 @@ xfs_reserve_blocks(
		__int64_t	free;

		free =  mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		if (!free)
			goto out; /* ENOSPC and fdblks_delta = 0 */

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0) {
			/* We can't satisfy the request, just get what we can */
			mp->m_resblks += free;
			mp->m_resblks_avail += free;
			fdblks_delta = -free;
			mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
		} else {
			fdblks_delta = -delta;
			mp->m_sb.sb_fdblocks =
				lcounter + XFS_ALLOC_SET_ASIDE(mp);
			mp->m_resblks = request;
			mp->m_resblks_avail += delta;
		}
	}

out:
	outval->resblks = mp->m_resblks;
	outval->resblks_avail = mp->m_resblks_avail;
	XFS_SB_UNLOCK(mp, s);

	if (fdblks_delta) {
		/*
		 * If we are putting blocks back here, m_resblks_avail is
		 * already at its max so this will put it in the free pool.
		 *
		 * If we need space, we'll either succeed in getting it
		 * from the free block count or we'll get an enospc. If
		 * we get an ENOSPC, it means things changed while we were
		 * calculating fdblks_delta and so we should try again to
		 * see if there is anything left to reserve.
		 *
		 * Don't set the reserved flag here - we don't want to reserve
		 * the extra reserve blocks from the reserve.....
		 */
		int error;
		error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0);
		if (error == ENOSPC)
			goto retry;
	}

	return 0;
}

+3 −13
Original line number Diff line number Diff line
@@ -1979,8 +1979,8 @@ xfs_icsb_enable_counter(
	xfs_icsb_unlock_all_counters(mp);
}

STATIC void
xfs_icsb_sync_counters_int(
void
xfs_icsb_sync_counters_flags(
	xfs_mount_t	*mp,
	int		flags)
{
@@ -2012,17 +2012,7 @@ STATIC void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_int(mp, 0);
}

/*
 * lazy addition used for things like df, background sb syncs, etc
 */
void
xfs_icsb_sync_counters_lazy(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
	xfs_icsb_sync_counters_flags(mp, 0);
}

/*
+1 −1
Original line number Diff line number Diff line
@@ -307,7 +307,7 @@ typedef struct xfs_icsb_cnts {
#define XFS_ICSB_LAZY_COUNT	(1 << 1)	/* accuracy not needed */

extern int	xfs_icsb_init_counters(struct xfs_mount *);
extern void	xfs_icsb_sync_counters_lazy(struct xfs_mount *);
extern void	xfs_icsb_sync_counters_flags(struct xfs_mount *, int);

#else
#define xfs_icsb_init_counters(mp)	(0)
+1 −1
Original line number Diff line number Diff line
@@ -806,7 +806,7 @@ xfs_statvfs(

	statp->f_type = XFS_SB_MAGIC;

	xfs_icsb_sync_counters_lazy(mp);
	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
	s = XFS_SB_LOCK(mp);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;