[XFS] Fix block reservation mechanism.
The block reservation mechanism has been broken since the per-cpu superblock counters were introduced. Make the block reservation code work with the per-cpu counters by syncing the counters, snapshotting the amount of available space and then doing a modification of the counter state according to the result. Continue in a loop until we either have no space available or we reserve some space. SGI-PV: 956323 SGI-Modid: xfs-linux-melb:xfs-kern:27895a Signed-off-by: David Chinner <dgc@sgi.com> Signed-off-by: Christoph Hellwig <hch@infradead.org> Signed-off-by: Tim Shimmin <tes@sgi.com>
This commit is contained in:
committed by
Tim Shimmin
parent
20f4ebf2bf
commit
dbcabad19a
@@ -460,7 +460,7 @@ xfs_fs_counts(
|
|||||||
{
|
{
|
||||||
unsigned long s;
|
unsigned long s;
|
||||||
|
|
||||||
xfs_icsb_sync_counters_lazy(mp);
|
xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
|
||||||
s = XFS_SB_LOCK(mp);
|
s = XFS_SB_LOCK(mp);
|
||||||
cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
|
cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
|
||||||
cnt->freertx = mp->m_sb.sb_frextents;
|
cnt->freertx = mp->m_sb.sb_frextents;
|
||||||
@@ -491,7 +491,7 @@ xfs_reserve_blocks(
|
|||||||
__uint64_t *inval,
|
__uint64_t *inval,
|
||||||
xfs_fsop_resblks_t *outval)
|
xfs_fsop_resblks_t *outval)
|
||||||
{
|
{
|
||||||
__int64_t lcounter, delta;
|
__int64_t lcounter, delta, fdblks_delta;
|
||||||
__uint64_t request;
|
__uint64_t request;
|
||||||
unsigned long s;
|
unsigned long s;
|
||||||
|
|
||||||
@@ -504,17 +504,35 @@ xfs_reserve_blocks(
|
|||||||
}
|
}
|
||||||
|
|
||||||
request = *inval;
|
request = *inval;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* With per-cpu counters, this becomes an interesting
|
||||||
|
* problem. We need to work out if we are freeing or allocating
|
||||||
|
* blocks first, then we can do the modification as necessary.
|
||||||
|
*
|
||||||
|
* We do this under the XFS_SB_LOCK so that if we are near
|
||||||
|
* ENOSPC, we will hold out any changes while we work out
|
||||||
|
* what to do. This means that the amount of free space can
|
||||||
|
* change while we do this, so we need to retry if we end up
|
||||||
|
* trying to reserve more space than is available.
|
||||||
|
*
|
||||||
|
* We also use the xfs_mod_incore_sb() interface so that we
|
||||||
|
* don't have to care about whether per cpu counter are
|
||||||
|
* enabled, disabled or even compiled in....
|
||||||
|
*/
|
||||||
|
retry:
|
||||||
s = XFS_SB_LOCK(mp);
|
s = XFS_SB_LOCK(mp);
|
||||||
|
xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If our previous reservation was larger than the current value,
|
* If our previous reservation was larger than the current value,
|
||||||
* then move any unused blocks back to the free pool.
|
* then move any unused blocks back to the free pool.
|
||||||
*/
|
*/
|
||||||
|
fdblks_delta = 0;
|
||||||
if (mp->m_resblks > request) {
|
if (mp->m_resblks > request) {
|
||||||
lcounter = mp->m_resblks_avail - request;
|
lcounter = mp->m_resblks_avail - request;
|
||||||
if (lcounter > 0) { /* release unused blocks */
|
if (lcounter > 0) { /* release unused blocks */
|
||||||
mp->m_sb.sb_fdblocks += lcounter;
|
fdblks_delta = lcounter;
|
||||||
mp->m_resblks_avail -= lcounter;
|
mp->m_resblks_avail -= lcounter;
|
||||||
}
|
}
|
||||||
mp->m_resblks = request;
|
mp->m_resblks = request;
|
||||||
@@ -522,24 +540,50 @@ xfs_reserve_blocks(
|
|||||||
__int64_t free;
|
__int64_t free;
|
||||||
|
|
||||||
free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
|
free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
|
||||||
|
if (!free)
|
||||||
|
goto out; /* ENOSPC and fdblks_delta = 0 */
|
||||||
|
|
||||||
delta = request - mp->m_resblks;
|
delta = request - mp->m_resblks;
|
||||||
lcounter = free - delta;
|
lcounter = free - delta;
|
||||||
if (lcounter < 0) {
|
if (lcounter < 0) {
|
||||||
/* We can't satisfy the request, just get what we can */
|
/* We can't satisfy the request, just get what we can */
|
||||||
mp->m_resblks += free;
|
mp->m_resblks += free;
|
||||||
mp->m_resblks_avail += free;
|
mp->m_resblks_avail += free;
|
||||||
|
fdblks_delta = -free;
|
||||||
mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
|
mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
|
||||||
} else {
|
} else {
|
||||||
|
fdblks_delta = -delta;
|
||||||
mp->m_sb.sb_fdblocks =
|
mp->m_sb.sb_fdblocks =
|
||||||
lcounter + XFS_ALLOC_SET_ASIDE(mp);
|
lcounter + XFS_ALLOC_SET_ASIDE(mp);
|
||||||
mp->m_resblks = request;
|
mp->m_resblks = request;
|
||||||
mp->m_resblks_avail += delta;
|
mp->m_resblks_avail += delta;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
out:
|
||||||
outval->resblks = mp->m_resblks;
|
outval->resblks = mp->m_resblks;
|
||||||
outval->resblks_avail = mp->m_resblks_avail;
|
outval->resblks_avail = mp->m_resblks_avail;
|
||||||
XFS_SB_UNLOCK(mp, s);
|
XFS_SB_UNLOCK(mp, s);
|
||||||
|
|
||||||
|
if (fdblks_delta) {
|
||||||
|
/*
|
||||||
|
* If we are putting blocks back here, m_resblks_avail is
|
||||||
|
* already at its max so this will put it in the free pool.
|
||||||
|
*
|
||||||
|
* If we need space, we'll either succeed in getting it
|
||||||
|
* from the free block count or we'll get an enospc. If
|
||||||
|
* we get an ENOSPC, it means things changed while we were
|
||||||
|
* calculating fdblks_delta and so we should try again to
|
||||||
|
* see if there is anything left to reserve.
|
||||||
|
*
|
||||||
|
* Don't set the reserved flag here - we don't want to reserve
|
||||||
|
* the extra reserve blocks from the reserve.....
|
||||||
|
*/
|
||||||
|
int error;
|
||||||
|
error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0);
|
||||||
|
if (error == ENOSPC)
|
||||||
|
goto retry;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1979,8 +1979,8 @@ xfs_icsb_enable_counter(
|
|||||||
xfs_icsb_unlock_all_counters(mp);
|
xfs_icsb_unlock_all_counters(mp);
|
||||||
}
|
}
|
||||||
|
|
||||||
STATIC void
|
void
|
||||||
xfs_icsb_sync_counters_int(
|
xfs_icsb_sync_counters_flags(
|
||||||
xfs_mount_t *mp,
|
xfs_mount_t *mp,
|
||||||
int flags)
|
int flags)
|
||||||
{
|
{
|
||||||
@@ -2012,17 +2012,7 @@ STATIC void
|
|||||||
xfs_icsb_sync_counters(
|
xfs_icsb_sync_counters(
|
||||||
xfs_mount_t *mp)
|
xfs_mount_t *mp)
|
||||||
{
|
{
|
||||||
xfs_icsb_sync_counters_int(mp, 0);
|
xfs_icsb_sync_counters_flags(mp, 0);
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* lazy addition used for things like df, background sb syncs, etc
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
xfs_icsb_sync_counters_lazy(
|
|
||||||
xfs_mount_t *mp)
|
|
||||||
{
|
|
||||||
xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -307,7 +307,7 @@ typedef struct xfs_icsb_cnts {
|
|||||||
#define XFS_ICSB_LAZY_COUNT (1 << 1) /* accuracy not needed */
|
#define XFS_ICSB_LAZY_COUNT (1 << 1) /* accuracy not needed */
|
||||||
|
|
||||||
extern int xfs_icsb_init_counters(struct xfs_mount *);
|
extern int xfs_icsb_init_counters(struct xfs_mount *);
|
||||||
extern void xfs_icsb_sync_counters_lazy(struct xfs_mount *);
|
extern void xfs_icsb_sync_counters_flags(struct xfs_mount *, int);
|
||||||
|
|
||||||
#else
|
#else
|
||||||
#define xfs_icsb_init_counters(mp) (0)
|
#define xfs_icsb_init_counters(mp) (0)
|
||||||
|
@@ -806,7 +806,7 @@ xfs_statvfs(
|
|||||||
|
|
||||||
statp->f_type = XFS_SB_MAGIC;
|
statp->f_type = XFS_SB_MAGIC;
|
||||||
|
|
||||||
xfs_icsb_sync_counters_lazy(mp);
|
xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
|
||||||
s = XFS_SB_LOCK(mp);
|
s = XFS_SB_LOCK(mp);
|
||||||
statp->f_bsize = sbp->sb_blocksize;
|
statp->f_bsize = sbp->sb_blocksize;
|
||||||
lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
|
lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
|
||||||
|
Reference in New Issue
Block a user