[Reconstructed from garbled diff-viewer output — combined patch, both files; per-file hunks repeated below.]

--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -606,20 +606,20 @@ xfs_ialloc_ag_alloc(
 	uint16_t	allocmask = (uint16_t) -1; /* init. to full chunk */
 	struct xfs_inobt_rec_incore rec;
 	struct xfs_perag *pag;
 	int		do_sparse = 0;
 
-#ifdef DEBUG
-	/* randomly do sparse inode allocations */
-	if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb))
-		do_sparse = prandom_u32() & 1;
-#endif
-
 	memset(&args, 0, sizeof(args));
 	args.tp = tp;
 	args.mp = tp->t_mountp;
 	args.fsbno = NULLFSBLOCK;
+#ifdef DEBUG
+	/* randomly do sparse inode allocations */
+	if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
+	    args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks)
+		do_sparse = prandom_u32() & 1;
+#endif
 
 	/*
 	 * Locking will ensure that we don't have two callers in here
 	 * at one time.
@@ -768,6 +768,7 @@ sparse_alloc:
 			return error;
 
 		newlen = args.len << args.mp->m_sb.sb_inopblog;
+		ASSERT(newlen <= XFS_INODES_PER_CHUNK);
 		allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
 	}
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2244,6 +2244,7 @@ xfs_ifree_cluster(
 	int			inodes_per_cluster;
 	int			nbufs;
 	int			i, j;
+	int			ioffset;
 	xfs_daddr_t		blkno;
 	xfs_buf_t		*bp;
 	xfs_inode_t		*ip;
@@ -2264,9 +2265,9 @@ xfs_ifree_cluster(
 	 * physically allocated. Skip the cluster if an inode falls into
 	 * a sparse region.
 	 */
-	if ((xic->alloc & XFS_INOBT_MASK(inum - xic->first_ino)) == 0) {
-		ASSERT(((inum - xic->first_ino) % inodes_per_cluster) == 0);
+	ioffset = inum - xic->first_ino;
+	if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
+		ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
 		continue;
 	}
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -606,20 +606,20 @@ xfs_ialloc_ag_alloc(
 	uint16_t	allocmask = (uint16_t) -1; /* init. to full chunk */
 	struct xfs_inobt_rec_incore rec;
 	struct xfs_perag *pag;
 	int		do_sparse = 0;
 
-#ifdef DEBUG
-	/* randomly do sparse inode allocations */
-	if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb))
-		do_sparse = prandom_u32() & 1;
-#endif
-
 	memset(&args, 0, sizeof(args));
 	args.tp = tp;
 	args.mp = tp->t_mountp;
 	args.fsbno = NULLFSBLOCK;
+#ifdef DEBUG
+	/* randomly do sparse inode allocations */
+	if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
+	    args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks)
+		do_sparse = prandom_u32() & 1;
+#endif
 
 	/*
 	 * Locking will ensure that we don't have two callers in here
 	 * at one time.
@@ -768,6 +768,7 @@ sparse_alloc:
 			return error;
 
 		newlen = args.len << args.mp->m_sb.sb_inopblog;
+		ASSERT(newlen <= XFS_INODES_PER_CHUNK);
 		allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
 	}
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2244,6 +2244,7 @@ xfs_ifree_cluster(
 	int			inodes_per_cluster;
 	int			nbufs;
 	int			i, j;
+	int			ioffset;
 	xfs_daddr_t		blkno;
 	xfs_buf_t		*bp;
 	xfs_inode_t		*ip;
@@ -2264,9 +2265,9 @@ xfs_ifree_cluster(
 	 * physically allocated. Skip the cluster if an inode falls into
 	 * a sparse region.
 	 */
-	if ((xic->alloc & XFS_INOBT_MASK(inum - xic->first_ino)) == 0) {
-		ASSERT(((inum - xic->first_ino) % inodes_per_cluster) == 0);
+	ioffset = inum - xic->first_ino;
+	if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
+		ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
 		continue;
 	}