Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4b8fdfec authored by Jie Liu's avatar Jie Liu Committed by Dave Chinner
Browse files

xfs: introduce xfs_bulkstat_ichunk_ra



From: Jie Liu <jeff.liu@oracle.com>

Introduce xfs_bulkstat_ichunk_ra() to loop over all clusters in the
next inode chunk, then perform readahead if there are any allocated
inodes in that cluster.

Refactor xfs_bulkstat() with it.

Signed-off-by: Jie Liu <jeff.liu@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent d4c27348
Loading
Loading
Loading
Loading
+32 −24
Original line number Original line Diff line number Diff line
@@ -172,6 +172,37 @@ xfs_bulkstat_one(
				    xfs_bulkstat_one_fmt, ubused, stat);
				    xfs_bulkstat_one_fmt, ubused, stat);
}
}


/*
 * Issue readahead for every inode cluster in the given incore inode
 * allocation btree record that holds at least one allocated inode.
 */
STATIC void
xfs_bulkstat_ichunk_ra(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irec)
{
	struct blk_plug			plug;
	xfs_agblock_t			cluster_agbno;
	int				cluster_blks;
	int				cluster_inodes;
	int				chunkidx;	/* inode chunk index */

	cluster_blks = xfs_icluster_size_fsb(mp);
	cluster_inodes = cluster_blks << mp->m_sb.sb_inopblog;
	cluster_agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);

	/* Batch the per-cluster readahead requests under one plug. */
	blk_start_plug(&plug);
	chunkidx = 0;
	while (chunkidx < XFS_INODES_PER_CHUNK) {
		/* Only read ahead clusters that contain allocated inodes. */
		if (xfs_inobt_maskn(chunkidx, cluster_inodes) &
		    ~irec->ir_free) {
			xfs_btree_reada_bufs(mp, agno, cluster_agbno,
					     cluster_blks,
					     &xfs_inode_buf_ops);
		}
		chunkidx += cluster_inodes;
		cluster_agbno += cluster_blks;
	}
	blk_finish_plug(&plug);
}

#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)


/*
/*
@@ -187,7 +218,6 @@ xfs_bulkstat(
	char			__user *ubuffer, /* buffer with inode stats */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
	int			*done)	/* 1 if there are more stats to get */
{
{
	xfs_agblock_t		agbno=0;/* allocation group block number */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agino_t		agino;	/* inode # in allocation group */
@@ -206,8 +236,6 @@ xfs_bulkstat(
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	xfs_ino_t		lastino; /* last inode number returned */
	int			blks_per_cluster; /* # of blocks per cluster */
	int			inodes_per_cluster;/* # of inodes per cluster */
	int			nirbuf;	/* size of irbuf */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			tmp;	/* result value from btree calls */
@@ -237,8 +265,6 @@ xfs_bulkstat(
	*done = 0;
	*done = 0;
	fmterror = 0;
	fmterror = 0;
	ubufp = ubuffer;
	ubufp = ubuffer;
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
	if (!irbuf)
		return -ENOMEM;
		return -ENOMEM;
@@ -347,25 +373,7 @@ xfs_bulkstat(
			 * Also start read-ahead now for this chunk.
			 * Also start read-ahead now for this chunk.
			 */
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				struct blk_plug	plug;
				xfs_bulkstat_ichunk_ra(mp, agno, &r);
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				blk_start_plug(&plug);
				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
				for (chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += inodes_per_cluster,
				     agbno += blks_per_cluster) {
					if (xfs_inobt_maskn(chunkidx,
					    inodes_per_cluster) & ~r.ir_free)
						xfs_btree_reada_bufs(mp, agno,
							agbno, blks_per_cluster,
							&xfs_inode_buf_ops);
				}
				blk_finish_plug(&plug);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp->ir_free = r.ir_free;