Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a1954242 authored by Darrick J. Wong
Browse files

xfs: hoist inode cluster checks out of loop



Hoist the inode cluster checks out of the inobt record check loop into
a separate function in preparation for refactoring of that loop.  No
functional changes here; that's in the next patch.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
parent 22234c62
Loading
Loading
Loading
Loading
+65 −54
Original line number Diff line number Diff line
@@ -188,19 +188,19 @@ xchk_iallocbt_check_cluster_freemask(
	return 0;
}

/* Make sure the free mask is consistent with what the inodes think. */
/* Check an inode cluster. */
STATIC int
xchk_iallocbt_check_freemask(
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_dinode		*dip;
	struct xfs_buf			*bp;
	xfs_ino_t			fsino;
	xfs_agino_t			nr_inodes;
	xfs_agino_t			agino;
	unsigned int			nr_inodes;
	xfs_agino_t			chunkino;
	xfs_agino_t			clusterino;
	xfs_agblock_t			agbno;
@@ -212,9 +212,6 @@ xchk_iallocbt_check_freemask(
	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			mp->m_inodes_per_cluster);

	for (agino = irec->ir_startino;
	     agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
	     agino += mp->m_inodes_per_cluster) {
	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
	chunkino = agino - irec->ir_startino;
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
@@ -229,7 +226,7 @@ xchk_iallocbt_check_freemask(
	ir_holemask = (irec->ir_holemask & holemask);
	if (ir_holemask != holemask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			continue;
		return 0;
	}

	/* If any part of this is a hole, skip it. */
@@ -237,35 +234,49 @@ xchk_iallocbt_check_freemask(
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				mp->m_blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
			continue;
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
		imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
				agbno);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
	imap.im_boffset = 0;

		error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
				&dip, &bp, 0, 0);
		if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
				&error))
			continue;
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &bp, 0, 0);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return 0;

	/* Which inodes are free? */
	for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
			error = xchk_iallocbt_check_cluster_freemask(bs,
					fsino, chunkino, clusterino, irec, bp);
			if (error) {
		error = xchk_iallocbt_check_cluster_freemask(bs, fsino,
				chunkino, clusterino, irec, bp);
		if (error)
			break;
	}

	xfs_trans_brelse(bs->cur->bc_tp, bp);
	return error;
}
		}

		xfs_trans_brelse(bs->cur->bc_tp, bp);
/* Make sure the free mask is consistent with what the inodes think. */
STATIC int
xchk_iallocbt_check_freemask(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agino_t			agino;
	int				error = 0;

	for (agino = irec->ir_startino;
	     agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
	     agino += mp->m_inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, agino);
		if (error)
			break;
	}

	return error;