Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 16259e7d authored by Christoph Hellwig, committed by Nathan Scott
Browse files

[XFS] Endianness annotations for various allocator data structures



SGI-PV: 943272
SGI-Modid: xfs-linux:xfs-kern:201006a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
parent e2ed81fb
Loading
Loading
Loading
Loading
+28 −30
Original line number Diff line number Diff line
@@ -48,27 +48,26 @@ struct xfs_trans;
 * are > 64k, our value cannot be confused for an EFS superblock's.
 */

typedef struct xfs_agf
{
typedef struct xfs_agf {
	/*
	 * Common allocation group header information
	 */
	__uint32_t	agf_magicnum;	/* magic number == XFS_AGF_MAGIC */
	__uint32_t	agf_versionnum;	/* header version == XFS_AGF_VERSION */
	xfs_agnumber_t	agf_seqno;	/* sequence # starting from 0 */
	xfs_agblock_t	agf_length;	/* size in blocks of a.g. */
	__be32		agf_magicnum;	/* magic number == XFS_AGF_MAGIC */
	__be32		agf_versionnum;	/* header version == XFS_AGF_VERSION */
	__be32		agf_seqno;	/* sequence # starting from 0 */
	__be32		agf_length;	/* size in blocks of a.g. */
	/*
	 * Freespace information
	 */
	xfs_agblock_t	agf_roots[XFS_BTNUM_AGF];	/* root blocks */
	__uint32_t	agf_spare0;	/* spare field */
	__uint32_t	agf_levels[XFS_BTNUM_AGF];	/* btree levels */
	__uint32_t	agf_spare1;	/* spare field */
	__uint32_t	agf_flfirst;	/* first freelist block's index */
	__uint32_t	agf_fllast;	/* last freelist block's index */
	__uint32_t	agf_flcount;	/* count of blocks in freelist */
	xfs_extlen_t	agf_freeblks;	/* total free blocks */
	xfs_extlen_t	agf_longest;	/* longest free space */
	__be32		agf_roots[XFS_BTNUM_AGF];	/* root blocks */
	__be32		agf_spare0;	/* spare field */
	__be32		agf_levels[XFS_BTNUM_AGF];	/* btree levels */
	__be32		agf_spare1;	/* spare field */
	__be32		agf_flfirst;	/* first freelist block's index */
	__be32		agf_fllast;	/* last freelist block's index */
	__be32		agf_flcount;	/* count of blocks in freelist */
	__be32		agf_freeblks;	/* total free blocks */
	__be32		agf_longest;	/* longest free space */
} xfs_agf_t;

#define	XFS_AGF_MAGICNUM	0x00000001
@@ -96,31 +95,30 @@ typedef struct xfs_agf
 */
#define	XFS_AGI_UNLINKED_BUCKETS	64

typedef struct xfs_agi
{
typedef struct xfs_agi {
	/*
	 * Common allocation group header information
	 */
	__uint32_t	agi_magicnum;	/* magic number == XFS_AGI_MAGIC */
	__uint32_t	agi_versionnum;	/* header version == XFS_AGI_VERSION */
	xfs_agnumber_t	agi_seqno;	/* sequence # starting from 0 */
	xfs_agblock_t	agi_length;	/* size in blocks of a.g. */
	__be32		agi_magicnum;	/* magic number == XFS_AGI_MAGIC */
	__be32		agi_versionnum;	/* header version == XFS_AGI_VERSION */
	__be32		agi_seqno;	/* sequence # starting from 0 */
	__be32		agi_length;	/* size in blocks of a.g. */
	/*
	 * Inode information
	 * Inodes are mapped by interpreting the inode number, so no
	 * mapping data is needed here.
	 */
	xfs_agino_t	agi_count;	/* count of allocated inodes */
	xfs_agblock_t	agi_root;	/* root of inode btree */
	__uint32_t	agi_level;	/* levels in inode btree */
	xfs_agino_t	agi_freecount;	/* number of free inodes */
	xfs_agino_t	agi_newino;	/* new inode just allocated */
	xfs_agino_t	agi_dirino;	/* last directory inode chunk */
	__be32		agi_count;	/* count of allocated inodes */
	__be32		agi_root;	/* root of inode btree */
	__be32		agi_level;	/* levels in inode btree */
	__be32		agi_freecount;	/* number of free inodes */
	__be32		agi_newino;	/* new inode just allocated */
	__be32		agi_dirino;	/* last directory inode chunk */
	/*
	 * Hash table of inodes which have been unlinked but are
	 * still being referenced.
	 */
	xfs_agino_t	agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
	__be32		agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
} xfs_agi_t;

#define	XFS_AGI_MAGICNUM	0x00000001
@@ -201,8 +199,8 @@ typedef struct xfs_perag
	(MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp)))
#define	XFS_MIN_FREELIST(a,mp)		\
	(XFS_MIN_FREELIST_RAW(		\
		INT_GET((a)->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT), \
		INT_GET((a)->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT), mp))
		be32_to_cpu((a)->agf_levels[XFS_BTNUM_BNOi]), \
		be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
#define	XFS_MIN_FREELIST_PAG(pag,mp)	\
	(XFS_MIN_FREELIST_RAW(		\
		(uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
+68 −78
Original line number Diff line number Diff line
@@ -231,8 +231,8 @@ xfs_alloc_fix_minleft(
	if (args->minleft == 0)
		return 1;
	agf = XFS_BUF_TO_AGF(args->agbp);
	diff = INT_GET(agf->agf_freeblks, ARCH_CONVERT)
		+ INT_GET(agf->agf_flcount, ARCH_CONVERT)
	diff = be32_to_cpu(agf->agf_freeblks)
		+ be32_to_cpu(agf->agf_flcount)
		- args->len - args->minleft;
	if (diff >= 0)
		return 1;
@@ -307,7 +307,8 @@ xfs_alloc_fixup_trees(
			bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]);
			cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]);
			XFS_WANT_CORRUPTED_RETURN(
				INT_GET(bnoblock->bb_numrecs, ARCH_CONVERT) == INT_GET(cntblock->bb_numrecs, ARCH_CONVERT));
				be16_to_cpu(bnoblock->bb_numrecs) ==
				be16_to_cpu(cntblock->bb_numrecs));
		}
	}
#endif
@@ -493,21 +494,17 @@ xfs_alloc_trace_modagf(
		(void *)str,
		(void *)mp,
		(void *)(__psint_t)flags,
		(void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO],
						ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT],
						ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO],
						ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT],
						ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_freeblks, ARCH_CONVERT),
		(void *)(__psunsigned_t)INT_GET(agf->agf_longest, ARCH_CONVERT));
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_length),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest));
}

STATIC void
@@ -600,12 +597,12 @@ xfs_alloc_ag_vextent(
		if (!(args->wasfromfl)) {

			agf = XFS_BUF_TO_AGF(args->agbp);
			INT_MOD(agf->agf_freeblks, ARCH_CONVERT, -(args->len));
			be32_add(&agf->agf_freeblks, -(args->len));
			xfs_trans_agblocks_delta(args->tp,
						 -((long)(args->len)));
			args->pag->pagf_freeblks -= args->len;
			ASSERT(INT_GET(agf->agf_freeblks, ARCH_CONVERT)
				<= INT_GET(agf->agf_length, ARCH_CONVERT));
			ASSERT(be32_to_cpu(agf->agf_freeblks) <=
				be32_to_cpu(agf->agf_length));
			TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
			xfs_alloc_log_agf(args->tp, args->agbp,
						XFS_AGF_FREEBLKS);
@@ -711,8 +708,7 @@ xfs_alloc_ag_vextent_exact(
	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT, NULL, 0);
	ASSERT(args->agbno + args->len <=
		INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
			ARCH_CONVERT));
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
@@ -885,8 +881,7 @@ xfs_alloc_ag_vextent_near(
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		ltend = ltbno + ltlen;
		ASSERT(ltend <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
				ARCH_CONVERT));
		ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;
		if (!xfs_alloc_fix_minleft(args)) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
@@ -1241,8 +1236,7 @@ xfs_alloc_ag_vextent_near(
		ltlen, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltend);
	ASSERT(ltnew + rlen <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
		ARCH_CONVERT));
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	args->agbno = ltnew;
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
@@ -1405,8 +1399,7 @@ xfs_alloc_ag_vextent_size(
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(
		args->agbno + args->len <=
			INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
			ARCH_CONVERT),
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	TRACE_ALLOC("normal", args);
	return 0;
@@ -1454,8 +1447,8 @@ xfs_alloc_ag_vextent_small(
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
		 (INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_flcount,
			ARCH_CONVERT) > args->minleft)) {
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno)))
			goto error0;
		if (fbno != NULLAGBLOCK) {
@@ -1470,8 +1463,7 @@ xfs_alloc_ag_vextent_small(
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(
				args->agbno + args->len <=
				INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
					ARCH_CONVERT),
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			TRACE_ALLOC("freelist", args);
@@ -1745,12 +1737,12 @@ xfs_free_ag_extent(

		agf = XFS_BUF_TO_AGF(agbp);
		pag = &mp->m_perag[agno];
		INT_MOD(agf->agf_freeblks, ARCH_CONVERT, len);
		be32_add(&agf->agf_freeblks, len);
		xfs_trans_agblocks_delta(tp, len);
		pag->pagf_freeblks += len;
		XFS_WANT_CORRUPTED_GOTO(
			INT_GET(agf->agf_freeblks, ARCH_CONVERT)
				<= INT_GET(agf->agf_length, ARCH_CONVERT),
			be32_to_cpu(agf->agf_freeblks) <=
			be32_to_cpu(agf->agf_length),
			error0);
		TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
		xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
@@ -1897,18 +1889,18 @@ xfs_alloc_fix_freelist(
	 */
	agf = XFS_BUF_TO_AGF(agbp);
	need = XFS_MIN_FREELIST(agf, mp);
	delta = need > INT_GET(agf->agf_flcount, ARCH_CONVERT) ?
		(need - INT_GET(agf->agf_flcount, ARCH_CONVERT)) : 0;
	delta = need > be32_to_cpu(agf->agf_flcount) ?
		(need - be32_to_cpu(agf->agf_flcount)) : 0;
	/*
	 * If there isn't enough total or single-extent, reject it.
	 */
	longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
	longest = be32_to_cpu(agf->agf_longest);
	longest = (longest > delta) ? (longest - delta) :
		(INT_GET(agf->agf_flcount, ARCH_CONVERT) > 0 || longest > 0);
		(be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
	if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
	     (args->minleft &&
		(int)(INT_GET(agf->agf_freeblks, ARCH_CONVERT) +
		   INT_GET(agf->agf_flcount, ARCH_CONVERT) - need - args->total) <
		(int)(be32_to_cpu(agf->agf_freeblks) +
		   be32_to_cpu(agf->agf_flcount) - need - args->total) <
	     (int)args->minleft)) {
		xfs_trans_brelse(tp, agbp);
		args->agbp = NULL;
@@ -1917,7 +1909,7 @@ xfs_alloc_fix_freelist(
	/*
	 * Make the freelist shorter if it's too long.
	 */
	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) > need) {
	while (be32_to_cpu(agf->agf_flcount) > need) {
		xfs_buf_t	*bp;

		if ((error = xfs_alloc_get_freelist(tp, agbp, &bno)))
@@ -1944,9 +1936,9 @@ xfs_alloc_fix_freelist(
	/*
	 * Make the freelist longer if it's too short.
	 */
	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) < need) {
	while (be32_to_cpu(agf->agf_flcount) < need) {
		targs.agbno = 0;
		targs.maxlen = need - INT_GET(agf->agf_flcount, ARCH_CONVERT);
		targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
		/*
		 * Allocate as many blocks as possible at once.
		 */
@@ -2006,19 +1998,19 @@ xfs_alloc_get_freelist(
	 */
	mp = tp->t_mountp;
	if ((error = xfs_alloc_read_agfl(mp, tp,
			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
			be32_to_cpu(agf->agf_seqno), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	/*
	 * Get the block number and update the data structures.
	 */
	bno = INT_GET(agfl->agfl_bno[INT_GET(agf->agf_flfirst, ARCH_CONVERT)], ARCH_CONVERT);
	INT_MOD(agf->agf_flfirst, ARCH_CONVERT, 1);
	bno = INT_GET(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)], ARCH_CONVERT);
	be32_add(&agf->agf_flfirst, 1);
	xfs_trans_brelse(tp, agflbp);
	if (INT_GET(agf->agf_flfirst, ARCH_CONVERT) == XFS_AGFL_SIZE(mp))
	if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
		agf->agf_flfirst = 0;
	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
	INT_MOD(agf->agf_flcount, ARCH_CONVERT, -1);
	pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
	be32_add(&agf->agf_flcount, -1);
	xfs_trans_agflist_delta(tp, -1);
	pag->pagf_flcount--;
	TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
@@ -2033,7 +2025,7 @@ xfs_alloc_get_freelist(
	 * the freeing transaction must be pushed to disk NOW by forcing
	 * to disk all iclogs up that transaction's LSN.
	 */
	xfs_alloc_search_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);
	xfs_alloc_search_busy(tp, be32_to_cpu(agf->agf_seqno), bno, 1);
	return 0;
}

@@ -2111,18 +2103,18 @@ xfs_alloc_put_freelist(
	mp = tp->t_mountp;

	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
			be32_to_cpu(agf->agf_seqno), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	INT_MOD(agf->agf_fllast, ARCH_CONVERT, 1);
	if (INT_GET(agf->agf_fllast, ARCH_CONVERT) == XFS_AGFL_SIZE(mp))
	be32_add(&agf->agf_fllast, 1);
	if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
		agf->agf_fllast = 0;
	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
	INT_MOD(agf->agf_flcount, ARCH_CONVERT, 1);
	pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
	be32_add(&agf->agf_flcount, 1);
	xfs_trans_agflist_delta(tp, 1);
	pag->pagf_flcount++;
	ASSERT(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp));
	blockp = &agfl->agfl_bno[INT_GET(agf->agf_fllast, ARCH_CONVERT)];
	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
	blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
	INT_SET(*blockp, ARCH_CONVERT, bno);
	TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
@@ -2169,14 +2161,12 @@ xfs_alloc_read_agf(
	 */
	agf = XFS_BUF_TO_AGF(bp);
	agf_ok =
		INT_GET(agf->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC &&
		XFS_AGF_GOOD_VERSION(
			INT_GET(agf->agf_versionnum, ARCH_CONVERT)) &&
		INT_GET(agf->agf_freeblks, ARCH_CONVERT) <=
				INT_GET(agf->agf_length, ARCH_CONVERT) &&
		INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) &&
		INT_GET(agf->agf_fllast,  ARCH_CONVERT) < XFS_AGFL_SIZE(mp) &&
		INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp);
		be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
		XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
		be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp);
	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
			XFS_RANDOM_ALLOC_READ_AGF))) {
		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
@@ -2186,13 +2176,13 @@ xfs_alloc_read_agf(
	}
	pag = &mp->m_perag[agno];
	if (!pag->pagf_init) {
		pag->pagf_freeblks = INT_GET(agf->agf_freeblks, ARCH_CONVERT);
		pag->pagf_flcount = INT_GET(agf->agf_flcount, ARCH_CONVERT);
		pag->pagf_longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
		pag->pagf_levels[XFS_BTNUM_BNOi] =
			INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT);
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
		pag->pagf_levels[XFS_BTNUM_CNTi] =
			INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT);
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
		spinlock_init(&pag->pagb_lock, "xfspagb");
		pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS *
					sizeof(xfs_perag_busy_t), KM_SLEEP);
@@ -2200,13 +2190,13 @@ xfs_alloc_read_agf(
	}
#ifdef DEBUG
	else if (!XFS_FORCED_SHUTDOWN(mp)) {
		ASSERT(pag->pagf_freeblks == INT_GET(agf->agf_freeblks, ARCH_CONVERT));
		ASSERT(pag->pagf_flcount == INT_GET(agf->agf_flcount, ARCH_CONVERT));
		ASSERT(pag->pagf_longest == INT_GET(agf->agf_longest, ARCH_CONVERT));
		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
		       INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT));
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
		       INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT));
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
	}
#endif
	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF);
@@ -2455,7 +2445,7 @@ xfs_free_extent(
#ifdef DEBUG
	ASSERT(args.agbp != NULL);
	agf = XFS_BUF_TO_AGF(args.agbp);
	ASSERT(args.agbno + len <= INT_GET(agf->agf_length, ARCH_CONVERT));
	ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length));
#endif
	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno,
		len, 0);
+208 −205

File changed.

Preview size limit exceeded, changes collapsed.

+10 −5
Original line number Diff line number Diff line
@@ -38,13 +38,18 @@ struct xfs_mount;
/*
 * Data record/key structure
 */
typedef struct xfs_alloc_rec
{
typedef struct xfs_alloc_rec {
	__be32		ar_startblock;	/* starting block number */
	__be32		ar_blockcount;	/* count of free blocks */
} xfs_alloc_rec_t, xfs_alloc_key_t;

typedef struct xfs_alloc_rec_incore {
	xfs_agblock_t	ar_startblock;	/* starting block number */
	xfs_extlen_t	ar_blockcount;	/* count of free blocks */
} xfs_alloc_rec_t, xfs_alloc_key_t;
} xfs_alloc_rec_incore_t;

typedef xfs_agblock_t xfs_alloc_ptr_t;	/* btree pointer type */
/* btree pointer type */
typedef __be32 xfs_alloc_ptr_t;
/* btree block header type */
typedef	struct xfs_btree_sblock xfs_alloc_block_t;

+31 −31
Original line number Diff line number Diff line
@@ -2763,8 +2763,8 @@ xfs_bmap_btree_to_extents(
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) == 1);
	ASSERT(INT_GET(rblock->bb_numrecs, ARCH_CONVERT) == 1);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1);
	mp = ip->i_mount;
	pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes);
@@ -3207,11 +3207,11 @@ xfs_bmap_extents_to_btree(
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	INT_SET(block->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
	INT_SET(block->bb_level, ARCH_CONVERT, 1);
	INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
	INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
	INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
	block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
	block->bb_level = cpu_to_be16(1);
	block->bb_numrecs = cpu_to_be16(1);
	block->bb_leftsib = cpu_to_be64(NULLDFSBNO);
	block->bb_rightsib = cpu_to_be64(NULLDFSBNO);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
@@ -3264,10 +3264,10 @@ xfs_bmap_extents_to_btree(
	 * Fill in the child block.
	 */
	ablock = XFS_BUF_TO_BMBT_BLOCK(abp);
	INT_SET(ablock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
	ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
	ablock->bb_level = 0;
	INT_SET(ablock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
	INT_SET(ablock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
	ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
	ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
	arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) {
@@ -3277,8 +3277,8 @@ xfs_bmap_extents_to_btree(
			arp++; cnt++;
		}
	}
	INT_SET(ablock->bb_numrecs, ARCH_CONVERT, cnt);
	ASSERT(INT_GET(ablock->bb_numrecs, ARCH_CONVERT) == XFS_IFORK_NEXTENTS(ip, whichfork));
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	ablock->bb_numrecs = cpu_to_be16(cnt);
	/*
	 * Fill in the root key and pointer.
	 */
@@ -3292,7 +3292,7 @@ xfs_bmap_extents_to_btree(
	 * the root is at the right level.
	 */
	xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_bmbt_log_recs(cur, abp, 1, INT_GET(ablock->bb_numrecs, ARCH_CONVERT));
	xfs_bmbt_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork);
@@ -4371,8 +4371,8 @@ xfs_bmap_read_extents(
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
	level = INT_GET(block->bb_level, ARCH_CONVERT);
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
@@ -4415,7 +4415,7 @@ xfs_bmap_read_extents(
		xfs_extnum_t	num_recs;


		num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
		num_recs = be16_to_cpu(block->bb_numrecs);
		if (unlikely(i + num_recs > room)) {
			ASSERT(i + num_recs <= room);
			xfs_fs_cmn_err(CE_WARN, ip->i_mount,
@@ -4432,7 +4432,7 @@ xfs_bmap_read_extents(
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
		nextbno = be64_to_cpu(block->bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1);
		/*
@@ -4689,7 +4689,7 @@ xfs_bmapi(
	}
	if (wr && *firstblock == NULLFSBLOCK) {
		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
			minleft = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1;
			minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
		else
			minleft = 1;
	} else
@@ -5967,10 +5967,10 @@ xfs_check_block(
	xfs_bmbt_ptr_t		*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= INT_GET(block->bb_numrecs, ARCH_CONVERT);i++) {
	for( i = 1; i <= be16_to_cpu(block->bb_numrecs); i++) {
		dmxr = mp->m_bmap_dmxr[0];

		if (root) {
@@ -5995,7 +5995,7 @@ xfs_check_block(
			pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
				xfs_bmbt, block, i, dmxr);
		}
		for (j = i+1; j <= INT_GET(block->bb_numrecs, ARCH_CONVERT); j++) {
		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root) {
				thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz);
			} else {
@@ -6048,8 +6048,8 @@ xfs_bmap_check_leaf_extents(
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
	level = INT_GET(block->bb_level, ARCH_CONVERT);
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
@@ -6109,13 +6109,13 @@ xfs_bmap_check_leaf_extents(
		xfs_extnum_t	num_recs;


		num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
		num_recs = be16_to_cpu(block->bb_numrecs);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
		nextbno = be64_to_cpu(block->bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
@@ -6212,8 +6212,8 @@ xfs_bmap_count_blocks(
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
	level = INT_GET(block->bb_level, ARCH_CONVERT);
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
@@ -6258,14 +6258,14 @@ xfs_bmap_count_tree(

	if (--level) {
		/* Not at node above leafs, count this level of nodes */
		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
		nextbno = be64_to_cpu(block->bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
				0, &nbp, XFS_BMAP_BTREE_REF)))
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp);
			nextbno = INT_GET(nextblock->bb_rightsib, ARCH_CONVERT);
			nextbno = be64_to_cpu(nextblock->bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

@@ -6284,8 +6284,8 @@ xfs_bmap_count_tree(
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
			numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
			nextbno = be64_to_cpu(block->bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
				xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]);
			if (unlikely(xfs_bmap_disk_count_leaves(frp, numrecs, count) < 0)) {
Loading