Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b53e675d authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Lachlan McIlroy
Browse files

[XFS] xlog_rec_header/xlog_rec_ext_header endianness annotations



Mostly trivial conversion with one exception: h_num_logops was kept in
native endian previously and only converted to big endian in xlog_sync,
but we always keep it big endian now. With today's CPUs' fast byteswap
instructions that's not an issue but the new variant keeps the code clean
and maintainable.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29821a

Signed-off-by: default avatarChristoph Hellwig <hch@infradead.org>
Signed-off-by: default avatarLachlan McIlroy <lachlan@sgi.com>
Signed-off-by: default avatarTim Shimmin <tes@sgi.com>
parent 67fcb7bf
Loading
Loading
Loading
Loading
+42 −48
Original line number Original line Diff line number Diff line
@@ -1227,12 +1227,12 @@ xlog_alloc_log(xfs_mount_t *mp,


		head = &iclog->ic_header;
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		memset(head, 0, sizeof(xlog_rec_header_t));
		INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		INT_SET(head->h_version, ARCH_CONVERT,
		head->h_version = cpu_to_be32(
			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
		INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		/* new fields */
		INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT);
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));




@@ -1378,7 +1378,7 @@ xlog_sync(xlog_t *log,
{
{
	xfs_caddr_t	dptr;		/* pointer to byte sized element */
	xfs_caddr_t	dptr;		/* pointer to byte sized element */
	xfs_buf_t	*bp;
	xfs_buf_t	*bp;
	int		i, ops;
	int		i;
	uint		count;		/* byte count of bwrite */
	uint		count;		/* byte count of bwrite */
	uint		count_init;	/* initial count before roundup */
	uint		count_init;	/* initial count before roundup */
	int		roundoff;       /* roundoff to BB or stripe */
	int		roundoff;       /* roundoff to BB or stripe */
@@ -1417,21 +1417,17 @@ xlog_sync(xlog_t *log,


	/* real byte length */
	/* real byte length */
	if (v2) {
	if (v2) {
		INT_SET(iclog->ic_header.h_len, 
		iclog->ic_header.h_len =
			ARCH_CONVERT,
			cpu_to_be32(iclog->ic_offset + roundoff);
			iclog->ic_offset + roundoff);
	} else {
	} else {
		INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
		iclog->ic_header.h_len =
			cpu_to_be32(iclog->ic_offset);
	}
	}


	/* put ops count in correct order */
	ops = iclog->ic_header.h_num_logops;
	INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);

	bp = iclog->ic_bp;
	bp = iclog->ic_bp;
	ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
	ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)));
	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));


	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));


@@ -1495,9 +1491,9 @@ xlog_sync(xlog_t *log,
		 * case, though.
		 * case, though.
		 */
		 */
		for (i = 0; i < split; i += BBSIZE) {
		for (i = 0; i < split; i += BBSIZE) {
			INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
			be32_add((__be32 *)dptr, 1);
			if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
			if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
				INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
				be32_add((__be32 *)dptr, 1);
			dptr += BBSIZE;
			dptr += BBSIZE;
		}
		}


@@ -1586,7 +1582,7 @@ xlog_state_finish_copy(xlog_t *log,
{
{
	spin_lock(&log->l_icloglock);
	spin_lock(&log->l_icloglock);


	iclog->ic_header.h_num_logops += record_cnt;
	be32_add(&iclog->ic_header.h_num_logops, record_cnt);
	iclog->ic_offset += copy_bytes;
	iclog->ic_offset += copy_bytes;


	spin_unlock(&log->l_icloglock);
	spin_unlock(&log->l_icloglock);
@@ -1813,7 +1809,7 @@ xlog_write(xfs_mount_t * mp,


	/* start_lsn is the first lsn written to. That's all we need. */
	/* start_lsn is the first lsn written to. That's all we need. */
	if (! *start_lsn)
	if (! *start_lsn)
	    *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
	    *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);


	/* This loop writes out as many regions as can fit in the amount
	/* This loop writes out as many regions as can fit in the amount
	 * of space which was allocated by xlog_state_get_iclog_space().
	 * of space which was allocated by xlog_state_get_iclog_space().
@@ -1983,7 +1979,8 @@ xlog_state_clean_log(xlog_t *log)
			 * We don't need to cover the dummy.
			 * We don't need to cover the dummy.
			 */
			 */
			if (!changed &&
			if (!changed &&
			   (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) {
			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
			   		XLOG_COVER_OPS)) {
				changed = 1;
				changed = 1;
			} else {
			} else {
				/*
				/*
@@ -2051,7 +2048,7 @@ xlog_get_lowest_lsn(
	lowest_lsn = 0;
	lowest_lsn = 0;
	do {
	do {
	    if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
	    if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
		lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT);
		lsn = be64_to_cpu(lsn_log->ic_header.h_lsn);
		if ((lsn && !lowest_lsn) ||
		if ((lsn && !lowest_lsn) ||
		    (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
		    (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
			lowest_lsn = lsn;
			lowest_lsn = lsn;
@@ -2152,11 +2149,9 @@ xlog_state_do_callback(
				 */
				 */


				lowest_lsn = xlog_get_lowest_lsn(log);
				lowest_lsn = xlog_get_lowest_lsn(log);
				if (lowest_lsn && (
				if (lowest_lsn &&
					XFS_LSN_CMP(
				    XFS_LSN_CMP(lowest_lsn,
						lowest_lsn,
				    		be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
					)<0)) {
					iclog = iclog->ic_next;
					iclog = iclog->ic_next;
					continue; /* Leave this iclog for
					continue; /* Leave this iclog for
						   * another thread */
						   * another thread */
@@ -2171,11 +2166,10 @@ xlog_state_do_callback(
				 * No one else can be here except us.
				 * No one else can be here except us.
				 */
				 */
				spin_lock(&log->l_grant_lock);
				spin_lock(&log->l_grant_lock);
				ASSERT(XFS_LSN_CMP(
				ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
						log->l_last_sync_lsn,
				       be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
				log->l_last_sync_lsn =
					)<=0);
					be64_to_cpu(iclog->ic_header.h_lsn);
				log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
				spin_unlock(&log->l_grant_lock);
				spin_unlock(&log->l_grant_lock);


				/*
				/*
@@ -2392,8 +2386,8 @@ xlog_state_get_iclog_space(xlog_t *log,
		xlog_tic_add_region(ticket,
		xlog_tic_add_region(ticket,
				    log->l_iclog_hsize,
				    log->l_iclog_hsize,
				    XLOG_REG_TYPE_LRHEADER);
				    XLOG_REG_TYPE_LRHEADER);
		INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
		INT_SET(head->h_lsn, ARCH_CONVERT,
		head->h_lsn = cpu_to_be64(
			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
		ASSERT(log->l_curr_block >= 0);
		ASSERT(log->l_curr_block >= 0);
	}
	}
@@ -2823,7 +2817,7 @@ xlog_state_release_iclog(xlog_t *log,
	    iclog->ic_state == XLOG_STATE_WANT_SYNC) {
	    iclog->ic_state == XLOG_STATE_WANT_SYNC) {
		sync++;
		sync++;
		iclog->ic_state = XLOG_STATE_SYNCING;
		iclog->ic_state = XLOG_STATE_SYNCING;
		INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn);
		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
		/* cycle incremented when incrementing curr_block */
		/* cycle incremented when incrementing curr_block */
	}
	}
@@ -2861,7 +2855,7 @@ xlog_state_switch_iclogs(xlog_t *log,
	if (!eventual_size)
	if (!eventual_size)
		eventual_size = iclog->ic_offset;
		eventual_size = iclog->ic_offset;
	iclog->ic_state = XLOG_STATE_WANT_SYNC;
	iclog->ic_state = XLOG_STATE_WANT_SYNC;
	INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block);
	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
	log->l_prev_block = log->l_curr_block;
	log->l_prev_block = log->l_curr_block;
	log->l_prev_cycle = log->l_curr_cycle;
	log->l_prev_cycle = log->l_curr_cycle;


@@ -2957,7 +2951,7 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
				 * the previous sync.
				 * the previous sync.
				 */
				 */
				iclog->ic_refcnt++;
				iclog->ic_refcnt++;
				lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
				lsn = be64_to_cpu(iclog->ic_header.h_lsn);
				xlog_state_switch_iclogs(log, iclog, 0);
				xlog_state_switch_iclogs(log, iclog, 0);
				spin_unlock(&log->l_icloglock);
				spin_unlock(&log->l_icloglock);


@@ -2965,7 +2959,7 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
					return XFS_ERROR(EIO);
					return XFS_ERROR(EIO);
				*log_flushed = 1;
				*log_flushed = 1;
				spin_lock(&log->l_icloglock);
				spin_lock(&log->l_icloglock);
				if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
				if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
				    iclog->ic_state != XLOG_STATE_DIRTY)
				    iclog->ic_state != XLOG_STATE_DIRTY)
					goto maybe_sleep;
					goto maybe_sleep;
				else
				else
@@ -3049,7 +3043,7 @@ xlog_state_sync(xlog_t *log,
    }
    }


    do {
    do {
	if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) {
	if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
		iclog = iclog->ic_next;
		iclog = iclog->ic_next;
		continue;
		continue;
	}
	}
@@ -3460,18 +3454,18 @@ xlog_verify_iclog(xlog_t *log,
	spin_unlock(&log->l_icloglock);
	spin_unlock(&log->l_icloglock);


	/* check log magic numbers */
	/* check log magic numbers */
	ptr = (xfs_caddr_t) &(iclog->ic_header);
	if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM)
	if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM)
		xlog_panic("xlog_verify_iclog: invalid magic num");
		xlog_panic("xlog_verify_iclog: invalid magic num");


	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count;
	ptr = (xfs_caddr_t) &iclog->ic_header;
	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
	     ptr += BBSIZE) {
	     ptr += BBSIZE) {
		if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
		if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
			xlog_panic("xlog_verify_iclog: unexpected magic num");
			xlog_panic("xlog_verify_iclog: unexpected magic num");
	}
	}


	/* check fields */
	/* check fields */
	len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT);
	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	ptr = iclog->ic_datap;
	ptr = iclog->ic_datap;
	base_ptr = ptr;
	base_ptr = ptr;
	ophead = (xlog_op_header_t *)ptr;
	ophead = (xlog_op_header_t *)ptr;
@@ -3512,9 +3506,9 @@ xlog_verify_iclog(xlog_t *log,
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
			} else {
				op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
			}
		}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
		ptr += sizeof(xlog_op_header_t) + op_len;
+2 −1
Original line number Original line Diff line number Diff line
@@ -22,8 +22,9 @@


#define CYCLE_LSN(lsn) ((uint)((lsn)>>32))
#define CYCLE_LSN(lsn) ((uint)((lsn)>>32))
#define BLOCK_LSN(lsn) ((uint)(lsn))
#define BLOCK_LSN(lsn) ((uint)(lsn))

/* this is used in a spot where we might otherwise double-endian-flip */
/* this is used in a spot where we might otherwise double-endian-flip */
#define CYCLE_LSN_DISK(lsn) (((uint *)&(lsn))[0])
#define CYCLE_LSN_DISK(lsn) (((__be32 *)&(lsn))[0])


#ifdef __KERNEL__
#ifdef __KERNEL__
/*
/*
+20 −20
Original line number Original line Diff line number Diff line
@@ -63,10 +63,10 @@ static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)


static inline uint xlog_get_cycle(char *ptr)
static inline uint xlog_get_cycle(char *ptr)
{
{
	if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
	if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
		return INT_GET(*((uint *)ptr + 1), ARCH_CONVERT);
		return be32_to_cpu(*((__be32 *)ptr + 1));
	else
	else
		return INT_GET(*(uint *)ptr, ARCH_CONVERT);
		return be32_to_cpu(*(__be32 *)ptr);
}
}


#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
@@ -85,9 +85,9 @@ static inline uint xlog_get_cycle(char *ptr)
 *
 *
 * this has endian issues, of course.
 * this has endian issues, of course.
 */
 */
static inline uint xlog_get_client_id(uint i)
static inline uint xlog_get_client_id(__be32 i)
{
{
	return INT_GET(i, ARCH_CONVERT) >> 24;
	return be32_to_cpu(i) >> 24;
}
}


#define xlog_panic(args...)	cmn_err(CE_PANIC, ## args)
#define xlog_panic(args...)	cmn_err(CE_PANIC, ## args)
@@ -287,25 +287,25 @@ typedef struct xlog_op_header {
#endif
#endif


typedef struct xlog_rec_header {
typedef struct xlog_rec_header {
	uint	  h_magicno;	/* log record (LR) identifier		:  4 */
	__be32	  h_magicno;	/* log record (LR) identifier		:  4 */
	uint	  h_cycle;	/* write cycle of log			:  4 */
	__be32	  h_cycle;	/* write cycle of log			:  4 */
	int	  h_version;	/* LR version				:  4 */
	__be32	  h_version;	/* LR version				:  4 */
	int	  h_len;	/* len in bytes; should be 64-bit aligned: 4 */
	__be32	  h_len;	/* len in bytes; should be 64-bit aligned: 4 */
	xfs_lsn_t h_lsn;	/* lsn of this LR			:  8 */
	__be64	  h_lsn;	/* lsn of this LR			:  8 */
	xfs_lsn_t h_tail_lsn;	/* lsn of 1st LR w/ buffers not committed: 8 */
	__be64	  h_tail_lsn;	/* lsn of 1st LR w/ buffers not committed: 8 */
	uint	  h_chksum;	/* may not be used; non-zero if used	:  4 */
	__be32	  h_chksum;	/* may not be used; non-zero if used	:  4 */
	int	  h_prev_block; /* block number to previous LR		:  4 */
	__be32	  h_prev_block; /* block number to previous LR		:  4 */
	int	  h_num_logops;	/* number of log operations in this LR	:  4 */
	__be32	  h_num_logops;	/* number of log operations in this LR	:  4 */
	uint	  h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
	__be32	  h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
	/* new fields */
	/* new fields */
	int       h_fmt;        /* format of log record                 :  4 */
	__be32    h_fmt;        /* format of log record                 :  4 */
	uuid_t	  h_fs_uuid;    /* uuid of FS                           : 16 */
	uuid_t	  h_fs_uuid;    /* uuid of FS                           : 16 */
	int       h_size;	/* iclog size				:  4 */
	__be32	  h_size;	/* iclog size				:  4 */
} xlog_rec_header_t;
} xlog_rec_header_t;


typedef struct xlog_rec_ext_header {
typedef struct xlog_rec_ext_header {
	uint	  xh_cycle;	/* write cycle of log			: 4 */
	__be32	  xh_cycle;	/* write cycle of log			: 4 */
	uint	  xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*	: 256 */
	__be32	  xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*	: 256 */
} xlog_rec_ext_header_t;
} xlog_rec_ext_header_t;


#ifdef __KERNEL__
#ifdef __KERNEL__
+56 −64
Original line number Original line Diff line number Diff line
@@ -198,7 +198,7 @@ xlog_header_check_dump(
	cmn_err(CE_DEBUG, "    log : uuid = ");
	cmn_err(CE_DEBUG, "    log : uuid = ");
	for (b = 0; b < 16; b++)
	for (b = 0; b < 16; b++)
		cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]);
		cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]);
	cmn_err(CE_DEBUG, ", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
	cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
}
}
#else
#else
#define xlog_header_check_dump(mp, head)
#define xlog_header_check_dump(mp, head)
@@ -212,14 +212,14 @@ xlog_header_check_recover(
	xfs_mount_t		*mp,
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
	xlog_rec_header_t	*head)
{
{
	ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);
	ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);


	/*
	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 * a dirty log created in IRIX.
	 */
	 */
	if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) {
	if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
		xlog_warn(
		xlog_warn(
	"XFS: dirty log written in incompatible format - can't recover");
	"XFS: dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		xlog_header_check_dump(mp, head);
@@ -245,7 +245,7 @@ xlog_header_check_mount(
	xfs_mount_t		*mp,
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
	xlog_rec_header_t	*head)
{
{
	ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);
	ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);


	if (uuid_is_nil(&head->h_fs_uuid)) {
	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		/*
@@ -447,8 +447,7 @@ xlog_find_verify_log_record(


		head = (xlog_rec_header_t *)offset;
		head = (xlog_rec_header_t *)offset;


		if (XLOG_HEADER_MAGIC_NUM ==
		if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
		    INT_GET(head->h_magicno, ARCH_CONVERT))
			break;
			break;


		if (!smallmem)
		if (!smallmem)
@@ -480,7 +479,7 @@ xlog_find_verify_log_record(
	 * record do we update last_blk.
	 * record do we update last_blk.
	 */
	 */
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		uint	h_size = INT_GET(head->h_size, ARCH_CONVERT);
		uint	h_size = be32_to_cpu(head->h_size);


		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
@@ -489,8 +488,8 @@ xlog_find_verify_log_record(
		xhdrs = 1;
		xhdrs = 1;
	}
	}


	if (*last_blk - i + extra_bblks
	if (*last_blk - i + extra_bblks !=
			!= BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs)
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;
		*last_blk = i;


out:
out:
@@ -823,8 +822,7 @@ xlog_find_tail(
		if ((error = xlog_bread(log, i, 1, bp)))
		if ((error = xlog_bread(log, i, 1, bp)))
			goto bread_err;
			goto bread_err;
		offset = xlog_align(log, i, 1, bp);
		offset = xlog_align(log, i, 1, bp);
		if (XLOG_HEADER_MAGIC_NUM ==
		if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
		    INT_GET(*(uint *)offset, ARCH_CONVERT)) {
			found = 1;
			found = 1;
			break;
			break;
		}
		}
@@ -841,7 +839,7 @@ xlog_find_tail(
				goto bread_err;
				goto bread_err;
			offset = xlog_align(log, i, 1, bp);
			offset = xlog_align(log, i, 1, bp);
			if (XLOG_HEADER_MAGIC_NUM ==
			if (XLOG_HEADER_MAGIC_NUM ==
			    INT_GET(*(uint*)offset, ARCH_CONVERT)) {
			    be32_to_cpu(*(__be32 *)offset)) {
				found = 2;
				found = 2;
				break;
				break;
			}
			}
@@ -855,7 +853,7 @@ xlog_find_tail(


	/* find blk_no of tail of log */
	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT));
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));


	/*
	/*
	 * Reset log values according to the state of the log when we
	 * Reset log values according to the state of the log when we
@@ -869,11 +867,11 @@ xlog_find_tail(
	 */
	 */
	log->l_prev_block = i;
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT);
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (found == 2)
	if (found == 2)
		log->l_curr_cycle++;
		log->l_curr_cycle++;
	log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT);
	log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
	log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT);
	log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
	log->l_grant_reserve_cycle = log->l_curr_cycle;
	log->l_grant_reserve_cycle = log->l_curr_cycle;
	log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
	log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
	log->l_grant_write_cycle = log->l_curr_cycle;
	log->l_grant_write_cycle = log->l_curr_cycle;
@@ -891,8 +889,8 @@ xlog_find_tail(
	 * unmount record rather than the block after it.
	 * unmount record rather than the block after it.
	 */
	 */
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		int	h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = INT_GET(rhead->h_version, ARCH_CONVERT);
		int	h_version = be32_to_cpu(rhead->h_version);


		if ((h_version & XLOG_VERSION_2) &&
		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
@@ -906,10 +904,10 @@ xlog_find_tail(
		hblks = 1;
		hblks = 1;
	}
	}
	after_umount_blk = (i + hblks + (int)
	after_umount_blk = (i + hblks + (int)
		BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize;
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
	tail_lsn = log->l_tail_lsn;
	tail_lsn = log->l_tail_lsn;
	if (*head_blk == after_umount_blk &&
	if (*head_blk == after_umount_blk &&
	    INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) {
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
		if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
			goto bread_err;
			goto bread_err;
@@ -1100,14 +1098,13 @@ xlog_add_record(
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;


	memset(buf, 0, BBSIZE);
	memset(buf, 0, BBSIZE);
	INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
	recp->h_cycle = cpu_to_be32(cycle);
	INT_SET(recp->h_version, ARCH_CONVERT,
	recp->h_version = cpu_to_be32(
			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
	INT_SET(recp->h_lsn, ARCH_CONVERT, xlog_assign_lsn(cycle, block));
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	INT_SET(recp->h_tail_lsn, ARCH_CONVERT,
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
		xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
}


@@ -2214,7 +2211,7 @@ xlog_recover_do_buffer_trans(
	 * overlap with future reads of those inodes.
	 * overlap with future reads of those inodes.
	 */
	 */
	if (XFS_DINODE_MAGIC ==
	if (XFS_DINODE_MAGIC ==
	    INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) &&
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
	    (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
		XFS_BUF_STALE(bp);
		XFS_BUF_STALE(bp);
@@ -2584,8 +2581,7 @@ xlog_recover_do_dquot_trans(
	/*
	/*
	 * This type of quotas was turned off, so ignore this record.
	 * This type of quotas was turned off, so ignore this record.
	 */
	 */
	type = INT_GET(recddq->d_flags, ARCH_CONVERT) &
	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
			(XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
	if (log->l_quotaoffs_flag & type)
		return (0);
		return (0);
@@ -2898,8 +2894,8 @@ xlog_recover_process_data(
	unsigned long		hash;
	unsigned long		hash;
	uint			flags;
	uint			flags;


	lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT);
	lp = dp + be32_to_cpu(rhead->h_len);
	num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT);
	num_logops = be32_to_cpu(rhead->h_num_logops);


	/* check the log format matches our own - else we can't recover */
	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
	if (xlog_header_check_recover(log->l_mp, rhead))
@@ -2922,7 +2918,7 @@ xlog_recover_process_data(
		if (trans == NULL) {		   /* not found; add new tid */
		if (trans == NULL) {		   /* not found; add new tid */
			if (ohead->oh_flags & XLOG_START_TRANS)
			if (ohead->oh_flags & XLOG_START_TRANS)
				xlog_recover_new_tid(&rhash[hash], tid,
				xlog_recover_new_tid(&rhash[hash], tid,
					INT_GET(rhead->h_lsn, ARCH_CONVERT));
					be64_to_cpu(rhead->h_lsn));
		} else {
		} else {
			ASSERT(dp + be32_to_cpu(ohead->oh_len) <= lp);
			ASSERT(dp + be32_to_cpu(ohead->oh_len) <= lp);
			flags = ohead->oh_flags & ~XLOG_END_TRANS;
			flags = ohead->oh_flags & ~XLOG_END_TRANS;
@@ -3313,16 +3309,16 @@ xlog_pack_data_checksum(
	int		size)
	int		size)
{
{
	int		i;
	int		i;
	uint		*up;
	__be32		*up;
	uint		chksum = 0;
	uint		chksum = 0;


	up = (uint *)iclog->ic_datap;
	up = (__be32 *)iclog->ic_datap;
	/* divide length by 4 to get # words */
	/* divide length by 4 to get # words */
	for (i = 0; i < (size >> 2); i++) {
	for (i = 0; i < (size >> 2); i++) {
		chksum ^= INT_GET(*up, ARCH_CONVERT);
		chksum ^= be32_to_cpu(*up);
		up++;
		up++;
	}
	}
	INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum);
	iclog->ic_header.h_chksum = cpu_to_be32(chksum);
}
}
#else
#else
#define xlog_pack_data_checksum(log, iclog, size)
#define xlog_pack_data_checksum(log, iclog, size)
@@ -3339,7 +3335,7 @@ xlog_pack_data(
{
{
	int			i, j, k;
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	int			size = iclog->ic_offset + roundoff;
	uint			cycle_lsn;
	__be32			cycle_lsn;
	xfs_caddr_t		dp;
	xfs_caddr_t		dp;
	xlog_in_core_2_t	*xhdr;
	xlog_in_core_2_t	*xhdr;


@@ -3350,8 +3346,8 @@ xlog_pack_data(
	dp = iclog->ic_datap;
	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size) &&
	for (i = 0; i < BTOBB(size) &&
		i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		iclog->ic_header.h_cycle_data[i] = *(uint *)dp;
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(uint *)dp = cycle_lsn;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
		dp += BBSIZE;
	}
	}


@@ -3360,8 +3356,8 @@ xlog_pack_data(
		for ( ; i < BTOBB(size); i++) {
		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp;
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(uint *)dp = cycle_lsn;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
			dp += BBSIZE;
		}
		}


@@ -3378,21 +3374,21 @@ xlog_unpack_data_checksum(
	xfs_caddr_t		dp,
	xfs_caddr_t		dp,
	xlog_t			*log)
	xlog_t			*log)
{
{
	uint			*up = (uint *)dp;
	__be32			*up = (__be32 *)dp;
	uint			chksum = 0;
	uint			chksum = 0;
	int			i;
	int			i;


	/* divide length by 4 to get # words */
	/* divide length by 4 to get # words */
	for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) {
	for (i=0; i < be32_to_cpu(rhead->h_len) >> 2; i++) {
		chksum ^= INT_GET(*up, ARCH_CONVERT);
		chksum ^= be32_to_cpu(*up);
		up++;
		up++;
	}
	}
	if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) {
	if (chksum != be32_to_cpu(rhead->h_chksum)) {
	    if (rhead->h_chksum ||
	    if (rhead->h_chksum ||
		((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
		((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
		    cmn_err(CE_DEBUG,
		    cmn_err(CE_DEBUG,
			"XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
			"XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
			    INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum);
			    be32_to_cpu(rhead->h_chksum), chksum);
		    cmn_err(CE_DEBUG,
		    cmn_err(CE_DEBUG,
"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
		    if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		    if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
@@ -3416,18 +3412,18 @@ xlog_unpack_data(
	int			i, j, k;
	int			i, j, k;
	xlog_in_core_2_t	*xhdr;
	xlog_in_core_2_t	*xhdr;


	for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) &&
	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(uint *)dp = *(uint *)&rhead->h_cycle_data[i];
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
		dp += BBSIZE;
	}
	}


	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		xhdr = (xlog_in_core_2_t *)rhead;
		xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) {
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
			dp += BBSIZE;
		}
		}
	}
	}
@@ -3443,24 +3439,21 @@ xlog_valid_rec_header(
{
{
	int			hlen;
	int			hlen;


	if (unlikely(
	if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
	    (INT_GET(rhead->h_magicno, ARCH_CONVERT) !=
			XLOG_HEADER_MAGIC_NUM))) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
				XFS_ERRLEVEL_LOW, log->l_mp);
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
		return XFS_ERROR(EFSCORRUPTED);
	}
	}
	if (unlikely(
	if (unlikely(
	    (!rhead->h_version ||
	    (!rhead->h_version ||
	    (INT_GET(rhead->h_version, ARCH_CONVERT) &
	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
			(~XLOG_VERSION_OKBITS)) != 0))) {
		xlog_warn("XFS: %s: unrecognised log version (%d).",
		xlog_warn("XFS: %s: unrecognised log version (%d).",
			__FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT));
			__FUNCTION__, be32_to_cpu(rhead->h_version));
		return XFS_ERROR(EIO);
		return XFS_ERROR(EIO);
	}
	}


	/* LR body must have data or it wouldn't have been written */
	/* LR body must have data or it wouldn't have been written */
	hlen = INT_GET(rhead->h_len, ARCH_CONVERT);
	hlen = be32_to_cpu(rhead->h_len);
	if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
	if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
				XFS_ERRLEVEL_LOW, log->l_mp);
				XFS_ERRLEVEL_LOW, log->l_mp);
@@ -3520,9 +3513,8 @@ xlog_do_recovery_pass(
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
		if (error)
			goto bread_err1;
			goto bread_err1;
		h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
		h_size = be32_to_cpu(rhead->h_size);
		if ((INT_GET(rhead->h_version, ARCH_CONVERT)
		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
				& XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
@@ -3559,7 +3551,7 @@ xlog_do_recovery_pass(
				goto bread_err2;
				goto bread_err2;


			/* blocks in data section */
			/* blocks in data section */
			bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			error = xlog_bread(log, blk_no + hblks, bblks, dbp);
			error = xlog_bread(log, blk_no + hblks, bblks, dbp);
			if (error)
			if (error)
				goto bread_err2;
				goto bread_err2;
@@ -3634,7 +3626,7 @@ xlog_do_recovery_pass(
			if (error)
			if (error)
				goto bread_err2;
				goto bread_err2;


			bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;
			blk_no += hblks;


			/* Read in data for log record */
			/* Read in data for log record */
@@ -3705,7 +3697,7 @@ xlog_do_recovery_pass(
			error = xlog_valid_rec_header(log, rhead, blk_no);
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
			if (error)
				goto bread_err2;
				goto bread_err2;
			bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
			if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
				goto bread_err2;
				goto bread_err2;
			offset = xlog_align(log, blk_no+hblks, bblks, dbp);
			offset = xlog_align(log, blk_no+hblks, bblks, dbp);