Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 579aa9ca authored by Christoph Hellwig, committed by Lachlan McIlroy
Browse files

[XFS] shrink mrlock_t



The writer field is not needed for non-DEBUG builds so remove it. While
we're at it, also clean up the interface for the is-locked asserts to go
through an xfs_iget.c helper with an interface like the xfs_ilock routines,
to isolate the XFS codebase from mrlock internals. That way we can kill
mrlock_t entirely once rw_semaphores grow an is-locked facility. Also
remove unused flags to the ilock family of functions.

SGI-PV: 976035
SGI-Modid: xfs-linux-melb:xfs-kern:30902a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
parent eca450b7
Loading
Loading
Loading
Loading
+24 −36
Original line number Diff line number Diff line
@@ -20,29 +20,24 @@

#include <linux/rwsem.h>

/* Lock modes: MR_ACCESS = shared (read), MR_UPDATE = exclusive (write). */
enum { MR_NONE, MR_ACCESS, MR_UPDATE };

/*
 * Thin wrapper around an rw_semaphore.  mr_writer exists only in DEBUG
 * builds and records whether the lock is currently held for write;
 * reader state is not tracked at all.
 */
typedef struct {
	struct rw_semaphore	mr_lock;
#ifdef DEBUG
	int			mr_writer;
#endif
} mrlock_t;

/* Initialize an mrlock; DEBUG builds also clear the writer-tracking flag. */
#ifdef DEBUG
#define mrinit(mrp, name)	\
	do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
#else
#define mrinit(mrp, name)	\
	do { init_rwsem(&(mrp)->mr_lock); } while (0)
#endif

/* Legacy initializer interface; the t (type) and s arguments are unused. */
#define mrlock_init(mrp, t,n,s)	mrinit(mrp, n)
#define mrfree(mrp)		do { } while (0)

/* Acquire the lock in shared (read) mode; blocks until available. */
static inline void mraccess(mrlock_t *mrp)
{
	down_read(&mrp->mr_lock);
}

/*
 * Acquire the lock in exclusive (write) mode; blocks until available.
 *
 * mr_writer only exists in DEBUG builds (see mrlock_t), so the
 * bookkeeping store must be guarded by #ifdef DEBUG -- matching
 * mrupdate_nested() and mrtryupdate(); without the guard, non-DEBUG
 * builds fail to compile.  Set the flag only after the semaphore is
 * held so it never claims write ownership we do not have.
 */
static inline void mrupdate(mrlock_t *mrp)
{
	down_write(&mrp->mr_lock);
#ifdef DEBUG
	mrp->mr_writer = 1;
#endif
}

/* Shared acquire with a lockdep subclass annotation for nested locking. */
static inline void mraccess_nested(mrlock_t *mrp, int subclass)
{
	down_read_nested(&mrp->mr_lock, subclass);
@@ -51,10 +46,11 @@ static inline void mraccess_nested(mrlock_t *mrp, int subclass)
/* Exclusive acquire with a lockdep subclass annotation for nested locking. */
static inline void mrupdate_nested(mrlock_t *mrp, int subclass)
{
	down_write_nested(&mrp->mr_lock, subclass);
#ifdef DEBUG
	/* Record write ownership for the DEBUG-only is-locked checks. */
	mrp->mr_writer = 1;
#endif
}


/* Try to take the lock shared; returns 1 on success, 0 if contended. */
static inline int mrtryaccess(mrlock_t *mrp)
{
	return down_read_trylock(&mrp->mr_lock);
@@ -64,39 +60,31 @@ static inline int mrtryupdate(mrlock_t *mrp)
{
	/* Try to take the lock exclusive; returns 1 on success, 0 if contended. */
	if (!down_write_trylock(&mrp->mr_lock))
		return 0;
#ifdef DEBUG
	/* Semaphore acquired: record write ownership for DEBUG checks. */
	mrp->mr_writer = 1;
#endif
	return 1;
}

/*
 * Release a lock held in exclusive (write) mode.
 *
 * NOTE(review): leftover deleted lines from the pre-split mrunlock()
 * (which checked mr_writer at run time to choose up_write vs. up_read)
 * were interleaved here, leaving unbalanced braces.  With separate
 * _excl/_shared unlock entry points the run-time check is unnecessary.
 * Clear the DEBUG writer flag before releasing so the flag is never
 * observed set while the semaphore is free.
 */
static inline void mrunlock_excl(mrlock_t *mrp)
{
#ifdef DEBUG
	mrp->mr_writer = 0;
#endif
	up_write(&mrp->mr_lock);
}

/* Release a lock held in shared (read) mode. */
static inline void mrunlock_shared(mrlock_t *mrp)
{
	up_read(&mrp->mr_lock);
}

/*
 * Downgrade the lock from exclusive to shared mode without dropping it.
 * Clear the DEBUG writer flag first: after downgrade_write() the lock
 * is no longer held for write.
 */
static inline void mrdemote(mrlock_t *mrp)
{
#ifdef DEBUG
	mrp->mr_writer = 0;
#endif
	downgrade_write(&mrp->mr_lock);
}

#ifdef DEBUG
/*
 * Debug-only locked-assert helper.  Only writer state is tracked (reader
 * state is outside our visibility without platform-specific asm), so the
 * one query we can genuinely answer is "held for MR_UPDATE?".  All other
 * queries conservatively report "locked"; as a consequence !ismrlocked
 * can yield false positives and must not be used.
 */
static inline int ismrlocked(mrlock_t *mrp, int type)
{
	if (!mrp || type != MR_UPDATE)
		return 1;
	return mrp->mr_writer;
}
#endif

#endif /* __XFS_SUPPORT_MRLOCK_H__ */
+9 −12
Original line number Diff line number Diff line
@@ -394,7 +394,7 @@ xfs_zero_last_block(
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
@@ -425,14 +425,14 @@ xfs_zero_last_block(
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
@@ -465,8 +465,7 @@ xfs_zero_eof(
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
@@ -475,8 +474,7 @@ xfs_zero_eof(
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

@@ -507,8 +505,7 @@ xfs_zero_eof(
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
			ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);
@@ -532,7 +529,7 @@ xfs_zero_eof(
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
@@ -548,13 +545,13 @@ xfs_zero_eof(
		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
+2 −2
Original line number Diff line number Diff line
@@ -933,7 +933,7 @@ xfs_qm_dqget(
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		if (type == XFS_DQ_USER)
			ASSERT(ip->i_udquot == NULL);
		else
@@ -1088,7 +1088,7 @@ xfs_qm_dqget(
	xfs_qm_mplist_unlock(mp);
	XFS_DQ_HASH_UNLOCK(h);
 dqret:
	ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip));
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	xfs_dqtrace_entry(dqp, "DQGET DONE");
	*O_dqpp = dqp;
	return (0);
+11 −10
Original line number Diff line number Diff line
@@ -670,7 +670,7 @@ xfs_qm_dqattach_one(
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;
	/*
	 * See if we already have it in the inode itself. IO_idqpp is
@@ -874,7 +874,7 @@ xfs_qm_dqattach(
		return 0;

	ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
	       XFS_ISLOCKED_INODE_EXCL(ip));
	       xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (! (flags & XFS_QMOPT_ILOCKED))
		xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -888,7 +888,8 @@ xfs_qm_dqattach(
			goto done;
		nquotas++;
	}
	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_OQUOTA_ON(mp)) {
		error = XFS_IS_GQUOTA_ON(mp) ?
			xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
@@ -913,7 +914,7 @@ xfs_qm_dqattach(
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas == 2) {
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

@@ -956,7 +957,7 @@ xfs_qm_dqattach(

#ifdef QUOTADEBUG
	else
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}
@@ -1291,7 +1292,7 @@ xfs_qm_dqget_noattach(
	xfs_mount_t	*mp;
	xfs_dquot_t	*udqp, *gdqp;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	mp = ip->i_mount;
	udqp = NULL;
	gdqp = NULL;
@@ -1392,7 +1393,7 @@ xfs_qm_qino_alloc(
	 * Keep an extra reference to this quota inode. This inode is
	 * locked exclusively and joined to the transaction already.
	 */
	ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
	ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
	VN_HOLD(XFS_ITOV((*ip)));

	/*
@@ -2557,7 +2558,7 @@ xfs_qm_vop_chown(
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
@@ -2601,7 +2602,7 @@ xfs_qm_vop_chown_reserve(
	uint		delblks, blkflags, prjflags = 0;
	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;

	ASSERT(XFS_ISLOCKED_INODE(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	mp = ip->i_mount;
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

@@ -2711,7 +2712,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
	if (!XFS_IS_QUOTA_ON(tp->t_mountp))
		return;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));

	if (udqp) {
+0 −5
Original line number Diff line number Diff line
@@ -27,11 +27,6 @@
/* Number of dquots that fit in to a dquot block */
#define XFS_QM_DQPERBLK(mp)	((mp)->m_quotainfo->qi_dqperchunk)

#define XFS_ISLOCKED_INODE(ip)		(ismrlocked(&(ip)->i_lock, \
					    MR_UPDATE | MR_ACCESS) != 0)
#define XFS_ISLOCKED_INODE_EXCL(ip)	(ismrlocked(&(ip)->i_lock, \
					    MR_UPDATE) != 0)

#define XFS_DQ_IS_ADDEDTO_TRX(t, d)	((d)->q_transp == (t))

#define XFS_QI_MPLRECLAIMS(mp)	((mp)->m_quotainfo->qi_dqreclaims)
Loading