
Commit 58d1dcf5 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Split rb_lock



/proc/lock_stat showed contention between rpcrdma_buffer_get/put
and the MR allocation functions during I/O intensive workloads.

Now that MRs are no longer allocated in rpcrdma_buffer_get(),
there's no reason the rb_mws list has to be managed using the
same lock as the send/receive buffers. Split that lock. The
new lock does not need to disable interrupts because buffer
get/put is never called in an interrupt context.
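
As context for the paragraph above, a minimal sketch (not part of this patch; demo_lock, demo_list, and both functions are hypothetical names) contrasting the two spinlock idioms: spin_lock_irqsave() is required when a lock may also be taken from interrupt context, while plain spin_lock() suffices, and is cheaper, when every caller runs in process context.

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static LIST_HEAD(demo_list);		/* hypothetical list it guards */

/* Needed if the lock may also be taken in interrupt context:
 * save IRQ state and disable interrupts around the critical section.
 * (Contention on such locks shows up in /proc/lock_stat when the
 * kernel is built with CONFIG_LOCK_STAT=y and statistics are enabled
 * via /proc/sys/kernel/lock_stat.)
 */
static void demo_add_irqsafe(struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_add_tail(item, &demo_list);
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Sufficient when, as argued above for MR get/put, no caller ever
 * runs in interrupt context: there is no IRQ state to save or restore.
 */
static void demo_add_process_ctx(struct list_head *item)
{
	spin_lock(&demo_lock);
	list_add_tail(item, &demo_list);
	spin_unlock(&demo_lock);
}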

struct rpcrdma_buffer is re-arranged to ensure rb_mwlock and rb_mws
are always in a different cacheline than rb_lock and the buffer
pointers.
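
The patch achieves this separation purely by member ordering: the MR fields come first and the buffer fields follow, far enough apart to land on different cachelines. A hypothetical sketch (demo_buffer and its members are invented names; the patch itself does not use this annotation) of the same intent stated explicitly with the kernel's ____cacheline_aligned_in_smp attribute:

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical illustration: the annotation starts buf_lock's group on
 * a fresh cacheline, so CPUs spinning on mw_lock never false-share
 * with CPUs touching buf_lock or the buffer indexes it guards.
 */
struct demo_buffer {
	spinlock_t		mw_lock;	/* guards mw_list */
	struct list_head	mw_list;

	spinlock_t		buf_lock ____cacheline_aligned_in_smp;
	u32			max_requests;	/* guarded by buf_lock */
};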

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Reviewed-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 7e53df11
net/sunrpc/xprtrdma/fmr_ops.c  +1 −0
@@ -65,6 +65,7 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_mw *r;
 	int i, rc;
 
+	spin_lock_init(&buf->rb_mwlock);
 	INIT_LIST_HEAD(&buf->rb_mws);
 	INIT_LIST_HEAD(&buf->rb_all);

net/sunrpc/xprtrdma/frwr_ops.c  +1 −0
@@ -266,6 +266,7 @@ frwr_op_init(struct rpcrdma_xprt *r_xprt)
 	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
 	int i;
 
+	spin_lock_init(&buf->rb_mwlock);
 	INIT_LIST_HEAD(&buf->rb_mws);
 	INIT_LIST_HEAD(&buf->rb_all);

net/sunrpc/xprtrdma/verbs.c  +4 −6
@@ -1173,15 +1173,14 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_mw *mw = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buf->rb_lock, flags);
+	spin_lock(&buf->rb_mwlock);
 	if (!list_empty(&buf->rb_mws)) {
 		mw = list_first_entry(&buf->rb_mws,
 				      struct rpcrdma_mw, mw_list);
 		list_del_init(&mw->mw_list);
 	}
-	spin_unlock_irqrestore(&buf->rb_lock, flags);
+	spin_unlock(&buf->rb_mwlock);
 
 	if (!mw)
 		pr_err("RPC:       %s: no MWs available\n", __func__);
@@ -1192,11 +1191,10 @@ void
 rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buf->rb_lock, flags);
+	spin_lock(&buf->rb_mwlock);
 	list_add_tail(&mw->mw_list, &buf->rb_mws);
-	spin_unlock_irqrestore(&buf->rb_lock, flags);
+	spin_unlock(&buf->rb_mwlock);
 }
 
 static void
net/sunrpc/xprtrdma/xprt_rdma.h  +9 −7
@@ -282,15 +282,17 @@ rpcr_to_rdmar(struct rpc_rqst *rqst)
  * One of these is associated with a transport instance
  */
 struct rpcrdma_buffer {
-	spinlock_t	rb_lock;	/* protects indexes */
-	u32		rb_max_requests;/* client max requests */
-	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
-	struct list_head rb_all;
-	int		rb_send_index;
+	spinlock_t		rb_mwlock;	/* protect rb_mws list */
+	struct list_head	rb_mws;
+	struct list_head	rb_all;
+	char			*rb_pool;
+
+	spinlock_t		rb_lock;	/* protect buf arrays */
+	u32			rb_max_requests;
+	int			rb_send_index;
+	int			rb_recv_index;
 	struct rpcrdma_req	**rb_send_bufs;
-	int		rb_recv_index;
 	struct rpcrdma_rep	**rb_recv_bufs;
-	char		*rb_pool;
 };
 #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)