
Commit a5b027e1 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Saving IRQs no longer needed for rb_lock



Now that RPC replies are processed in a workqueue, there's no need
to disable IRQs when managing send and receive buffers. This saves
noticeable overhead per RPC.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 2da9ab30
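
The rule this patch applies is the standard locking convention: spin_lock_irqsave() is needed only when a lock may also be taken from hard-IRQ context; once every path that takes rb_lock runs in process context (here, reply processing was moved to a workqueue), plain spin_lock() is safe and skips saving, disabling, and restoring interrupt state on each acquisition. A minimal before/after sketch, using a hypothetical demo_pool rather than the real rpcrdma_buffer, might look like this:

/* Hypothetical sketch, not the xprtrdma code: illustrates why the
 * _irqsave variant can be dropped once no lock taker runs in IRQ
 * context.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_pool {
	spinlock_t		lock;
	struct list_head	free;
};

/* Before: a taker could be an IRQ handler, so IRQs must be masked
 * while the lock is held to avoid deadlocking against ourselves. */
static void demo_put_irq_safe(struct demo_pool *pool,
			      struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_add_tail(item, &pool->free);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/* After: every taker runs in process context (e.g. a workqueue
 * handler), so the cheaper non-IRQ-disabling variant suffices. */
static void demo_put_process_ctx(struct demo_pool *pool,
				 struct list_head *item)
{
	spin_lock(&pool->lock);
	list_add_tail(item, &pool->free);
	spin_unlock(&pool->lock);
}

The trade-off is safety versus cost: if any taker of the lock could still run in hard-IRQ context, an interrupt arriving on the same CPU while the lock is held would deadlock, so the conversion is only correct because the workqueue change removed the last IRQ-context taker.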
net/sunrpc/xprtrdma/verbs.c: +10 −14
@@ -1063,24 +1063,23 @@ struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
 	struct rpcrdma_req *req;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buffers->rb_lock, flags);
+	spin_lock(&buffers->rb_lock);
 	if (list_empty(&buffers->rb_send_bufs))
 		goto out_reqbuf;
 	req = rpcrdma_buffer_get_req_locked(buffers);
 	if (list_empty(&buffers->rb_recv_bufs))
 		goto out_repbuf;
 	req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 	return req;
 
 out_reqbuf:
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 	pr_warn("RPC:       %s: out of request buffers\n", __func__);
 	return NULL;
 out_repbuf:
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 	pr_warn("RPC:       %s: out of reply buffers\n", __func__);
 	req->rl_reply = NULL;
 	return req;
@@ -1095,16 +1094,15 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 {
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
 	struct rpcrdma_rep *rep = req->rl_reply;
-	unsigned long flags;
 
 	req->rl_niovs = 0;
 	req->rl_reply = NULL;
 
-	spin_lock_irqsave(&buffers->rb_lock, flags);
+	spin_lock(&buffers->rb_lock);
 	list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
 	if (rep)
 		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 }
 
 /*
@@ -1115,12 +1113,11 @@ void
 rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
 {
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buffers->rb_lock, flags);
+	spin_lock(&buffers->rb_lock);
 	if (!list_empty(&buffers->rb_recv_bufs))
 		req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 }
 
 /*
@@ -1131,11 +1128,10 @@ void
 rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
 {
 	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buffers->rb_lock, flags);
+	spin_lock(&buffers->rb_lock);
 	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 }
 
 /*