Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 87407673 authored by Tom Tucker
Browse files

svcrdma: Use standard Linux lists for context cache



Replace the one-off linked list implementation used to implement the
context cache with the standard Linux list_head lists. Add a context
counter to catch resource leaks. A WARN_ON will be added later to
ensure that we've freed all contexts.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
parent 02e7452d
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -72,7 +72,7 @@ extern atomic_t rdma_stat_sq_prod;
 */
struct svc_rdma_op_ctxt {
	struct svc_rdma_op_ctxt *read_hdr;
	struct svc_rdma_op_ctxt *next;
	struct list_head free_list;
	struct xdr_buf arg;
	struct list_head dto_q;
	enum ib_wr_opcode wr_op;
@@ -104,7 +104,8 @@ struct svcxprt_rdma {

	struct ib_pd         *sc_pd;

	struct svc_rdma_op_ctxt  *sc_ctxt_head;
	atomic_t	     sc_ctxt_used;
	struct list_head     sc_ctxt_free;
	int		     sc_ctxt_cnt;
	int		     sc_ctxt_bump;
	int		     sc_ctxt_max;
+26 −21
Original line number Diff line number Diff line
@@ -103,8 +103,8 @@ static int rdma_bump_context_cache(struct svcxprt_rdma *xprt)
		spin_lock_bh(&xprt->sc_ctxt_lock);
		if (ctxt) {
			at_least_one = 1;
			ctxt->next = xprt->sc_ctxt_head;
			xprt->sc_ctxt_head = ctxt;
			INIT_LIST_HEAD(&ctxt->free_list);
			list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
		} else {
			/* kmalloc failed...give up for now */
			xprt->sc_ctxt_cnt--;
@@ -123,7 +123,7 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)

	while (1) {
		spin_lock_bh(&xprt->sc_ctxt_lock);
		if (unlikely(xprt->sc_ctxt_head == NULL)) {
		if (unlikely(list_empty(&xprt->sc_ctxt_free))) {
			/* Try to bump my cache. */
			spin_unlock_bh(&xprt->sc_ctxt_lock);

@@ -136,12 +136,15 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			continue;
		}
		ctxt = xprt->sc_ctxt_head;
		xprt->sc_ctxt_head = ctxt->next;
		ctxt = list_entry(xprt->sc_ctxt_free.next,
				  struct svc_rdma_op_ctxt,
				  free_list);
		list_del_init(&ctxt->free_list);
		spin_unlock_bh(&xprt->sc_ctxt_lock);
		ctxt->xprt = xprt;
		INIT_LIST_HEAD(&ctxt->dto_q);
		ctxt->count = 0;
		atomic_inc(&xprt->sc_ctxt_used);
		break;
	}
	return ctxt;
@@ -163,10 +166,11 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
				 ctxt->sge[i].addr,
				 ctxt->sge[i].length,
				 ctxt->direction);

	spin_lock_bh(&xprt->sc_ctxt_lock);
	ctxt->next = xprt->sc_ctxt_head;
	xprt->sc_ctxt_head = ctxt;
	list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
	spin_unlock_bh(&xprt->sc_ctxt_lock);
	atomic_dec(&xprt->sc_ctxt_used);
}

/* ib_cq event handler */
@@ -412,28 +416,29 @@ static void create_context_cache(struct svcxprt_rdma *xprt,
	xprt->sc_ctxt_max = ctxt_max;
	xprt->sc_ctxt_bump = ctxt_bump;
	xprt->sc_ctxt_cnt = 0;
	xprt->sc_ctxt_head = NULL;
	atomic_set(&xprt->sc_ctxt_used, 0);

	INIT_LIST_HEAD(&xprt->sc_ctxt_free);
	for (i = 0; i < ctxt_count; i++) {
		ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
		if (ctxt) {
			ctxt->next = xprt->sc_ctxt_head;
			xprt->sc_ctxt_head = ctxt;
			INIT_LIST_HEAD(&ctxt->free_list);
			list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
			xprt->sc_ctxt_cnt++;
		}
	}
}

static void destroy_context_cache(struct svc_rdma_op_ctxt *ctxt)
static void destroy_context_cache(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *next;
	if (!ctxt)
		return;

	do {
		next = ctxt->next;
	while (!list_empty(&xprt->sc_ctxt_free)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(xprt->sc_ctxt_free.next,
				  struct svc_rdma_op_ctxt,
				  free_list);
		list_del_init(&ctxt->free_list);
		kfree(ctxt);
		ctxt = next;
	} while (next);
	}
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
@@ -470,7 +475,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
				     reqs +
				     cma_xprt->sc_sq_depth +
				     RPCRDMA_MAX_THREADS + 1); /* max */
		if (!cma_xprt->sc_ctxt_head) {
		if (list_empty(&cma_xprt->sc_ctxt_free)) {
			kfree(cma_xprt);
			return NULL;
		}
@@ -976,7 +981,7 @@ static void svc_rdma_free(struct svc_xprt *xprt)
	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	destroy_context_cache(rdma->sc_ctxt_head);
	destroy_context_cache(rdma);
	kfree(rdma);
}