Commit a25e758c authored by Tejun Heo, committed by J. Bruce Fields

sunrpc/xprtrdma: clean up workqueue usage

* Create and use svc_rdma_wq instead of using the system workqueue and
  flush_scheduled_work().  A dedicated workqueue is needed as a
  flushing domain for rdma->sc_work, whose handler frees the structure
  that embeds the work item, so the work can never be flushed
  explicitly (see the sketch after the message body).

* Replace cancel_delayed_work() + flush_scheduled_work() with
  cancel_delayed_work_sync().

* Implement synchronous connect in xprt_rdma_connect() using
  flush_delayed_work() on the rdma_connect work instead of using
  flush_scheduled_work().

This is to prepare for the deprecation and removal of
flush_scheduled_work().
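
To make the first point concrete, here is a minimal sketch of the
self-destroying work pattern, with a hypothetical struct conn and
conn_wq standing in for svcxprt_rdma and svc_rdma_wq. Because the
handler frees the structure that embeds its own work_struct, the work
item cannot be flushed individually; the dedicated workqueue is the
only flushing domain that can wait for it.

#include <linux/workqueue.h>
#include <linux/slab.h>

static struct workqueue_struct *conn_wq;	/* plays the role of svc_rdma_wq */

struct conn {
	struct work_struct free_work;		/* plays the role of rdma->sc_work */
	/* ... */
};

static void conn_free_fn(struct work_struct *work)
{
	struct conn *c = container_of(work, struct conn, free_work);

	kfree(c);	/* frees the work item along with its container */
}

static void conn_release(struct conn *c)
{
	INIT_WORK(&c->free_work, conn_free_fn);
	queue_work(conn_wq, &c->free_work);	/* not schedule_work() */
}

/*
 * At module exit, destroy_workqueue(conn_wq) drains every pending
 * conn_free_fn() before the code is unloaded -- the guarantee that
 * flush_scheduled_work() used to provide via the system workqueue.
 */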

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 8f3a6de3
net/sunrpc/xprtrdma/svc_rdma.c  +10 −1

@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/sysctl.h>
+#include <linux/workqueue.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/svc_rdma.h>
@@ -74,6 +75,8 @@ atomic_t rdma_stat_sq_prod;
 struct kmem_cache *svc_rdma_map_cachep;
 struct kmem_cache *svc_rdma_ctxt_cachep;
 
+struct workqueue_struct *svc_rdma_wq;
+
 /*
  * This function implements reading and resetting an atomic_t stat
  * variable through read/write to a proc file. Any write to the file
@@ -231,7 +234,7 @@ static ctl_table svcrdma_root_table[] = {
 void svc_rdma_cleanup(void)
 {
 	dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
-	flush_scheduled_work();
+	destroy_workqueue(svc_rdma_wq);
 	if (svcrdma_table_header) {
 		unregister_sysctl_table(svcrdma_table_header);
 		svcrdma_table_header = NULL;
@@ -249,6 +252,11 @@ int svc_rdma_init(void)
 	dprintk("\tsq_depth         : %d\n",
 		svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT);
 	dprintk("\tmax_inline       : %d\n", svcrdma_max_req_size);
+
+	svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0);
+	if (!svc_rdma_wq)
+		return -ENOMEM;
+
 	if (!svcrdma_table_header)
 		svcrdma_table_header =
 			register_sysctl_table(svcrdma_root_table);
@@ -283,6 +291,7 @@ int svc_rdma_init(void)
 	kmem_cache_destroy(svc_rdma_map_cachep);
  err0:
 	unregister_sysctl_table(svcrdma_table_header);
+	destroy_workqueue(svc_rdma_wq);
 	return -ENOMEM;
 }
 MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
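
An aside on the call added above: alloc_workqueue() takes a name, a
flags word, and a max_active limit, and passing 0 for the last two
requests an ordinary workqueue with default concurrency. A minimal
sketch of the lifetime pairing this patch establishes (error handling
as in svc_rdma_init() above):

	svc_rdma_wq = alloc_workqueue("svc_rdma", 0 /* flags */, 0 /* default max_active */);
	if (!svc_rdma_wq)
		return -ENOMEM;

	/* ... and on every teardown path once created: */
	destroy_workqueue(svc_rdma_wq);	/* flushes pending work, then frees the queue */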
net/sunrpc/xprtrdma/svc_rdma_transport.c  +5 −1

@@ -45,6 +45,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/workqueue.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 #include <linux/sunrpc/svc_rdma.h>
@@ -90,6 +91,9 @@ struct svc_xprt_class svc_rdma_class = {
 /* WR context cache. Created in svc_rdma.c  */
 extern struct kmem_cache *svc_rdma_ctxt_cachep;
 
+/* Workqueue created in svc_rdma.c */
+extern struct workqueue_struct *svc_rdma_wq;
+
 struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 {
 	struct svc_rdma_op_ctxt *ctxt;
@@ -1187,7 +1191,7 @@ static void svc_rdma_free(struct svc_xprt *xprt)
 	struct svcxprt_rdma *rdma =
 		container_of(xprt, struct svcxprt_rdma, sc_xprt);
 	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
-	schedule_work(&rdma->sc_work);
+	queue_work(svc_rdma_wq, &rdma->sc_work);
 }
 
 static int svc_rdma_has_wspace(struct svc_xprt *xprt)
net/sunrpc/xprtrdma/transport.c  +2 −3

@@ -237,8 +237,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 
 	dprintk("RPC:       %s: called\n", __func__);
 
-	cancel_delayed_work(&r_xprt->rdma_connect);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&r_xprt->rdma_connect);
 
 	xprt_clear_connected(xprt);
 
@@ -448,7 +447,7 @@ xprt_rdma_connect(struct rpc_task *task)
 	} else {
 		schedule_delayed_work(&r_xprt->rdma_connect, 0);
 		if (!RPC_IS_ASYNC(task))
-			flush_scheduled_work();
+			flush_delayed_work(&r_xprt->rdma_connect);
 	}
 }
 
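
For reference, a minimal sketch of the two substitutions made in this
file, using a hypothetical struct xprt_ctx with a delayed_work member
standing in for rpcrdma_xprt and its rdma_connect work:

#include <linux/workqueue.h>

struct xprt_ctx {
	struct delayed_work connect_work;	/* stands in for r_xprt->rdma_connect */
};

static void ctx_destroy(struct xprt_ctx *ctx)
{
	/*
	 * Cancel a pending instance and wait for a running one in a
	 * single call; replaces the cancel_delayed_work() +
	 * flush_scheduled_work() pair, which also waited on unrelated
	 * work in the system workqueue.
	 */
	cancel_delayed_work_sync(&ctx->connect_work);
}

static void ctx_connect_sync(struct xprt_ctx *ctx)
{
	schedule_delayed_work(&ctx->connect_work, 0);

	/* Wait for this specific item only, not the whole system queue. */
	flush_delayed_work(&ctx->connect_work);
}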