Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 34f52e35 authored by Trond Myklebust
Browse files

SUNRPC: Convert rpc_clnt->cl_users to a kref

parent c44fe705
Loading
Loading
Loading
Loading
+3 −9
Original line number Diff line number Diff line
@@ -161,15 +161,9 @@ nlm_destroy_host(struct nlm_host *host)
	 */
	nsm_unmonitor(host);

	if ((clnt = host->h_rpcclnt) != NULL) {
		if (atomic_read(&clnt->cl_users)) {
			printk(KERN_WARNING
				"lockd: active RPC handle\n");
			clnt->cl_dead = 1;
		} else {
			rpc_destroy_client(host->h_rpcclnt);
		}
	}
	clnt = host->h_rpcclnt;
	if (clnt != NULL)
		rpc_shutdown_client(clnt);
	kfree(host);
}

+1 −1
Original line number Diff line number Diff line
@@ -24,8 +24,8 @@ struct rpc_inode;
 * The high-level client handle
 */
struct rpc_clnt {
	struct kref		cl_kref;	/* Number of references */
	atomic_t		cl_count;	/* Number of clones */
	atomic_t		cl_users;	/* number of references */
	struct list_head	cl_clients;	/* Global list of clients */
	struct list_head	cl_tasks;	/* List of tasks */
	spinlock_t		cl_lock;	/* spinlock */
+28 −29
Original line number Diff line number Diff line
@@ -121,7 +121,6 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

@@ -157,6 +156,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	kref_init(&clnt->cl_kref);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;
@@ -272,10 +273,10 @@ rpc_clone_client(struct rpc_clnt *clnt)
	if (!new)
		goto out_no_clnt;
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_metrics = rpc_alloc_iostats(clnt);
	if (new->cl_metrics == NULL)
		goto out_no_stats;
	kref_init(&new->cl_kref);
	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
	if (err != 0)
		goto out_no_path;
@@ -311,40 +312,28 @@ rpc_clone_client(struct rpc_clnt *clnt)
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC:       shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));
	dprintk("RPC:       shutting down %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);

	while (atomic_read(&clnt->cl_users) > 0) {
	while (!list_empty(&clnt->cl_tasks)) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			!atomic_read(&clnt->cl_users), 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 * Free an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
static void
rpc_free_client(struct kref *kref)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	dprintk("RPC:       destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
@@ -368,23 +357,33 @@ rpc_destroy_client(struct rpc_clnt *clnt)
	clnt->cl_metrics = NULL;
	xprt_put(clnt->cl_xprt);
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC:       rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));
	dprintk("RPC:       rpc_release_client(%p)\n", clnt);

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	if (list_empty(&clnt->cl_tasks))
		wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
	kref_put(&clnt->cl_kref, rpc_free_client);
}

/*
 * Delete an RPC client
 *
 * Drops one cl_count (clone) reference.  If other clone references
 * remain, the client is left alive and 1 is returned.  On the last
 * clone reference the cl_kref reference is dropped as well, which may
 * invoke rpc_free_client() to free the client; 0 is returned in that
 * case.
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	kref_put(&clnt->cl_kref, rpc_free_client);
	return 0;
}

/**
+1 −1
Original line number Diff line number Diff line
@@ -344,7 +344,7 @@ rpc_info_open(struct inode *inode, struct file *file)
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			atomic_inc(&clnt->cl_users);
			kref_get(&clnt->cl_kref);
			m->private = clnt;
		} else {
			single_release(inode, file);
+2 −4
Original line number Diff line number Diff line
@@ -846,7 +846,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons
	task->tk_workqueue = rpciod_workqueue;

	if (clnt) {
		atomic_inc(&clnt->cl_users);
		kref_get(&clnt->cl_kref);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		if (!clnt->cl_intr)
@@ -898,9 +898,7 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc
cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			atomic_read(&clnt->cl_users), clnt->cl_oneshot);
		atomic_inc(&clnt->cl_users); /* pretend we were used ... */
		kref_get(&clnt->cl_kref); /* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;