Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 47853e7f authored by Linus Torvalds
Browse files
parents 221fc10e 9e56904e
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -910,6 +910,14 @@ running once the system is up.
	nfsroot=	[NFS] nfs root filesystem for disk-less boxes.
			See Documentation/nfsroot.txt.

	nfs.callback_tcpport=
			[NFS] set the TCP port on which the NFSv4 callback
			channel should listen.

	nfs.idmap_cache_timeout=
			[NFS] set the maximum lifetime for idmapper cache
			entries.

	nmi_watchdog=	[KNL,BUGS=IA-32] Debugging features for SMP kernels

	no387		[BUGS=IA-32] Tells the kernel to use the 387 maths
+22 −19
Original line number Diff line number Diff line
@@ -26,11 +26,12 @@
static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static void	nlmclnt_unlock_callback(struct rpc_task *);
static void	nlmclnt_cancel_callback(struct rpc_task *);
static int	nlm_stat_to_errno(u32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
@@ -222,7 +223,6 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
		}
		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
		clnt->cl_intr = nfssrv->client->cl_intr;
		clnt->cl_chatty   = nfssrv->client->cl_chatty;
	}

	/* Keep the old signal mask */
@@ -399,8 +399,7 @@ in_grace_period:
/*
 * Generic NLM call, async version.
 */
int
nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
@@ -419,13 +418,12 @@ nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
	msg.rpc_proc = &clnt->cl_procinfo[proc];

        /* bootstrap and kick off the async RPC call */
        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);

	return status;
}

static int
nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
@@ -448,7 +446,7 @@ nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
	/* Increment host refcount */
	nlm_get_host(host);
        /* bootstrap and kick off the async RPC call */
        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
	if (status < 0)
		nlm_release_host(host);
	return status;
@@ -664,7 +662,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)

	if (req->a_flags & RPC_TASK_ASYNC) {
		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
					nlmclnt_unlock_callback);
					&nlmclnt_unlock_ops);
		/* Hrmf... Do the unlock early since locks_remove_posix()
		 * really expects us to free the lock synchronously */
		do_vfs_lock(fl);
@@ -692,10 +690,9 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
	return -ENOLCK;
}

static void
nlmclnt_unlock_callback(struct rpc_task *task)
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
	struct nlm_rqst	*req = data;
	int		status = req->a_res.status;

	if (RPC_ASSASSINATED(task))
@@ -722,6 +719,10 @@ die:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_done = nlmclnt_unlock_callback,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
@@ -750,8 +751,7 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)

	nlmclnt_setlockargs(req, fl);

	status = nlmclnt_async_call(req, NLMPROC_CANCEL,
					nlmclnt_cancel_callback);
	status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status < 0) {
		nlmclnt_release_lockargs(req);
		kfree(req);
@@ -765,10 +765,9 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
	return status;
}

static void
nlmclnt_cancel_callback(struct rpc_task *task)
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
	struct nlm_rqst	*req = data;

	if (RPC_ASSASSINATED(task))
		goto die;
@@ -807,6 +806,10 @@ retry_cancel:
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
+2 −2
Original line number Diff line number Diff line
@@ -177,7 +177,7 @@ nlm_bind_host(struct nlm_host *host)
	if ((clnt = host->h_rpcclnt) != NULL) {
		xprt = clnt->cl_xprt;
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			clnt->cl_port = 0;
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
@@ -217,7 +217,7 @@ nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		host->h_rpcclnt->cl_port = 0;
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}
+0 −1
Original line number Diff line number Diff line
@@ -123,7 +123,6 @@ nsm_create(void)
	if (IS_ERR(clnt))
		goto out_err;
	clnt->cl_softrtry = 1;
	clnt->cl_chatty   = 1;
	clnt->cl_oneshot  = 1;
	return clnt;

+2 −2
Original line number Diff line number Diff line
@@ -178,6 +178,8 @@ lockd(struct svc_rqst *rqstp)

	}

	flush_signals(current);

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
@@ -192,8 +194,6 @@ lockd(struct svc_rqst *rqstp)
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);

	flush_signals(current);

	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

Loading