fs/Kconfig  +33 −37

@@ -1664,30 +1664,6 @@ config NFS_V4

Removes the NFS_DIRECTIO option entirely, so direct I/O support is no longer a
build-time choice for the NFS client.  The deleted entry read:

	config NFS_DIRECTIO
		bool "Allow direct I/O on NFS files"
		depends on NFS_FS
		help
		  This option enables applications to perform uncached I/O on files
		  in NFS file systems using the O_DIRECT open() flag.  When O_DIRECT
		  is set for a file, its data is not cached in the system's page
		  cache.  Data is moved to and from user-level application buffers
		  directly.  Unlike local disk-based file systems, NFS O_DIRECT has
		  no alignment restrictions.

		  Unless your program is designed to use O_DIRECT properly, you are
		  much better off allowing the NFS client to manage data caching for
		  you.  Misusing O_DIRECT can cause poor server performance or
		  network storms.  This kernel build option defaults OFF to avoid
		  exposing system administrators unwittingly to a potentially
		  hazardous feature.

		  For more details on NFS O_DIRECT, see fs/nfs/direct.c.

		  If unsure, say N.  This reduces the size of the NFS client, and
		  causes open() to return EINVAL if a file residing in NFS is
		  opened with the O_DIRECT flag.

The following config NFSD entry ("NFS server support", depends on INET) is
unchanged context.

@@ -1808,15 +1784,33 @@ config SUNRPC_XPRT_RDMA

Adds help text to the previously undocumented RDMA transport option
(tristate, depends on SUNRPC && INFINIBAND && EXPERIMENTAL,
default SUNRPC && INFINIBAND):

	help
	  This option enables an RPC client transport capability that
	  allows the NFS client to mount servers via an RDMA-enabled
	  transport.

	  To compile RPC client RDMA transport support as a module,
	  choose M here: the module will be called xprtrdma.

	  If unsure, say N.

Rewrites the SUNRPC_BIND34 entry: it gains an explicit "default n" and a
longer help text explaining why the new rpcbind protocol versions exist:

	config SUNRPC_BIND34
		bool "Support for rpcbind versions 3 & 4 (EXPERIMENTAL)"
		depends on SUNRPC && EXPERIMENTAL
		default n
		help
		  RPC requests over IPv6 networks require support for larger
		  addresses when performing an RPC bind.  Sun added support for
		  IPv6 addressing by creating two new versions of the rpcbind
		  protocol (RFC 1833).

		  This option enables support in the kernel RPC client for
		  querying rpcbind servers via versions 3 and 4 of the rpcbind
		  protocol.  The kernel automatically falls back to version 2
		  if a remote rpcbind service does not support versions 3 or 4.

		  By themselves, these new versions do not provide support for
		  RPC over IPv6, but the new protocol versions are necessary to
		  support it.

		  If unsure, say N to get traditional behavior (version 2 rpcbind
		  requests only).

@@ -1830,12 +1824,13 @@ config RPCSEC_GSS_KRB5

Replaces the old help text ("Provides for secure RPC calls by means of a
gss-api mechanism based on Kerberos V5. This is required for NFSv4.  Note:
Requires an auxiliary userspace daemon which may be found on
http://www.citi.umich.edu/projects/nfsv4/") with:

	help
	  Choose Y here to enable Secure RPC using the Kerberos version 5
	  GSS-API mechanism (RFC 1964).

	  Secure RPC calls with Kerberos require an auxiliary user-space
	  daemon which may be found in the Linux nfs-utils package
	  available from http://linux-nfs.org/.  In addition, user-space
	  Kerberos support should be installed.

	  If unsure, say N.

@@ -1849,11 +1844,12 @@ config RPCSEC_GSS_SPKM3

The SPKM3 entry gets the same treatment: the old "Provides for secure RPC
calls by means of a gss-api mechanism based on the SPKM3 public-key
mechanism" text and its CITI pointer become:

	help
	  Choose Y here to enable Secure RPC using the SPKM3 public key
	  GSS-API mechanism (RFC 2025).

	  Secure RPC calls with SPKM3 require an auxiliary userspace
	  daemon which may be found in the Linux nfs-utils package
	  available from http://linux-nfs.org/.

	  If unsure, say N.
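The removed NFS_DIRECTIO help text above is the only prose description here of
what O_DIRECT means for NFS, so a minimal user-space illustration may help.
This sketch is not part of the patch; the mount path is made up, and the
buffer alignment is only there for portability to local file systems, since
the help text notes that NFS O_DIRECT imposes no alignment restrictions:

	/* Illustrative only -- not part of the patch.  Uncached I/O on a
	 * file in an NFS mount using the O_DIRECT open flag. */
	#define _GNU_SOURCE		/* for O_DIRECT */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/mnt/nfs/data.bin";	/* hypothetical NFS path */
		void *buf;
		int fd;

		/* Align the buffer: local disk file systems demand it; NFS
		 * does not, but aligning keeps the example portable. */
		if (posix_memalign(&buf, 4096, 4096))
			return 1;
		memset(buf, 0, 4096);

		fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0644);
		if (fd < 0) {
			/* Kernels built without direct I/O support fail this
			 * open with EINVAL, as the removed help text describes. */
			perror("open(O_DIRECT)");
			return 1;
		}

		/* Data moves straight between this buffer and the server,
		 * bypassing the client's page cache. */
		if (write(fd, buf, 4096) < 0)
			perror("write");

		close(fd);
		free(buf);
		return 0;
	}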
fs/lockd/clntproc.c  +117 −67

@@ -155,8 +155,6 @@, @@ -168,22 +166,6 @@ and @@ -198,11 +180,6 @@ nlmclnt_proc()

nlmclnt_proc() no longer saves, masks and restores the caller's signal mask.
The "sigset_t oldset" and "unsigned long flags" locals are gone, along with
the block that took current->sighand->siglock, called sigfillset() and
recalc_sigpending(), and switched the request to RPC_TASK_ASYNC when a
PF_EXITING process was unlocking, as well as the matching restore of
current->blocked just before the dprintk("lockd: clnt proc returns %d\n",
status).  The IS_SETLK()/IS_SETLKW() handling of call->a_args.block is
unchanged.

@@ -221,6 +198,7 @@ and @@ -237,6 +215,8 @@ nlm_alloc_call()/nlm_release_call()

NLM requests are now reference counted:

	call = kzalloc(sizeof(*call), GFP_KERNEL);
	if (call != NULL) {
		atomic_set(&call->a_count, 1);
		locks_init_lock(&call->a_args.lock.fl);
		...

	void nlm_release_call(struct nlm_rqst *call)
	{
		if (!atomic_dec_and_test(&call->a_count))
			return;
		nlm_release_host(call->a_host);
		nlmclnt_release_lockargs(call);
		kfree(call);
	}

@@ -267,7 +247,7 @@ and @@ -276,6 +256,7 @@ nlmclnt_call()

The synchronous call helper now takes an RPC credential and attaches it to
the request:

	static int
	nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
	{
		...
		struct rpc_message msg = {
			.rpc_argp	= argp,
			.rpc_resp	= resp,
			.rpc_cred	= cred,
		};

@@ -343,10 +324,16 @@ and @@ -356,21 +343,36 @@ __nlm_async_call()

The generic async helper is converted from rpc_call_async() to
rpc_run_task() and returns the rpc_task (or an ERR_PTR) instead of an int:

	static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc,
			struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
	{
		struct nlm_host	*host = req->a_host;
		struct rpc_clnt	*clnt;
		struct rpc_task_setup task_setup_data = {
			.rpc_message = msg,
			.callback_ops = tk_ops,
			.callback_data = req,
			.flags = RPC_TASK_ASYNC,
		};

		dprintk("lockd: call procedure %d on %s (async)\n",
				(int)proc, host->h_name);
		...
		if (clnt == NULL)
			goto out_err;
		msg->rpc_proc = &clnt->cl_procinfo[proc];
		task_setup_data.rpc_client = clnt;

		/* bootstrap and kick off the async RPC call */
		return rpc_run_task(&task_setup_data);
	out_err:
		tk_ops->rpc_release(req);
		return ERR_PTR(-ENOLCK);
	}

Two wrappers sit on top of it.  nlm_do_async_call() fires and forgets,
simply dropping the task reference, and is what nlm_async_call() and
nlm_async_reply() now use; the new nlmclnt_async_call() additionally waits
for the task to complete:

	static int nlm_do_async_call(struct nlm_rqst *req, u32 proc,
			struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
	{
		struct rpc_task	*task;

		task = __nlm_async_call(req, proc, msg, tk_ops);
		if (IS_ERR(task))
			return PTR_ERR(task);
		rpc_put_task(task);
		return 0;
	}

	/*
	 * NLM client asynchronous call.
	 *
	 * Note that although the calls are asynchronous, and are therefore
	 *      guaranteed to complete, we still always attempt to wait for
	 *      completion in order to be able to correctly track the lock
	 *      state.
	 */
	static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req,
			u32 proc, const struct rpc_call_ops *tk_ops)
	{
		struct rpc_message msg = {
			.rpc_argp	= &req->a_args,
			.rpc_resp	= &req->a_res,
			.rpc_cred	= cred,
		};
		struct rpc_task *task;
		int err;

		task = __nlm_async_call(req, proc, &msg, tk_ops);
		if (IS_ERR(task))
			return PTR_ERR(task);
		err = rpc_wait_for_completion_task(task);
		rpc_put_task(task);
		return err;
	}

@@ -389,7 +417,7 @@ nlmclnt_test()

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);

@@ -480,10 +508,12 @@ through @@ -530,20 +571,34 @@ nlmclnt_lock()

The lock path picks up the file's credential
("struct rpc_cred *cred = nfs_file_cred(fl->fl_file);") and an
"unsigned char fl_type" local.  fl->fl_flags is restored immediately after
the FL_ACCESS probe of do_vfs_lock().  Before the retry loop, resp->status is
initialised to nlm_lck_blocked ("a valid non-zero value, since
0 == nlm_lck_granted").  Inside the loop, a failed
nlmclnt_call(cred, req, NLMPROC_LOCK) or nlmclnt_block() now breaks out
instead of jumping to out_unblock, and the old "if (!req->a_args.block)
break;" shortcut is dropped.  Cancelling an interrupted blocking request
moves out of the out_unblock label into explicit post-loop handling:

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out_unblock;
	}

In the nlm_granted branch, FL_SLEEP is OR-ed into the already-restored flags
("fl->fl_flags |= FL_SLEEP;"), the flags are restored again after
do_vfs_lock(), and status is cleared to 0.  A new out_unlock label handles
fatal errors by undoing the VFS lock and issuing an asynchronous UNLOCK:

	out_unlock:
		/* Fatal error: ensure that we remove the lock altogether */
		dprintk("lockd: lock attempt ended in fatal error.\n"
			"       Attempting to unlock.\n");
		nlmclnt_finish_block(block);
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		down_read(&host->h_rwsem);
		do_vfs_lock(fl);
		up_read(&host->h_rwsem);
		fl->fl_type = fl_type;
		fl->fl_flags = fl_flags;
		nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
		return status;

@@ -567,8 +622,8 @@ nlmclnt_reclaim()

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

@@ -598,7 +653,8 @@ and @@ -607,16 +663,17 @@ nlmclnt_unlock()

The unlock path saves and restores fl->fl_flags around the FL_EXISTS
do_vfs_lock() probe, treats -ENOENT as success, drops the old
RPC_TASK_ASYNC special case, and always issues the UNLOCK through the new
waiting helper, taking an extra request reference first:

	atomic_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

@@ -671,16 +728,10 @@ and @@ -690,13 +741,12 @@ nlmclnt_cancel()

The signal blocking and restoring around the CANCEL call is removed;
instead the function logs why it is running and uses the same
reference/async pattern as unlock:

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");
	...
	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	atomic_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlm_release_call(req);
	return status;
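The atomic_inc() before each nlmclnt_async_call() above follows from the new
nlm_rqst reference count: both the RPC completion path (through the
rpc_call_ops release handler) and the caller end up in nlm_release_call(), so
a caller that wants to inspect req->a_res after waiting must hold its own
reference.  A minimal sketch of that contract, using a hypothetical caller
("example_unlock_rpc" is not part of the patch) placed in
fs/lockd/clntproc.c so it can reach the static helpers, and assuming the
release handler drops exactly one reference as the patch's own callers rely
on:

	/*
	 * Sketch only: the reference-counting contract around
	 * nlmclnt_async_call().
	 */
	static int example_unlock_rpc(struct nlm_host *host, struct file_lock *fl)
	{
		struct nlm_rqst *req;
		int status;

		req = nlm_alloc_call(nlm_get_host(host));	/* a_count == 1 */
		if (req == NULL)
			return -ENOMEM;
		nlmclnt_setlockargs(req, fl);

		/*
		 * One reference belongs to the RPC completion path (dropped
		 * by tk_ops->rpc_release); one belongs to us so req->a_res
		 * is still valid after the wait below.
		 */
		atomic_inc(&req->a_count);			/* a_count == 2 */
		status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
					    NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
		if (status == 0)
			status = nlm_stat_to_errno(req->a_res.status);

		nlm_release_call(req);				/* drop our reference */
		return status;
	}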
fs/lockd/host.c  +11 −9

@@ -42,9 +42,10 @@ and @@ -55,7 +56,7 @@ nlm_lookup_host()

The common host lookup routine's declaration is reflowed and its "version"
parameter becomes u32 instead of int; the dprintk() format switches from
"v=%d" to "v=%u" to match:

	static struct nlm_host *nlm_lookup_host(int server,
						const struct sockaddr_in *sin,
						int proto, u32 version,
						const char *hostname,
						unsigned int hostname_len,
						const struct sockaddr_in *ssin)

	dprintk("lockd: nlm_lookup_host("NIPQUAD_FMT"->"NIPQUAD_FMT
			", p=%d, v=%u, my role=%s, name=%.*s)\n", ...);

@@ -175,9 +176,10 @@ nlmclnt_lookup_host()

The client-side wrapper gets the same treatment:

	struct nlm_host *nlmclnt_lookup_host(const struct sockaddr_in *sin,
					     int proto, u32 version,
					     const char *hostname,
					     unsigned int hostname_len)
fs/lockd/mon.c  +87 −26

@@ -18,6 +18,8 @@

A buffer-length constant is added for the textual address form of mon_name:

	#define XDR_ADDRBUF_LEN	(20)

@@ -147,28 +149,55 @@ and @@ -176,18 +205,48 @@ NSM XDR encoders

The block comment now points at http://www.opengroup.org/ for details on the
Network Status Monitor wire protocol, and the old all-in-one
xdr_encode_common() is split into one helper per protocol argument:

	static __be32 *xdr_encode_nsm_string(__be32 *p, char *string)
	{
		size_t len = strlen(string);

		if (len > SM_MAXSTRLEN)
			len = SM_MAXSTRLEN;
		return xdr_encode_opaque(p, string, len);
	}

	/*
	 * "mon_name" specifies the host to be monitored.
	 *
	 * Linux uses a text version of the IP address of the remote
	 * host as the host identifier (the "mon_name" argument).
	 *
	 * Linux statd always looks up the canonical hostname first for
	 * whatever remote hostname it receives, so this works alright.
	 */
	static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
	{
		char	buffer[XDR_ADDRBUF_LEN + 1];
		char	*name = argp->mon_name;

		if (!nsm_use_hostnames) {
			snprintf(buffer, XDR_ADDRBUF_LEN,
				 NIPQUAD_FMT, NIPQUAD(argp->addr));
			name = buffer;
		}

		return xdr_encode_nsm_string(p, name);
	}

	/*
	 * The "my_id" argument specifies the hostname and RPC procedure
	 * to be called when the status manager receives notification
	 * (via the SM_NOTIFY call) that the state of host "mon_name"
	 * has changed.
	 */
	static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
	{
		p = xdr_encode_nsm_string(p, utsname()->nodename);
		if (!p)
			return ERR_PTR(-EIO);

		*p++ = htonl(argp->prog);
		*p++ = htonl(argp->vers);
		*p++ = htonl(argp->proc);

		return p;
	}

	/*
	 * The "mon_id" argument specifies the non-private arguments
	 * of an SM_MON or SM_UNMON call.
	 */
	static __be32 *xdr_encode_mon_id(__be32 *p, struct nsm_args *argp)
	{
		p = xdr_encode_mon_name(p, argp);
		if (!p)
			return ERR_PTR(-EIO);

		return xdr_encode_my_id(p, argp);
	}

	/*
	 * The "priv" argument may contain private information required
	 * by the SM_MON call.  This information will be supplied in the
	 * SM_NOTIFY call.
	 *
	 * Linux provides the raw IP address of the monitored host,
	 * left in network byte order.
	 */
	static __be32 *xdr_encode_priv(__be32 *p, struct nsm_args *argp)
	{
		*p++ = argp->addr;
		*p++ = 0;
		*p++ = 0;
		*p++ = 0;

		return p;
	}

xdr_encode_mon() now encodes the mon_id followed by the priv blob (the old
"Surprise - there may even be room for an IPv6 address now" comment is gone),
and xdr_encode_unmon() encodes just the mon_id; both still finish with
rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p).

@@ -220,9 +279,11 @@ request/reply size macros

	#define SM_my_name_sz	(1+XDR_QUADLEN(SM_MAXSTRLEN))
	#define SM_my_id_sz	(SM_my_name_sz+3)
	#define SM_mon_name_sz	(1+XDR_QUADLEN(SM_MAXSTRLEN))
	#define SM_mon_id_sz	(SM_mon_name_sz+SM_my_id_sz)
	#define SM_priv_sz	(XDR_QUADLEN(SM_PRIV_SIZE))
	#define SM_mon_sz	(SM_mon_id_sz+SM_priv_sz)
	#define SM_monres_sz	2
	#define SM_unmonres_sz	1

(Previously SM_my_id_sz was (3+1+SM_my_name_sz), SM_mon_id_sz was
(1+XDR_QUADLEN(20)+SM_my_id_sz), and SM_mon_sz was (SM_mon_id_sz+4).)
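The rewritten size macros mirror the SM_MON argument layout on the wire:
mon_name, then my_id, then the fixed-size priv blob.  As a worked check only,
assuming the customary lockd definitions SM_MAXSTRLEN == 1024 and
SM_PRIV_SIZE == 16, and the usual XDR_QUADLEN(n) == (n + 3) / 4, the macros
expand to the word counts shown below:

	/* Worked example (not part of the patch): maximum SM_MON argument
	 * size in 4-byte XDR words, under the assumptions stated above. */
	#include <stdio.h>

	#define XDR_QUADLEN(l)	(((l) + 3) >> 2)
	#define SM_MAXSTRLEN	1024
	#define SM_PRIV_SIZE	16

	#define SM_my_name_sz	(1 + XDR_QUADLEN(SM_MAXSTRLEN))	/* length word + string */
	#define SM_my_id_sz	(SM_my_name_sz + 3)		/* + prog, vers, proc   */
	#define SM_mon_name_sz	(1 + XDR_QUADLEN(SM_MAXSTRLEN))
	#define SM_mon_id_sz	(SM_mon_name_sz + SM_my_id_sz)
	#define SM_priv_sz	(XDR_QUADLEN(SM_PRIV_SIZE))
	#define SM_mon_sz	(SM_mon_id_sz + SM_priv_sz)

	int main(void)
	{
		/* mon_name 257 + my_id 260 = 517 words of mon_id,
		 * plus 4 words of priv = 521 words total. */
		printf("my_name=%d my_id=%d mon_name=%d mon_id=%d priv=%d mon=%d\n",
		       SM_my_name_sz, SM_my_id_sz, SM_mon_name_sz,
		       SM_mon_id_sz, SM_priv_sz, SM_mon_sz);
		return 0;
	}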
fs/lockd/svc.c  +12 −0

Lockd's sysctl code is now conditional on CONFIG_SYSCTL.

@@ -74,7 +74,9 @@

	#ifdef CONFIG_SYSCTL
	static struct ctl_table_header * nlm_sysctl_table;
	#endif

@@ -359,6 +361,8 @@ and @@ -443,6 +447,8 @@

An #ifdef CONFIG_SYSCTL is opened after EXPORT_SYMBOL(lockd_down), ahead of
the "Sysctl parameters (same as module parameters, different interface)"
block, and closed with #endif /* CONFIG_SYSCTL */ after the
nlm_sysctl_root[] table, just before the "Module (and sysfs) parameters"
section.

@@ -516,15 +522,21 @@ init_nlm()/exit_nlm()

	static int __init init_nlm(void)
	{
	#ifdef CONFIG_SYSCTL
		nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root);
		return nlm_sysctl_table ? 0 : -ENOMEM;
	#else
		return 0;
	#endif
	}

	static void __exit exit_nlm(void)
	{
		/* FIXME: delete all NLM clients */
		nlm_shutdown_hosts();
	#ifdef CONFIG_SYSCTL
		unregister_sysctl_table(nlm_sysctl_table);
	#endif
	}

	module_init(init_nlm);