
Commit e8680a24 authored by Chuck Lever, committed by Anna Schumaker

SUNRPC: Use struct xdr_stream when constructing RPC Call header



Modernize and harden the code path that constructs each RPC Call
message.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent fe9a2705
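Illustrative sketch (not part of this patch): under the converted interface, a flavor's crmarshal implementation appends its credential directly to the xdr_stream and reports failure with a negative errno instead of returning a __be32 pointer or NULL. The AUTH_NULL-style example below is hypothetical and only mirrors the signatures and constants introduced by this patch:

	/* Hypothetical example, not from the patch */
	static int example_crmarshal(struct rpc_task *task, struct xdr_stream *xdr)
	{
		__be32 *p;

		/* reserve two XDR words: flavor + opaque body length */
		p = xdr_reserve_space(xdr, 2 * sizeof(*p));
		if (!p)
			return -EMSGSIZE;	/* stream too small: caller sees an errno */
		*p++ = rpc_auth_null;		/* flavor constant added by this patch */
		*p   = xdr_zero;		/* zero-length credential body */
		return 0;
	}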
include/linux/sunrpc/auth.h  +10 −5
@@ -131,11 +131,12 @@ struct rpc_credops {
 	void			(*crdestroy)(struct rpc_cred *);
 
 	int			(*crmatch)(struct auth_cred *, struct rpc_cred *, int);
-	__be32 *		(*crmarshal)(struct rpc_task *, __be32 *);
+	int			(*crmarshal)(struct rpc_task *task,
+					     struct xdr_stream *xdr);
 	int			(*crrefresh)(struct rpc_task *);
 	__be32 *		(*crvalidate)(struct rpc_task *, __be32 *);
-	int			(*crwrap_req)(struct rpc_task *, kxdreproc_t,
-						void *, __be32 *, void *);
+	int			(*crwrap_req)(struct rpc_task *task,
+					      struct xdr_stream *xdr);
 	int			(*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
 						void *, __be32 *, void *);
 	int			(*crkey_timeout)(struct rpc_cred *);
@@ -165,9 +166,13 @@ struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *
 void			rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
 struct rpc_cred *	rpcauth_lookupcred(struct rpc_auth *, int);
 void			put_rpccred(struct rpc_cred *);
-__be32 *		rpcauth_marshcred(struct rpc_task *, __be32 *);
+int			rpcauth_marshcred(struct rpc_task *task,
+					  struct xdr_stream *xdr);
 __be32 *		rpcauth_checkverf(struct rpc_task *, __be32 *);
-int			rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
+int			rpcauth_wrap_req_encode(struct rpc_task *task,
+						struct xdr_stream *xdr);
+int			rpcauth_wrap_req(struct rpc_task *task,
+					 struct xdr_stream *xdr);
 int			rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
 bool			rpcauth_xmit_need_reencode(struct rpc_task *task);
 int			rpcauth_refreshcred(struct rpc_task *);
include/linux/sunrpc/xdr.h  +6 −0
@@ -87,6 +87,12 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
 #define	xdr_one		cpu_to_be32(1)
 #define	xdr_two		cpu_to_be32(2)
 
+#define	rpc_auth_null	cpu_to_be32(RPC_AUTH_NULL)
+#define	rpc_auth_unix	cpu_to_be32(RPC_AUTH_UNIX)
+#define	rpc_auth_gss	cpu_to_be32(RPC_AUTH_GSS)
+
+#define	rpc_call	cpu_to_be32(RPC_CALL)
+
 #define	rpc_success		cpu_to_be32(RPC_SUCCESS)
 #define	rpc_prog_unavail	cpu_to_be32(RPC_PROG_UNAVAIL)
 #define	rpc_prog_mismatch	cpu_to_be32(RPC_PROG_MISMATCH)
include/trace/events/sunrpc.h  +29 −0
@@ -213,6 +213,35 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
 DEFINE_RPC_QUEUED_EVENT(sleep);
 DEFINE_RPC_QUEUED_EVENT(wakeup);
 
+DECLARE_EVENT_CLASS(rpc_failure,
+
+	TP_PROTO(const struct rpc_task *task),
+
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+	),
+
+	TP_fast_assign(
+		__entry->task_id = task->tk_pid;
+		__entry->client_id = task->tk_client->cl_clid;
+	),
+
+	TP_printk("task:%u@%u",
+		__entry->task_id, __entry->client_id)
+);
+
+#define DEFINE_RPC_FAILURE(name)					\
+	DEFINE_EVENT(rpc_failure, rpc_bad_##name,			\
+			TP_PROTO(					\
+				const struct rpc_task *task		\
+			),						\
+			TP_ARGS(task))
+
+DEFINE_RPC_FAILURE(callhdr);
+
 TRACE_EVENT(rpc_stats_latency,
 
 	TP_PROTO(
net/sunrpc/auth.c  +38 −18
@@ -756,12 +756,21 @@ put_rpccred(struct rpc_cred *cred)
 }
 EXPORT_SYMBOL_GPL(put_rpccred);
 
-__be32 *
-rpcauth_marshcred(struct rpc_task *task, __be32 *p)
+/**
+ * rpcauth_marshcred - Append RPC credential to end of @xdr
+ * @task: controlling RPC task
+ * @xdr: xdr_stream containing initial portion of RPC Call header
+ *
+ * On success, an appropriate verifier is added to @xdr, @xdr is
+ * updated to point past the verifier, and zero is returned.
+ * Otherwise, @xdr is in an undefined state and a negative errno
+ * is returned.
+ */
+int rpcauth_marshcred(struct rpc_task *task, struct xdr_stream *xdr)
 {
-	struct rpc_cred	*cred = task->tk_rqstp->rq_cred;
+	const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
 
-	return cred->cr_ops->crmarshal(task, p);
+	return ops->crmarshal(task, xdr);
 }
 
 __be32 *
@@ -772,26 +781,37 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p)
 	return cred->cr_ops->crvalidate(task, p);
 }
 
-static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
-				   __be32 *data, void *obj)
+/**
+ * rpcauth_wrap_req_encode - XDR encode the RPC procedure
+ * @task: controlling RPC task
+ * @xdr: stream where on-the-wire bytes are to be marshalled
+ *
+ * On success, @xdr contains the encoded and wrapped message.
+ * Otherwise, @xdr is in an undefined state.
+ */
+int rpcauth_wrap_req_encode(struct rpc_task *task, struct xdr_stream *xdr)
 {
-	struct xdr_stream xdr;
+	kxdreproc_t encode = task->tk_msg.rpc_proc->p_encode;
 
-	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data, rqstp);
-	encode(rqstp, &xdr, obj);
+	encode(task->tk_rqstp, xdr, task->tk_msg.rpc_argp);
+	return 0;
 }
+EXPORT_SYMBOL_GPL(rpcauth_wrap_req_encode);
 
-int
-rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
-		__be32 *data, void *obj)
+/**
+ * rpcauth_wrap_req - XDR encode and wrap the RPC procedure
+ * @task: controlling RPC task
+ * @xdr: stream where on-the-wire bytes are to be marshalled
+ *
+ * On success, @xdr contains the encoded and wrapped message,
+ * and zero is returned. Otherwise, @xdr is in an undefined
+ * state and a negative errno is returned.
+ */
+int rpcauth_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
 {
-	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
+	const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
 
-	if (cred->cr_ops->crwrap_req)
-		return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
-	/* By default, we encode the arguments normally. */
-	rpcauth_wrap_req_encode(encode, rqstp, data, obj);
-	return 0;
+	return ops->crwrap_req(task, xdr);
 }
 
 static int
net/sunrpc/auth_gss/auth_gss.c  +93 −98
@@ -1527,17 +1527,19 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
 
 /*
  * Marshal credentials.
- * Maybe we should keep a cached credential for performance reasons.
+ *
+ * The expensive part is computing the verifier. We can't cache a
+ * pre-computed version of the verifier because the seqno, which
+ * is different every time, is included in the MIC.
  */
-static __be32 *
-gss_marshal(struct rpc_task *task, __be32 *p)
+static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_cred *cred = req->rq_cred;
 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
 						 gc_base);
 	struct gss_cl_ctx	*ctx = gss_cred_get_ctx(cred);
-	__be32		*cred_len;
+	__be32		*p, *cred_len;
 	u32             maj_stat = 0;
 	struct xdr_netobj mic;
 	struct kvec	iov;
@@ -1545,7 +1547,13 @@ gss_marshal(struct rpc_task *task, __be32 *p)
 
 	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
 
-	*p++ = htonl(RPC_AUTH_GSS);
+	/* Credential */
+
+	p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
+			      ctx->gc_wire_ctx.len);
+	if (!p)
+		goto out_put_ctx;
+	*p++ = rpc_auth_gss;
 	cred_len = p++;
 
 	spin_lock(&ctx->gc_seq_lock);
@@ -1554,12 +1562,14 @@ gss_marshal(struct rpc_task *task, __be32 *p)
 	if (req->rq_seqno == MAXSEQ)
 		goto out_expired;
 
-	*p++ = htonl((u32) RPC_GSS_VERSION);
-	*p++ = htonl((u32) ctx->gc_proc);
-	*p++ = htonl((u32) req->rq_seqno);
-	*p++ = htonl((u32) gss_cred->gc_service);
+	*p++ = cpu_to_be32(RPC_GSS_VERSION);
+	*p++ = cpu_to_be32(ctx->gc_proc);
+	*p++ = cpu_to_be32(req->rq_seqno);
+	*p++ = cpu_to_be32(gss_cred->gc_service);
 	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
-	*cred_len = htonl((p - (cred_len + 1)) << 2);
+	*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
+
+	/* Verifier */
 
 	/* We compute the checksum for the verifier over the xdr-encoded bytes
 	 * starting with the xid and ending at the end of the credential: */
@@ -1567,27 +1577,27 @@ gss_marshal(struct rpc_task *task, __be32 *p)
 	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
 	xdr_buf_from_iov(&iov, &verf_buf);
 
-	/* set verifier flavor*/
-	*p++ = htonl(RPC_AUTH_GSS);
-
+	p = xdr_reserve_space(xdr, sizeof(*p));
+	if (!p)
+		goto out_put_ctx;
+	*p++ = rpc_auth_gss;
 	mic.data = (u8 *)(p + 1);
 	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
-	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
+	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		goto out_expired;
-	} else if (maj_stat != 0) {
-		pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
-		task->tk_status = -EIO;
-		goto out_put_ctx;
-	}
-	p = xdr_encode_opaque(p, NULL, mic.len);
+	else if (maj_stat != 0)
+		goto out_put_ctx;
+	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
+		goto out_put_ctx;
 	gss_put_ctx(ctx);
-	return p;
+	return 0;
 out_expired:
+	gss_put_ctx(ctx);
 	clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
-	task->tk_status = -EKEYEXPIRED;
+	return -EKEYEXPIRED;
 out_put_ctx:
 	gss_put_ctx(ctx);
-	return NULL;
+	return -EMSGSIZE;
 }
 
 static int gss_renew_cred(struct rpc_task *task)
@@ -1716,61 +1726,45 @@ gss_validate(struct rpc_task *task, __be32 *p)
 	return ret;
 }
 
-static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
-				__be32 *p, void *obj)
-{
-	struct xdr_stream xdr;
-
-	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p, rqstp);
-	encode(rqstp, &xdr, obj);
-}
-
-static inline int
-gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		   kxdreproc_t encode, struct rpc_rqst *rqstp,
-		   __be32 *p, void *obj)
+static int gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+			      struct rpc_task *task, struct xdr_stream *xdr)
 {
-	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
-	struct xdr_buf	integ_buf;
-	__be32          *integ_len = NULL;
+	struct rpc_rqst *rqstp = task->tk_rqstp;
+	struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
 	struct xdr_netobj mic;
-	u32		offset;
-	__be32		*q;
-	struct kvec	*iov;
-	u32             maj_stat = 0;
-	int		status = -EIO;
+	__be32 *p, *integ_len;
+	u32 offset, maj_stat;
 
+	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
+	if (!p)
+		goto wrap_failed;
 	integ_len = p++;
-	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
-	*p++ = htonl(rqstp->rq_seqno);
+	*p = cpu_to_be32(rqstp->rq_seqno);
 
-	gss_wrap_req_encode(encode, rqstp, p, obj);
+	if (rpcauth_wrap_req_encode(task, xdr))
+		goto wrap_failed;
 
+	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
 	if (xdr_buf_subsegment(snd_buf, &integ_buf,
 				offset, snd_buf->len - offset))
-		return status;
-	*integ_len = htonl(integ_buf.len);
+		goto wrap_failed;
+	*integ_len = cpu_to_be32(integ_buf.len);
 
-	/* guess whether we're in the head or the tail: */
-	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
-		iov = snd_buf->tail;
-	else
-		iov = snd_buf->head;
-	p = iov->iov_base + iov->iov_len;
+	p = xdr_reserve_space(xdr, 0);
+	if (!p)
+		goto wrap_failed;
 	mic.data = (u8 *)(p + 1);
-
 	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
-	status = -EIO; /* XXX? */
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
 	else if (maj_stat)
-		return status;
-	q = xdr_encode_opaque(p, NULL, mic.len);
-
-	offset = (u8 *)q - (u8 *)p;
-	iov->iov_len += offset;
-	snd_buf->len += offset;
+		goto wrap_failed;
+	/* Check that the trailing MIC fit in the buffer, after the fact */
+	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
+		goto wrap_failed;
 	return 0;
+wrap_failed:
+	return -EMSGSIZE;
 }
 
 static void
@@ -1821,61 +1815,63 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
 	return -EAGAIN;
 }
 
-static inline int
-gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		  kxdreproc_t encode, struct rpc_rqst *rqstp,
-		  __be32 *p, void *obj)
+static int gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+			     struct rpc_task *task, struct xdr_stream *xdr)
 {
+	struct rpc_rqst *rqstp = task->tk_rqstp;
 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
-	u32		offset;
-	u32             maj_stat;
+	u32		pad, offset, maj_stat;
 	int		status;
-	__be32		*opaque_len;
+	__be32		*p, *opaque_len;
 	struct page	**inpages;
 	int		first;
-	int		pad;
 	struct kvec	*iov;
-	char		*tmp;
 
+	status = -EIO;
+	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
+	if (!p)
+		goto wrap_failed;
 	opaque_len = p++;
-	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
-	*p++ = htonl(rqstp->rq_seqno);
+	*p = cpu_to_be32(rqstp->rq_seqno);
 
-	gss_wrap_req_encode(encode, rqstp, p, obj);
+	if (rpcauth_wrap_req_encode(task, xdr))
+		goto wrap_failed;
 
 	status = alloc_enc_pages(rqstp);
-	if (status)
-		return status;
+	if (unlikely(status))
+		goto wrap_failed;
 	first = snd_buf->page_base >> PAGE_SHIFT;
 	inpages = snd_buf->pages + first;
 	snd_buf->pages = rqstp->rq_enc_pages;
 	snd_buf->page_base -= first << PAGE_SHIFT;
 	/*
-	 * Give the tail its own page, in case we need extra space in the
-	 * head when wrapping:
+	 * Move the tail into its own page, in case gss_wrap needs
+	 * more space in the head when wrapping.
 	 *
-	 * call_allocate() allocates twice the slack space required
-	 * by the authentication flavor to rq_callsize.
-	 * For GSS, slack is GSS_CRED_SLACK.
+	 * Still... Why can't gss_wrap just slide the tail down?
 	 */
 	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
+		char *tmp;
+
 		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
 		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
 		snd_buf->tail[0].iov_base = tmp;
 	}
+	status = -EIO;
+	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
 	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
 	/* slack space should prevent this ever happening: */
-	BUG_ON(snd_buf->len > snd_buf->buflen);
-	status = -EIO;
+	if (unlikely(snd_buf->len > snd_buf->buflen))
+		goto wrap_failed;
 	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
 	 * done anyway, so it's safe to put the request on the wire: */
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
 	else if (maj_stat)
-		return status;
+		goto wrap_failed;
 
-	*opaque_len = htonl(snd_buf->len - offset);
-	/* guess whether we're in the head or the tail: */
+	*opaque_len = cpu_to_be32(snd_buf->len - offset);
+	/* guess whether the pad goes into the head or the tail: */
 	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
 		iov = snd_buf->tail;
 	else
@@ -1887,37 +1883,36 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
 	snd_buf->len += pad;
 
 	return 0;
+wrap_failed:
+	return status;
 }
 
-static int
-gss_wrap_req(struct rpc_task *task,
-	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
+static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
 			gc_base);
 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
-	int             status = -EIO;
+	int status;
 
 	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
+	status = -EIO;
 	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
 		/* The spec seems a little ambiguous here, but I think that not
 		 * wrapping context destruction requests makes the most sense.
 		 */
-		gss_wrap_req_encode(encode, rqstp, p, obj);
-		status = 0;
+		status = rpcauth_wrap_req_encode(task, xdr);
 		goto out;
 	}
 	switch (gss_cred->gc_service) {
 	case RPC_GSS_SVC_NONE:
-		gss_wrap_req_encode(encode, rqstp, p, obj);
-		status = 0;
+		status = rpcauth_wrap_req_encode(task, xdr);
 		break;
 	case RPC_GSS_SVC_INTEGRITY:
-		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
+		status = gss_wrap_req_integ(cred, ctx, task, xdr);
 		break;
 	case RPC_GSS_SVC_PRIVACY:
-		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
+		status = gss_wrap_req_priv(cred, ctx, task, xdr);
 		break;
 	}
 out: