Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7a90e8cc authored by Tom Tucker, committed by J. Bruce Fields
Browse files

svc: Move sk_reserved to svc_xprt



This functionally trivial patch moves the sk_reserved field to the
transport independent svc_xprt structure.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
parent 7a182083
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -53,6 +53,7 @@ struct svc_xprt {

	struct svc_pool		*xpt_pool;	/* current pool iff queued */
	struct svc_serv		*xpt_server;	/* service for transport */
	atomic_t    	    	xpt_reserved;	/* space on outq that is rsvd */
};

int	svc_reg_xprt_class(struct svc_xprt_class *);
+0 −2
Original line number Diff line number Diff line
@@ -20,8 +20,6 @@ struct svc_sock {
	struct socket *		sk_sock;	/* berkeley socket layer */
	struct sock *		sk_sk;		/* INET layer */

	atomic_t    	    	sk_reserved;	/* space on outq that is reserved */

	spinlock_t		sk_lock;	/* protects sk_deferred and
						 * sk_info_authunix */
	struct list_head	sk_deferred;	/* deferred requests that need to
+5 −5
Original line number Diff line number Diff line
@@ -288,7 +288,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
		rqstp->rq_sock = svsk;
		svc_xprt_get(&svsk->sk_xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
		BUG_ON(svsk->sk_xprt.xpt_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
@@ -353,7 +353,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_xprt.xpt_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
@@ -881,7 +881,7 @@ static int svc_udp_has_wspace(struct svc_xprt *xprt)
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
	if (required*2 > sock_wspace(svsk->sk_sk))
		return 0;
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
@@ -1327,7 +1327,7 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
	wspace = sk_stream_wspace(svsk->sk_sk);

	if (wspace < sk_stream_min_wspace(svsk->sk_sk))
@@ -1544,7 +1544,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
		rqstp->rq_sock = svsk;
		svc_xprt_get(&svsk->sk_xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);