Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4eb701df authored by Neil Horman's avatar Neil Horman Committed by David S. Miller
Browse files

[SCTP] Fix SCTP sendbuffer accounting.



- Include chunk and skb sizes in sendbuffer accounting.
- 2 policies are supported. 0: per socket accounting, 1: per association
  accounting

DaveM: I've made the default per-socket.

Signed-off-by: Neil Horman <nhorman@redhat.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 594ccc14
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -643,6 +643,7 @@ enum {
	NET_SCTP_MAX_BURST               = 12,
	NET_SCTP_ADDIP_ENABLE		 = 13,
	NET_SCTP_PRSCTP_ENABLE		 = 14,
	NET_SCTP_SNDBUF_POLICY		 = 15,
};

/* /proc/sys/net/bridge */
+10 −1
Original line number Diff line number Diff line
@@ -154,6 +154,13 @@ extern struct sctp_globals {
	int max_retrans_path;
	int max_retrans_init;

	/*
	 * Policy for performing sctp/socket accounting
	 * 0   - do socket level accounting, all assocs share sk_sndbuf
	 * 1   - do sctp accounting, each asoc may use sk_sndbuf bytes
	 */
	int sndbuf_policy;

	/* HB.interval		    - 30 seconds  */
	int hb_interval;

@@ -207,6 +214,7 @@ extern struct sctp_globals {
#define sctp_valid_cookie_life		(sctp_globals.valid_cookie_life)
#define sctp_cookie_preserve_enable	(sctp_globals.cookie_preserve_enable)
#define sctp_max_retrans_association	(sctp_globals.max_retrans_association)
#define sctp_sndbuf_policy	 	(sctp_globals.sndbuf_policy)
#define sctp_max_retrans_path		(sctp_globals.max_retrans_path)
#define sctp_max_retrans_init		(sctp_globals.max_retrans_init)
#define sctp_hb_interval		(sctp_globals.hb_interval)
@@ -1212,7 +1220,8 @@ struct sctp_endpoint {
	/* Default timeouts.  */
	int timeouts[SCTP_NUM_TIMEOUT_TYPES];

	/* Various thresholds.	*/
	/* sendbuf acct. policy.	*/
	__u32 sndbuf_policy;

	/* Name for debugging output... */
	char *debug_name;
+1 −0
Original line number Diff line number Diff line
@@ -125,6 +125,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
		sp->autoclose * HZ;

	/* Use SCTP specific send buffer space queues.  */
	ep->sndbuf_policy = sctp_sndbuf_policy;
	sk->sk_write_space = sctp_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

+3 −0
Original line number Diff line number Diff line
@@ -1043,6 +1043,9 @@ SCTP_STATIC __init int sctp_init(void)
	sctp_max_retrans_path		= 5;
	sctp_max_retrans_init		= 8;

	/* Sendbuffer growth	    - do per-socket accounting */
	sctp_sndbuf_policy		= 0;

	/* HB.interval              - 30 seconds */
	sctp_hb_interval		= 30 * HZ;

+31 −5
Original line number Diff line number Diff line
@@ -115,9 +115,17 @@ static inline int sctp_wspace(struct sctp_association *asoc)
	struct sock *sk = asoc->base.sk;
	int amt = 0;

	if (asoc->ep->sndbuf_policy) {
		/* make sure that no association uses more than sk_sndbuf */
		amt = sk->sk_sndbuf - asoc->sndbuf_used;
	} else {
		/* do socket level accounting */
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
	}

	if (amt < 0)
		amt = 0;

	return amt;
}

@@ -138,12 +146,21 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
	sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
}

/* Verify that this is a valid address. */
@@ -4422,8 +4439,17 @@ static void sctp_wfree(struct sk_buff *skb)
	chunk = *((struct sctp_chunk **)(skb->cb));
	asoc = chunk->asoc;
	sk = asoc->base.sk;
	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk);
	sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk);
	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);

	sock_wfree(skb);
	__sctp_write_space(asoc);

	sctp_association_put(asoc);
Loading