
Commit 8d6d8406 authored by David S. Miller

Merge branch 'sctp'



Lee A. Roberts says:

====================
This series of patches resolves several SCTP association hangs observed during
SCTP stress testing.  Observable symptoms include communication hangs with
data held in the association reassembly and/or lobby (ordering) queues.
Close examination of the reassembly/ordering queues may show duplicated
or missing packets.

In version #2, corrected a build failure in the initial version of the patch
series, caused by a wrong calling sequence for sctp_ulpq_partial_delivery()
inserted in sctp_ulpq_renege().

In version #3, adjusted patch documentation to be less repetitive.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 726bc6b0 d003b41b
net/sctp/tsnmap.c  +7 −6
@@ -51,7 +51,7 @@
 static void sctp_tsnmap_update(struct sctp_tsnmap *map);
 static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
 				     __u16 len, __u16 *start, __u16 *end);
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
 
 /* Initialize a block of memory as a tsnmap.  */
 struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,

 	gap = tsn - map->base_tsn;
 
-	if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
+	if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
 		return -ENOMEM;
 
 	if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
 	return ngaps;
 }
 
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
 {
 	unsigned long *new;
 	unsigned long inc;
 	u16  len;
 
-	if (gap >= SCTP_TSN_MAP_SIZE)
+	if (size > SCTP_TSN_MAP_SIZE)
 		return 0;
 
-	inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+	inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
 	len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
 
 	new = kzalloc(len>>3, GFP_ATOMIC);
 	if (!new)
 		return 0;
 
-	bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
+	bitmap_copy(new, map->tsn_map,
+		map->max_tsn_seen - map->cumulative_tsn_ack_point);
 	kfree(map->tsn_map);
 	map->tsn_map = new;
 	map->len = len;
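
Both tsnmap changes are off-by-one fixes. The gap passed to sctp_tsnmap_grow() is a zero-based bit offset from base_tsn, so covering offset gap requires gap + 1 map entries; and because base_tsn sits one past the Cumulative TSN ACK Point, the number of live bits to carry over on a grow is max_tsn_seen - cumulative_tsn_ack_point, one more than the old max_tsn_seen - base_tsn. A minimal user-space sketch of the arithmetic (illustrative values and names, not kernel symbols):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Zero-based offset `gap` occupies map entries 0..gap: gap + 1 bits. */
static uint32_t entries_needed(uint32_t gap)
{
	return gap + 1;
}

int main(void)
{
	uint32_t base_tsn = 1001;	/* first TSN tracked by the map */
	uint32_t ctsn = 1000;		/* cumulative ack == base_tsn - 1 */
	uint32_t max_seen = 1064;	/* highest TSN marked so far */
	uint32_t gap = max_seen - base_tsn;	/* bit offset 63 */

	/* Growing to cover only `gap` entries loses the final bit. */
	assert(entries_needed(gap) == 64);

	/* Live bits span base_tsn..max_seen inclusive: one more than
	 * max_seen - base_tsn, which is what the bitmap_copy fix restores.
	 */
	assert(max_seen - ctsn == max_seen - base_tsn + 1);

	printf("need %u entries, copy %u bits\n",
	       entries_needed(gap), max_seen - ctsn);
	return 0;
}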
net/sctp/ulpqueue.c  +71 −16
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;
 
 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 
-	return 0;
+	return event_eor;
 }
 
 /* Add a new event for propagation to the ULP.  */
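
sctp_ulpq_tail_data() now reports whether a complete message reached the ULP: MSG_EOR set in the event's msg_flags means the final fragment was delivered. The same flag is the message-boundary signal user space sees from recvmsg() on an SCTP socket; a minimal sketch, assuming fd is an already-connected SCTP socket:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Read one piece of an SCTP message from fd.  Returns 1 when the
 * kernel marks the read as the end of a record (MSG_EOR), 0 when
 * more fragments of the same message are pending, -1 on error.
 */
static int read_fragment(int fd, char *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	if (recvmsg(fd, &msg, 0) < 0)
		return -1;

	return (msg.msg_flags & MSG_EOR) ? 1 : 0;
}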
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
 			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 			} else
 				goto done;
 			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
 		default:
 			return NULL;
 		}
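
Both reassembly lookups gain the same guard: a MIDDLE or LAST fragment is only meaningful once a FIRST fragment has been seen, so the scan now returns NULL rather than assembling a message from a headless tail. A standalone sketch of that rule (illustrative flag values, not the kernel's SCTP_DATA_* masks):

#include <stdbool.h>
#include <stdio.h>

enum frag { FIRST, MIDDLE, LAST };

/* A fragment run is deliverable only in the shape FIRST, MIDDLE*, LAST. */
static bool valid_run(const enum frag *f, int n)
{
	bool started = false;
	int i;

	for (i = 0; i < n; i++) {
		switch (f[i]) {
		case FIRST:
			if (started)
				return false;	/* FIRST inside a run */
			started = true;
			break;
		case MIDDLE:
			if (!started)
				return false;	/* tail without a head */
			break;
		case LAST:
			return started;		/* complete iff started */
		}
	}
	return false;				/* never saw LAST */
}

int main(void)
{
	enum frag ok[] = { FIRST, MIDDLE, LAST };
	enum frag bad[] = { MIDDLE, LAST };	/* what the fix rejects */

	printf("%d %d\n", valid_run(ok, 3), valid_run(bad, 2));
	return 0;
}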
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
+	__u32 tsn, last_tsn;
+	struct sk_buff *skb, *flist, *last;
 	struct sctp_ulpevent *event;
 	struct sctp_tsnmap *tsnmap;
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(list)) != NULL) {
-		freed += skb_headlen(skb);
+	while ((skb = skb_peek_tail(list)) != NULL) {
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
 
+		/* Don't renege below the Cumulative TSN ACK Point. */
+		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+			break;
+
+		/* Events in ordering queue may have multiple fragments
+		 * corresponding to additional TSNs.  Sum the total
+		 * freed space; find the last TSN.
+		 */
+		freed += skb_headlen(skb);
+		flist = skb_shinfo(skb)->frag_list;
+		for (last = flist; flist; flist = flist->next) {
+			last = flist;
+			freed += skb_headlen(last);
+		}
+		if (last)
+			last_tsn = sctp_skb2event(last)->tsn;
+		else
+			last_tsn = tsn;
+
+		/* Unlink the event, then renege all applicable TSNs. */
+		__skb_unlink(skb, list);
 		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
+		while (TSN_lte(tsn, last_tsn)) {
+			sctp_tsnmap_renege(tsnmap, tsn);
+			tsn++;
+		}
 		if (freed >= needed)
 			return freed;
 	}
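
The reworked renege loop peeks instead of dequeueing, refuses to renege at or below the Cumulative TSN ACK Point, and walks the skb's frag_list so that every fragment's length is charged and the whole TSN range [tsn, last_tsn] is reneged together. The TSN comparison is serial-number arithmetic, which stays correct across 32-bit wraparound; a hedged sketch (tsn_lte is modeled on, not copied from, the kernel's TSN_lte):

#include <stdint.h>
#include <stdio.h>

/* Serial-number "less than or equal": valid across u32 wraparound. */
static int tsn_lte(uint32_t s, uint32_t t)
{
	return s == t || (int32_t)(s - t) < 0;
}

int main(void)
{
	uint32_t tsn = 4294967294u;	/* two below the wrap point */
	uint32_t last_tsn = 1;		/* already wrapped past zero */
	unsigned int reneged = 0;

	/* Renege every TSN in [tsn, last_tsn], as the fixed loop does. */
	while (tsn_lte(tsn, last_tsn)) {
		reneged++;		/* stand-in for sctp_tsnmap_renege() */
		tsn++;
	}
	printf("reneged %u TSNs\n", reneged);	/* 4, despite the wrap */
	return 0;
}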
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;
 
 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);
 
 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;
 
+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);
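
The net effect in sctp_ulpq_renege(): the return value of sctp_ulpq_tail_data() now drives a three-way decision. A negative errno means the event could not be created, 0 means data was queued but no complete message went up (so partial delivery must provide forward progress), and 1 means a full MSG_EOR message was delivered (so the reassembly queue can simply be drained). A compact sketch of that dispatch, with stand-in names rather than kernel symbols:

#include <stdio.h>

/* Mirror of the decision the fixed sctp_ulpq_renege() makes on the
 * sctp_ulpq_tail_data() return value (names here are illustrative).
 */
static void after_accepting_chunk(int retval)
{
	if (retval <= 0)
		puts("enter partial delivery");	/* nothing fully delivered */
	else if (retval == 1)
		puts("drain reassembly queue");	/* MSG_EOR message went up */
}

int main(void)
{
	after_accepting_chunk(0);	/* fragments queued, message incomplete */
	after_accepting_chunk(1);	/* complete message delivered */
	return 0;
}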