
Commit 960fb66e authored by Eric Dumazet, committed by David S. Miller

netem: add limitation to reordered packets



Fix two netem bugs:

1) When a frame was dropped by tfifo_enqueue(), the drop counter
   was incremented twice (qdisc_reshape_fail() already counts the
   drop, and netem_enqueue() counted it again).

2) When reordering is triggered, we enqueue a packet without
   checking the queue limit. Repeated often enough, this can OOM
   pretty fast: the skbs are orphaned by this point, so no socket
   limit can bound the memory they consume (see the sketch below).
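
As a condensed illustration of both problems, the pre-patch control
flow of netem_enqueue() can be paraphrased as follows. This is a
sketch distilled from the diff below, not the literal source:
reorder_this() is a hypothetical stand-in for the q->gap/q->counter
reordering test, and the snippet is not compilable on its own.

	static int netem_enqueue_before(struct sk_buff *skb, struct Qdisc *sch)
	{
		int ret;

		if (!reorder_this(sch)) {	/* hypothetical helper: the q->gap/q->counter test */
			/* tfifo_enqueue() checked sch->limit itself; on overflow it
			 * called qdisc_reshape_fail(), which already increments
			 * sch->qstats.drops and (normally) returns NET_XMIT_DROP. */
			ret = tfifo_enqueue(skb, sch);
		} else {
			/* Reorder path: queued at the head with no sch->limit check,
			 * so the queue can grow without bound; the skbs are orphaned,
			 * so socket accounting provides no back-pressure either. */
			__skb_queue_head(&sch->q, skb);
			ret = NET_XMIT_SUCCESS;
		}

		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;	/* bug 1: the same drop counted a second time */

		return ret;
	}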

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Mark Gordon <msg@google.com>
Cc: Andreas Terzis <aterzis@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Hagen Paul Pfeifer <hagen@jauu.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b94e52f6
net/sched/sch_netem.c  +15 −27

--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,17 +331,15 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek_tail(list);
 
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
 	/* Optimize for add at tail */
 	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-			return qdisc_enqueue_tail(nskb, sch);
+		return __skb_queue_tail(list, nskb);
 
 	skb_queue_reverse_walk(list, skb) {
 		if (tnext >= netem_skb_cb(skb)->time_to_send)
@@ -349,11 +347,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 	}
 
 	__skb_queue_after(list, skb, nskb);
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		return NET_XMIT_SUCCESS;
-	}
-
-	return qdisc_reshape_fail(nskb, sch);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
-	}
-
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
 	}
 
 	return NET_XMIT_SUCCESS;
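
Note the shape of the fix: the sch->limit check and the
qstats.backlog accounting are hoisted into netem_enqueue(), ahead of
both the tail-insert and the reordering branch, so every path is
bounded and the byte count is updated exactly once. With the failure
case handled by the caller, tfifo_enqueue() can no longer fail and its
return type becomes void, which also removes the caller-side drop
accounting that caused the double increment.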