Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 11274e5a authored by Stephen Hemminger, committed by David S. Miller
Browse files

[NETEM]: avoid excessive requeues



The netem code would call getnstimeofday() and dequeue/requeue after
every packet, even if it was waiting. Avoid this overhead by using
the throttled flag.

Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 075aa573
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -298,6 +298,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
						 timer);

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	smp_wmb();
	netif_schedule(wd->qdisc->dev);
	return HRTIMER_NORESTART;
}
@@ -315,6 +316,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
	ktime_t time;

	wd->qdisc->flags |= TCQ_F_THROTTLED;
	smp_wmb();
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_US2NS(expires));
	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
@@ -325,6 +327,7 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	smp_wmb();
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);

+13 −10
Original line number Diff line number Diff line
@@ -273,6 +273,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
@@ -285,18 +289,17 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		} else {
			qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
		}

			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
				printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;