
Commit 9c01c9f1 authored by Paolo Abeni, committed by David S. Miller

net: sched: always do stats accounting according to TCQ_F_CPUSTATS



In many places the core sched implementation independently checks the
TCQ_F_NOLOCK flag to decide whether to acquire/release the root spin
lock, and qdisc_is_percpu_stats() to decide whether to account values
per CPU.

This change updates the last few places that still check TCQ_F_NOLOCK
for stats purposes, so that per-CPU stats accounting is done according
to the qdisc_is_percpu_stats() value.

The above allows the dev_requeue_skb() implementation to be cleaned up
a bit and makes stats updates consistently depend on a single flag.

v1 -> v2:
 - do not move qdisc_is_empty definition, fix build issue

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1f5e6fdd
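
To make the resulting pattern concrete, here is a small userspace sketch of the invariant the patch establishes: locking is keyed on the NOLOCK flag alone, while stats accounting is keyed on the CPUSTATS flag alone, as in the reworked dev_requeue_skb() in the diff below. This is illustrative only: toy_qdisc, toy_requeue_account and the TOY_F_* flags are invented for the sketch and are not kernel APIs.

/*
 * Userspace toy model, NOT kernel code: locking is decided only by the
 * NOLOCK flag, stats accounting only by the CPUSTATS flag.
 */
#include <pthread.h>
#include <stdio.h>

#define TOY_F_CPUSTATS	0x1	/* counters are kept per CPU */
#define TOY_F_NOLOCK	0x2	/* lock-free enqueue/dequeue path */
#define TOY_NCPUS	4

struct toy_qdisc {
	unsigned int flags;
	pthread_spinlock_t lock;
	long qlen;			/* used when !TOY_F_CPUSTATS */
	long cpu_qlen[TOY_NCPUS];	/* used when  TOY_F_CPUSTATS */
};

static int toy_is_percpu_stats(const struct toy_qdisc *q)
{
	return q->flags & TOY_F_CPUSTATS;
}

/*
 * Requeue-style accounting, shaped like the reworked dev_requeue_skb():
 * NOLOCK qdiscs take the qdisc lock here themselves; for the others the
 * caller is assumed to already hold the root lock.
 */
static void toy_requeue_account(struct toy_qdisc *q, int cpu)
{
	pthread_spinlock_t *lock = NULL;

	if (q->flags & TOY_F_NOLOCK) {
		lock = &q->lock;
		pthread_spin_lock(lock);
	}

	if (toy_is_percpu_stats(q))
		q->cpu_qlen[cpu]++;	/* per-CPU counter, summed on read */
	else
		q->qlen++;		/* single shared counter */

	if (lock)
		pthread_spin_unlock(lock);
}

int main(void)
{
	struct toy_qdisc q = { .flags = TOY_F_CPUSTATS | TOY_F_NOLOCK };
	long sum = 0;

	pthread_spin_init(&q.lock, PTHREAD_PROCESS_PRIVATE);
	toy_requeue_account(&q, 0);
	toy_requeue_account(&q, 1);

	/* read side, shaped like qdisc_qlen_sum() */
	if (toy_is_percpu_stats(&q)) {
		for (int i = 0; i < TOY_NCPUS; i++)
			sum += q.cpu_qlen[i];
	} else {
		sum += q.qlen;
	}
	printf("qlen = %ld\n", sum);	/* prints: qlen = 2 */
	return 0;
}

Build with e.g. "cc toy.c -lpthread". The read side loosely mirrors qdisc_qlen_sum() from the patch: per-CPU counters are summed only when the CPUSTATS flag is set.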
include/net/sch_generic.h  +14 −9
@@ -146,9 +146,14 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
 	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 }
 
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+	return q->flags & TCQ_F_CPUSTATS;
+}
+
 static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
 {
-	if (qdisc->flags & TCQ_F_NOLOCK)
+	if (qdisc_is_percpu_stats(qdisc))
 		return qdisc->empty;
 	return !qdisc->q.qlen;
 }
@@ -490,7 +495,7 @@ static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
 {
 	u32 qlen = q->qstats.qlen;
 
-	if (q->flags & TCQ_F_NOLOCK)
+	if (qdisc_is_percpu_stats(q))
 		qlen += atomic_read(&q->q.atomic_qlen);
 	else
 		qlen += q->q.qlen;
@@ -817,11 +822,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	return sch->enqueue(skb, sch, to_free);
 }
 
-static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-{
-	return q->flags & TCQ_F_CPUSTATS;
-}
-
 static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
 				  __u64 bytes, __u32 packets)
 {
@@ -1113,8 +1113,13 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 
 	if (skb) {
 		skb = __skb_dequeue(&sch->gso_skb);
-		qdisc_qstats_backlog_dec(sch, skb);
-		sch->q.qlen--;
+		if (qdisc_is_percpu_stats(sch)) {
+			qdisc_qstats_cpu_backlog_dec(sch, skb);
+			qdisc_qstats_atomic_qlen_dec(sch);
+		} else {
+			qdisc_qstats_backlog_dec(sch, skb);
+			sch->q.qlen--;
+		}
 	} else {
 		skb = sch->dequeue(sch);
 	}
net/sched/sch_generic.c  +17 −33
@@ -118,52 +118,36 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 		spin_unlock(lock);
 }
 
-static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-	while (skb) {
-		struct sk_buff *next = skb->next;
+	spinlock_t *lock = NULL;
 
-		__skb_queue_tail(&q->gso_skb, skb);
-		q->qstats.requeues++;
-		qdisc_qstats_backlog_inc(q, skb);
-		q->q.qlen++;	/* it's still part of the queue */
-
-		skb = next;
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
 	}
-	__netif_schedule(q);
 
-	return 0;
-}
-
-static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
-{
-	spinlock_t *lock = qdisc_lock(q);
-
-	spin_lock(lock);
 	while (skb) {
 		struct sk_buff *next = skb->next;
 
 		__skb_queue_tail(&q->gso_skb, skb);
 
-		qdisc_qstats_cpu_requeues_inc(q);
-		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_atomic_qlen_inc(q);
+		/* it's still part of the queue */
+		if (qdisc_is_percpu_stats(q)) {
+			qdisc_qstats_cpu_requeues_inc(q);
+			qdisc_qstats_cpu_backlog_inc(q, skb);
+			qdisc_qstats_atomic_qlen_inc(q);
+		} else {
+			q->qstats.requeues++;
+			qdisc_qstats_backlog_inc(q, skb);
+			q->q.qlen++;
+		}
 
 		skb = next;
 	}
-	spin_unlock(lock);
-
+	if (lock)
+		spin_unlock(lock);
 	__netif_schedule(q);
-
-	return 0;
 }
 
-static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
-{
-	if (q->flags & TCQ_F_NOLOCK)
-		return dev_requeue_skb_locked(skb, q);
-	else
-		return __dev_requeue_skb(skb, q);
-}
-
 static void try_bulk_dequeue_skb(struct Qdisc *q,