Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bfe0d029 authored by Eric Dumazet, committed by David S. Miller
Browse files

net_sched: factorize qdisc stats handling



HTB takes into account skb is segmented in stats updates.
Generalize this to all schedulers.

They should use qdisc_bstats_update() helper instead of manipulating
bstats.bytes and bstats.packets

Add bstats_update() helper too for classes that use
gnet_stats_basic_packed fields.

Note: Right now, the TCQ_F_CAN_BYPASS shortcut can be taken only if no
stab is setup on qdisc.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f1593d22
Loading
Loading
Loading
Loading
+14 −6
Original line number Diff line number Diff line
@@ -207,7 +207,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
@@ -394,7 +394,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}
@@ -426,10 +426,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

/* Update a qdisc's own byte/packet counters for one skb by delegating to
 * bstats_update(), so GSO segmentation is accounted uniformly for every
 * scheduler.
 */
static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));
	qdisc_bstats_update(sch, skb);

	return NET_XMIT_SUCCESS;
}
+4 −1
Original line number Diff line number Diff line
@@ -2297,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);
		__qdisc_update_bstats(q, skb->len);

		qdisc_skb_cb(skb)->pkt_len = skb->len;
		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
+1 −2
Original line number Diff line number Diff line
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,

	spin_lock(&p->tcf_lock);
	p->tcf_tm.lastuse = jiffies;
	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
	p->tcf_bstats.packets++;
	bstats_update(&p->tcf_bstats, skb);
	action = p->tcf_action;
	update_flags = p->update_flags;
	spin_unlock(&p->tcf_lock);
+1 −2
Original line number Diff line number Diff line
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
	spin_lock(&ipt->tcf_lock);

	ipt->tcf_tm.lastuse = jiffies;
	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
	ipt->tcf_bstats.packets++;
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 worry later - danger - this API seems to have changed
+1 −2
Original line number Diff line number Diff line
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,

	spin_lock(&m->tcf_lock);
	m->tcf_tm.lastuse = jiffies;
	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
	m->tcf_bstats.packets++;
	bstats_update(&m->tcf_bstats, skb);

	dev = m->tcfm_dev;
	if (!dev) {
Loading