Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7698b4fc authored by David S. Miller
Browse files

pkt_sched: Add and use qdisc_root() and qdisc_root_lock().



When code wants to lock the qdisc tree state, the logical
operation it's doing is locking the top-level qdisc that
sits at the root of the netdev_queue.

Add qdisc_root_lock() to represent this and convert the
easiest cases.

In order for this to work out in all cases, we have to
hook up the noop_qdisc to a dummy netdev_queue.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent e2627c8c
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -161,6 +161,18 @@ struct tcf_proto
	struct tcf_proto_ops	*ops;
};

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	return &root->dev_queue->lock;
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
+4 −4
Original line number Diff line number Diff line
@@ -633,7 +633,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_RATE]) {
			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
						&sch->dev_queue->lock,
						qdisc_root_lock(sch),
						tca[TCA_RATE]);
			if (err) {
				/*
@@ -675,7 +675,7 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
	}
	if (tca[TCA_RATE])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      &sch->dev_queue->lock, tca[TCA_RATE]);
				      qdisc_root_lock(sch), tca[TCA_RATE]);
	return 0;
}

@@ -967,7 +967,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
	q->qstats.qlen = q->q.qlen;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1216,7 +1216,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
+5 −4
Original line number Diff line number Diff line
@@ -1744,12 +1744,13 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(&sch->dev_queue->lock);
		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(&sch->dev_queue->lock);
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
@@ -1828,7 +1829,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t

		if (tca[TCA_RATE])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev_queue->lock,
					      qdisc_root_lock(sch),
					      tca[TCA_RATE]);
		return 0;
	}
@@ -1919,7 +1920,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t

	if (tca[TCA_RATE])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev_queue->lock, tca[TCA_RATE]);
				  qdisc_root_lock(sch), tca[TCA_RATE]);

	*arg = (unsigned long)cl;
	return 0;
+16 −5
Original line number Diff line number Diff line
@@ -151,14 +151,17 @@ static inline int qdisc_restart(struct netdev_queue *txq,
{
	int ret = NETDEV_TX_BUSY;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	if (unlikely((skb = dequeue_skb(q)) == NULL))
		return 0;

	/* And release queue */
	spin_unlock(&txq->lock);
	root_lock = qdisc_root_lock(q);

	/* And release qdisc */
	spin_unlock(root_lock);

	dev = txq->dev;

@@ -167,7 +170,7 @@ static inline int qdisc_restart(struct netdev_queue *txq,
		ret = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);

	spin_lock(&txq->lock);
	spin_lock(root_lock);

	switch (ret) {
	case NETDEV_TX_OK:
@@ -345,12 +348,18 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.owner		=	THIS_MODULE,
};

/* Dummy netdev_queue backing noop_qdisc.  Its ->qdisc points back at
 * noop_qdisc, so qdisc_root() / qdisc_root_lock() resolve for the noop
 * qdisc too (it is effectively its own root, with its own lock).
 */
static struct netdev_queue noop_netdev_queue = {
	.lock		=	__SPIN_LOCK_UNLOCKED(noop_netdev_queue.lock),
	.qdisc		=	&noop_qdisc,
};

/* The built-in no-op qdisc (TCQ_F_BUILTIN).  Hooked up to the dummy
 * noop_netdev_queue so that code taking qdisc_root_lock() works even
 * when a device has no real qdisc attached.
 * NOTE(review): enqueue/dequeue point at noop_enqueue/noop_dequeue,
 * defined elsewhere in this file — presumably drop/return-NULL stubs.
 */
struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.dev_queue	=	&noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);

@@ -666,19 +675,21 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc;
		root_lock = qdisc_root_lock(q);

		if (lock)
			spin_lock_bh(&dev_queue->lock);
			spin_lock_bh(root_lock);

		val = test_bit(__QDISC_STATE_RUNNING, &q->state);

		if (lock)
			spin_unlock_bh(&dev_queue->lock);
			spin_unlock_bh(root_lock);

		if (val)
			return true;
+2 −2
Original line number Diff line number Diff line
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,

		if (tca[TCA_RATE])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev_queue->lock,
					      qdisc_root_lock(sch),
					      tca[TCA_RATE]);
		return 0;
	}
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,

	if (tca[TCA_RATE])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev_queue->lock, tca[TCA_RATE]);
				  qdisc_root_lock(sch), tca[TCA_RATE]);
	*arg = (unsigned long)cl;
	return 0;
}
Loading