Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ced273ea authored by Alexander Aring, committed by David S. Miller
Browse files

sched: act: ife: migrate to use per-cpu counters



This patch migrates the current counter handling, which is protected by a
spinlock, to a per-cpu counter handling. This reduces the time during which the
spinlock is being held.

Signed-off-by: Alexander Aring <aring@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 734534e9
Loading
Loading
Loading
Loading
+11 −18
Original line number Diff line number Diff line
@@ -477,7 +477,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
				     bind, false);
				     bind, true);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
@@ -638,19 +638,15 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
	u8 *tlv_data;
	u16 metalen;

	spin_lock(&ife->tcf_lock);
	bstats_update(&ife->tcf_bstats, skb);
	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);
	spin_unlock(&ife->tcf_lock);

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	tlv_data = ife_decode(skb, &metalen);
	if (unlikely(!tlv_data)) {
		spin_lock(&ife->tcf_lock);
		ife->tcf_qstats.drops++;
		spin_unlock(&ife->tcf_lock);
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

@@ -668,14 +664,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
			 */
			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
					    mtype, dlen);
			ife->tcf_qstats.overlimits++;
			qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		}
	}

	if (WARN_ON(tlv_data != ifehdr_end)) {
		spin_lock(&ife->tcf_lock);
		ife->tcf_qstats.drops++;
		spin_unlock(&ife->tcf_lock);
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

@@ -727,23 +721,20 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
			exceed_mtu = true;
	}

	spin_lock(&ife->tcf_lock);
	bstats_update(&ife->tcf_bstats, skb);
	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (!metalen) {		/* no metadata to send */
		/* abuse overlimits to count when we allow packet
		 * with no metadata
		 */
		ife->tcf_qstats.overlimits++;
		spin_unlock(&ife->tcf_lock);
		qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return action;
	}
	/* could be stupid policy setup or mtu config
	 * so lets be conservative.. */
	if ((action == TC_ACT_SHOT) || exceed_mtu) {
		ife->tcf_qstats.drops++;
		spin_unlock(&ife->tcf_lock);
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

@@ -752,6 +743,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,

	ife_meta = ife_encode(skb, metalen);

	spin_lock(&ife->tcf_lock);

	/* XXX: we dont have a clever way of telling encode to
	 * not repeat some of the computations that are done by
	 * ops->presence_check...
@@ -763,8 +756,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
		}
		if (err < 0) {
			/* too corrupt to keep around if overwritten */
			ife->tcf_qstats.drops++;
			spin_unlock(&ife->tcf_lock);
			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
			return TC_ACT_SHOT;
		}
		skboff += err;