
Commit c09fc2e1 authored by Jiri Pirko, committed by David S. Miller

net: sched: cls_flow: no need to call tcf_exts_change for newly allocated struct



As the fnew struct was just allocated, there is no need to use tcf_exts_change
to perform an atomic change; we can simply fill the not-yet-used exts struct
directly with tcf_exts_validate.

Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8cc62513
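
Editorial note: the gist of the change, condensed from the diff below (error paths and unrelated fields omitted; this is a sketch, not the literal kernel code):

	/* Before (removed): validate the actions into a temporary exts struct,
	 * then use tcf_exts_change() to swap it into fnew->exts atomically.
	 */
	struct tcf_exts e;

	err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err1;
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		goto err1;
	/* ... allocate fnew ... */
	tcf_exts_change(tp, &fnew->exts, &e);

	/* After (this commit): fnew was just kzalloc'ed and is not yet visible
	 * to any reader, so validate straight into fnew->exts; no temporary
	 * struct and no atomic swap are needed.
	 */
	err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
	if (err < 0)
		goto err2;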
+16 −25
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -388,7 +388,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	struct flow_filter *fold, *fnew;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_FLOW_MAX + 1];
-	struct tcf_exts e;
 	unsigned int nkeys = 0;
 	unsigned int perturb_period = 0;
 	u32 baseclass = 0;
@@ -424,31 +423,27 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 			return -EOPNOTSUPP;
 	}
 
-	err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-	if (err < 0)
-		goto err1;
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
-	if (err < 0)
-		goto err1;
-
-	err = -ENOBUFS;
 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
 	if (!fnew)
-		goto err1;
+		return -ENOBUFS;
 
 	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
 	if (err < 0)
-		goto err2;
+		goto err1;
 
 	err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
 	if (err < 0)
-		goto err3;
+		goto err2;
+
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
+	if (err < 0)
+		goto err2;
 
 	fold = (struct flow_filter *)*arg;
 	if (fold) {
 		err = -EINVAL;
 		if (fold->handle != handle && handle)
-			goto err3;
+			goto err2;
 
 		/* Copy fold into fnew */
 		fnew->tp = fold->tp;
@@ -468,31 +463,31 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		if (tb[TCA_FLOW_MODE])
 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
-			goto err3;
+			goto err2;
 
 		if (mode == FLOW_MODE_HASH)
 			perturb_period = fold->perturb_period;
 		if (tb[TCA_FLOW_PERTURB]) {
 			if (mode != FLOW_MODE_HASH)
-				goto err3;
+				goto err2;
 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 		}
 	} else {
 		err = -EINVAL;
 		if (!handle)
-			goto err3;
+			goto err2;
 		if (!tb[TCA_FLOW_KEYS])
-			goto err3;
+			goto err2;
 
 		mode = FLOW_MODE_MAP;
 		if (tb[TCA_FLOW_MODE])
 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
-			goto err3;
+			goto err2;
 
 		if (tb[TCA_FLOW_PERTURB]) {
 			if (mode != FLOW_MODE_HASH)
-				goto err3;
+				goto err2;
 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 		}
 
@@ -510,8 +505,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	setup_deferrable_timer(&fnew->perturb_timer, flow_perturbation,
 			       (unsigned long)fnew);
 
-	tcf_exts_change(tp, &fnew->exts, &e);
-
 	netif_keep_dst(qdisc_dev(tp->q));
 
 	if (tb[TCA_FLOW_KEYS]) {
@@ -550,13 +543,11 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		call_rcu(&fold->rcu, flow_destroy_filter);
 	return 0;
 
-err3:
+err2:
 	tcf_exts_destroy(&fnew->exts);
 	tcf_em_tree_destroy(&fnew->ematches);
-err2:
-	kfree(fnew);
 err1:
-	tcf_exts_destroy(&e);
+	kfree(fnew);
 	return err;
 }