
Commit 73867881 authored by Pablo Neira Ayuso, committed by David S. Miller

drivers: net: use flow action infrastructure



This patch updates drivers to use the new flow action infrastructure.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3b1903ef
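
For orientation, the pattern each driver below is converted to looks roughly like this. It is a hedged, illustrative sketch for kernel context (it will not build standalone); the function name my_parse_actions and the stubbed-out case bodies are assumptions, while the types and helpers (struct flow_rule, struct flow_action_entry, tc_cls_flower_offload_flow_rule(), flow_action_has_entries(), flow_action_for_each()) are the ones this patch switches the drivers to:

#include <net/pkt_cls.h>
#include <net/flow_offload.h>

/* Illustrative only: walk the offloaded actions via the new
 * flow action infrastructure instead of tcf_exts/tc_action.
 */
static int my_parse_actions(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
	struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(&rule->action))
		return -EINVAL;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			/* program a hardware drop */
			break;
		case FLOW_ACTION_REDIRECT:
			/* act->dev replaces tcf_mirred_dev() */
			break;
		case FLOW_ACTION_VLAN_PUSH:
			/* act->vlan.{vid,prio,proto} replace the tcf_vlan_*() getters */
			break;
		case FLOW_ACTION_MANGLE:
			/* act->mangle.{htype,mask,val,offset} replace the per-key
			 * tcf_pedit_*() getters; the drivers below drop their
			 * per-key loops accordingly.
			 */
			break;
		default:
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

In short, the tcf_exts_for_each_action() loops with is_tcf_*() predicates become a flow_action_for_each() loop dispatching on act->id, and the per-action getters become fields of struct flow_action_entry.
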
+33 −41
@@ -61,9 +61,9 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)

static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
			       const struct flow_action_entry *act)
{
	struct net_device *dev = tcf_mirred_dev(tc_act);
	struct net_device *dev = act->dev;

	if (!dev) {
		netdev_info(bp->dev, "no dev in mirred action");
@@ -77,16 +77,16 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,

static int bnxt_tc_parse_vlan(struct bnxt *bp,
			      struct bnxt_tc_actions *actions,
			      const struct tc_action *tc_act)
			      const struct flow_action_entry *act)
{
	switch (tcf_vlan_action(tc_act)) {
	case TCA_VLAN_ACT_POP:
	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
		break;
	case TCA_VLAN_ACT_PUSH:
	case FLOW_ACTION_VLAN_PUSH:
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
		actions->push_vlan_tci = htons(act->vlan.vid);
		actions->push_vlan_tpid = act->vlan.proto;
		break;
	default:
		return -EOPNOTSUPP;
@@ -96,10 +96,10 @@ static int bnxt_tc_parse_vlan(struct bnxt *bp,

static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
				    struct bnxt_tc_actions *actions,
				    const struct tc_action *tc_act)
				    const struct flow_action_entry *act)
{
	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
	struct ip_tunnel_key *tun_key = &tun_info->key;
	const struct ip_tunnel_info *tun_info = act->tunnel;
	const struct ip_tunnel_key *tun_key = &tun_info->key;

	if (ip_tunnel_info_af(tun_info) != AF_INET) {
		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
@@ -113,51 +113,43 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,

static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct tcf_exts *tc_exts)
				 struct flow_action *flow_action)
{
	const struct tc_action *tc_act;
	struct flow_action_entry *act;
	int i, rc;

	if (!tcf_exts_has_actions(tc_exts)) {
	if (!flow_action_has_entries(flow_action)) {
		netdev_info(bp->dev, "no actions");
		return -EINVAL;
	}

	tcf_exts_for_each_action(i, tc_act, tc_exts) {
		/* Drop action */
		if (is_tcf_gact_shot(tc_act)) {
	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		}

		/* Redirect action */
		if (is_tcf_mirred_egress_redirect(tc_act)) {
			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
		case FLOW_ACTION_REDIRECT:
			rc = bnxt_tc_parse_redir(bp, actions, act);
			if (rc)
				return rc;
			continue;
		}

		/* Push/pop VLAN */
		if (is_tcf_vlan(tc_act)) {
			rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE:
			rc = bnxt_tc_parse_vlan(bp, actions, act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel encap */
		if (is_tcf_tunnel_set(tc_act)) {
			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			rc = bnxt_tc_parse_tunnel_set(bp, actions, act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel decap */
		if (is_tcf_tunnel_release(tc_act)) {
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
			continue;
			break;
		default:
			break;
		}
	}

@@ -308,7 +300,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
		flow->tun_mask.tp_src = match.mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
	return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
+127 −123
@@ -292,7 +292,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
@@ -310,7 +310,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
@@ -320,7 +320,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
@@ -348,7 +348,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -361,7 +361,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -380,56 +380,63 @@ static void cxgb4_process_flow_actions(struct net_device *in,
				       struct tc_cls_flower_offload *cls,
				       struct ch_filter_specification *fs)
{
	const struct tc_action *a;
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
	struct flow_action_entry *act;
	int i;

	tcf_exts_for_each_action(i, a, cls->exts) {
		if (is_tcf_gact_ok(a)) {
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			fs->action = FILTER_PASS;
		} else if (is_tcf_gact_shot(a)) {
			break;
		case FLOW_ACTION_DROP:
			fs->action = FILTER_DROP;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out = tcf_mirred_dev(a);
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
		} else if (is_tcf_vlan(a)) {
			u32 vlan_action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case TCA_VLAN_ACT_PUSH:
			case FLOW_ACTION_VLAN_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case TCA_VLAN_ACT_MODIFY:
			case FLOW_ACTION_VLAN_MANGLE:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
		} else if (is_tcf_pedit(a)) {
			}
			break;
		case FLOW_ACTION_MANGLE: {
			u32 mask, val, offset;
			int nkeys, i;
			u8 htype;

			nkeys = tcf_pedit_nkeys(a);
			for (i = 0; i < nkeys; i++) {
				htype = tcf_pedit_htype(a, i);
				mask = tcf_pedit_mask(a, i);
				val = tcf_pedit_val(a, i);
				offset = tcf_pedit_offset(a, i);
			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;

				process_pedit_field(fs, val, mask, offset,
						    htype);
			process_pedit_field(fs, val, mask, offset, htype);
			}
			break;
		default:
			break;
		}
	}
}
@@ -448,27 +455,17 @@ static bool valid_l4_mask(u32 mask)
}

static bool valid_pedit_action(struct net_device *dev,
			       const struct tc_action *a)
			       const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 cmd, htype;
	int nkeys, i;

	nkeys = tcf_pedit_nkeys(a);
	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		mask = tcf_pedit_mask(a, i);
		offset = tcf_pedit_offset(a, i);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
			netdev_err(dev, "%s: Unsupported pedit cmd\n",
				   __func__);
			return false;
		}
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
@@ -480,7 +477,7 @@ static bool valid_pedit_action(struct net_device *dev,
			return false;
		}
		break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
		case PEDIT_IP4_DST:
@@ -491,7 +488,7 @@ static bool valid_pedit_action(struct net_device *dev,
			return false;
		}
		break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
@@ -508,7 +505,7 @@ static bool valid_pedit_action(struct net_device *dev,
			return false;
		}
		break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
@@ -523,7 +520,7 @@ static bool valid_pedit_action(struct net_device *dev,
			return false;
		}
		break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
@@ -539,35 +536,35 @@ static bool valid_pedit_action(struct net_device *dev,
		}
		break;
	default:
			netdev_err(dev, "%s: Unsupported pedit type\n",
				   __func__);
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	}
	return true;
}

static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct tc_cls_flower_offload *cls)
{
	const struct tc_action *a;
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
	struct flow_action_entry *act;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	int i;

	tcf_exts_for_each_action(i, a, cls->exts) {
		if (is_tcf_gact_ok(a)) {
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
			/* Do nothing */
		} else if (is_tcf_gact_shot(a)) {
			/* Do nothing */
		} else if (is_tcf_mirred_egress_redirect(a)) {
			break;
		case FLOW_ACTION_REDIRECT: {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			unsigned int i;
			bool found = false;

			target_dev = tcf_mirred_dev(a);
			target_dev = act->dev;
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
@@ -585,15 +582,18 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
				return -EINVAL;
			}
			act_redir = true;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 vlan_action = tcf_vlan_action(a);
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				break;
			case TCA_VLAN_ACT_PUSH:
			case TCA_VLAN_ACT_MODIFY:
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
@@ -606,13 +606,17 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
				return -EOPNOTSUPP;
			}
			act_vlan = true;
		} else if (is_tcf_pedit(a)) {
			bool pedit_valid = valid_pedit_action(dev, a);
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
		} else {
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
+123 −142
@@ -1811,11 +1811,11 @@ struct pedit_headers_action {
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
@@ -1825,7 +1825,7 @@ static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
	if (hdr_type >= 2)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
@@ -1900,10 +1900,10 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
	__be16 mask_be16;
	void *action;

	set_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].masks;
	add_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].masks;
	set_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].vals;
	add_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].vals;
	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
@@ -2028,43 +2028,33 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 const struct flow_action_entry *act, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 struct netlink_ext_ack *extack)
{
	int nkeys, i, err = -EOPNOTSUPP;
	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
	int err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;
	u8 htype;

	nkeys = tcf_pedit_nkeys(a);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
	htype = act->mangle.htype;
	err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "legacy pedit isn't offloaded");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
		goto out_err;
	}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);
	mask = act->mangle.mask;
	val = act->mangle.val;
	offset = act->mangle.offset;

	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
	if (err)
		goto out_err;

	hdrs[cmd].pedits++;
	}

	return 0;
out_err:
@@ -2139,15 +2129,15 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
}

static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct tcf_exts *exts,
					  struct flow_action *flow_action,
					  struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	const struct flow_action_entry *act;
	bool modify_ip_header;
	u8 htype, ip_proto;
	void *headers_v;
	u16 ethertype;
	int nkeys, i;
	int i;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
@@ -2157,22 +2147,18 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
		goto out_ok;

	modify_ip_header = false;
	tcf_exts_for_each_action(i, a, exts) {
		int k;

		if (!is_tcf_pedit(a))
	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE &&
		    act->id != FLOW_ACTION_ADD)
			continue;

		nkeys = tcf_pedit_nkeys(a);
		for (k = 0; k < nkeys; k++) {
			htype = tcf_pedit_htype(a, k);
			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
		htype = act->mangle.htype;
		if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
		    htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
			modify_ip_header = true;
			break;
		}
	}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
@@ -2188,7 +2174,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
}

static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct tcf_exts *exts,
				    struct flow_action *flow_action,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
@@ -2205,7 +2191,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
		return false;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec, exts,
		return modify_header_match_supported(&parse_attr->spec,
						     flow_action,
						     extack);

	return true;
@@ -2225,53 +2212,50 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
	return (fsystem_guid == psystem_guid);
}

static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers_action hdrs[__PEDIT_CMD_MAX] = {};
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	struct pedit_headers_action hdrs[2] = {};
	const struct flow_action_entry *act;
	u32 action = 0;
	int err, i;

	if (!tcf_exts_has_actions(exts))
	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a)) {
	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a),
						   act->csum_flags,
						   extack))
				continue;
				break;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *peer_dev = tcf_mirred_dev(a);
		case FLOW_ACTION_REDIRECT: {
			struct net_device *peer_dev = act->dev;

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
@@ -2286,11 +2270,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
					    peer_dev->name);
				return -EINVAL;
			}
			continue;
			}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);
			break;
		case FLOW_ACTION_MARK: {
			u32 mark = act->mark;

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
@@ -2300,11 +2283,12 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
			}

			break;
		default:
			return -EINVAL;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
@@ -2315,7 +2299,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
@@ -2420,7 +2404,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
}

static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct tc_action *a,
				const struct flow_action_entry *act,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
@@ -2429,7 +2413,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
@@ -2439,10 +2424,11 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
	} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
		attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
		attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
		break;
	case FLOW_ACTION_VLAN_PUSH:
		attr->vlan_vid[vlan_idx] = act->vlan.vid;
		attr->vlan_prio[vlan_idx] = act->vlan.prio;
		attr->vlan_proto[vlan_idx] = act->vlan.proto;
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

@@ -2454,13 +2440,15 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
			     tcf_vlan_push_prio(a)))
			    (act->vlan.proto != htons(ETH_P_8021Q) ||
			     act->vlan.prio))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
	} else { /* action is TCA_VLAN_ACT_MODIFY */
		break;
	default:
		/* action is FLOW_ACT_VLAN_MANGLE */
		return -EOPNOTSUPP;
	}

@@ -2469,59 +2457,56 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
	return 0;
}

static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers_action hdrs[__PEDIT_CMD_MAX] = {};
	struct pedit_headers_action hdrs[2] = {};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	const struct ip_tunnel_info *info = NULL;
	const struct flow_action_entry *act;
	bool encap = false;
	u32 action = 0;
	int err, i;

	if (!tcf_exts_has_actions(exts))
	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	attr->in_rep = rpriv->rep;
	attr->in_mdev = priv->mdev;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a)) {
	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->split_count = attr->out_count;
			continue;
		}

		if (is_tcf_csum(a)) {
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a),
						   extack))
				continue;
						   act->csum_flags, extack))
				break;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = tcf_mirred_dev(a);
			out_dev = act->dev;
			if (!out_dev) {
				/* out_dev is NULL when filters with
				 * non-existing mirred device are replayed to
@@ -2586,35 +2571,29 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
			}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			info = act->tunnel;
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			err = parse_tc_vlan_action(priv, a, attr, &action);

			break;
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
			err = parse_tc_vlan_action(priv, act, attr, &action);
			if (err)
				return err;

			attr->split_count = attr->out_count;
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		if (is_tcf_gact_goto_chain(a)) {
			u32 dest_chain = tcf_gact_goto_chain_index(a);
			break;
		case FLOW_ACTION_GOTO: {
			u32 dest_chain = act->chain_index;
			u32 max_chain = mlx5_eswitch_get_chain_range(esw);

			if (dest_chain <= attr->chain) {
@@ -2627,12 +2606,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
			}
			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = dest_chain;

			continue;
			break;
			}

		default:
			return -EINVAL;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
@@ -2643,7 +2622,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->dest_chain) {
@@ -2754,6 +2733,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -2775,7 +2755,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,

	flow->esw_attr->chain = f->common.chain_index;
	flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
	err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
	err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

@@ -2891,6 +2871,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
@@ -2913,7 +2894,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

+1 −1
@@ -588,7 +588,7 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
{
	u8 ethertype;

	if (action == TCA_VLAN_ACT_MODIFY) {
	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
+33 −21 (one more file changed; diff collapsed: preview size limit exceeded)