
Commit e37268eb authored by David S. Miller

Merge branch 'add-flow_rule-infrastructure'



Pablo Neira Ayuso says:

====================
add flow_rule infrastructure

This patchset, as is, allows us to reuse the driver codebase to
configure ACL hardware offloads for both the ethtool_rxnfc and the TC
flower interfaces. A few clients of this infrastructure are presented
for reference, such as the bcm_sf2 and qede drivers. Moreover, all of
the existing drivers in the tree are converted to use this
infrastructure.

This patchset reuses the existing flow dissector infrastructure that
was introduced by Jiri Pirko et al., so the number of abstractions it
adds is minimal: just a few wrapper structures for the selector side
of the rules. To express actions, it exposes an action API based on
the existing TC action infrastructure and on what existing drivers
already support on that front. (A sketch of the resulting driver-side
pattern follows the commit metadata below.)

v7: This patchset is a rebase on top of the net-next tree, after
    addressing questions and feedback from driver developers in the
    last batch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents d9b5a675 37c5d3ef
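
For orientation, the driver-side pattern this series converges on looks
roughly like the sketch below. This is a minimal illustration, not code
from the series: my_hw and the my_hw_*() helpers are hypothetical
stand-ins, while tc_cls_flower_offload_flow_rule(),
flow_rule_match_key(), flow_rule_match_basic(), flow_action_for_each()
and the FLOW_ACTION_* ids are the helpers the diffs below actually use.

#include <net/pkt_cls.h>
#include <net/flow_offload.h>

/* Hypothetical cls_flower offload callback using the new flow_rule API.
 * Match keys arrive as {key, mask} wrapper structs; actions arrive as a
 * flat array of flow_action_entry instead of TC-specific tcf_exts.
 */
static int my_flower_replace(struct my_hw *hw,
			     struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_action_entry *act;
	int i;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		/* match.key/match.mask replace the old GET_KEY()/GET_MASK()
		 * skb_flow_dissector_target() boilerplate.
		 */
		my_hw_set_ethertype(hw, match.key->n_proto,
				    match.mask->n_proto);
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			my_hw_act_drop(hw);
			break;
		case FLOW_ACTION_REDIRECT:
			my_hw_act_redirect(hw, act->dev);
			break;
		default:
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

Because both cls_flower and (via a translator) ethtool_rxnfc produce a
struct flow_rule, a driver writes this parsing code once and serves both
interfaces with it.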
drivers/net/dsa/bcm_sf2_cfp.c: +67 −35
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
@@ -257,7 +258,8 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
}

static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct ethtool_tcpip4_spec *v4_spec,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   unsigned int slice_num,
				   bool mask)
{
@@ -278,7 +280,7 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(v4_spec->pdst) >> 8;
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
@@ -289,9 +291,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
@@ -302,9 +304,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
@@ -317,8 +319,8 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
@@ -332,9 +334,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
@@ -343,13 +349,9 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
@@ -367,11 +369,22 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

@@ -398,7 +411,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));
@@ -417,8 +430,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);
@@ -426,14 +439,14 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		return ret;
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
				      queue_num, true);
	if (ret)
		return ret;
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -446,6 +459,10 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
	fs->location = rule_index;

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}

static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
@@ -582,8 +599,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
@@ -645,6 +666,15 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}
	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

@@ -688,10 +718,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
				slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
				SLICE_NUM_MASK, true);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,20 +729,20 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err;
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
				      queue_num, false);
	if (ret)
		goto out_err;
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
@@ -748,10 +778,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));

	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
			       false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
			       SLICE_NUM_MASK, true);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
@@ -759,7 +789,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err;
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
@@ -768,7 +798,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
				      queue_num, true);
	if (ret)
		goto out_err;
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -784,6 +814,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
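
The ethtool leg of that design is what the hunks above add:
ethtool_rx_flow_rule_create() translates a struct ethtool_rx_flow_spec
into the same struct flow_rule representation that cls_flower produces,
so bcm_sf2 now programs its TCAM from flow_match_* wrappers on both
paths. A condensed sketch of the pattern, assembled from the IPv4 hunks
above (error labels and register programming elided):

	struct ethtool_rx_flow_spec_input input = {};
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;

	input.fs = fs;	/* the ethtool_rx_flow_spec from userspace */
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);	/* ipv4.key / ipv4.mask */
	flow_rule_match_ports(flow->rule, &ports);	/* ports.key / ports.mask */

	/* ... program the match and mask slices from ipv4/ports ... */

	/* Release the translated rule once done with it; the error paths
	 * above unwind through ethtool_rx_flow_rule_destroy() as well.
	 */
	ethtool_rx_flow_rule_destroy(flow);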
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c: +109 −143
@@ -61,9 +61,9 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)

static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
			       const struct flow_action_entry *act)
{
	struct net_device *dev = tcf_mirred_dev(tc_act);
	struct net_device *dev = act->dev;

	if (!dev) {
		netdev_info(bp->dev, "no dev in mirred action");
@@ -77,16 +77,16 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,

static int bnxt_tc_parse_vlan(struct bnxt *bp,
			      struct bnxt_tc_actions *actions,
			      const struct tc_action *tc_act)
			      const struct flow_action_entry *act)
{
	switch (tcf_vlan_action(tc_act)) {
	case TCA_VLAN_ACT_POP:
	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
		break;
	case TCA_VLAN_ACT_PUSH:
	case FLOW_ACTION_VLAN_PUSH:
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
		actions->push_vlan_tci = htons(act->vlan.vid);
		actions->push_vlan_tpid = act->vlan.proto;
		break;
	default:
		return -EOPNOTSUPP;
@@ -96,10 +96,10 @@ static int bnxt_tc_parse_vlan(struct bnxt *bp,

static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
				    struct bnxt_tc_actions *actions,
				    const struct tc_action *tc_act)
				    const struct flow_action_entry *act)
{
	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
	struct ip_tunnel_key *tun_key = &tun_info->key;
	const struct ip_tunnel_info *tun_info = act->tunnel;
	const struct ip_tunnel_key *tun_key = &tun_info->key;

	if (ip_tunnel_info_af(tun_info) != AF_INET) {
		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
@@ -113,51 +113,43 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,

static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct tcf_exts *tc_exts)
				 struct flow_action *flow_action)
{
	const struct tc_action *tc_act;
	struct flow_action_entry *act;
	int i, rc;

	if (!tcf_exts_has_actions(tc_exts)) {
	if (!flow_action_has_entries(flow_action)) {
		netdev_info(bp->dev, "no actions");
		return -EINVAL;
	}

	tcf_exts_for_each_action(i, tc_act, tc_exts) {
		/* Drop action */
		if (is_tcf_gact_shot(tc_act)) {
	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		}

		/* Redirect action */
		if (is_tcf_mirred_egress_redirect(tc_act)) {
			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
		case FLOW_ACTION_REDIRECT:
			rc = bnxt_tc_parse_redir(bp, actions, act);
			if (rc)
				return rc;
			continue;
		}

		/* Push/pop VLAN */
		if (is_tcf_vlan(tc_act)) {
			rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE:
			rc = bnxt_tc_parse_vlan(bp, actions, act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel encap */
		if (is_tcf_tunnel_set(tc_act)) {
			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			rc = bnxt_tc_parse_tunnel_set(bp, actions, act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel decap */
		if (is_tcf_tunnel_release(tc_act)) {
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
			continue;
			break;
		default:
			break;
		}
	}

@@ -177,18 +169,12 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
	return 0;
}

#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)

static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(tc_flow_cmd);
	struct flow_dissector *dissector = rule->match.dissector;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
@@ -198,143 +184,123 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;
		flow_rule_match_basic(rule, &match);
		flow->l2_key.ether_type = match.key->n_proto;
		flow->l2_mask.ether_type = match.mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		if (match.key->n_proto == htons(ETH_P_IP) ||
		    match.key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = match.key->ip_proto;
			flow->l4_mask.ip_proto = match.mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
		ether_addr_copy(flow->l2_key.dmac, match.key->dst);
		ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);
		ether_addr_copy(flow->l2_key.smac, match.key->src);
		ether_addr_copy(flow->l2_mask.smac, match.mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		flow->l2_key.inner_vlan_tci =
		   cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
			cpu_to_be16(VLAN_TCI(match.key->vlan_id,
					     match.key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
		   cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
			cpu_to_be16((VLAN_TCI(match.mask->vlan_id,
					      match.mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->l3_key.ipv4.daddr.s_addr = match.key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = match.key->src;
		flow->l3_mask.ipv4.saddr.s_addr = match.mask->src;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
		flow->l3_key.ipv6.daddr = match.key->dst;
		flow->l3_mask.ipv6.daddr = match.mask->dst;
		flow->l3_key.ipv6.saddr = match.key->src;
		flow->l3_mask.ipv6.saddr = match.mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
		flow->l4_key.ports.dport = match.key->dst;
		flow->l4_mask.ports.dport = match.mask->dst;
		flow->l4_key.ports.sport = match.key->src;
		flow->l4_mask.ports.sport = match.mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
		flow->l4_key.icmp.type = match.key->type;
		flow->l4_key.icmp.code = match.key->code;
		flow->l4_mask.icmp.type = match.mask->type;
		flow->l4_mask.icmp.code = match.mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
				GET_MASK(tc_flow_cmd,
					 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
		flow->tun_key.u.ipv4.dst = match.key->dst;
		flow->tun_mask.u.ipv4.dst = match.mask->dst;
		flow->tun_key.u.ipv4.src = match.key->src;
		flow->tun_mask.u.ipv4.src = match.mask->src;
	} else if (flow_rule_match_key(rule,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
		flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);
		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
		flow->tun_key.tp_dst = match.key->dst;
		flow->tun_mask.tp_dst = match.mask->dst;
		flow->tun_key.tp_src = match.key->src;
		flow->tun_mask.tp_src = match.mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
	return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
@@ -1422,7 +1388,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
	lastused = flow->lastused;
	spin_unlock(&flow->stats_lock);

	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
	flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
			  lastused);
	return 0;
}
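
The stats path moves to the same interface-neutral helpers: instead of
calling tcf_exts_stats_update() on TC internals, the driver reports its
counters through flow_stats_update(). A hedged sketch of the resulting
shape (my_flow and its fields stand in for the driver's bnxt_tc_flow
bookkeeping shown above):

static int my_flower_stats(struct my_flow *flow,
			   struct tc_cls_flower_offload *f)
{
	u64 bytes, packets, lastused;

	/* Snapshot the counters under the driver's own lock ... */
	spin_lock(&flow->stats_lock);
	bytes = flow->stats.bytes;
	packets = flow->stats.packets;
	lastused = flow->lastused;
	spin_unlock(&flow->stats_lock);

	/* ... then hand them to the core in one interface-neutral call. */
	flow_stats_update(&f->stats, bytes, packets, lastused);
	return 0;
}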
Three further files changed (+209 −241, +68 −110, +80 −115); their diffs are collapsed in this view.