Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0075fa0f authored by Harshitha Ramamurthy's avatar Harshitha Ramamurthy Committed by Jeff Kirsher
Browse files

i40evf: Add support to apply cloud filters



This patch enables a tc filter to be applied as a cloud
filter for the VF. This patch adds functions which parse the
tc filter, extract the necessary fields needed to configure the
filter and package them in a virtchnl message to be sent to the
PF to apply them.

Signed-off-by: Harshitha Ramamurthy <harshitha.ramamurthy@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 3872c8d4
Loading
Loading
Loading
Loading
+48 −0
Original line number Original line Diff line number Diff line
@@ -54,6 +54,8 @@
#include <net/ip6_checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/pkt_cls.h>
#include <net/udp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>


#include "i40e_type.h"
#include "i40e_type.h"
#include <linux/avf/virtchnl.h>
#include <linux/avf/virtchnl.h>
@@ -184,6 +186,14 @@ struct i40evf_channel_config {
	u8 total_qps;
	u8 total_qps;
};
};


/* State of cloud filter: tracks whether the filter has been accepted,
 * is awaiting an add/del response from the PF, or is not configured.
 */
enum i40evf_cloud_filter_state_t {
	__I40EVF_CF_INVALID,	 /* cloud filter not added */
	__I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */
	__I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */
	__I40EVF_CF_ACTIVE,	 /* cloud filter is active */
};

/* Driver state. The order of these is important! */
/* Driver state. The order of these is important! */
enum i40evf_state_t {
enum i40evf_state_t {
	__I40EVF_STARTUP,		/* driver loaded, probe complete */
	__I40EVF_STARTUP,		/* driver loaded, probe complete */
@@ -205,6 +215,36 @@ enum i40evf_critical_section_t {
	__I40EVF_IN_REMOVE_TASK,	/* device being removed */
	__I40EVF_IN_REMOVE_TASK,	/* device being removed */
};
};


/* Fields a cloud filter can match on; combined into the field_flags
 * carried in the virtchnl filter message sent to the PF.
 */
#define I40EVF_CLOUD_FIELD_OMAC		0x01	/* outer MAC */
#define I40EVF_CLOUD_FIELD_IMAC		0x02	/* inner MAC */
#define I40EVF_CLOUD_FIELD_IVLAN	0x04	/* inner VLAN */
#define I40EVF_CLOUD_FIELD_TEN_ID	0x08	/* tenant (VNI) id */
#define I40EVF_CLOUD_FIELD_IIP		0x10	/* inner IP */

/* Supported field combinations */
#define I40EVF_CF_FLAGS_OMAC	I40EVF_CLOUD_FIELD_OMAC
#define I40EVF_CF_FLAGS_IMAC	I40EVF_CLOUD_FIELD_IMAC
#define I40EVF_CF_FLAGS_IMAC_IVLAN	(I40EVF_CLOUD_FIELD_IMAC |\
					 I40EVF_CLOUD_FIELD_IVLAN)
#define I40EVF_CF_FLAGS_IMAC_TEN_ID	(I40EVF_CLOUD_FIELD_IMAC |\
					 I40EVF_CLOUD_FIELD_TEN_ID)
#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC	(I40EVF_CLOUD_FIELD_OMAC |\
						 I40EVF_CLOUD_FIELD_IMAC |\
						 I40EVF_CLOUD_FIELD_TEN_ID)
#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID	(I40EVF_CLOUD_FIELD_IMAC |\
						 I40EVF_CLOUD_FIELD_IVLAN |\
						 I40EVF_CLOUD_FIELD_TEN_ID)
/* was I40E_CLOUD_FIELD_IIP: that is the PF driver's name and is not
 * defined in this (VF) header, so the macro could not be expanded
 */
#define I40EVF_CF_FLAGS_IIP	I40EVF_CLOUD_FIELD_IIP

/* bookkeeping of cloud filters */
struct i40evf_cloud_filter {
	enum i40evf_cloud_filter_state_t state;	/* add/del status with the PF */
	struct list_head list;	/* linkage in adapter->cloud_filter_list */
	struct virtchnl_filter f;	/* filter spec sent to the PF */
	unsigned long cookie;	/* tc-assigned cookie; identifies the filter */
	bool del;		/* filter needs to be deleted */
	bool add;		/* filter needs to be added */
};

/* board specific private data structure */
/* board specific private data structure */
struct i40evf_adapter {
struct i40evf_adapter {
	struct timer_list watchdog_timer;
	struct timer_list watchdog_timer;
@@ -287,6 +327,8 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT(20)
#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT(20)
#define I40EVF_FLAG_AQ_ENABLE_CHANNELS		BIT(21)
#define I40EVF_FLAG_AQ_ENABLE_CHANNELS		BIT(21)
#define I40EVF_FLAG_AQ_DISABLE_CHANNELS		BIT(22)
#define I40EVF_FLAG_AQ_DISABLE_CHANNELS		BIT(22)
#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER		BIT(23)
#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER		BIT(24)


	/* OS defined structs */
	/* OS defined structs */
	struct net_device *netdev;
	struct net_device *netdev;
@@ -335,6 +377,10 @@ struct i40evf_adapter {
	/* ADQ related members */
	/* ADQ related members */
	struct i40evf_channel_config ch_config;
	struct i40evf_channel_config ch_config;
	u8 num_tc;
	u8 num_tc;
	struct list_head cloud_filter_list;
	/* lock to protect access to the cloud filter list */
	spinlock_t cloud_filter_list_lock;
	u16 num_cloud_filters;
};
};




@@ -403,4 +449,6 @@ void i40evf_notify_client_open(struct i40e_vsi *vsi);
void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
void i40evf_enable_channels(struct i40evf_adapter *adapter);
void i40evf_enable_channels(struct i40evf_adapter *adapter);
void i40evf_disable_channels(struct i40evf_adapter *adapter);
void i40evf_disable_channels(struct i40evf_adapter *adapter);
void i40evf_add_cloud_filter(struct i40evf_adapter *adapter);
void i40evf_del_cloud_filter(struct i40evf_adapter *adapter);
#endif /* _I40EVF_H_ */
#endif /* _I40EVF_H_ */
+575 −5
Original line number Original line Diff line number Diff line
@@ -1041,6 +1041,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
	struct net_device *netdev = adapter->netdev;
	struct net_device *netdev = adapter->netdev;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_mac_filter *f;
	struct i40evf_mac_filter *f;
	struct i40evf_cloud_filter *cf;


	if (adapter->state <= __I40EVF_DOWN_PENDING)
	if (adapter->state <= __I40EVF_DOWN_PENDING)
		return;
		return;
@@ -1064,11 +1065,18 @@ void i40evf_down(struct i40evf_adapter *adapter)


	/* remove all VLAN filters */
	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		f->remove = true;
		vlf->remove = true;
	}
	}


	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);


	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __I40EVF_RESETTING) {
	    adapter->state != __I40EVF_RESETTING) {
		/* cancel any current operation */
		/* cancel any current operation */
@@ -1079,6 +1087,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
		 */
		 */
		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
	}
	}


@@ -1756,6 +1765,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
		goto watchdog_done;
		goto watchdog_done;
	}
	}


	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		i40evf_add_cloud_filter(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		i40evf_del_cloud_filter(adapter);
		goto watchdog_done;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));


	if (adapter->state == __I40EVF_RUNNING)
	if (adapter->state == __I40EVF_RUNNING)
@@ -1779,6 +1798,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
{
{
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_vlan_filter *fv, *fvtmp;
	struct i40evf_vlan_filter *fv, *fvtmp;
	struct i40evf_cloud_filter *cf, *cftmp;


	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;


@@ -1800,7 +1820,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)


	spin_lock_bh(&adapter->mac_vlan_list_lock);
	spin_lock_bh(&adapter->mac_vlan_list_lock);


	/* Delete all of the filters, both MAC and VLAN. */
	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		list_del(&f->list);
		kfree(f);
		kfree(f);
@@ -1813,6 +1833,14 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)


	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);


	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	i40evf_free_misc_irq(adapter);
	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_queues(adapter);
	i40evf_free_queues(adapter);
@@ -1842,9 +1870,11 @@ static void i40evf_reset_task(struct work_struct *work)
	struct i40evf_adapter *adapter = container_of(work,
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      struct i40evf_adapter,
						      reset_task);
						      reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_cloud_filter *cf;
	struct i40evf_mac_filter *f;
	struct i40evf_mac_filter *f;
	u32 reg_val;
	u32 reg_val;
	int i = 0, err;
	int i = 0, err;
@@ -1969,8 +1999,19 @@ static void i40evf_reset_task(struct work_struct *work)


	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);


	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
	i40evf_misc_irq_enable(adapter);
	i40evf_misc_irq_enable(adapter);


	mod_timer(&adapter->watchdog_timer, jiffies + 2);
	mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -2320,6 +2361,24 @@ static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
	return ret;
	return ret;
}
}


/**
 * i40evf_del_all_cloud_filters - delete all cloud filters
 * on the traffic classes
 * @adapter: board private structure
 **/
static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
{
	struct i40evf_cloud_filter *cf, *cftmp;

	/* _safe variant: entries are unlinked and freed while iterating */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

/**
/**
 * __i40evf_setup_tc - configure multiple traffic classes
 * __i40evf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @netdev: network interface device structure
@@ -2353,6 +2412,7 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
			adapter->num_tc = 0;
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			netif_tx_disable(netdev);
			i40evf_del_all_cloud_filters(adapter);
			adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
			adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
			goto exit;
			goto exit;
		} else {
		} else {
@@ -2417,6 +2477,499 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
	return ret;
	return ret;
}
}


/**
 * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel
 * @adapter: board private structure
 * @f: pointer to struct tc_cls_flower_offload describing the tc filter
 * @filter: pointer to cloud filter structure to fill in
 *
 * Extracts the dissector keys of the tc flower filter into the virtchnl
 * filter spec (@filter->f) to be sent to the PF.
 *
 * Returns 0 on success, -EOPNOTSUPP/-EINVAL or I40E_ERR_CONFIG on failure.
 */
static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
				   struct tc_cls_flower_offload *f,
				   struct i40evf_cloud_filter *filter)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u8 field_flags = 0;
	u16 addr_type = 0;
	u16 n_proto = 0;
	int i = 0;

	/* reject filters that match on anything we cannot express */
	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
			f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);

		if (mask->keyid != 0)
			field_flags |= I40EVF_CLOUD_FIELD_TEN_ID;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);

		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		/* ETH_P_ALL means "any protocol": clear key and mask */
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		n_proto = n_proto_key & n_proto_mask;
		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
			return -EINVAL;
		if (n_proto == ETH_P_IPV6) {
			/* specify flow type as TCP IPv6 */
			filter->f.flow_type = VIRTCHNL_TCP_V6_FLOW;
		}

		if (key->ip_proto != IPPROTO_TCP) {
			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
			return -EINVAL;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);

		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);
		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(mask->dst)) {
			if (is_broadcast_ether_addr(mask->dst)) {
				field_flags |= I40EVF_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
					mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(mask->src)) {
			if (is_broadcast_ether_addr(mask->src)) {
				field_flags |= I40EVF_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
					mask->src);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(key->dst))
			if (is_valid_ether_addr(key->dst) ||
			    is_multicast_ether_addr(key->dst)) {
				/* set the mask if a valid dst_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					filter->f.mask.tcp_spec.dst_mac[i] |=
									0xff;
				ether_addr_copy(filter->f.data.tcp_spec.dst_mac,
						key->dst);
			}

		if (!is_zero_ether_addr(key->src))
			if (is_valid_ether_addr(key->src) ||
			    is_multicast_ether_addr(key->src)) {
				/* set the mask if a valid src_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					filter->f.mask.tcp_spec.src_mac[i] |=
									0xff;
				ether_addr_copy(filter->f.data.tcp_spec.src_mac,
						key->src);
			}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mask->vlan_id) {
			if (mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
					mask->vlan_id);
				return I40E_ERR_CONFIG;
			}
		}
		/* NOTE(review): mask/data are set even when mask->vlan_id is
		 * zero — presumably intentional; confirm against PF handling
		 */
		filter->f.mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
		filter->f.data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		addr_type = key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		/* only exact-match (all-ones) masks are supported */
		if (mask->dst) {
			if (mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= I40EVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
					be32_to_cpu(mask->dst));
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->src) {
			if (mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= I40EVF_CLOUD_FIELD_IIP;
			} else {
				/* fix: report the offending src mask, not dst */
				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(mask->src));
				return I40E_ERR_CONFIG;
			}
		}

		if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
			return I40E_ERR_CONFIG;
		}
		if (key->dst) {
			filter->f.mask.tcp_spec.dst_ip[0] |=
							cpu_to_be32(0xffffffff);
			filter->f.data.tcp_spec.dst_ip[0] = key->dst;
		}
		if (key->src) {
			filter->f.mask.tcp_spec.src_ip[0] |=
							cpu_to_be32(0xffffffff);
			filter->f.data.tcp_spec.src_ip[0] = key->src;
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		/* validate mask, make sure it is not IPV6_ADDR_ANY */
		if (ipv6_addr_any(&mask->dst)) {
			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
				IPV6_ADDR_ANY);
			return I40E_ERR_CONFIG;
		}

		/* src and dest IPv6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&key->dst) ||
		    ipv6_addr_loopback(&key->src)) {
			dev_err(&adapter->pdev->dev,
				"ipv6 addr should not be loopback\n");
			return I40E_ERR_CONFIG;
		}
		if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
			field_flags |= I40EVF_CLOUD_FIELD_IIP;

		/* NOTE(review): s6_addr is an array, so these conditions are
		 * always true; the dst/src addresses are always copied
		 */
		if (key->dst.s6_addr) {
			for (i = 0; i < 4; i++)
				filter->f.mask.tcp_spec.dst_ip[i] |=
							cpu_to_be32(0xffffffff);
			memcpy(&filter->f.data.tcp_spec.dst_ip,
			       &key->dst.s6_addr32,
			       sizeof(filter->f.data.tcp_spec.dst_ip));
		}
		if (key->src.s6_addr) {
			for (i = 0; i < 4; i++)
				filter->f.mask.tcp_spec.src_ip[i] |=
							cpu_to_be32(0xffffffff);
			memcpy(&filter->f.data.tcp_spec.src_ip,
			       &key->src.s6_addr32,
			       sizeof(filter->f.data.tcp_spec.src_ip));
		}
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);

		if (mask->src) {
			if (mask->src == cpu_to_be16(0xffff)) {
				field_flags |= I40EVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
					be16_to_cpu(mask->src));
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->dst) {
			if (mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= I40EVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
					be16_to_cpu(mask->dst));
				return I40E_ERR_CONFIG;
			}
		}
		if (key->dst) {
			filter->f.mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
			filter->f.data.tcp_spec.dst_port = key->dst;
		}

		if (key->src) {
			filter->f.mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
			/* fix: copy the source port, not the dest port */
			filter->f.data.tcp_spec.src_port = key->src;
		}
	}
	filter->f.field_flags = field_flags;

	return 0;
}

/**
 * i40evf_handle_tclass - Forward to a traffic class on the device
 * @adapter: board private structure
 * @tc: traffic class index on the device
 * @filter: pointer to cloud filter structure
 *
 * Configures @filter to redirect matching traffic to traffic class @tc.
 * TC0 needs no redirect action. Returns 0 on success, -EINVAL if a
 * non-TC0 redirect is requested without a destination port match.
 */
static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
				struct i40evf_cloud_filter *filter)
{
	if (!tc)
		return 0;

	/* redirecting to a TC other than TC0 requires a dest port match */
	if (tc < adapter->num_tc && !filter->f.data.tcp_spec.dst_port) {
		dev_err(&adapter->pdev->dev,
			"Specify destination port to redirect to traffic class other than TC0\n");
		return -EINVAL;
	}

	/* redirect to a traffic class on the same device */
	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
	filter->f.action_meta = tc;
	return 0;
}

/**
 * i40evf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 */
static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
				      struct tc_cls_flower_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	struct i40evf_cloud_filter *filter = NULL;
	int err = 0, count = 50;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			return -EINVAL;
	}

	if (tc < 0) {
		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter) {
		err = -ENOMEM;
		goto clearout;
	}
	filter->cookie = cls_flower->cookie;

	/* set the mask to all zeroes to begin with */
	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
	err = i40evf_parse_cls_flower(adapter, cls_flower, filter);
	if (err < 0)
		goto err;

	err = i40evf_handle_tclass(adapter, tc, filter);
	if (err < 0)
		goto err;

	/* add filter to the list */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_add_tail(&filter->list, &adapter->cloud_filter_list);
	adapter->num_cloud_filters++;
	filter->add = true;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
	if (err)
		kfree(filter);
clearout:
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return err;
}

/* i40evf_find_cf - Find the cloud filter in the list
 * @adapter: Board private structure
 * @cookie: filter specific cookie
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * cloud_filter_list_lock.
 */
static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter,
						  unsigned long *cookie)
{
	struct i40evf_cloud_filter *filter = NULL;

	if (!cookie)
		return NULL;

	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	}
	return NULL;
}

/**
 * i40evf_delete_clsflower - Remove tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 * Marks the matching cloud filter for deletion and asks the watchdog to
 * send the request to the PF. Returns 0 on success, -EINVAL if no filter
 * matches the tc cookie.
 */
static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
				   struct tc_cls_flower_offload *cls_flower)
{
	struct i40evf_cloud_filter *cf;
	int ret = -EINVAL;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	cf = i40evf_find_cf(adapter, &cls_flower->cookie);
	if (cf) {
		cf->del = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
		ret = 0;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return ret;
}

/**
 * i40evf_setup_tc_cls_flower - flower classifier offloads
 * @adapter: board private structure
 * @cls_flower: pointer to struct tc_cls_flower_offload
 *
 * Dispatches the flower command to the add/delete handlers.
 */
static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
				      struct tc_cls_flower_offload *cls_flower)
{
	/* filters on non-zero chains are not supported */
	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return i40evf_configure_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return i40evf_delete_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_STATS:
		/* per-filter statistics are not supported by this driver */
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

/**
 * i40evf_setup_tc_block_cb - block callback for tc
 * @type: type of offload
 * @type_data: offload data
 * @cb_priv: adapter pointer registered at block-bind time
 *
 * This function is the block callback for traffic classes
 **/
static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return i40evf_setup_tc_cls_flower(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * i40evf_setup_tc_block - register callbacks for tc
 * @dev: network interface device structure
 * @f: tc offload data
 *
 * This function registers block callbacks for tc
 * offloads
 **/
static int i40evf_setup_tc_block(struct net_device *dev,
				 struct tc_block_offload *f)
{
	struct i40evf_adapter *adapter = netdev_priv(dev);

	/* only ingress-bound blocks are supported */
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
					     adapter, adapter);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
					adapter);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/**
/**
 * i40evf_setup_tc - configure multiple traffic classes
 * i40evf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @netdev: network interface device structure
@@ -2431,10 +2984,14 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			   void *type_data)
			   void *type_data)
{
{
	if (type != TC_SETUP_QDISC_MQPRIO)
	switch (type) {
		return -EOPNOTSUPP;
	case TC_SETUP_QDISC_MQPRIO:

		return __i40evf_setup_tc(netdev, type_data);
		return __i40evf_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return i40evf_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
}


/**
/**
@@ -2819,6 +3376,9 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX);
				NETIF_F_HW_VLAN_CTAG_RX);
	/* Enable cloud filter if ADQ is supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
		hw_features |= NETIF_F_HW_TC;


	netdev->hw_features |= hw_features;
	netdev->hw_features |= hw_features;


@@ -3186,9 +3746,11 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	mutex_init(&hw->aq.arq_mutex);
	mutex_init(&hw->aq.arq_mutex);


	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);


	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);


	INIT_WORK(&adapter->reset_task, i40evf_reset_task);
	INIT_WORK(&adapter->reset_task, i40evf_reset_task);
	INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
	INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
@@ -3315,6 +3877,7 @@ static void i40evf_remove(struct pci_dev *pdev)
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_vlan_filter *vlf, *vlftmp;
	struct i40evf_vlan_filter *vlf, *vlftmp;
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_cloud_filter *cf, *cftmp;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_hw *hw = &adapter->hw;
	int err;
	int err;
	/* Indicate we are in remove and not to run reset_task */
	/* Indicate we are in remove and not to run reset_task */
@@ -3385,6 +3948,13 @@ static void i40evf_remove(struct pci_dev *pdev)


	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);


	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	free_netdev(netdev);
	free_netdev(netdev);


	pci_disable_pcie_error_reporting(pdev);
	pci_disable_pcie_error_reporting(pdev);