Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 45cc3a0c authored by David S. Miller
Browse files


Ben Hutchings says:

====================
More refactoring and cleanup, particularly around filter management.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 35fdb94b b766630b
Loading
Loading
Loading
Loading
+0 −1
Original line number Original line Diff line number Diff line
sfc-y			+= efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
sfc-y			+= efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
			   filter.o \
			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
			   tenxpress.o txc43128_phy.o falcon_boards.o \
			   tenxpress.o txc43128_phy.o falcon_boards.o \
			   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
			   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
+52 −44
Original line number Original line Diff line number Diff line
@@ -17,7 +17,6 @@
#include <linux/ip.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/gfp.h>
@@ -339,6 +338,7 @@ static void efx_init_eventq(struct efx_channel *channel)
	channel->eventq_read_ptr = 0;
	channel->eventq_read_ptr = 0;


	efx_nic_init_eventq(channel);
	efx_nic_init_eventq(channel);
	channel->eventq_init = true;
}
}


/* Enable event queue processing and NAPI */
/* Enable event queue processing and NAPI */
@@ -367,10 +367,14 @@ static void efx_stop_eventq(struct efx_channel *channel)


static void efx_fini_eventq(struct efx_channel *channel)
static void efx_fini_eventq(struct efx_channel *channel)
{
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);
		  "chan %d fini event queue\n", channel->channel);


	efx_nic_fini_eventq(channel);
	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}
}


static void efx_remove_eventq(struct efx_channel *channel)
static void efx_remove_eventq(struct efx_channel *channel)
@@ -606,7 +610,7 @@ static void efx_start_datapath(struct efx_nic *efx)


	/* RX filters also have scatter-enabled flags */
	/* RX filters also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
	if (efx->rx_scatter != old_rx_scatter)
		efx_filter_update_rx_scatter(efx);
		efx->type->filter_update_rx_scatter(efx);


	/* We must keep at least one descriptor in a TX ring empty.
	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * We could avoid this when the queue size does not exactly
@@ -871,10 +875,9 @@ void efx_link_status_changed(struct efx_nic *efx)
	/* Status message for kernel log */
	/* Status message for kernel log */
	if (link_state->up)
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   efx->net_dev->mtu);
			   (efx->promiscuous ? " [PROMISC]" : ""));
	else
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
		netif_info(efx, link, efx->net_dev, "link down\n");
}
}
@@ -923,10 +926,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)


	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	WARN_ON(!mutex_is_locked(&efx->mac_lock));


	/* Serialise the promiscuous flag with efx_set_rx_mode. */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	/* Disable PHY transmit in mac level loopbacks */
	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
	if (LOOPBACK_INTERNAL(efx))
@@ -1084,6 +1083,7 @@ static int efx_init_io(struct efx_nic *efx)
{
{
	struct pci_dev *pci_dev = efx->pci_dev;
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc;
	int rc;


	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1136,20 +1136,18 @@ static int efx_init_io(struct efx_nic *efx)
		rc = -EIO;
		rc = -EIO;
		goto fail3;
		goto fail3;
	}
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
				       efx->type->mem_map_size);
	if (!efx->membase) {
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys,
			  (unsigned long long)efx->membase_phys, mem_map_size);
			  efx->type->mem_map_size);
		rc = -ENOMEM;
		rc = -ENOMEM;
		goto fail4;
		goto fail4;
	}
	}
	netif_dbg(efx, probe, efx->net_dev,
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys,
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->type->mem_map_size, efx->membase);
		  efx->membase);


	return 0;
	return 0;


@@ -1228,8 +1226,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
 */
 */
static int efx_probe_interrupts(struct efx_nic *efx)
static int efx_probe_interrupts(struct efx_nic *efx)
{
{
	unsigned int max_channels =
		min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	unsigned int extra_channels = 0;
	unsigned int extra_channels = 0;
	unsigned int i, j;
	unsigned int i, j;
	int rc;
	int rc;
@@ -1246,7 +1242,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
		if (separate_tx_channels)
		if (separate_tx_channels)
			n_channels *= 2;
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels += extra_channels;
		n_channels = min(n_channels, max_channels);
		n_channels = min(n_channels, efx->max_channels);


		for (i = 0; i < n_channels; i++)
		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
			xentries[i].entry = i;
@@ -1497,6 +1493,44 @@ static void efx_remove_nic(struct efx_nic *efx)
	efx->type->remove(efx);
	efx->type->remove(efx);
}
}


/* Set up the NIC's filter table and, when accelerated RFS is compiled
 * in and the NIC type advertises ntuple offload, allocate the per-filter
 * flow-ID tracking array used by the RFS code.
 *
 * Returns 0 on success or a negative error code; on failure no partial
 * state is left behind (the filter table is removed again).
 */
static int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	/* Initialise the lock guarding filter table state */
	spin_lock_init(&efx->filter_lock);

	rc = efx->type->filter_table_probe(efx);
	if (rc)
		return rc;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		/* One zero-initialised flow-ID slot per possible RX IP
		 * filter supported by this NIC type.
		 */
		efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
					   sizeof(*efx->rps_flow_id),
					   GFP_KERNEL);
		if (!efx->rps_flow_id) {
			/* Undo the table probe so we fail cleanly */
			efx->type->filter_table_remove(efx);
			return -ENOMEM;
		}
	}
#endif

	return 0;
}

/* Inverse of efx_probe_filters(): free the RFS flow-ID array (if any)
 * and remove the NIC-type filter table.
 */
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	/* kfree(NULL) is a no-op, so this is safe even if the flow-ID
	 * array was never allocated (NIC without NETIF_F_NTUPLE).
	 */
	kfree(efx->rps_flow_id);
#endif
	efx->type->filter_table_remove(efx);
}

/* Re-insert the software filter table into the hardware; delegates
 * entirely to the NIC-type hook.  NOTE(review): presumably called after
 * a device reset — confirm against the callers.
 */
static void efx_restore_filters(struct efx_nic *efx)
{
	efx->type->filter_table_restore(efx);
}

/**************************************************************************
/**************************************************************************
 *
 *
 * NIC startup/shutdown
 * NIC startup/shutdown
@@ -1987,30 +2021,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_rx_mode(struct net_device *net_dev)
static void efx_set_rx_mode(struct net_device *net_dev)
{
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_nic *efx = netdev_priv(net_dev);
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}


	if (efx->port_enabled)
	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
		queue_work(efx->workqueue, &efx->mac_work);
@@ -2489,8 +2499,6 @@ static int efx_init_struct(struct efx_nic *efx,
		efx->msi_context[i].index = i;
		efx->msi_context[i].index = i;
	}
	}


	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);
				  interrupt_mode);
+86 −21
Original line number Original line Diff line number Diff line
@@ -68,27 +68,92 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))


/* Filters */
/* Filters */
extern int efx_probe_filters(struct efx_nic *efx);

extern void efx_restore_filters(struct efx_nic *efx);
/**
extern void efx_remove_filters(struct efx_nic *efx);
 * efx_filter_insert_filter - add or replace a filter
extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
 * @efx: NIC in which to insert the filter
extern s32 efx_filter_insert_filter(struct efx_nic *efx,
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If an existing filter has equal match values to the new filter
 * spec, then the new filter might replace it, depending on the
 * relative priorities.  If the existing filter has lower priority, or
 * if @replace_equal is set and it has equal priority, then it is
 * replaced.  Otherwise the function fails, returning -%EPERM if
 * the existing filter has higher priority or -%EEXIST if it has
 * equal priority.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
					   struct efx_filter_spec *spec,
					   struct efx_filter_spec *spec,
				    bool replace);
					   bool replace_equal)
extern int efx_filter_remove_id_safe(struct efx_nic *efx,
{
	return efx->type->filter_insert(efx, spec, replace_equal);
}

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
					    enum efx_filter_priority priority,
					    enum efx_filter_priority priority,
				     u32 filter_id);
					    u32 filter_id)
extern int efx_filter_get_filter_safe(struct efx_nic *efx,
{
	return efx->type->filter_remove_safe(efx, priority, filter_id);
}

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
			   enum efx_filter_priority priority,
			   enum efx_filter_priority priority,
				      u32 filter_id, struct efx_filter_spec *);
			   u32 filter_id, struct efx_filter_spec *spec)
extern void efx_filter_clear_rx(struct efx_nic *efx,
{
				enum efx_filter_priority priority);
	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
}
				    enum efx_filter_priority priority);

extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
/**
extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
 * efx_farch_filter_clear_rx - remove RX filters by priority
 * @efx: NIC from which to remove the filters
 * @priority: Maximum priority to remove
 */
/**
 * efx_filter_clear_rx - remove RX filters by priority
 * @efx: NIC from which to remove the filters
 * @priority: Maximum priority to remove
 *
 * Delegates to the NIC-type filter_clear_rx hook.
 */
static inline void efx_filter_clear_rx(struct efx_nic *efx,
				       enum efx_filter_priority priority)
{
	/* No "return" here: a return statement with an expression is a
	 * constraint violation in a void function (C11 6.8.6.4), even
	 * though some compilers accept it as an extension.
	 */
	efx->type->filter_clear_rx(efx, priority);
}

/* Count RX filters for @priority; semantics are defined by the
 * NIC-type filter_count_rx_used hook this simply forwards to.
 */
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
					   enum efx_filter_priority priority)
{
	return efx->type->filter_count_rx_used(efx, priority);
}
/* Report the RX filter ID limit for this NIC; forwards to the
 * NIC-type filter_get_rx_id_limit hook.
 */
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
					enum efx_filter_priority priority,
					enum efx_filter_priority priority,
				 u32 *buf, u32 size);
					u32 *buf, u32 size)
{
	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
#ifdef CONFIG_RFS_ACCEL
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
			  u16 rxq_index, u32 flow_id);
			  u16 rxq_index, u32 flow_id);
+120 −95
Original line number Original line Diff line number Diff line
@@ -799,11 +799,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
	return efx_reset(efx, rc);
	return efx_reset(efx, rc);
}
}


/* MAC address mask including only MC flag */
/* MAC address mask including only I/G bit */
static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };


#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
#define PORT_FULL_MASK		((__force __be16)~0)
#define PORT_FULL_MASK		((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK	((__force __be16)~0)


static int efx_ethtool_get_class_rule(struct efx_nic *efx,
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
				      struct ethtool_rx_flow_spec *rule)
@@ -813,8 +814,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct efx_filter_spec spec;
	struct efx_filter_spec spec;
	u16 vid;
	u8 proto;
	int rc;
	int rc;


	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
@@ -822,44 +821,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
	if (rc)
	if (rc)
		return rc;
		return rc;


	if (spec.dmaq_id == 0xfff)
	if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		rule->ring_cookie = RX_CLS_FLOW_DISC;
		rule->ring_cookie = RX_CLS_FLOW_DISC;
	else
	else
		rule->ring_cookie = spec.dmaq_id;
		rule->ring_cookie = spec.dmaq_id;


	if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
	if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
		rule->flow_type = ETHER_FLOW;
	    spec.ether_type == htons(ETH_P_IP) &&
		memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
		if (spec.type == EFX_FILTER_MC_DEF)
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
			memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
	    !(spec.match_flags &
		return 0;
	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V4_FLOW : UDP_V4_FLOW);
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			ip_entry->ip4dst = spec.loc_host[0];
			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		}

		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
	rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
			ip_entry->ip4src = spec.rem_host[0];
	if (rc == 0) {
			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
			ip_entry->pdst = spec.loc_port;
			ip_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
			ip_entry->psrc = spec.rem_port;
			ip_mask->psrc = PORT_FULL_MASK;
		}
	} else if (!(spec.match_flags &
		     ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
		       EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
		       EFX_FILTER_MATCH_OUTER_VID))) {
		rule->flow_type = ETHER_FLOW;
		rule->flow_type = ETHER_FLOW;
		if (spec.match_flags &
		    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
			memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
			if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
				memset(mac_mask->h_dest, ~0, ETH_ALEN);
				memset(mac_mask->h_dest, ~0, ETH_ALEN);
		if (vid != EFX_FILTER_VID_UNSPEC) {
			else
			rule->flow_type |= FLOW_EXT;
				memcpy(mac_mask->h_dest, mac_addr_ig_mask,
			rule->h_ext.vlan_tci = htons(vid);
				       ETH_ALEN);
			rule->m_ext.vlan_tci = htons(0xfff);
		}
		}
		return 0;
		if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
			memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
			memset(mac_mask->h_source, ~0, ETH_ALEN);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
			mac_entry->h_proto = spec.ether_type;
			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
		}
	} else {
		/* The above should handle all filters that we insert */
		WARN_ON(1);
		return -EINVAL;
	}
	}


	rc = efx_filter_get_ipv4_local(&spec, &proto,
	if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
				       &ip_entry->ip4dst, &ip_entry->pdst);
		rule->flow_type |= FLOW_EXT;
	if (rc != 0) {
		rule->h_ext.vlan_tci = spec.outer_vid;
		rc = efx_filter_get_ipv4_full(
		rule->m_ext.vlan_tci = htons(0xfff);
			&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
			&ip_entry->ip4src, &ip_entry->psrc);
		EFX_WARN_ON_PARANOID(rc);
		ip_mask->ip4src = IP4_ADDR_FULL_MASK;
		ip_mask->psrc = PORT_FULL_MASK;
	}
	}
	rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;

	ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
	ip_mask->pdst = PORT_FULL_MASK;
	return rc;
	return rc;
}
}


@@ -967,82 +994,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
			   0xfff : rule->ring_cookie);
			   EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);


	switch (rule->flow_type) {
	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
	case UDP_V4_FLOW:
		u8 proto = (rule->flow_type == TCP_V4_FLOW ?
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IP);
		spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
				 IPPROTO_TCP : IPPROTO_UDP);
				 IPPROTO_TCP : IPPROTO_UDP);

		if (ip_mask->ip4dst) {
		/* Must match all of destination, */
			if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
		if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
		      ip_mask->pdst == PORT_FULL_MASK))
				return -EINVAL;
				return -EINVAL;
		/* all or none of source, */
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		if ((ip_mask->ip4src || ip_mask->psrc) &&
			spec.loc_host[0] = ip_entry->ip4dst;
		    !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
		}
		      ip_mask->psrc == PORT_FULL_MASK))
		if (ip_mask->ip4src) {
			if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
				return -EINVAL;
		/* and nothing else */
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
		if (ip_mask->tos || rule->m_ext.vlan_tci)
			spec.rem_host[0] = ip_entry->ip4src;
		}
		if (ip_mask->pdst) {
			if (ip_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
				return -EINVAL;

			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		if (ip_mask->ip4src)
			spec.loc_port = ip_entry->pdst;
			rc = efx_filter_set_ipv4_full(&spec, proto,
						      ip_entry->ip4dst,
						      ip_entry->pdst,
						      ip_entry->ip4src,
						      ip_entry->psrc);
		else
			rc = efx_filter_set_ipv4_local(&spec, proto,
						       ip_entry->ip4dst,
						       ip_entry->pdst);
		if (rc)
			return rc;
		break;
		}
		}

		if (ip_mask->psrc) {
	case ETHER_FLOW | FLOW_EXT:
			if (ip_mask->psrc != PORT_FULL_MASK)
	case ETHER_FLOW: {
		u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
				     ntohs(rule->m_ext.vlan_tci) : 0);

		/* Must not match on source address or Ethertype */
		if (!is_zero_ether_addr(mac_mask->h_source) ||
		    mac_mask->h_proto)
				return -EINVAL;
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip_entry->psrc;
		}
		if (ip_mask->tos)
			return -EINVAL;
		break;


		/* Is it a default UC or MC filter? */
	case ETHER_FLOW:
		if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
		if (!is_zero_ether_addr(mac_mask->h_dest)) {
		    vlan_tag_mask == 0) {
			if (ether_addr_equal(mac_mask->h_dest,
			if (is_multicast_ether_addr(mac_entry->h_dest))
					     mac_addr_ig_mask))
				rc = efx_filter_set_mc_def(&spec);
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
			else if (is_broadcast_ether_addr(mac_mask->h_dest))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
			else
			else
				rc = efx_filter_set_uc_def(&spec);
				return -EINVAL;
			memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
		}
		}
		/* Otherwise, it must match all of destination and all
		if (!is_zero_ether_addr(mac_mask->h_source)) {
		 * or none of VID.
			if (!is_broadcast_ether_addr(mac_mask->h_source))
		 */
				return -EINVAL;
		else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
			spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
			 (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
			memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
			rc = efx_filter_set_eth_local(
				&spec,
				vlan_tag_mask ?
				ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
				mac_entry->h_dest);
		} else {
			rc = -EINVAL;
		}
		}
		if (rc)
		if (mac_mask->h_proto) {
			return rc;
			if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
		break;
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
			spec.ether_type = mac_entry->h_proto;
		}
		}
		break;


	default:
	default:
		return -EINVAL;
		return -EINVAL;
	}
	}


	if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
		if (rule->m_ext.vlan_tci != htons(0xfff))
			return -EINVAL;
		spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		spec.outer_vid = rule->h_ext.vlan_tci;
	}

	rc = efx_filter_insert_filter(efx, &spec, true);
	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
	if (rc < 0)
		return rc;
		return rc;
+82 −49

File changed.

Preview size limit exceeded, changes collapsed.

Loading