Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit abe303db authored by David S. Miller
Browse files


Ben Hutchings says:

====================
1. More workarounds for TX queue flush failures that can occur during
   interface reconfiguration.
2. Fix spurious failure of a firmware request running during a system
   clock change, e.g. ntpd started at the same time as driver load.
3. Fix inconsistent statistics after a firmware upgrade.
4. Fix a variable (non-)initialisation in offline self-test that can
   make it more disruptive than intended.
5. Fix a race that can (at least) cause an assertion failure.
6. Miscellaneous cleanup.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents a20da984 b9cc977d
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -106,8 +106,8 @@ static struct workqueue_struct *reset_workqueue;
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
static bool separate_tx_channels;
module_param(separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

@@ -160,8 +160,8 @@ static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
@@ -2279,7 +2279,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	netif_device_detach(efx->net_dev);
	efx_device_detach_sync(efx);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
@@ -2758,7 +2758,7 @@ static int efx_pm_freeze(struct device *dev)
	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_UNINIT;

		netif_device_detach(efx->net_dev);
		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_stop_interrupts(efx, false);
+13 −0
Original line number Diff line number Diff line
@@ -163,4 +163,17 @@ extern void efx_link_status_changed(struct efx_nic *efx);
extern void efx_link_set_advertising(struct efx_nic *efx, u32);
extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;

	/* Detach the device while holding the TX lock, so that once
	 * netif_device_present() reports false no transmission can
	 * still be in flight: the TX scheduler is guaranteed to be
	 * quiescent by the time we return.
	 */
	netif_tx_lock(net_dev);
	netif_device_detach(net_dev);
	netif_tx_unlock(net_dev);
}

#endif /* EFX_EFX_H */
+14 −11
Original line number Diff line number Diff line
@@ -816,6 +816,9 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
/* MAC address mask including only MC flag */
static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };

#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
#define PORT_FULL_MASK		((__force __be16)~0)

static int efx_ethtool_get_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
@@ -865,12 +868,12 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
			&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
			&ip_entry->ip4src, &ip_entry->psrc);
		EFX_WARN_ON_PARANOID(rc);
		ip_mask->ip4src = ~0;
		ip_mask->psrc = ~0;
		ip_mask->ip4src = IP4_ADDR_FULL_MASK;
		ip_mask->psrc = PORT_FULL_MASK;
	}
	rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
	ip_mask->ip4dst = ~0;
	ip_mask->pdst = ~0;
	ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
	ip_mask->pdst = PORT_FULL_MASK;
	return rc;
}

@@ -971,7 +974,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,

	/* Check for unsupported extensions */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
	    (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
	     rule->m_ext.data[1]))
		return -EINVAL;

@@ -986,16 +989,16 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
			    IPPROTO_TCP : IPPROTO_UDP);

		/* Must match all of destination, */
		if ((__force u32)~ip_mask->ip4dst |
		    (__force u16)~ip_mask->pdst)
		if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
		      ip_mask->pdst == PORT_FULL_MASK))
			return -EINVAL;
		/* all or none of source, */
		if ((ip_mask->ip4src | ip_mask->psrc) &&
		    ((__force u32)~ip_mask->ip4src |
		     (__force u16)~ip_mask->psrc))
		if ((ip_mask->ip4src || ip_mask->psrc) &&
		    !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
		      ip_mask->psrc == PORT_FULL_MASK))
			return -EINVAL;
		/* and nothing else */
		if (ip_mask->tos | rule->m_ext.vlan_tci)
		if (ip_mask->tos || rule->m_ext.vlan_tci)
			return -EINVAL;

		if (ip_mask->ip4src)
+2 −0
Original line number Diff line number Diff line
@@ -1792,6 +1792,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = efx_port_dummy_op_void,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
@@ -1834,6 +1835,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = efx_port_dummy_op_void,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
+14 −29
Original line number Diff line number Diff line
@@ -22,22 +22,21 @@
 *
 * Notes on locking strategy:
 *
 * Most CSRs are 128-bit (oword) and therefore cannot be read or
 * written atomically.  Access from the host is buffered by the Bus
 * Interface Unit (BIU).  Whenever the host reads from the lowest
 * address of such a register, or from the address of a different such
 * register, the BIU latches the register's value.  Subsequent reads
 * from higher addresses of the same register will read the latched
 * value.  Whenever the host writes part of such a register, the BIU
 * collects the written value and does not write to the underlying
 * register until all 4 dwords have been written.  A similar buffering
 * scheme applies to host access to the NIC's 64-bit SRAM.
 * Many CSRs are very wide and cannot be read or written atomically.
 * Writes from the host are buffered by the Bus Interface Unit (BIU)
 * up to 128 bits.  Whenever the host writes part of such a register,
 * the BIU collects the written value and does not write to the
 * underlying register until all 4 dwords have been written.  A
 * similar buffering scheme applies to host access to the NIC's 64-bit
 * SRAM.
 *
 * Access to different CSRs and 64-bit SRAM words must be serialised,
 * since interleaved access can result in lost writes or lost
 * information from read-to-clear fields.  We use efx_nic::biu_lock
 * for this.  (We could use separate locks for read and write, but
 * this is not normally a performance bottleneck.)
 * Writes to different CSRs and 64-bit SRAM words must be serialised,
 * since interleaved access can result in lost writes.  We use
 * efx_nic::biu_lock for this.
 *
 * We also serialise reads from 128-bit CSRs and SRAM with the same
 * spinlock.  This may not be necessary, but it doesn't really matter
 * as there are no such reads on the fast path.
 *
 * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
 * 128-bit but are special-cased in the BIU to avoid the need for
@@ -204,20 +203,6 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}

/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM.
 *
 * NOTE(review): the index is scaled by sizeof(efx_oword_t) (an oword),
 * not sizeof(efx_dword_t) as the companion efx_readd_table() does —
 * presumably the table entries are oword-sized with only one dword
 * written per entry; confirm against callers before changing.
 */
static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
				       unsigned int reg, unsigned int index)
{
	efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
}

/* Read one 32-bit entry from a table of 32-bit CSRs, or 32-bit SRAM.
 * Table entries are dword-sized, so the byte offset of entry @index
 * is index * sizeof(efx_dword_t) from @reg.
 */
static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
				   unsigned int reg, unsigned int index)
{
	unsigned int addr = reg + index * sizeof(efx_dword_t);

	efx_readd(efx, value, addr);
}

/* Page-mapped register block size */
#define EFX_PAGE_BLOCK_SIZE 0x2000

Loading