Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f984d024 authored by Linus Torvalds
Browse files
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  igb: fix link reporting when using sgmii
  igb: prevent skb_over panic w/ mtu smaller than 1K
  igb: Fix DCA errors and do not use context index for 82576
  ipv6: compile fix for ip6mr.c
  packet: Avoid lock_sock in mmap handler
  sfc: Replace stats_enabled flag with a disable count
  sfc: SFX7101/SFT9001: Fix AN advertisements
  sfc: SFT9001: Always enable XNP exchange on SFT9001 rev B
  sfc: Update board info for hardware monitor on SFN4111T-R5 and later
  sfc: Test for PHYXS faults whenever we cannot test link state bits
  sfc: Reinitialise the PHY completely in case of a PHY or NIC reset
  sfc: Fix post-reset MAC selection
  sfc: SFN4111T: Fix GPIO sharing between I2C and FLASH_CFG_1
  sfc: SFT9001: Fix speed reporting in 1G PHY loopback
  sfc: SFX7101: Remove workaround for bad link training
  sfc: SFT9001: Enable robust link training
  sky2: fix hard hang with netconsoling and iface going up
parents fc8744ad 5d0932a5
Loading
Loading
Loading
Loading
+9 −2
Original line number Diff line number Diff line
@@ -699,11 +699,18 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)

	/* SGMII link check is done through the PCS register. */
	if ((hw->phy.media_type != e1000_media_type_copper) ||
	    (igb_sgmii_active_82575(hw)))
	    (igb_sgmii_active_82575(hw))) {
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
		                                             &duplex);
	else
		/*
		 * Use this flag to determine if link needs to be checked or
		 * not.  If  we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}
+4 −5
Original line number Diff line number Diff line
@@ -300,11 +300,10 @@ struct igb_adapter {

#define IGB_FLAG_HAS_MSI           (1 << 0)
#define IGB_FLAG_MSI_ENABLE        (1 << 1)
#define IGB_FLAG_HAS_DCA           (1 << 2)
#define IGB_FLAG_DCA_ENABLED       (1 << 3)
#define IGB_FLAG_IN_NETPOLL        (1 << 5)
#define IGB_FLAG_QUAD_PORT_A       (1 << 6)
#define IGB_FLAG_NEED_CTX_IDX      (1 << 7)
#define IGB_FLAG_DCA_ENABLED       (1 << 2)
#define IGB_FLAG_IN_NETPOLL        (1 << 3)
#define IGB_FLAG_QUAD_PORT_A       (1 << 4)
#define IGB_FLAG_NEED_CTX_IDX      (1 << 5)

enum e1000_state_t {
	__IGB_TESTING,
+10 −14
Original line number Diff line number Diff line
@@ -206,10 +206,11 @@ static int __init igb_init_module(void)

	global_quad_port_a = 0;

	ret = pci_register_driver(&igb_driver);
#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

@@ -1156,11 +1157,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,

	/* set flags */
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_82575:
		adapter->flags |= IGB_FLAG_HAS_DCA;
		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
		break;
	case e1000_82576:
	default:
		break;
	}
@@ -1310,8 +1310,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
		goto err_register;

#ifdef CONFIG_IGB_DCA
	if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
	    (dca_add_requester(&pdev->dev) == 0)) {
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		/* Always use CB2 mode, difference is masked
@@ -1835,11 +1834,11 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
	rctl |= E1000_RCTL_SECRC;

	/*
	 * disable store bad packets, long packet enable, and clear size bits.
	 * disable store bad packets and clear size bits.
	 */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256);
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	if (adapter->netdev->mtu > ETH_DATA_LEN)
	/* enable LPE when to prevent packets larger than max_frame_size */
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
@@ -1865,7 +1864,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
	 */
	/* allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames */
	if (rctl & E1000_RCTL_LPE) {
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
		srrctl |= adapter->rx_ps_hdr_size <<
			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
@@ -3473,19 +3472,16 @@ static int __igb_notify_dca(struct device *dev, void *data)
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IGB_FLAG_HAS_DCA))
		goto out;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
@@ -3502,7 +3498,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
		}
		break;
	}
out:

	return 0;
}

+41 −18
Original line number Diff line number Diff line
@@ -676,9 +676,8 @@ static int efx_init_port(struct efx_nic *efx)
	rc = efx->phy_op->init(efx);
	if (rc)
		return rc;
	efx->phy_op->reconfigure(efx);

	mutex_lock(&efx->mac_lock);
	efx->phy_op->reconfigure(efx);
	rc = falcon_switch_mac(efx);
	mutex_unlock(&efx->mac_lock);
	if (rc)
@@ -686,7 +685,7 @@ static int efx_init_port(struct efx_nic *efx)
	efx->mac_op->reconfigure(efx);

	efx->port_initialized = true;
	efx->stats_enabled = true;
	efx_stats_enable(efx);
	return 0;

fail:
@@ -735,6 +734,7 @@ static void efx_fini_port(struct efx_nic *efx)
	if (!efx->port_initialized)
		return;

	efx_stats_disable(efx);
	efx->phy_op->fini(efx);
	efx->port_initialized = false;

@@ -1361,6 +1361,20 @@ static int efx_net_stop(struct net_device *net_dev)
	return 0;
}

void efx_stats_disable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	++efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}

void efx_stats_enable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	--efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
@@ -1369,12 +1383,12 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them (or resetting the NIC); slightly stale
	 * stats are acceptable.
	 * is updating them or if MAC stats fetches are temporarily
	 * disabled; slightly stale stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (efx->stats_enabled) {
	if (!efx->stats_disable_count) {
		efx->mac_op->update_stats(efx);
		falcon_update_nic_stats(efx);
	}
@@ -1622,16 +1636,12 @@ static void efx_unregister_netdev(struct efx_nic *efx)

/* Tears down the entire software state and most of the hardware state
 * before reset.  */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
void efx_reset_down(struct efx_nic *efx, enum reset_type method,
		    struct ethtool_cmd *ecmd)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* The net_dev->get_stats handler is quite slow, and will fail
	 * if a fetch is pending over reset. Serialise against it. */
	spin_lock(&efx->stats_lock);
	efx->stats_enabled = false;
	spin_unlock(&efx->stats_lock);

	efx_stats_disable(efx);
	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
	mutex_lock(&efx->spi_lock);
@@ -1639,6 +1649,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
	efx->phy_op->get_settings(efx, ecmd);

	efx_fini_channels(efx);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
}

/* This function will always ensure that the locks acquired in
@@ -1646,7 +1658,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
int efx_reset_up(struct efx_nic *efx, enum reset_type method,
		 struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

@@ -1658,6 +1671,15 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
		ok = false;
	}

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		if (ok) {
			rc = efx->phy_op->init(efx);
			if (rc)
				ok = false;
		} else
			efx->port_initialized = false;
	}

	if (ok) {
		efx_init_channels(efx);

@@ -1670,7 +1692,7 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)

	if (ok) {
		efx_start_all(efx);
		efx->stats_enabled = true;
		efx_stats_enable(efx);
	}
	return rc;
}
@@ -1702,7 +1724,7 @@ static int efx_reset(struct efx_nic *efx)

	EFX_INFO(efx, "resetting (%d)\n", method);

	efx_reset_down(efx, &ecmd);
	efx_reset_down(efx, method, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
@@ -1721,10 +1743,10 @@ static int efx_reset(struct efx_nic *efx)

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		efx_reset_up(efx, &ecmd, false);
		efx_reset_up(efx, method, &ecmd, false);
		rc = -EIO;
	} else {
		rc = efx_reset_up(efx, &ecmd, true);
		rc = efx_reset_up(efx, method, &ecmd, true);
	}

out_disable:
@@ -1876,6 +1898,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	efx->stats_disable_count = 1;
	mutex_init(&efx->mac_lock);
	efx->mac_op = &efx_dummy_mac_operations;
	efx->phy_op = &efx_dummy_phy_operations;
+6 −3
Original line number Diff line number Diff line
@@ -36,13 +36,16 @@ extern void efx_process_channel_now(struct efx_channel *channel);
extern void efx_flush_queues(struct efx_nic *efx);

/* Ports */
extern void efx_stats_disable(struct efx_nic *efx);
extern void efx_stats_enable(struct efx_nic *efx);
extern void efx_reconfigure_port(struct efx_nic *efx);
extern void __efx_reconfigure_port(struct efx_nic *efx);

/* Reset handling */
extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd);
extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd,
			bool ok);
extern void efx_reset_down(struct efx_nic *efx, enum reset_type method,
			   struct ethtool_cmd *ecmd);
extern int efx_reset_up(struct efx_nic *efx, enum reset_type method,
			struct ethtool_cmd *ecmd, bool ok);

/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
Loading