Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 393159e9 authored by David S. Miller
Browse files

Merge branch 'xgbe-next'



Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2015-09-30

The following patches are included in this driver update series:

- Remove unneeded semi-colon
- Follow the DT/ACPI precedence used by the device_ APIs
- Add ethtool support for getting and setting the msglevel
- Add ethtool support error and debug messages
- Simplify the hardware FIFO assignment calculations
- Add receive buffer unavailable statistic
- Use the device workqueue instead of the system workqueue
- Remove the use of a link state bit

This patch series is based on net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents ac8cfc7b 50789845
Loading
Loading
Loading
Loading
+15 −68
Original line number Diff line number Diff line
@@ -1940,84 +1940,31 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
						  unsigned int queue_count)
{
	unsigned int q_fifo_size = 0;
	enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
	unsigned int q_fifo_size;
	unsigned int p_fifo;

	/* Calculate Tx/Rx fifo share per queue */
	switch (fifo_size) {
	case 0:
		q_fifo_size = XGBE_FIFO_SIZE_B(128);
		break;
	case 1:
		q_fifo_size = XGBE_FIFO_SIZE_B(256);
		break;
	case 2:
		q_fifo_size = XGBE_FIFO_SIZE_B(512);
		break;
	case 3:
		q_fifo_size = XGBE_FIFO_SIZE_KB(1);
		break;
	case 4:
		q_fifo_size = XGBE_FIFO_SIZE_KB(2);
		break;
	case 5:
		q_fifo_size = XGBE_FIFO_SIZE_KB(4);
		break;
	case 6:
		q_fifo_size = XGBE_FIFO_SIZE_KB(8);
		break;
	case 7:
		q_fifo_size = XGBE_FIFO_SIZE_KB(16);
		break;
	case 8:
		q_fifo_size = XGBE_FIFO_SIZE_KB(32);
		break;
	case 9:
		q_fifo_size = XGBE_FIFO_SIZE_KB(64);
		break;
	case 10:
		q_fifo_size = XGBE_FIFO_SIZE_KB(128);
		break;
	case 11:
		q_fifo_size = XGBE_FIFO_SIZE_KB(256);
		break;
	}
	/* Calculate the configured fifo size */
	q_fifo_size = 1 << (fifo_size + 7);

	/* The configured value is not the actual amount of fifo RAM */
	/* The configured value may not be the actual amount of fifo RAM */
	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);

	q_fifo_size = q_fifo_size / queue_count;

	/* Set the queue fifo size programmable value */
	if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
		p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
		p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
		p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
		p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
		p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
		p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
		p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
		p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
		p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
		p_fifo = XGMAC_MTL_FIFO_SIZE_512;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
		p_fifo = XGMAC_MTL_FIFO_SIZE_256;
	/* Each increment in the queue fifo size represents 256 bytes of
	 * fifo, with 0 representing 256 bytes. Distribute the fifo equally
	 * between the queues.
	 */
	p_fifo = q_fifo_size / 256;
	if (p_fifo)
		p_fifo--;

	return p_fifo;
}

static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	enum xgbe_mtl_fifo_size fifo_size;
	unsigned int fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
@@ -2033,7 +1980,7 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	enum xgbe_mtl_fifo_size fifo_size;
	unsigned int fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
@@ -2224,7 +2171,7 @@ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)

	default:
		read_hi = false;
	};
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

+9 −5
Original line number Diff line number Diff line
@@ -360,9 +360,12 @@ static irqreturn_t xgbe_isr(int irq, void *data)
			}
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);
			queue_work(pdata->dev_workqueue, &pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
@@ -384,7 +387,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				schedule_work(&pdata->tx_tstamp_work);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}
	}
@@ -450,7 +454,7 @@ static void xgbe_service_timer(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;

	schedule_work(&pdata->service_work);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}
@@ -891,7 +895,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	schedule_work(&pdata->service_work);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	DBGPR("<--xgbe_start\n");

@@ -1533,7 +1537,7 @@ static void xgbe_tx_timeout(struct net_device *netdev)
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
	queue_work(pdata->dev_workqueue, &pdata->restart_work);
}

static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+54 −52
Original line number Diff line number Diff line
@@ -179,6 +179,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
	XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
	XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
};

#define XGBE_STATS_COUNT	ARRAY_SIZE(xgbe_gstring_stats)
@@ -187,8 +188,6 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	int i;

	DBGPR("-->%s\n", __func__);

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < XGBE_STATS_COUNT; i++) {
@@ -198,8 +197,6 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
		}
		break;
	}

	DBGPR("<--%s\n", __func__);
}

static void xgbe_get_ethtool_stats(struct net_device *netdev,
@@ -209,23 +206,17 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
	u8 *stat;
	int i;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);
	for (i = 0; i < XGBE_STATS_COUNT; i++) {
		stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
		*data++ = *(u64 *)stat;
	}

	DBGPR("<--%s\n", __func__);
}

static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
{
	int ret;

	DBGPR("-->%s\n", __func__);

	switch (stringset) {
	case ETH_SS_STATS:
		ret = XGBE_STATS_COUNT;
@@ -235,8 +226,6 @@ static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
		ret = -EOPNOTSUPP;
	}

	DBGPR("<--%s\n", __func__);

	return ret;
}

@@ -245,13 +234,9 @@ static void xgbe_get_pauseparam(struct net_device *netdev,
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_get_pauseparam\n");

	pause->autoneg = pdata->phy.pause_autoneg;
	pause->tx_pause = pdata->phy.tx_pause;
	pause->rx_pause = pdata->phy.rx_pause;

	DBGPR("<--xgbe_get_pauseparam\n");
}

static int xgbe_set_pauseparam(struct net_device *netdev,
@@ -260,13 +245,11 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret = 0;

	DBGPR("-->xgbe_set_pauseparam\n");

	DBGPR("  autoneg = %d, tx_pause = %d, rx_pause = %d\n",
	      pause->autoneg, pause->tx_pause, pause->rx_pause);

	if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
	if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
		netdev_err(netdev,
			   "autoneg disabled, pause autoneg not avialable\n");
		return -EINVAL;
	}

	pdata->phy.pause_autoneg = pause->autoneg;
	pdata->phy.tx_pause = pause->tx_pause;
@@ -286,8 +269,6 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
	if (netif_running(netdev))
		ret = pdata->phy_if.phy_config_aneg(pdata);

	DBGPR("<--xgbe_set_pauseparam\n");

	return ret;
}

@@ -296,8 +277,6 @@ static int xgbe_get_settings(struct net_device *netdev,
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_get_settings\n");

	cmd->phy_address = pdata->phy.address;

	cmd->supported = pdata->phy.supported;
@@ -311,8 +290,6 @@ static int xgbe_get_settings(struct net_device *netdev,
	cmd->port = PORT_NONE;
	cmd->transceiver = XCVR_INTERNAL;

	DBGPR("<--xgbe_get_settings\n");

	return 0;
}

@@ -323,16 +300,20 @@ static int xgbe_set_settings(struct net_device *netdev,
	u32 speed;
	int ret;

	DBGPR("-->xgbe_set_settings\n");

	speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != pdata->phy.address)
	if (cmd->phy_address != pdata->phy.address) {
		netdev_err(netdev, "invalid phy address %hhu\n",
			   cmd->phy_address);
		return -EINVAL;
	}

	if ((cmd->autoneg != AUTONEG_ENABLE) &&
	    (cmd->autoneg != AUTONEG_DISABLE))
	    (cmd->autoneg != AUTONEG_DISABLE)) {
		netdev_err(netdev, "unsupported autoneg %hhu\n",
			   cmd->autoneg);
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (speed) {
@@ -341,16 +322,27 @@ static int xgbe_set_settings(struct net_device *netdev,
		case SPEED_1000:
			break;
		default:
			netdev_err(netdev, "unsupported speed %u\n", speed);
			return -EINVAL;
		}

		if (cmd->duplex != DUPLEX_FULL)
		if (cmd->duplex != DUPLEX_FULL) {
			netdev_err(netdev, "unsupported duplex %hhu\n",
				   cmd->duplex);
			return -EINVAL;
		}
	}

	netif_dbg(pdata, link, netdev,
		  "requested advertisement %#x, phy supported %#x\n",
		  cmd->advertising, pdata->phy.supported);

	cmd->advertising &= pdata->phy.supported;
	if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
	if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) {
		netdev_err(netdev,
			   "unsupported requested advertisement\n");
		return -EINVAL;
	}

	ret = 0;
	pdata->phy.autoneg = cmd->autoneg;
@@ -366,8 +358,6 @@ static int xgbe_set_settings(struct net_device *netdev,
	if (netif_running(netdev))
		ret = pdata->phy_if.phy_config_aneg(pdata);

	DBGPR("<--xgbe_set_settings\n");

	return ret;
}

@@ -388,13 +378,25 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
	drvinfo->n_stats = XGBE_STATS_COUNT;
}

static u32 xgbe_get_msglevel(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	return pdata->msg_enable;
}

static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	pdata->msg_enable = msglevel;
}

static int xgbe_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_get_coalesce\n");

	memset(ec, 0, sizeof(struct ethtool_coalesce));

	ec->rx_coalesce_usecs = pdata->rx_usecs;
@@ -402,8 +404,6 @@ static int xgbe_get_coalesce(struct net_device *netdev,

	ec->tx_max_coalesced_frames = pdata->tx_frames;

	DBGPR("<--xgbe_get_coalesce\n");

	return 0;
}

@@ -415,8 +415,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
	unsigned int rx_frames, rx_riwt, rx_usecs;
	unsigned int tx_frames;

	DBGPR("-->xgbe_set_coalesce\n");

	/* Check for not supported parameters  */
	if ((ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) ||
@@ -436,8 +434,10 @@ static int xgbe_set_coalesce(struct net_device *netdev,
	    (ec->rx_max_coalesced_frames_high) ||
	    (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_high) ||
	    (ec->rate_sample_interval))
	    (ec->rate_sample_interval)) {
		netdev_err(netdev, "unsupported coalescing parameter\n");
		return -EOPNOTSUPP;
	}

	rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
	rx_usecs = ec->rx_coalesce_usecs;
@@ -449,12 +449,12 @@ static int xgbe_set_coalesce(struct net_device *netdev,

	/* Check the bounds of values for Rx */
	if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
		netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
		netdev_err(netdev, "rx-usec is limited to %d usecs\n",
			   hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
		return -EINVAL;
	}
	if (rx_frames > pdata->rx_desc_count) {
		netdev_alert(netdev, "rx-frames is limited to %d frames\n",
		netdev_err(netdev, "rx-frames is limited to %d frames\n",
			   pdata->rx_desc_count);
		return -EINVAL;
	}
@@ -463,7 +463,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,

	/* Check the bounds of values for Tx */
	if (tx_frames > pdata->tx_desc_count) {
		netdev_alert(netdev, "tx-frames is limited to %d frames\n",
		netdev_err(netdev, "tx-frames is limited to %d frames\n",
			   pdata->tx_desc_count);
		return -EINVAL;
	}
@@ -476,8 +476,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
	pdata->tx_frames = tx_frames;
	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_set_coalesce\n");

	return 0;
}

@@ -539,8 +537,10 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int ret;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
		netdev_err(netdev, "unsupported hash function\n");
		return -EOPNOTSUPP;
	}

	if (indir) {
		ret = hw_if->set_rss_lookup_table(pdata, indir);
@@ -594,6 +594,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
	.get_settings = xgbe_get_settings,
	.set_settings = xgbe_set_settings,
	.get_drvinfo = xgbe_get_drvinfo,
	.get_msglevel = xgbe_get_msglevel,
	.set_msglevel = xgbe_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_coalesce = xgbe_get_coalesce,
	.set_coalesce = xgbe_set_coalesce,
+1 −1
Original line number Diff line number Diff line
@@ -371,7 +371,7 @@ static int xgbe_probe(struct platform_device *pdev)
	set_bit(XGBE_DOWN, &pdata->dev_state);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
	pdata->use_acpi = dev->of_node ? 0 : 1;

	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev) {
+4 −12
Original line number Diff line number Diff line
@@ -1115,7 +1115,6 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
	unsigned int reg, link_aneg;

	if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
		if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
		netif_carrier_off(pdata->netdev);

		pdata->phy.link = 0;
@@ -1142,10 +1141,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
			clear_bit(XGBE_LINK_INIT, &pdata->dev_state);

		if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
			set_bit(XGBE_LINK, &pdata->dev_state);
		netif_carrier_on(pdata->netdev);
		}
	} else {
		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
			xgbe_check_link_timeout(pdata);
@@ -1156,11 +1152,8 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)

		xgbe_phy_status_aneg(pdata);

		if (test_bit(XGBE_LINK, &pdata->dev_state)) {
			clear_bit(XGBE_LINK, &pdata->dev_state);
		netif_carrier_off(pdata->netdev);
	}
	}

adjust_link:
	xgbe_phy_adjust_link(pdata);
@@ -1179,7 +1172,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
	devm_free_irq(pdata->dev, pdata->an_irq, pdata);

	pdata->phy.link = 0;
	if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
	netif_carrier_off(pdata->netdev);

	xgbe_phy_adjust_link(pdata);
Loading