Commit bd8255d8 authored by Lendacky, Thomas; committed by David S. Miller

amd-xgbe: Prepare for supporting PCI devices



Update the driver framework to separate out platform/ACPI specific code
from general code during device initialization. This will allow for the
introduction of PCI device support.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4b8acdf5
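
In outline, the change turns bus probing into a thin front end that calls back into a generic core. The sketch below is illustrative only: xgbe_alloc_pdata(), xgbe_set_counts(), xgbe_config_netdev(), xgbe_free_pdata() and xgbe_deconfig_netdev() are the entry points this commit carves out of the old platform-only probe path, while example_bus_probe() and the elided resource setup are hypothetical stand-ins for the platform/ACPI glue (and, later, the PCI glue).

/* Illustrative sketch, not the driver's actual probe code */
static int example_bus_probe(struct device *dev)
{
	struct xgbe_prv_data *pdata;
	int ret;

	/* generic: allocate the net_device and private data */
	pdata = xgbe_alloc_pdata(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* bus-specific (stays in the platform/ACPI or PCI code):
	 * map registers, obtain clocks and interrupts, read the MAC
	 * address, fill pdata->channel_irq[] and the max
	 * channel/queue/fifo limits
	 */

	/* generic: function pointers, hw features, ring/queue counts */
	xgbe_set_counts(pdata);

	/* generic: netdev configuration and registration */
	ret = xgbe_config_netdev(pdata);
	if (ret) {
		xgbe_free_pdata(pdata);
		return ret;
	}

	return 0;
}

A matching remove path would call xgbe_deconfig_netdev() and then xgbe_free_pdata(), mirroring how the old xgbe_remove() body is split up in the xgbe-main.c diff below.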
drivers/net/ethernet/amd/xgbe/Makefile  +2 −1
@@ -3,7 +3,8 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
 amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
 		 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
 		 xgbe-ptp.o \
-		 xgbe-phy-v1.o
+		 xgbe-phy-v1.o \
+		 xgbe-platform.o
 
 amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
 amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
drivers/net/ethernet/amd/xgbe/xgbe-dev.c  +4 −12
@@ -2088,24 +2088,16 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 
 static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
 {
-	unsigned int fifo_size;
-
-	/* Calculate the configured fifo size */
-	fifo_size = 1 << (pdata->hw_feat.tx_fifo_size + 7);
-
 	/* The configured value may not be the actual amount of fifo RAM */
-	return min_t(unsigned int, XGMAC_FIFO_TX_MAX, fifo_size);
+	return min_t(unsigned int, pdata->tx_max_fifo_size,
+		     pdata->hw_feat.tx_fifo_size);
 }
 
 static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
 {
-	unsigned int fifo_size;
-
-	/* Calculate the configured fifo size */
-	fifo_size = 1 << (pdata->hw_feat.rx_fifo_size + 7);
-
 	/* The configured value may not be the actual amount of fifo RAM */
-	return min_t(unsigned int, XGMAC_FIFO_RX_MAX, fifo_size);
+	return min_t(unsigned int, pdata->rx_max_fifo_size,
+		     pdata->hw_feat.rx_fifo_size);
 }
 
 static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
drivers/net/ethernet/amd/xgbe/xgbe-drv.c  +6 −16
@@ -114,7 +114,6 @@
  *     THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <linux/platform_device.h>
 #include <linux/spinlock.h>
 #include <linux/tcp.h>
 #include <linux/if_vlan.h>
@@ -160,18 +159,8 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
 		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
 				    (DMA_CH_INC * i);
 
-		if (pdata->per_channel_irq) {
-			/* Get the DMA interrupt (offset 1) */
-			ret = platform_get_irq(pdata->pdev, i + 1);
-			if (ret < 0) {
-				netdev_err(pdata->netdev,
-					   "platform_get_irq %u failed\n",
-					   i + 1);
-				goto err_irq;
-			}
-
-			channel->dma_irq = ret;
-		}
+		if (pdata->per_channel_irq)
+			channel->dma_irq = pdata->channel_irq[i];
 
 		if (i < pdata->tx_ring_count) {
 			spin_lock_init(&tx_ring->lock);
@@ -194,9 +183,6 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
 
 	return 0;
 
-err_irq:
-	kfree(rx_ring);
-
 err_rx_ring:
 	kfree(tx_ring);
 
@@ -590,6 +576,10 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 	hw_feat->tx_ch_cnt++;
 	hw_feat->tc_cnt++;
 
+	/* Translate the fifo sizes into actual numbers */
+	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
+	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
+
 	DBGPR("<--xgbe_get_all_hw_features\n");
 }

drivers/net/ethernet/amd/xgbe/xgbe-main.c  +92 −477
@@ -116,20 +116,10 @@
 
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/platform_device.h>
 #include <linux/spinlock.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-#include <linux/property.h>
-#include <linux/acpi.h>
-#include <linux/mdio.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -176,196 +166,21 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
 	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
 }
 
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgbe_acpi_match[];
-
-static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
-{
-	const struct acpi_device_id *id;
-
-	id = acpi_match_device(xgbe_acpi_match, pdata->dev);
-
-	return id ? (struct xgbe_version_data *)id->driver_data : NULL;
-}
-
-static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
-{
-	struct device *dev = pdata->dev;
-	u32 property;
-	int ret;
-
-	/* Obtain the system clock setting */
-	ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
-	if (ret) {
-		dev_err(dev, "unable to obtain %s property\n",
-			XGBE_ACPI_DMA_FREQ);
-		return ret;
-	}
-	pdata->sysclk_rate = property;
-
-	/* Obtain the PTP clock setting */
-	ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
-	if (ret) {
-		dev_err(dev, "unable to obtain %s property\n",
-			XGBE_ACPI_PTP_FREQ);
-		return ret;
-	}
-	pdata->ptpclk_rate = property;
-
-	return 0;
-}
-#else   /* CONFIG_ACPI */
-static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
-{
-	return NULL;
-}
-
-static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
-{
-	return -EINVAL;
-}
-#endif  /* CONFIG_ACPI */
-
-#ifdef CONFIG_OF
-static const struct of_device_id xgbe_of_match[];
-
-static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
-{
-	const struct of_device_id *id;
-
-	id = of_match_device(xgbe_of_match, pdata->dev);
-
-	return id ? (struct xgbe_version_data *)id->data : NULL;
-}
-
-static int xgbe_of_support(struct xgbe_prv_data *pdata)
-{
-	struct device *dev = pdata->dev;
-
-	/* Obtain the system clock setting */
-	pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
-	if (IS_ERR(pdata->sysclk)) {
-		dev_err(dev, "dma devm_clk_get failed\n");
-		return PTR_ERR(pdata->sysclk);
-	}
-	pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
-
-	/* Obtain the PTP clock setting */
-	pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
-	if (IS_ERR(pdata->ptpclk)) {
-		dev_err(dev, "ptp devm_clk_get failed\n");
-		return PTR_ERR(pdata->ptpclk);
-	}
-	pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
-
-	return 0;
-}
-
-static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
-{
-	struct device *dev = pdata->dev;
-	struct device_node *phy_node;
-	struct platform_device *phy_pdev;
-
-	phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
-	if (phy_node) {
-		/* Old style device tree:
-		 *   The XGBE and PHY resources are separate
-		 */
-		phy_pdev = of_find_device_by_node(phy_node);
-		of_node_put(phy_node);
-	} else {
-		/* New style device tree:
-		 *   The XGBE and PHY resources are grouped together with
-		 *   the PHY resources listed last
-		 */
-		get_device(dev);
-		phy_pdev = pdata->pdev;
-	}
-
-	return phy_pdev;
-}
-#else   /* CONFIG_OF */
-static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
-{
-	return NULL;
-}
-
-static int xgbe_of_support(struct xgbe_prv_data *pdata)
-{
-	return -EINVAL;
-}
-
-static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
-{
-	return NULL;
-}
-#endif  /* CONFIG_OF */
-
-static unsigned int xgbe_resource_count(struct platform_device *pdev,
-					unsigned int type)
-{
-	unsigned int count;
-	int i;
-
-	for (i = 0, count = 0; i < pdev->num_resources; i++) {
-		struct resource *res = &pdev->resource[i];
-
-		if (type == resource_type(res))
-			count++;
-	}
-
-	return count;
-}
-
-static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
-{
-	struct platform_device *phy_pdev;
-
-	if (pdata->use_acpi) {
-		get_device(pdata->dev);
-		phy_pdev = pdata->pdev;
-	} else {
-		phy_pdev = xgbe_of_get_phy_pdev(pdata);
-	}
-
-	return phy_pdev;
-}
-
-static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata)
-{
-	return pdata->use_acpi ? xgbe_acpi_vdata(pdata)
-			       : xgbe_of_vdata(pdata);
-}
-
-static int xgbe_probe(struct platform_device *pdev)
+struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
 {
 	struct xgbe_prv_data *pdata;
 	struct net_device *netdev;
-	struct device *dev = &pdev->dev;
-	struct platform_device *phy_pdev;
-	struct resource *res;
-	const char *phy_mode;
-	unsigned int i, phy_memnum, phy_irqnum;
-	enum dev_dma_attr attr;
-	int ret;
 
-	DBGPR("--> xgbe_probe\n");
-
 	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
 				   XGBE_MAX_DMA_CHANNELS);
 	if (!netdev) {
-		dev_err(dev, "alloc_etherdev failed\n");
-		ret = -ENOMEM;
-		goto err_alloc;
+		dev_err(dev, "alloc_etherdev_mq failed\n");
+		return ERR_PTR(-ENOMEM);
 	}
 	SET_NETDEV_DEV(netdev, dev);
 	pdata = netdev_priv(netdev);
 	pdata->netdev = netdev;
-	pdata->pdev = pdev;
-	pdata->adev = ACPI_COMPANION(dev);
 	pdata->dev = dev;
-	platform_set_drvdata(pdev, netdev);
 
 	spin_lock_init(&pdata->lock);
 	spin_lock_init(&pdata->xpcs_lock);
@@ -376,186 +191,82 @@ static int xgbe_probe(struct platform_device *pdev)
 
 	set_bit(XGBE_DOWN, &pdata->dev_state);
 
-	/* Check if we should use ACPI or DT */
-	pdata->use_acpi = dev->of_node ? 0 : 1;
+	return pdata;
+}
 
-	/* Get the version data */
-	pdata->vdata = xgbe_get_vdata(pdata);
+void xgbe_free_pdata(struct xgbe_prv_data *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
 
-	phy_pdev = xgbe_get_phy_pdev(pdata);
-	if (!phy_pdev) {
-		dev_err(dev, "unable to obtain phy device\n");
-		ret = -EINVAL;
-		goto err_phydev;
-	}
-	pdata->phy_pdev = phy_pdev;
-	pdata->phy_dev = &phy_pdev->dev;
+	free_netdev(netdev);
+}
 
-	if (pdev == phy_pdev) {
-		/* New style device tree or ACPI:
-		 *   The XGBE and PHY resources are grouped together with
-		 *   the PHY resources listed last
-		 */
-		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
-		phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
-	} else {
-		/* Old style device tree:
-		 *   The XGBE and PHY resources are separate
-		 */
-		phy_memnum = 0;
-		phy_irqnum = 0;
-	}
-
+void xgbe_set_counts(struct xgbe_prv_data *pdata)
+{
 	/* Set all the function pointers */
 	xgbe_init_all_fptrs(pdata);
 
-	/* Set and validate the number of descriptors for a ring */
-	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
-	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
-	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
-		dev_err(dev, "tx descriptor count (%d) is not valid\n",
-			pdata->tx_desc_count);
-		ret = -EINVAL;
-		goto err_io;
-	}
-	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
-	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
-	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
-		dev_err(dev, "rx descriptor count (%d) is not valid\n",
-			pdata->rx_desc_count);
-		ret = -EINVAL;
-		goto err_io;
-	}
+	/* Populate the hardware features */
+	xgbe_get_all_hw_features(pdata);
 
-	/* Obtain the mmio areas for the device */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pdata->xgmac_regs)) {
-		dev_err(dev, "xgmac ioremap failed\n");
-		ret = PTR_ERR(pdata->xgmac_regs);
-		goto err_io;
-	}
-	if (netif_msg_probe(pdata))
-		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pdata->xpcs_regs)) {
-		dev_err(dev, "xpcs ioremap failed\n");
-		ret = PTR_ERR(pdata->xpcs_regs);
-		goto err_io;
-	}
-	if (netif_msg_probe(pdata))
-		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);
-
-	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
-	pdata->rxtx_regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pdata->rxtx_regs)) {
-		dev_err(dev, "rxtx ioremap failed\n");
-		ret = PTR_ERR(pdata->rxtx_regs);
-		goto err_io;
-	}
-	if (netif_msg_probe(pdata))
-		dev_dbg(dev, "rxtx_regs  = %p\n", pdata->rxtx_regs);
-
-	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
-	pdata->sir0_regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pdata->sir0_regs)) {
-		dev_err(dev, "sir0 ioremap failed\n");
-		ret = PTR_ERR(pdata->sir0_regs);
-		goto err_io;
-	}
-	if (netif_msg_probe(pdata))
-		dev_dbg(dev, "sir0_regs  = %p\n", pdata->sir0_regs);
-
-	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
-	pdata->sir1_regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pdata->sir1_regs)) {
-		dev_err(dev, "sir1 ioremap failed\n");
-		ret = PTR_ERR(pdata->sir1_regs);
-		goto err_io;
-	}
-	if (netif_msg_probe(pdata))
-		dev_dbg(dev, "sir1_regs  = %p\n", pdata->sir1_regs);
-
-	/* Retrieve the MAC address */
-	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
-					    pdata->mac_addr,
-					    sizeof(pdata->mac_addr));
-	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
-		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
-		if (!ret)
-			ret = -EINVAL;
-		goto err_io;
-	}
+	/* Set default max values if not provided */
+	if (!pdata->tx_max_channel_count)
+		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
+	if (!pdata->rx_max_channel_count)
+		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
 
-	/* Retrieve the PHY mode - it must be "xgmii" */
-	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
-					  &phy_mode);
-	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
-		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
-		if (!ret)
-			ret = -EINVAL;
-		goto err_io;
-	}
-	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+	if (!pdata->tx_max_q_count)
+		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
+	if (!pdata->rx_max_q_count)
+		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
 
-	/* Check for per channel interrupt support */
-	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
-		pdata->per_channel_irq = 1;
+	/* Calculate the number of Tx and Rx rings to be created
+	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+	 *   the number of Tx queues to the number of Tx channels
+	 *   enabled
+	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
+	 *   number of Rx queues or maximum allowed
+	 */
+	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+				     pdata->hw_feat.tx_ch_cnt);
+	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+				     pdata->tx_max_channel_count);
+	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+				     pdata->tx_max_q_count);
 
-	/* Obtain device settings unique to ACPI/OF */
-	if (pdata->use_acpi)
-		ret = xgbe_acpi_support(pdata);
-	else
-		ret = xgbe_of_support(pdata);
-	if (ret)
-		goto err_io;
-
-	/* Set the DMA coherency values */
-	attr = device_get_dma_attr(dev);
-	if (attr == DEV_DMA_NOT_SUPPORTED) {
-		dev_err(dev, "DMA is not supported");
-		ret = -ENODEV;
-		goto err_io;
-	}
-	pdata->coherent = (attr == DEV_DMA_COHERENT);
-	if (pdata->coherent) {
-		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
-		pdata->arcache = XGBE_DMA_OS_ARCACHE;
-		pdata->awcache = XGBE_DMA_OS_AWCACHE;
-	} else {
-		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
-		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
-		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
-	}
+	pdata->tx_q_count = pdata->tx_ring_count;
 
-	/* Get the device interrupt */
-	ret = platform_get_irq(pdev, 0);
-	if (ret < 0) {
-		dev_err(dev, "platform_get_irq 0 failed\n");
-		goto err_io;
-	}
-	pdata->dev_irq = ret;
+	pdata->rx_ring_count = min_t(unsigned int,
+				     netif_get_num_default_rss_queues(),
+				     pdata->hw_feat.rx_ch_cnt);
+	pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+				     pdata->rx_max_channel_count);
+
+	pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
+				  pdata->rx_max_q_count);
 
-	/* Get the auto-negotiation interrupt */
-	ret = platform_get_irq(phy_pdev, phy_irqnum++);
-	if (ret < 0) {
-		dev_err(dev, "platform_get_irq phy 0 failed\n");
-		goto err_io;
-	}
-	pdata->an_irq = ret;
+	if (netif_msg_probe(pdata)) {
+		dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
+			pdata->tx_ring_count, pdata->rx_ring_count);
+		dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
+			pdata->tx_q_count, pdata->rx_q_count);
+	}
+}
 
+int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
+	struct device *dev = pdata->dev;
+	unsigned int i;
+	int ret;
+
 	netdev->irq = pdata->dev_irq;
 	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
 	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
 
-	/* Set all the function pointers */
-	xgbe_init_all_fptrs(pdata);
-
 	/* Issue software reset to device */
 	pdata->hw_if.exit(pdata);
 
-	/* Populate the hardware features */
-	xgbe_get_all_hw_features(pdata);
-
 	/* Set default configuration data */
 	xgbe_default_config(pdata);
 
@@ -564,33 +275,33 @@ static int xgbe_probe(struct platform_device *pdev)
 					DMA_BIT_MASK(pdata->hw_feat.dma_width));
 	if (ret) {
 		dev_err(dev, "dma_set_mask_and_coherent failed\n");
-		goto err_io;
+		return ret;
 	}
 
-	/* Calculate the number of Tx and Rx rings to be created
-	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
-	 *   the number of Tx queues to the number of Tx channels
-	 *   enabled
-	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
-	 *   number of Rx queues
-	 */
-	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
-				     pdata->hw_feat.tx_ch_cnt);
-	pdata->tx_q_count = pdata->tx_ring_count;
+	/* Set default max values if not provided */
+	if (!pdata->tx_max_fifo_size)
+		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+	if (!pdata->rx_max_fifo_size)
+		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
 
+	/* Set and validate the number of descriptors for a ring */
+	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
+
+	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
+
+	/* Set the number of queues */
 	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
 	if (ret) {
 		dev_err(dev, "error setting real tx queue count\n");
-		goto err_io;
+		return ret;
 	}
 
-	pdata->rx_ring_count = min_t(unsigned int,
-				     netif_get_num_default_rss_queues(),
-				     pdata->hw_feat.rx_ch_cnt);
-	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
 	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
 	if (ret) {
 		dev_err(dev, "error setting real rx queue count\n");
-		goto err_io;
+		return ret;
 	}
 
 	/* Initialize RSS hash key and lookup table */
@@ -607,7 +318,7 @@ static int xgbe_probe(struct platform_device *pdev)
 	/* Call MDIO/PHY initialization routine */
 	ret = pdata->phy_if.phy_init(pdata);
 	if (ret)
-		goto err_io;
+		return ret;
 
 	/* Set device operations */
 	netdev->netdev_ops = xgbe_get_netdev_ops();
@@ -654,7 +365,7 @@ static int xgbe_probe(struct platform_device *pdev)
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_io;
		return ret;
	}

	/* Create the PHY/ANEG name based on netdev name */
@@ -682,10 +393,6 @@ static int xgbe_probe(struct platform_device *pdev)
 
 	xgbe_debugfs_init(pdata);
 
-	netdev_notice(netdev, "net device enabled\n");
-
-	DBGPR("<-- xgbe_probe\n");
-
 	return 0;
 
 err_wq:
@@ -694,24 +401,12 @@ static int xgbe_probe(struct platform_device *pdev)
 err_netdev:
 	unregister_netdev(netdev);
 
-err_io:
-	platform_device_put(phy_pdev);
-
-err_phydev:
-	free_netdev(netdev);
-
-err_alloc:
-	dev_notice(dev, "net device not enabled\n");
-
 	return ret;
 }
 
-static int xgbe_remove(struct platform_device *pdev)
+void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
 {
-	struct net_device *netdev = platform_get_drvdata(pdev);
-	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
-	DBGPR("-->xgbe_remove\n");
+	struct net_device *netdev = pdata->netdev;
 
 	xgbe_debugfs_exit(pdata);
 
@@ -726,103 +421,23 @@ static int xgbe_remove(struct platform_device *pdev)
 	destroy_workqueue(pdata->dev_workqueue);
 
 	unregister_netdev(netdev);
 
-	platform_device_put(pdata->phy_pdev);
-
-	free_netdev(netdev);
-
-	DBGPR("<--xgbe_remove\n");
-
-	return 0;
 }
 
-#ifdef CONFIG_PM
-static int xgbe_suspend(struct device *dev)
+static int __init xgbe_mod_init(void)
 {
-	struct net_device *netdev = dev_get_drvdata(dev);
-	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	int ret = 0;
-
-	DBGPR("-->xgbe_suspend\n");
-
-	if (netif_running(netdev))
-		ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
-
-	pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
-	pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
-	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
-
-	DBGPR("<--xgbe_suspend\n");
-
-	return ret;
-}
+	int ret;
 
-static int xgbe_resume(struct device *dev)
-{
-	struct net_device *netdev = dev_get_drvdata(dev);
-	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	int ret = 0;
-
-	DBGPR("-->xgbe_resume\n");
-
-	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
-	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
-
-	if (netif_running(netdev)) {
-		ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
-
-		/* Schedule a restart in case the link or phy state changed
-		 * while we were powered down.
-		 */
-		schedule_work(&pdata->restart_work);
-	}
+	ret = xgbe_platform_init();
+	if (ret)
+		return ret;
 
-	DBGPR("<--xgbe_resume\n");
-
-	return ret;
+	return 0;
 }
-#endif /* CONFIG_PM */
 
-static const struct xgbe_version_data xgbe_v1 = {
-	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v1,
-	.xpcs_access			= XGBE_XPCS_ACCESS_V1,
-};
+static void __exit xgbe_mod_exit(void)
+{
+	xgbe_platform_exit();
+}
 
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgbe_acpi_match[] = {
-	{ .id = "AMDI8001",
-	  .driver_data = (kernel_ulong_t)&xgbe_v1 },
-	{},
-};
-
-MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id xgbe_of_match[] = {
-	{ .compatible = "amd,xgbe-seattle-v1a",
-	  .data = &xgbe_v1 },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, xgbe_of_match);
-#endif
-
-static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
-
-static struct platform_driver xgbe_driver = {
-	.driver = {
-		.name = "amd-xgbe",
-#ifdef CONFIG_ACPI
-		.acpi_match_table = xgbe_acpi_match,
-#endif
-#ifdef CONFIG_OF
-		.of_match_table = xgbe_of_match,
-#endif
-		.pm = &xgbe_pm_ops,
-	},
-	.probe = xgbe_probe,
-	.remove = xgbe_remove,
-};
-
-module_platform_driver(xgbe_driver);
+module_init(xgbe_mod_init);
+module_exit(xgbe_mod_exit);
drivers/net/ethernet/amd/xgbe/xgbe-platform.c  +632 −0

File added.

Preview size limit exceeded, changes collapsed.
