
Commit 535a69e9 authored by David S. Miller

Merge branch 'calxedaxgmac'



A set of calxedaxgmac bug fixes from Rob Herring.

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 13c7bf08 92cd4253
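The series touches six distinct problems, all visible in the hunks below: two unused extra-stats counters go away, desc_get_buf_len() reads the buf_size field with le32_to_cpu() instead of converting the flags word the wrong way, the rx and tx paths gain dma_mapping_error() handling with a proper unwind, DMA unmap sizes are made to match the sizes passed at map time, tx error recovery moves out of the interrupt handler into a tx_timeout_work item, and the tx queue stop/wake path gets the standard paired-barrier treatment. As a reading aid, here is a minimal user-space model of that last protocol, with C11 fences standing in for smp_mb() and all names illustrative rather than kernel API:

#include <stdatomic.h>
#include <stdio.h>

#define RING_SZ   64
#define THRESHOLD 16                /* stands in for MAX_SKB_FRAGS */

static atomic_uint head, tail;      /* producer/consumer ring cursors */
static atomic_bool queue_stopped;

static unsigned int ring_space(void)
{
	/* same role as CIRC_SPACE() behind the new tx_dma_ring_space() */
	return RING_SZ - 1 - (atomic_load(&head) - atomic_load(&tail)) % RING_SZ;
}

static void xmit_one(void)          /* tail end of xgmac_xmit() */
{
	atomic_fetch_add(&head, 1);                  /* publish descriptor */
	atomic_thread_fence(memory_order_seq_cst);   /* first smp_mb() */
	if (ring_space() <= THRESHOLD) {
		atomic_store(&queue_stopped, 1);     /* netif_stop_queue() */
		atomic_thread_fence(memory_order_seq_cst); /* second smp_mb() */
		if (ring_space() > THRESHOLD)        /* completion raced us */
			atomic_store(&queue_stopped, 0); /* netif_start_queue() */
	}
}

static void complete_one(void)      /* tail end of xgmac_tx_complete() */
{
	atomic_fetch_add(&tail, 1);                  /* retire descriptor */
	atomic_thread_fence(memory_order_seq_cst);   /* its smp_mb() */
	if (atomic_load(&queue_stopped) && ring_space() > THRESHOLD)
		atomic_store(&queue_stopped, 0);     /* netif_wake_queue() */
}

int main(void)                      /* single-threaded walk-through only */
{
	for (int i = 0; i < 60; i++)
		xmit_one();
	for (int i = 0; i < 60; i++)
		complete_one();
	printf("space=%u stopped=%d\n", ring_space(), (int)atomic_load(&queue_stopped));
	return 0;
}

Each side re-checks the ring space after its fence, so whichever CPU acts second is guaranteed to see the other's update; that is what the new smp_mb() calls in xgmac_xmit() and xgmac_tx_complete() provide. The demo drives both roles from one thread purely to show the flow.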
+116 −79
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
 	/* Receive errors */
 	unsigned long rx_watchdog;
 	unsigned long rx_da_filter_fail;
-	unsigned long rx_sa_filter_fail;
 	unsigned long rx_payload_error;
 	unsigned long rx_ip_header_error;
 	/* Tx/Rx IRQ errors */
-	unsigned long tx_undeflow;
 	unsigned long tx_process_stopped;
 	unsigned long rx_buf_unav;
 	unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
 	char rx_pause;
 	char tx_pause;
 	int wolopts;
+	struct work_struct tx_timeout_work;
 };
 
 /* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
 #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
 #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
 
+#define tx_dma_ring_space(p) \
+	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
 /* XGMAC Descriptor Access Helpers */
 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 {
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 
 static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
 {
-	u32 len = cpu_to_le32(p->flags);
+	u32 len = le32_to_cpu(p->buf_size);
 	return (len & DESC_BUFFER1_SZ_MASK) +
 		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
 }
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
 	p->flags = cpu_to_le32(tmpflags);
 }
 
+static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
+{
+	u32 tmpflags = le32_to_cpu(p->flags);
+	tmpflags &= TXDESC_END_RING;
+	p->flags = cpu_to_le32(tmpflags);
+}
+
 static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
 }
 
+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
 static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 {
 	u32 data;
 
-	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
-	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
-	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
-	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	if (addr) {
+		data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	} else {
+		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
+		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
+	}
 }
 
 static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 			if (unlikely(skb == NULL))
 				break;
 
-			priv->rx_skbuff[entry] = skb;
 			paddr = dma_map_single(priv->device, skb->data,
-					       bufsz, DMA_FROM_DEVICE);
+					       priv->dma_buf_sz - NET_IP_ALIGN,
+					       DMA_FROM_DEVICE);
+			if (dma_mapping_error(priv->device, paddr)) {
+				dev_kfree_skb_any(skb);
+				break;
+			}
+			priv->rx_skbuff[entry] = skb;
 			desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
 		}
 
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
 		return;
 
 	for (i = 0; i < DMA_RX_RING_SZ; i++) {
-		if (priv->rx_skbuff[i] == NULL)
+		struct sk_buff *skb = priv->rx_skbuff[i];
+		if (skb == NULL)
 			continue;
 
 		p = priv->dma_rx + i;
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(priv->rx_skbuff[i]);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
 		priv->rx_skbuff[i] = NULL;
 	}
 }
 
 static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 {
-	int i, f;
+	int i;
 	struct xgmac_dma_desc *p;
 
 	if (!priv->tx_skbuff)
@@ -806,15 +831,14 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 			continue;
 
 		p = priv->dma_tx + i;
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
-			p = priv->dma_tx + i++;
-			dma_unmap_page(priv->device, desc_get_buf_addr(p),
-				       desc_get_buf_len(p), DMA_TO_DEVICE);
-		}
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
+			dma_unmap_page(priv->device, desc_get_buf_addr(p),
+				       desc_get_buf_len(p), DMA_TO_DEVICE);
 
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
+		if (desc_get_tx_ls(p))
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
 		priv->tx_skbuff[i] = NULL;
 	}
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
  */
 static void xgmac_tx_complete(struct xgmac_priv *priv)
 {
-	int i;
-
 	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
 		unsigned int entry = priv->tx_tail;
 		struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		if (desc_get_owner(p))
 			break;
 
-		/* Verify tx error by looking at the last segment */
-		if (desc_get_tx_ls(p))
-			desc_get_tx_status(priv, p);
-
 		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
 			priv->tx_head, priv->tx_tail);
 
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		priv->tx_skbuff[entry] = NULL;
-		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
-		if (!skb) {
-			continue;
-		}
-
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
-							      DMA_TX_RING_SZ);
-			p = priv->dma_tx + priv->tx_tail;
-
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
-		}
 
-		dev_kfree_skb(skb);
+		/* Check tx error on the last segment */
+		if (desc_get_tx_ls(p)) {
+			desc_get_tx_status(priv, p);
+			dev_kfree_skb(skb);
+		}
+
+		priv->tx_skbuff[entry] = NULL;
+		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
 	}
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_tail is visible to xgmac_xmit */
+	smp_mb();
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
 		netif_wake_queue(priv->dev);
 }
 
-/**
- * xgmac_tx_err:
- * @priv: pointer to the private device structure
- * Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
- */
-static void xgmac_tx_err(struct xgmac_priv *priv)
+static void xgmac_tx_timeout_work(struct work_struct *work)
 {
-	u32 reg, value, inten;
+	u32 reg, value;
+	struct xgmac_priv *priv =
+		container_of(work, struct xgmac_priv, tx_timeout_work);
 
-	netif_stop_queue(priv->dev);
+	napi_disable(&priv->napi);
 
-	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_lock(priv->dev);
+
 	reg = readl(priv->base + XGMAC_DMA_CONTROL);
 	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
 	do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
 
 	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
 		priv->base + XGMAC_DMA_STATUS);
-	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_unlock(priv->dev);
 	netif_wake_queue(priv->dev);
+
+	napi_enable(&priv->napi);
+
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
 }
 
 static int xgmac_hw_init(struct net_device *dev)
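The conversion of xgmac_tx_err() into xgmac_tx_timeout_work() moves the reset into process context, where napi_disable() and netif_tx_lock() are safe to call, and the handler recovers its device state through container_of(). A stand-alone sketch of that recovery pattern, with a trimmed stand-in struct and illustrative names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct xgmac_priv_model {                   /* trimmed stand-in for struct xgmac_priv */
	int wolopts;
	struct work_struct tx_timeout_work;
};

static void tx_timeout_work_fn(struct work_struct *work)
{
	/* back out from the embedded member to the enclosing structure */
	struct xgmac_priv_model *priv =
		container_of(work, struct xgmac_priv_model, tx_timeout_work);
	printf("recovered priv, wolopts=%d\n", priv->wolopts);
}

int main(void)
{
	struct xgmac_priv_model priv = { .wolopts = 1 };
	tx_timeout_work_fn(&priv.tx_timeout_work); /* as the workqueue would call it */
	return 0;
}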
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
 		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
 	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
 
-	/* Enable interrupts */
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
 
 	/* Mask power mgt interrupt */
 	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
 	return 0;
 }
 
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(priv->device, paddr)) {
 		dev_kfree_skb(skb);
-		return -EIO;
+		return NETDEV_TX_OK;
 	}
 	priv->tx_skbuff[entry] = skb;
 	desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
 					 DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, paddr)) {
-			dev_kfree_skb(skb);
-			return -EIO;
-		}
+		if (dma_mapping_error(priv->device, paddr))
+			goto dma_err;
 
 		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
 		desc = priv->dma_tx + entry;
-		priv->tx_skbuff[entry] = NULL;
+		priv->tx_skbuff[entry] = skb;
 
 		desc_set_buf_addr_and_size(desc, paddr, len);
 		if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	wmb();
 	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
 
-	writel(1, priv->base + XGMAC_DMA_TX_POLL);
-
 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
-	    MAX_SKB_FRAGS)
+	writel(1, priv->base + XGMAC_DMA_TX_POLL);
+	/* Ensure tx_head update is visible to tx completion */
+	smp_mb();
+	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
 		netif_stop_queue(dev);
+		/* Ensure netif_stop_queue is visible to tx completion */
+		smp_mb();
+		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+			netif_start_queue(dev);
+	}
+	return NETDEV_TX_OK;
 
+dma_err:
+	entry = priv->tx_head;
+	for ( ; i > 0; i--) {
+		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+		desc = priv->dma_tx + entry;
+		priv->tx_skbuff[entry] = NULL;
+		dma_unmap_page(priv->device, desc_get_buf_addr(desc),
+			       desc_get_buf_len(desc), DMA_TO_DEVICE);
+		desc_clear_tx_owner(desc);
+	}
+	desc = first;
+	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
+			 desc_get_buf_len(desc), DMA_TO_DEVICE);
+	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
 
 		skb_put(skb, frame_len);
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 frame_len, DMA_FROM_DEVICE);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
 
 		skb->protocol = eth_type_trans(skb, priv->dev);
 		skb->ip_summed = ip_checksum;
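This hunk and the xgmac_rx_refill() one enforce a single invariant: dma_unmap_single() must be passed the same length that dma_map_single() was, here priv->dma_buf_sz - NET_IP_ALIGN, never the received frame_len. A small user-space model of the rule, with illustrative names and NET_IP_ALIGN assumed to be 2; the kernel's DMA API debugging makes an equivalent check:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct mapping { void *addr; size_t len; };

static struct mapping map_single(void *buf, size_t len)  /* dma_map_single() */
{
	return (struct mapping){ buf, len };
}

static void unmap_single(struct mapping m, size_t len)   /* dma_unmap_single() */
{
	assert(len == m.len);  /* CONFIG_DMA_API_DEBUG flags this mismatch */
}

int main(void)
{
	char buf[2048];
	size_t map_len = sizeof(buf) - 2;  /* dma_buf_sz - NET_IP_ALIGN (2 assumed) */
	size_t frame_len = 333;            /* what the NIC actually received */

	struct mapping m = map_single(buf, map_len);
	(void)frame_len;                   /* unmapping with frame_len would assert */
	unmap_single(m, map_len);          /* sizes match: the patched behaviour */
	printf("map/unmap sizes consistent\n");
	return 0;
}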
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
 static void xgmac_tx_timeout(struct net_device *dev)
 {
 	struct xgmac_priv *priv = netdev_priv(dev);
-
-	/* Clear Tx resources and restart transmitting again */
-	xgmac_tx_err(priv);
+	schedule_work(&priv->tx_timeout_work);
 }
 
 /**
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
 		use_hash = true;
 		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+	} else {
+		use_hash = false;
 	}
 	netdev_for_each_mc_addr(ha, dev) {
 		if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	}
 
 out:
+	for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
+		xgmac_set_mac_addr(ioaddr, NULL, reg);
 	for (i = 0; i < XGMAC_NUM_HASH; i++)
 		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
 
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
 static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 {
 	u32 intr_status;
-	bool tx_err = false;
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct xgmac_priv *priv = netdev_priv(dev);
 	struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 		if (intr_status & DMA_STATUS_TPS) {
 			netdev_err(priv->dev, "transmit process stopped\n");
 			x->tx_process_stopped++;
-			tx_err = true;
+			schedule_work(&priv->tx_timeout_work);
 		}
 		if (intr_status & DMA_STATUS_FBI) {
 			netdev_err(priv->dev, "fatal bus error\n");
 			x->fatal_bus_error++;
-			tx_err = true;
 		}
-
-		if (tx_err)
-			xgmac_tx_err(priv);
 	}
 
 	/* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
 	XGMAC_STAT(rx_payload_error),
 	XGMAC_STAT(rx_ip_header_error),
 	XGMAC_STAT(rx_da_filter_fail),
-	XGMAC_STAT(rx_sa_filter_fail),
 	XGMAC_STAT(fatal_bus_error),
 	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
 	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	ndev->netdev_ops = &xgmac_netdev_ops;
 	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
 	spin_lock_init(&priv->stats_lock);
+	INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
 
 	priv->device = &pdev->dev;
 	priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	if (device_can_wakeup(priv->device))
 		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */
 
-	ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
 	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
 		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				     NETIF_F_RXCSUM;
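One convention worth noting from the xmit hunks: ndo_start_xmit() may only return NETDEV_TX_OK or NETDEV_TX_BUSY, so on a mapping failure the driver now drops the frame, unwinds any fragments already mapped, and returns NETDEV_TX_OK rather than -EIO. A compact model of that backout loop, with illustrative names rather than kernel API:

#include <stdio.h>

#define RING_SZ 8

struct desc { int mapped; int owned; };

/* map fragments after the head descriptor; on failure, unwind what we mapped */
static int xmit_frags(struct desc *ring, unsigned int head, int nfrags, int fail_at)
{
	unsigned int entry = head;
	int i;

	for (i = 0; i < nfrags; i++) {
		if (i == fail_at)
			goto dma_err;              /* dma_mapping_error() case */
		entry = (entry + 1) % RING_SZ;
		ring[entry].mapped = ring[entry].owned = 1;
	}
	return 0;

dma_err:
	entry = head;
	for ( ; i > 0; i--) {                      /* same shape as the new dma_err label */
		entry = (entry + 1) % RING_SZ;
		ring[entry].mapped = 0;            /* dma_unmap_page() */
		ring[entry].owned = 0;             /* desc_clear_tx_owner() */
	}
	return -1;                                 /* caller frees skb, returns NETDEV_TX_OK */
}

int main(void)
{
	struct desc ring[RING_SZ] = { 0 };

	xmit_frags(ring, 0, 4, 2);                 /* fail on the 3rd fragment */
	for (int i = 0; i < RING_SZ; i++)
		printf("%d", ring[i].mapped);
	printf("\n");                              /* all zeros: fully unwound */
	return 0;
}

Walking i back down mirrors the patch: only descriptors that were actually mapped are touched, and clearing the owner bit keeps the DMA engine from picking up stale entries.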