Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 34bf65df authored by Thomas Lendacky, committed by David S. Miller
Browse files

amd-xgbe: Add netif_* message support to the driver



Add support for the network interface message level settings for
determining whether to issue some of the driver messages. Make
use of the netif_* interface where appropriate.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5452b2df
Loading
Loading
Loading
Loading
+11 −6
Original line number Original line Diff line number Diff line
@@ -150,9 +150,12 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
	tc_ets = 0;
	tc_ets = 0;
	tc_ets_weight = 0;
	tc_ets_weight = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		DBGPR("  TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
		netif_dbg(pdata, drv, netdev,
		      ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
			  "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
		DBGPR("  PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
			  ets->tc_tx_bw[i], ets->tc_rx_bw[i],
			  ets->tc_tsa[i]);
		netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
			  ets->prio_tc[i]);


		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
		    (i >= pdata->hw_feat.tc_cnt))
		    (i >= pdata->hw_feat.tc_cnt))
@@ -214,7 +217,8 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
{
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);


	DBGPR("  cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
	netif_dbg(pdata, drv, netdev,
		  "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
		  pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
		  pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);


	if (!pdata->pfc) {
	if (!pdata->pfc) {
@@ -238,9 +242,10 @@ static u8 xgbe_dcb_getdcbx(struct net_device *netdev)


static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
{
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	u8 support = xgbe_dcb_getdcbx(netdev);
	u8 support = xgbe_dcb_getdcbx(netdev);


	DBGPR("  DCBX=%#hhx\n", dcbx);
	netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);


	if (dcbx & ~support)
	if (dcbx & ~support)
		return 1;
		return 1;
+21 −12
Original line number Original line Diff line number Diff line
@@ -208,8 +208,9 @@ static int xgbe_init_ring(struct xgbe_prv_data *pdata,
	if (!ring->rdata)
	if (!ring->rdata)
		return -ENOMEM;
		return -ENOMEM;


	DBGPR("    rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
	netif_dbg(pdata, drv, pdata->netdev,
	      ring->rdesc, ring->rdesc_dma, ring->rdata);
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata);


	DBGPR("<--xgbe_init_ring\n");
	DBGPR("<--xgbe_init_ring\n");


@@ -226,7 +227,9 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)


	channel = pdata->channel;
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		DBGPR("  %s - tx_ring:\n", channel->name);
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
				     pdata->tx_desc_count);
		if (ret) {
		if (ret) {
@@ -235,12 +238,14 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
			goto err_ring;
			goto err_ring;
		}
		}


		DBGPR("  %s - rx_ring:\n", channel->name);
		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
				     pdata->rx_desc_count);
		if (ret) {
		if (ret) {
			netdev_alert(pdata->netdev,
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
				     "error initializing Rx ring\n");
			goto err_ring;
			goto err_ring;
		}
		}
	}
	}
@@ -518,8 +523,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);


	if (tso) {
	if (tso) {
		DBGPR("  TSO packet\n");

		/* Map the TSO header */
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
					 packet->header_len, DMA_TO_DEVICE);
@@ -529,6 +532,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
		}
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);


		offset = packet->header_len;
		offset = packet->header_len;


@@ -550,8 +556,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
		}
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		rdata->skb_dma_len = len;
		DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
		netif_dbg(pdata, tx_queued, pdata->netdev,
		      cur_index, skb_dma, len);
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);


		datalen -= len;
		datalen -= len;
		offset += len;
		offset += len;
@@ -563,7 +570,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
	}
	}


	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		DBGPR("  mapping frag %u\n", i);
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);


		frag = &skb_shinfo(skb)->frags[i];
		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;
		offset = 0;
@@ -582,8 +590,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
			rdata->skb_dma = skb_dma;
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			rdata->mapped_as_page = 1;
			DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
			netif_dbg(pdata, tx_queued, pdata->netdev,
			      cur_index, skb_dma, len);
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);


			datalen -= len;
			datalen -= len;
			offset += len;
			offset += len;
+41 −29
Original line number Original line Diff line number Diff line
@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;
		return 0;


	DBGPR("  %s promiscuous mode\n", enable ? "entering" : "leaving");
	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);


	return 0;
	return 0;
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;
		return 0;


	DBGPR("  %s allmulti mode\n", enable ? "entering" : "leaving");
	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);


	return 0;
	return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
		mac_addr[0] = ha->addr[4];
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];
		mac_addr[1] = ha->addr[5];


		DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
		netif_dbg(pdata, drv, pdata->netdev,
		      *mac_reg);
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);


		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}
	}
@@ -1322,7 +1325,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		switch (ets->tc_tsa[i]) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
		case IEEE_8021QAZ_TSA_STRICT:
			DBGPR("  TC%u using SP\n", i);
			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using SP\n", i);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_SP);
					       MTL_TSA_SP);
			break;
			break;
@@ -1330,7 +1334,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
			weight = total_weight * ets->tc_tx_bw[i] / 100;
			weight = total_weight * ets->tc_tx_bw[i] / 100;
			weight = clamp(weight, min_weight, total_weight);
			weight = clamp(weight, min_weight, total_weight);


			DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using DWRR (weight %u)\n", i, weight);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_ETS);
					       MTL_TSA_ETS);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1364,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
		}
		}
		mask &= 0xff;
		mask &= 0xff;


		DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
		netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
			  tc, mask);
		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
		reg_val = XGMAC_IOREAD(pdata, reg);
		reg_val = XGMAC_IOREAD(pdata, reg);


@@ -1457,7 +1463,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
	/* Create a context descriptor if this is a TSO packet */
	/* Create a context descriptor if this is a TSO packet */
	if (tso_context || vlan_context) {
	if (tso_context || vlan_context) {
		if (tso_context) {
		if (tso_context) {
			DBGPR("  TSO context descriptor, mss=%u\n",
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "TSO context descriptor, mss=%u\n",
				  packet->mss);
				  packet->mss);


			/* Set the MSS size */
			/* Set the MSS size */
@@ -1476,7 +1483,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
		}
		}


		if (vlan_context) {
		if (vlan_context) {
			DBGPR("  VLAN context descriptor, ctag=%u\n",
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "VLAN context descriptor, ctag=%u\n",
				  packet->vlan_ctag);
				  packet->vlan_ctag);


			/* Mark it as a CONTEXT descriptor */
			/* Mark it as a CONTEXT descriptor */
@@ -1596,9 +1604,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
	rdesc = rdata->rdesc;
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);


#ifdef XGMAC_ENABLE_TX_DESC_DUMP
	if (netif_msg_tx_queued(pdata))
	xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
		xgbe_dump_tx_desc(pdata, ring, start_index,
#endif
				  packet->rdesc_count, 1);


	/* Make sure ownership is written to the descriptor */
	/* Make sure ownership is written to the descriptor */
	dma_wmb();
	dma_wmb();
@@ -1640,9 +1648,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
	/* Make sure descriptor fields are read after reading the OWN bit */
	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();
	dma_rmb();


#ifdef XGMAC_ENABLE_RX_DESC_DUMP
	if (netif_msg_rx_status(pdata))
	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
		xgbe_dump_rx_desc(pdata, ring, ring->cur);
#endif


	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* Timestamp Context Descriptor */
		/* Timestamp Context Descriptor */
@@ -1713,7 +1720,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
	/* Check for errors (only valid in last descriptor) */
	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	DBGPR("  err=%u, etlt=%#x\n", err, etlt);
	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);


	if (!err || !etlt) {
	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		/* No error if err is 0 or etlt is 0 */
@@ -1724,7 +1731,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
							      RX_NORMAL_DESC0,
							      RX_NORMAL_DESC0,
							      OVT);
							      OVT);
			DBGPR("  vlan-ctag=0x%04x\n", packet->vlan_ctag);
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
				  packet->vlan_ctag);
		}
		}
	} else {
	} else {
		if ((etlt == 0x05) || (etlt == 0x06))
		if ((etlt == 0x05) || (etlt == 0x06))
@@ -2032,7 +2040,7 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
	for (i = 0; i < pdata->tx_q_count; i++)
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);


	netdev_notice(pdata->netdev,
	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo_size + 1) * 256));
		   pdata->tx_q_count, ((fifo_size + 1) * 256));
}
}
@@ -2048,7 +2056,7 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
	for (i = 0; i < pdata->rx_q_count; i++)
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);


	netdev_notice(pdata->netdev,
	netif_info(pdata, drv, pdata->netdev,
		   "%d Rx hardware queues, %d byte fifo per queue\n",
		   "%d Rx hardware queues, %d byte fifo per queue\n",
		   pdata->rx_q_count, ((fifo_size + 1) * 256));
		   pdata->rx_q_count, ((fifo_size + 1) * 256));
}
}
@@ -2069,14 +2077,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)


	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
		for (j = 0; j < qptc; j++) {
			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
			pdata->q2tc_map[queue++] = i;
		}
		}


		if (i < qptc_extra) {
		if (i < qptc_extra) {
			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
			pdata->q2tc_map[queue++] = i;
@@ -2094,13 +2104,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
	for (i = 0, prio = 0; i < prio_queues;) {
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		mask = 0;
		for (j = 0; j < ppq; j++) {
		for (j = 0; j < ppq; j++) {
			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
			pdata->prio2q_map[prio++] = i;
		}
		}


		if (i < ppq_extra) {
		if (i < ppq_extra) {
			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
			pdata->prio2q_map[prio++] = i;
		}
		}
+56 −45
Original line number Original line Diff line number Diff line
@@ -183,9 +183,10 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
			channel->rx_ring = rx_ring++;
			channel->rx_ring = rx_ring++;
		}
		}


		DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
		netif_dbg(pdata, drv, pdata->netdev,
		      channel->name, channel->queue_index, channel->dma_regs,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
		      channel->dma_irq, channel->tx_ring, channel->rx_ring);
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}
	}


	pdata->channel = channel_mem;
	pdata->channel = channel_mem;
@@ -235,7 +236,8 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_prv_data *pdata = channel->pdata;


	if (count > xgbe_tx_avail_desc(ring)) {
	if (count > xgbe_tx_avail_desc(ring)) {
		DBGPR("  Tx queue stopped, not enough descriptors available\n");
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;
		ring->tx.queue_stopped = 1;


@@ -330,7 +332,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
	if (!dma_isr)
	if (!dma_isr)
		goto isr_done;
		goto isr_done;


	DBGPR("  DMA_ISR = %08x\n", dma_isr);
	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);


	for (i = 0; i < pdata->channel_count; i++) {
	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
		if (!(dma_isr & (1 << i)))
@@ -339,7 +341,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
		channel = pdata->channel + i;
		channel = pdata->channel + i;


		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);


		/* The TI or RI interrupt bits may still be set even if using
		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * per channel DMA interrupts. Check to be sure those are not
@@ -386,8 +389,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
		}
		}
	}
	}


	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

isr_done:
isr_done:
	return IRQ_HANDLED;
	return IRQ_HANDLED;
}
}
@@ -448,7 +449,6 @@ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
		if (!channel->tx_ring)
		if (!channel->tx_ring)
			break;
			break;


		DBGPR("  %s adding tx timer\n", channel->name);
		setup_timer(&channel->tx_timer, xgbe_tx_timer,
		setup_timer(&channel->tx_timer, xgbe_tx_timer,
			    (unsigned long)channel);
			    (unsigned long)channel);
	}
	}
@@ -468,7 +468,6 @@ static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
		if (!channel->tx_ring)
		if (!channel->tx_ring)
			break;
			break;


		DBGPR("  %s deleting tx timer\n", channel->name);
		del_timer_sync(&channel->tx_timer);
		del_timer_sync(&channel->tx_timer);
	}
	}


@@ -848,8 +847,9 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
		ret = -ENODEV;
		ret = -ENODEV;
		goto err_phy_connect;
		goto err_phy_connect;
	}
	}
	DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
	netif_dbg(pdata, ifup, pdata->netdev,
	      dev_name(&phydev->dev), phydev->link);
		  "phy_connect_direct succeeded for PHY %s\n",
		  dev_name(&phydev->dev));


	return 0;
	return 0;


@@ -1478,7 +1478,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
	ret = NETDEV_TX_OK;
	ret = NETDEV_TX_OK;


	if (skb->len == 0) {
	if (skb->len == 0) {
		netdev_err(netdev, "empty skb received from stack\n");
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
		goto tx_netdev_return;
	}
	}
@@ -1494,7 +1495,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)


	ret = xgbe_prep_tso(skb, packet);
	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
	if (ret) {
		netdev_err(netdev, "error processing TSO packet\n");
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
		goto tx_netdev_return;
	}
	}
@@ -1513,9 +1515,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
	/* Configure required descriptor fields for transmission */
	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);
	hw_if->dev_xmit(channel);


#ifdef XGMAC_ENABLE_TX_PKT_DUMP
	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);
		xgbe_print_pkt(netdev, skb, true);
#endif


	/* Stop the queue in advance if there may not be enough descriptors */
	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
@@ -1710,7 +1711,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
			       (pdata->q2tc_map[queue] == i))
			       (pdata->q2tc_map[queue] == i))
				queue++;
				queue++;


			DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
			netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
				  i, offset, queue - 1);
			netdev_set_tc_queue(netdev, i, queue - offset, offset);
			netdev_set_tc_queue(netdev, i, queue - offset, offset);
			offset = queue;
			offset = queue;
		}
		}
@@ -1877,9 +1879,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
		 * bit */
		 * bit */
		dma_rmb();
		dma_rmb();


#ifdef XGMAC_ENABLE_TX_DESC_DUMP
		if (netif_msg_tx_done(pdata))
		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
#endif


		if (hw_if->is_last_desc(rdesc)) {
		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_packets += rdata->tx.packets;
@@ -1983,7 +1984,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)


		if (error || packet->errors) {
		if (error || packet->errors) {
			if (packet->errors)
			if (packet->errors)
				DBGPR("Error in received packet\n");
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			dev_kfree_skb(skb);
			goto next_packet;
			goto next_packet;
		}
		}
@@ -2033,14 +2035,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
			max_len += VLAN_HLEN;
			max_len += VLAN_HLEN;


		if (skb->len > max_len) {
		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			dev_kfree_skb(skb);
			goto next_packet;
			goto next_packet;
		}
		}


#ifdef XGMAC_ENABLE_RX_PKT_DUMP
		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);
			xgbe_print_pkt(netdev, skb, false);
#endif


		skb_checksum_none_assert(skb);
		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
		if (XGMAC_GET_BITS(packet->attributes,
@@ -2164,8 +2166,8 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
	return processed;
	return processed;
}
}


void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int count, unsigned int flag)
		       unsigned int idx, unsigned int count, unsigned int flag)
{
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_desc *rdesc;
@@ -2173,20 +2175,29 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
	while (count--) {
	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		rdesc = rdata->rdesc;
		pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc0),
			 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
		idx++;
	}
	}
}
}


void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
		       unsigned int idx)
{
{
	pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
	struct xgbe_ring_data *rdata;
		 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
	struct xgbe_ring_desc *rdesc;
		 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
}


void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
@@ -2196,21 +2207,21 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
	unsigned char buffer[128];
	unsigned char buffer[128];
	unsigned int i, j;
	unsigned int i, j;


	netdev_alert(netdev, "\n************** SKB dump ****************\n");
	netdev_dbg(netdev, "\n************** SKB dump ****************\n");


	netdev_alert(netdev, "%s packet of %d bytes\n",
	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);
		   (tx_rx ? "TX" : "RX"), skb->len);


	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));


	for (i = 0, j = 0; i < skb->len;) {
	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);
			      buf[i++]);


		if ((i % 32) == 0) {
		if ((i % 32) == 0) {
			netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
			j = 0;
			j = 0;
		} else if ((i % 16) == 0) {
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
@@ -2220,7 +2231,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
		}
		}
	}
	}
	if (i % 32)
	if (i % 32)
		netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);
		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);


	netdev_alert(netdev, "\n************** SKB dump ****************\n");
	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
}
+13 −2
Original line number Original line Diff line number Diff line
@@ -136,6 +136,13 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
MODULE_DESCRIPTION(XGBE_DRV_DESC);


static int debug = -1;
module_param(debug, int, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);

static void xgbe_default_config(struct xgbe_prv_data *pdata)
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
{
	DBGPR("-->xgbe_default_config\n");
	DBGPR("-->xgbe_default_config\n");
@@ -289,6 +296,8 @@ static int xgbe_probe(struct platform_device *pdev)
	mutex_init(&pdata->rss_mutex);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);
	spin_lock_init(&pdata->tstamp_lock);


	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Check if we should use ACPI or DT */
	/* Check if we should use ACPI or DT */
	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;


@@ -318,7 +327,8 @@ static int xgbe_probe(struct platform_device *pdev)
		ret = PTR_ERR(pdata->xgmac_regs);
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
		goto err_io;
	}
	}
	DBGPR("  xgmac_regs = %p\n", pdata->xgmac_regs);
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);


	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
@@ -327,7 +337,8 @@ static int xgbe_probe(struct platform_device *pdev)
		ret = PTR_ERR(pdata->xpcs_regs);
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
		goto err_io;
	}
	}
	DBGPR("  xpcs_regs  = %p\n", pdata->xpcs_regs);
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);


	/* Retrieve the MAC address */
	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
Loading