Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8f7807ae authored by Vipul Pandya, committed by David S. Miller
Browse files

net: sxgbe: add Checksum offload support for Samsung sxgbe



This patch adds TX and RX checksum offload support.

Signed-off-by: Vipul Pandya <vipul.pandya@samsung.com>
Neatening-by: Joe Perches <joe@perches.com>
Signed-off-by: Byungho An <bh74.an@samsung.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1051125d
Loading
Loading
Loading
Loading
+5 −0
Original line number Original line Diff line number Diff line
@@ -340,6 +340,10 @@ struct sxgbe_core_ops {
	void (*set_eee_timer)(void __iomem *ioaddr, const int ls,
	void (*set_eee_timer)(void __iomem *ioaddr, const int ls,
			      const int tw);
			      const int tw);
	void (*set_eee_pls)(void __iomem *ioaddr, const int link);
	void (*set_eee_pls)(void __iomem *ioaddr, const int link);

	/* Enable disable checksum offload operations */
	void (*enable_rx_csum)(void __iomem *ioaddr);
	void (*disable_rx_csum)(void __iomem *ioaddr);
};
};


const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
@@ -452,6 +456,7 @@ struct sxgbe_priv_data {
	struct sxgbe_ops *hw;	/* sxgbe specific ops */
	struct sxgbe_ops *hw;	/* sxgbe specific ops */
	int no_csum_insertion;
	int no_csum_insertion;
	int irq;
	int irq;
	int rxcsum_insertion;
	spinlock_t stats_lock;	/* lock for tx/rx statatics */
	spinlock_t stats_lock;	/* lock for tx/rx statatics */


	struct phy_device *phydev;
	struct phy_device *phydev;
+20 −0
Original line number Original line Diff line number Diff line
@@ -218,6 +218,24 @@ static void sxgbe_set_eee_timer(void __iomem *ioaddr,
	writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
	writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
}
}


/* Enable hardware RX checksum offload: set the offload-enable bit in the
 * core RX configuration register via a read-modify-write of the MMIO reg.
 */
static void sxgbe_enable_rx_csum(void __iomem *ioaddr)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
	reg_val |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
	writel(reg_val, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
}

/* Disable hardware RX checksum offload: clear the offload-enable bit in the
 * core RX configuration register via a read-modify-write of the MMIO reg.
 */
static void sxgbe_disable_rx_csum(void __iomem *ioaddr)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
	reg_val &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
	writel(reg_val, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
}

const struct sxgbe_core_ops core_ops = {
const struct sxgbe_core_ops core_ops = {
	.core_init		= sxgbe_core_init,
	.core_init		= sxgbe_core_init,
	.dump_regs		= sxgbe_core_dump_regs,
	.dump_regs		= sxgbe_core_dump_regs,
@@ -234,6 +252,8 @@ const struct sxgbe_core_ops core_ops = {
	.reset_eee_mode		= sxgbe_reset_eee_mode,
	.reset_eee_mode		= sxgbe_reset_eee_mode,
	.set_eee_timer		= sxgbe_set_eee_timer,
	.set_eee_timer		= sxgbe_set_eee_timer,
	.set_eee_pls		= sxgbe_set_eee_pls,
	.set_eee_pls		= sxgbe_set_eee_pls,
	.enable_rx_csum		= sxgbe_enable_rx_csum,
	.disable_rx_csum	= sxgbe_disable_rx_csum,
};
};


const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
+1 −1
Original line number Original line Diff line number Diff line
@@ -113,7 +113,7 @@ struct sxgbe_rx_norm_desc {
			/* WB RDES3 */
			/* WB RDES3 */
			u32 pkt_len:14;
			u32 pkt_len:14;
			u32 rdes3_reserved:1;
			u32 rdes3_reserved:1;
			u32 err_summary:15;
			u32 err_summary:1;
			u32 err_l2_type:4;
			u32 err_l2_type:4;
			u32 layer34_pkt_type:4;
			u32 layer34_pkt_type:4;
			u32 no_coagulation_pkt:1;
			u32 no_coagulation_pkt:1;
+35 −11
Original line number Original line Diff line number Diff line
@@ -1252,6 +1252,7 @@ void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
{
{
	unsigned int entry, frag_num;
	unsigned int entry, frag_num;
	int cksum_flag = 0;
	struct netdev_queue *dev_txq;
	struct netdev_queue *dev_txq;
	unsigned txq_index = skb_get_queue_mapping(skb);
	unsigned txq_index = skb_get_queue_mapping(skb);
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -1332,7 +1333,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
					   __func__);
					   __func__);


			priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
			priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
							no_pagedlen, 0);
							no_pagedlen, cksum_flag);
		}
		}
	}
	}


@@ -1350,7 +1351,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)


		/* prepare the descriptor */
		/* prepare the descriptor */
		priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
		priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
						len, 0);
						len, cksum_flag);
		/* memory barrier to flush descriptor */
		/* memory barrier to flush descriptor */
		wmb();
		wmb();


@@ -1471,6 +1472,8 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
	unsigned int entry = priv->rxq[qnum]->cur_rx;
	unsigned int entry = priv->rxq[qnum]->cur_rx;
	unsigned int next_entry = 0;
	unsigned int next_entry = 0;
	unsigned int count = 0;
	unsigned int count = 0;
	int checksum;
	int status;


	while (count < limit) {
	while (count < limit) {
		struct sxgbe_rx_norm_desc *p;
		struct sxgbe_rx_norm_desc *p;
@@ -1487,7 +1490,18 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
		next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
		next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
		prefetch(priv->rxq[qnum]->dma_rx + next_entry);
		prefetch(priv->rxq[qnum]->dma_rx + next_entry);


		/*TO DO read the status of the incoming frame */
		/* Read the status of the incoming frame and also get checksum
		 * value based on whether it is enabled in SXGBE hardware or
		 * not.
		 */
		status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
						     &checksum);
		if (unlikely(status < 0)) {
			entry = next_entry;
			continue;
		}
		if (unlikely(!priv->rxcsum_insertion))
			checksum = CHECKSUM_NONE;


		skb = priv->rxq[qnum]->rx_skbuff[entry];
		skb = priv->rxq[qnum]->rx_skbuff[entry];


@@ -1501,7 +1515,11 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)


		skb_put(skb, frame_len);
		skb_put(skb, frame_len);


		skb->ip_summed = checksum;
		if (checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);


		entry = next_entry;
		entry = next_entry;
	}
	}
@@ -1748,15 +1766,15 @@ static int sxgbe_set_features(struct net_device *dev,
{
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	netdev_features_t changed = dev->features ^ features;
	u32 ctrl;


	if (changed & NETIF_F_RXCSUM) {
	if (changed & NETIF_F_RXCSUM) {
		ctrl = readl(priv->ioaddr + SXGBE_CORE_RX_CONFIG_REG);
		if (features & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			priv->hw->mac->enable_rx_csum(priv->ioaddr);
			ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
			priv->rxcsum_insertion = true;
		else
		} else {
			ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
			priv->hw->mac->disable_rx_csum(priv->ioaddr);
		writel(ctrl, priv->ioaddr + SXGBE_CORE_RX_CONFIG_REG);
			priv->rxcsum_insertion = false;
		}
	}
	}


	return 0;
	return 0;
@@ -2115,6 +2133,12 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
		}
		}
	}
	}


	/* Enable Rx checksum offload */
	if (priv->hw_cap.rx_csum_offload) {
		priv->hw->mac->enable_rx_csum(priv->ioaddr);
		priv->rxcsum_insertion = true;
	}

	/* Rx Watchdog is available, enable depend on platform data */
	/* Rx Watchdog is available, enable depend on platform data */
	if (!priv->plat->riwt_off) {
	if (!priv->plat->riwt_off) {
		priv->use_riwt = 1;
		priv->use_riwt = 1;