
Commit 29d37fa1 authored by Emil Tantilov, committed by David S. Miller

ixgbevf: merge ixgbevf_tx_map and ixgbevf_tx_queue into a single function



This change merges the ixgbevf_tx_map and ixgbevf_tx_queue calls
into a single function.  To make room for this, the setting of the cmd_type
and olinfo flags is moved into separate functions.
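In outline (both call sites appear in the ixgbevf_main.c hunks below), the old path mapped the buffers in one pass and wrote the descriptors in a second, while the merged function does both in a single pass over the ring:

	/* before: two passes over the ring */
	ixgbevf_tx_queue(tx_ring, first,
			 ixgbevf_tx_map(tx_ring, first), hdr_len);

	/* after: map buffers and write descriptors in one pass */
	ixgbevf_tx_map(tx_ring, first, hdr_len);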

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9bdfefd2
+1 −0
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
 #define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
 #define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD	     (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
 
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
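The new IXGBE_TXD_CMD macro collects the EOP and RS bits so the merged transmit path can mark the last descriptor of a packet in one step, as in this line from the new ixgbevf_tx_map() below:

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);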
+132 −140
@@ -233,8 +233,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 		/* unmap remaining buffers */
 		while (tx_desc != eop_desc) {
-			tx_desc->wb.status = 0;
-
 			tx_buffer++;
 			tx_desc++;
 			i++;
@@ -254,8 +252,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 			}
 		}
 
-		tx_desc->wb.status = 0;
-
 		/* move us one more past the eop_desc for start of next pkt */
 		tx_buffer++;
 		tx_desc++;
@@ -2915,166 +2911,171 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			    type_tucmd, mss_l4len_idx);
 }
 
-static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
-			  struct ixgbevf_tx_buffer *first)
-{
-	dma_addr_t dma;
-	struct sk_buff *skb = first->skb;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned int len;
-	unsigned int total = skb->len;
-	unsigned int offset = 0, size;
-	int count = 0;
-	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned int f;
-	int i;
-
-	i = tx_ring->next_to_use;
-
-	len = min(skb_headlen(skb), total);
-	while (len) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
-		tx_buffer_info->tx_flags = first->tx_flags;
-		dma = dma_map_single(tx_ring->dev, skb->data + offset,
-				     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;
-
-		/* record length, and DMA address */
-		dma_unmap_len_set(tx_buffer_info, len, size);
-		dma_unmap_addr_set(tx_buffer_info, dma, dma);
-
-		len -= size;
-		total -= size;
-		offset += size;
-		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
-
-	for (f = 0; f < nr_frags; f++) {
-		const struct skb_frag_struct *frag;
-
-		frag = &skb_shinfo(skb)->frags[f];
-		len = min((unsigned int)skb_frag_size(frag), total);
-		offset = 0;
-
-		while (len) {
-			tx_buffer_info = &tx_ring->tx_buffer_info[i];
-			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
-			dma = skb_frag_dma_map(tx_ring->dev, frag,
-					       offset, size, DMA_TO_DEVICE);
-			if (dma_mapping_error(tx_ring->dev, dma))
-				goto dma_error;
-
-			/* record length, and DMA address */
-			dma_unmap_len_set(tx_buffer_info, len, size);
-			dma_unmap_addr_set(tx_buffer_info, dma, dma);
-
-			len -= size;
-			total -= size;
-			offset += size;
-			count++;
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
-		}
-		if (total == 0)
-			break;
-	}
-
-	if (i == 0)
-		i = tx_ring->count - 1;
-	else
-		i = i - 1;
-
-	first->next_to_watch = IXGBEVF_TX_DESC(tx_ring, i);
-	first->time_stamp = jiffies;
-
-	return count;
-
-dma_error:
-	dev_err(tx_ring->dev, "TX DMA map failed\n");
-
-	/* clear timestamp and dma mappings for failed tx_buffer_info map */
-	tx_buffer_info->dma = 0;
-	count--;
-
-	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count >= 0) {
-		count--;
-		i--;
-		if (i < 0)
-			i += tx_ring->count;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-	}
-
-	return count;
-}
-
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring,
-			     struct ixgbevf_tx_buffer *first,
-			     int count, u8 hdr_len)
-{
-	union ixgbe_adv_tx_desc *tx_desc = NULL;
-	struct sk_buff *skb = first->skb;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	u32 olinfo_status = 0, cmd_type_len = 0;
-	u32 tx_flags = first->tx_flags;
-	unsigned int i;
-
-	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
-
-	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
-
-	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
-
-	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
-
-	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
-
-	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-
-		/* use index 1 context for tso */
-		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-	}
-
-	/*
-	 * Check Context must be set if Tx switch is enabled, which it
-	 * always is for case where virtual functions are running
-	 */
-	olinfo_status |= IXGBE_ADVTXD_CC;
-
-	olinfo_status |= ((skb->len - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
-
-	i = tx_ring->next_to_use;
-	while (count--) {
-		dma_addr_t dma;
-		unsigned int len;
-
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		dma = dma_unmap_addr(tx_buffer_info, dma);
-		len = dma_unmap_len(tx_buffer_info, len);
-		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-		tx_desc->read.buffer_addr = cpu_to_le64(dma);
-		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | len);
-		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
-
-	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
-
-	tx_ring->next_to_use = i;
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
+{
+	/* set type for advanced descriptor with frame checksum insertion */
+	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+				      IXGBE_ADVTXD_DCMD_IFCS |
+				      IXGBE_ADVTXD_DCMD_DEXT);
+
+	/* set HW vlan bit if vlan is present */
+	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
+
+	/* set segmentation enable bits for TSO/FSO */
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+
+	return cmd_type;
+}
+
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+				     u32 tx_flags, unsigned int paylen)
+{
+	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	/* enable L4 checksum for TSO and TX checksum offload */
+	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+
+	/* enable IPv4 checksum for TSO */
+	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
+
+	/* use index 1 context for TSO/FSO/FCOE */
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+
+	/* Check Context must be set if Tx switch is enabled, which it
+	 * always is for case where virtual functions are running
+	 */
+	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+
+	tx_desc->read.olinfo_status = olinfo_status;
+}
+
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+			   struct ixgbevf_tx_buffer *first,
+			   const u8 hdr_len)
+{
+	dma_addr_t dma;
+	struct sk_buff *skb = first->skb;
+	struct ixgbevf_tx_buffer *tx_buffer;
+	union ixgbe_adv_tx_desc *tx_desc;
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	unsigned int paylen = skb->len - hdr_len;
+	u32 tx_flags = first->tx_flags;
+	__le32 cmd_type;
+	u16 i = tx_ring->next_to_use;
+
+	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+	cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(tx_ring->dev, dma))
+		goto dma_error;
+
+	/* record length, and DMA address */
+	dma_unmap_len_set(first, len, size);
+	dma_unmap_addr_set(first, dma, dma);
+
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+	for (;;) {
+		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+			tx_desc->read.cmd_type_len =
+				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+
+			dma += IXGBE_MAX_DATA_PER_TXD;
+			size -= IXGBE_MAX_DATA_PER_TXD;
+
+			tx_desc->read.buffer_addr = cpu_to_le64(dma);
+			tx_desc->read.olinfo_status = 0;
+		}
+
+		if (likely(!data_len))
+			break;
+
+		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+
+		size = skb_frag_size(frag);
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		tx_desc->read.olinfo_status = 0;
+
+		frag++;
+	}
+
+	/* write last descriptor with RS and EOP bits */
+	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+	tx_desc->read.cmd_type_len = cmd_type;
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier (wmb) to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	/* notify HW of packet */
+	writel(i, tx_ring->tail);
+
+	return;
+dma_error:
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
+	tx_ring->next_to_use = i;
 }
@@ -3167,17 +3168,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else
 		ixgbevf_tx_csum(tx_ring, first);
 
-	ixgbevf_tx_queue(tx_ring, first,
-			 ixgbevf_tx_map(tx_ring, first), hdr_len);
-
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
+	ixgbevf_tx_map(tx_ring, first, hdr_len);
 
-	writel(tx_ring->next_to_use, tx_ring->tail);
 	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	return NETDEV_TX_OK;
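A note on the barrier placement: because the tail write moves out of ixgbevf_xmit_frame() and into the merged function, the wmb() moves with it. Per the comment in the new ixgbevf_tx_map(), the descriptor writes must complete before first->next_to_watch is set, since ixgbevf_clean_tx_irq() keys off next_to_watch to find completed packets, and before the tail doorbell hands the new descriptors to the hardware:

	wmb();				/* flush descriptor writes first */
	first->next_to_watch = tx_desc;	/* clean-up path keys off this */
	...
	writel(i, tx_ring->tail);	/* then notify HW of the new work */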