Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 80f0fe09 authored by Thomas Falcon's avatar Thomas Falcon Committed by David S. Miller
Browse files

ibmvnic: Unmap DMA address of TX descriptor buffers after use



There's no need to wait until a completion is received to unmap
TX descriptor buffers that have been passed to the hypervisor.
Instead unmap them when the hypervisor call has completed. This patch
avoids the possibility that a buffer will not be unmapped because
a TX completion is lost or mishandled.

Reported-by: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
Tested-by: Devesh K. Singh <devesh_singh@in.ibm.com>
Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 37b0a733
Loading
Loading
Loading
Loading
+2 −9
Original line number Diff line number Diff line
@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
		dma_unmap_single(dev, tx_buff->indir_dma,
				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
@@ -2788,7 +2790,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
	union sub_crq *next;
	int index;
	int i, j;
	u8 *first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
@@ -2818,14 +2819,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = &txbuff->indir_arr[0].generic.first;
			if (*first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
				*first = 0;
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);