Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ef71ff83 authored by Rajesh K Borundia's avatar Rajesh K Borundia Committed by David S. Miller
Browse files

qlcnic: fix race in tx stop queue



There is a race between netif_stop_queue and the netif_queue_stopped
check. So check once again if buffers are available to avoid the race.
With the above logic we can also get rid of the tx lock in process_cmd_ring.

Signed-off-by: default avatarRajesh K Borundia <rajesh.borundia@qlogic.com>
Signed-off-by: default avatarAmit Kumar Salecha <amit.salecha@qlogic.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 8f891387
Loading
Loading
Loading
Loading
+5 −3
Original line number Diff line number Diff line
@@ -113,8 +113,10 @@
#define TX_UDPV6_PKT	0x0c

/* Tx defines */
#define MAX_BUFFERS_PER_CMD	32
#define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + 4)
#define MAX_TSO_HEADER_DESC	2
#define MGMT_CMD_DESC_RESV	4
#define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
							+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS	2

/*
@@ -369,7 +371,7 @@ struct qlcnic_recv_crb {
 */
struct qlcnic_cmd_buffer {
	struct sk_buff *skb;
	struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
	struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
	u32 frag_count;
};

+9 −3
Original line number Diff line number Diff line
@@ -338,10 +338,16 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,

	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
		netif_tx_stop_queue(tx_ring->txq);
		__netif_tx_unlock_bh(tx_ring->txq);
		smp_mb();
		if (qlcnic_tx_avail(tx_ring) > nr_desc) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
				netif_tx_wake_queue(tx_ring->txq);
		} else {
			adapter->stats.xmit_off++;
			__netif_tx_unlock_bh(tx_ring->txq);
			return -EBUSY;
		}
	}

	do {
		cmd_desc = &cmd_desc_arr[i];
+2 −0
Original line number Diff line number Diff line
@@ -181,7 +181,9 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)

	tx_ring = adapter->tx_ring;
	vfree(tx_ring->cmd_buf_arr);
	tx_ring->cmd_buf_arr = NULL;
	kfree(adapter->tx_ring);
	adapter->tx_ring = NULL;
}

int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+10 −13
Original line number Diff line number Diff line
@@ -132,12 +132,6 @@ qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);

	if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
		netif_stop_queue(adapter->netdev);
		smp_mb();
		adapter->stats.xmit_off++;
	}
}

static const u32 msi_tgt_status[8] = {
@@ -1137,7 +1131,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
	adapter->max_mc_count = 38;

	netdev->netdev_ops	   = &qlcnic_netdev_ops;
	netdev->watchdog_timeo     = 2*HZ;
	netdev->watchdog_timeo     = 5*HZ;

	qlcnic_change_mtu(netdev, netdev->mtu);

@@ -1709,11 +1703,16 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
	/* 4 fragments per cmd des */
	no_of_desc = (frag_count + 3) >> 2;

	if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		smp_mb();
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
			netif_start_queue(netdev);
		else {
			adapter->stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
@@ -2018,14 +2017,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
		smp_mb();

		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			__netif_tx_lock(tx_ring->txq, smp_processor_id());
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->tx_timeo_cnt = 0;
				adapter->stats.xmit_on++;
			}
			__netif_tx_unlock(tx_ring->txq);
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full