
Commit 737b0396 authored by Hrishikesh Vidwans, committed by Ahmad Masri

wil6210: allow sending special packets when tx ring is full



When traffic is very high, special packets such as ARP may get
dropped, causing tx traffic to stall intermittently.
This patch reserves spare tx descriptors so that special packets can
still be sent when the tx ring is almost full. The number of reserved
entries can be changed by writing to tx_reserved_entries via debugfs;
the new value takes effect on subsequent connections.

This feature is enabled when either drop_if_ring_full or ac_queues is
enabled.
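
Below is a minimal standalone sketch of the reservation check described above. The struct and helper names are illustrative only and do not match the driver's actual definitions; the point is that ordinary frames see the ring's free space minus the reserved pool, while special frames may also dip into the reserved entries.

/* Sketch of the descriptor-reservation check; names are illustrative,
 * not the driver's real definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct tx_ring_sketch {
	int size;     /* total descriptors in the ring */
	int used;     /* descriptors currently in flight */
	int reserved; /* entries held back for special packets */
};

/* Descriptors a frame may consume: ordinary frames cannot touch the
 * reserved pool, special frames (ARP, ICMP, EAPOL, ...) can.
 */
static int tx_avail(const struct tx_ring_sketch *r, bool special)
{
	int avail = r->size - r->used;

	return special ? avail : avail - r->reserved;
}

int main(void)
{
	/* Ring almost full: only 2 of 1024 descriptors are free. */
	struct tx_ring_sketch r = { .size = 1024, .used = 1022, .reserved = 16 };

	printf("ordinary frame sees %d free descriptors\n", tx_avail(&r, false));
	printf("special frame sees %d free descriptors\n", tx_avail(&r, true));
	return 0;
}

In the patch itself the equivalent subtraction happens in __wil_tx_ring() and __wil_tx_ring_tso_edma(), and descriptors taken from the reserved pool are tagged with WIL_CTX_FLAG_RESERVED_USED so that wil_tx_sring_handler() can return them to the pool when their completions are processed.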

Change-Id: I6c5215500966840445f2631fb32494750e60303d
Signed-off-by: Hrishikesh Vidwans <hvidwans@codeaurora.org>
Signed-off-by: Ahmad Masri <amasri@codeaurora.org>
parent 9cc0bd82
+17 −0
@@ -99,6 +99,22 @@ static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,

		v = (ring_id % 2 ? (v >> 16) : (v & 0xffff));
		seq_printf(s, "  hwhead = %u\n", v);
		if (!ring->is_rx) {
			struct wil_ring_tx_data *txdata =
				&wil->ring_tx_data[ring_id];

			seq_printf(s, "  available = %d\n",
				   wil_ring_avail_tx(ring) -
				   txdata->tx_reserved_count);
			seq_printf(s, "  used = %d\n",
				   wil_ring_used_tx(ring));
			seq_printf(s, "\n  tx_res_count = %d\n",
				   txdata->tx_reserved_count);
			seq_printf(s, "  tx_res_count_used = %d\n",
				   txdata->tx_reserved_count_used);
			seq_printf(s, "  tx_res_count_unavail = %d\n",
				   txdata->tx_reserved_count_not_avail);
		}
	}
	seq_printf(s, "  hwtail = [0x%08x] -> ", ring->hwtail);
	x = wmi_addr(wil, ring->hwtail);
@@ -2640,6 +2656,7 @@ static const struct dbg_off dbg_wil_off[] = {
	WIL_FIELD(amsdu_en, 0644,	doff_u8),
	WIL_FIELD(force_edmg_channel, 0644,	doff_u8),
	WIL_FIELD(ap_ps, 0644, doff_u8),
	WIL_FIELD(tx_reserved_entries, 0644, doff_u32),
	{},
};

+4 −0
@@ -1890,6 +1890,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
						 ftm->rx_offset);
		}

		wil->tx_reserved_entries = ((drop_if_ring_full || ac_queues) ?
					    WIL_DEFAULT_TX_RESERVED_ENTRIES :
					    0);

		if (wil->platform_ops.notify) {
			rc = wil->platform_ops.notify(wil->platform_handle,
						      WIL_PLATFORM_EVT_FW_RDY);
+48 −7
@@ -981,7 +981,8 @@ static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
	return 0;
}

void wil_tx_data_init(struct wil_ring_tx_data *txdata)
void wil_tx_data_init(const struct wil6210_priv *wil,
		      struct wil_ring_tx_data *txdata)
{
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = 0;
@@ -994,6 +995,9 @@ void wil_tx_data_init(struct wil_ring_tx_data *txdata)
	txdata->agg_amsdu = 0;
	txdata->addba_in_progress = false;
	txdata->mid = U8_MAX;
	txdata->tx_reserved_count = wil->tx_reserved_entries;
	txdata->tx_reserved_count_used = 0;
	txdata->tx_reserved_count_not_avail = 0;
	spin_unlock_bh(&txdata->lock);
}

@@ -1048,7 +1052,7 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
		goto out;
	}

	wil_tx_data_init(txdata);
	wil_tx_data_init(wil, txdata);
	vring->is_rx = false;
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
@@ -1217,7 +1221,7 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
		goto out;
	}

	wil_tx_data_init(txdata);
	wil_tx_data_init(wil, txdata);
	vring->is_rx = false;
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
@@ -1858,6 +1862,20 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
	return rc;
}

static inline bool is_special_packet(const struct sk_buff *skb)
{
	if (skb->protocol == cpu_to_be16(ETH_P_ARP) ||
	    skb->protocol == cpu_to_be16(ETH_P_RARP) ||
	    (skb->protocol == cpu_to_be16(ETH_P_IP) &&
	     ip_hdr(skb)->protocol == IPPROTO_ICMP) ||
	    (skb->protocol == cpu_to_be16(ETH_P_IPV6) &&
	     ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) ||
	    skb->protocol == cpu_to_be16(ETH_P_PAE))
		return true;

	return false;
}

static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
			 struct wil_ring *ring, struct sk_buff *skb)
{
@@ -1865,7 +1883,6 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = ring->swhead;
	int avail = wil_ring_avail_tx(ring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int ring_index = ring - wil->ring_tx;
@@ -1875,6 +1892,11 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
	int used;
	bool mcast = (ring_index == vif->bcast_ring);
	uint len = skb_headlen(skb);
	bool special_packet = (wil->tx_reserved_entries != 0 &&
			       is_special_packet(skb));
	int avail = wil_ring_avail_tx(ring) -
		(special_packet ? 0 : txdata->tx_reserved_count);
	u8 ctx_flags = special_packet ? WIL_CTX_FLAG_RESERVED_USED : 0;

	wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
		     skb->len, ring_index, nr_frags);
@@ -1883,9 +1905,17 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		if (special_packet) {
			txdata->tx_reserved_count_not_avail++;
			wil_err_ratelimited(wil,
					    "TX ring[%2d] full. No space for %d fragments for special packet. Tx-reserved-count is %d\n",
					    ring_index, 1 + nr_frags,
					    txdata->tx_reserved_count);
		} else {
			wil_err_ratelimited(wil,
					    "Tx ring[%2d] full. No space for %d fragments\n",
					    ring_index, 1 + nr_frags);
		}
		return -ENOMEM;
	}
	_d = &ring->va[i].tx.legacy;
@@ -1900,6 +1930,7 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	ring->ctx[i].mapped_as = wil_mapped_as_single;
	ring->ctx[i].flags = ctx_flags;
	/* 1-st segment */
	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
				   ring_index);
@@ -1938,6 +1969,8 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
			goto dma_error;
		}
		ring->ctx[i].mapped_as = wil_mapped_as_page;
		ring->ctx[i].flags = ctx_flags;

		wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
					   pa, len, ring_index);
		/* no need to check return code -
@@ -1970,6 +2003,14 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
			     ring_index, used, used + nr_frags + 1);
	}

	if (special_packet) {
		txdata->tx_reserved_count -= (f + 1);
		txdata->tx_reserved_count_used += (f + 1);
		wil_dbg_txrx(wil,
			     "Ring[%2d] tx_reserved_count: %d, reduced by %d\n",
			     ring_index, txdata->tx_reserved_count, f + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
+2 −1
@@ -650,7 +650,8 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
						int size, u16 ssn);
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
			   struct wil_tid_ampdu_rx *r);
void wil_tx_data_init(struct wil_ring_tx_data *txdata);
void wil_tx_data_init(const struct wil6210_priv *wil,
		      struct wil_ring_tx_data *txdata);
void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
			 struct wil_sta_info *sta);
+6 −3
@@ -750,7 +750,7 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
		     ring_id, cid, tid, wil->tx_sring_idx);

	wil_tx_data_init(txdata);
	wil_tx_data_init(wil, txdata);
	ring->size = size;
	rc = wil_ring_alloc_desc_ring(wil, ring, true);
	if (rc)
@@ -1293,6 +1293,9 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
					  (const void *)&msg, sizeof(msg),
					  false);

			if (ctx->flags & WIL_CTX_FLAG_RESERVED_USED)
				txdata->tx_reserved_count++;

			wil_tx_desc_unmap_edma(dev,
					       (union wil_tx_desc *)d,
					       ctx);
@@ -1455,7 +1458,7 @@ static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
	int used, avail = wil_ring_avail_tx(ring);
	int used, avail = wil_ring_avail_tx(ring) - txdata->tx_reserved_count;
	int f, hdrlen, headlen;
	int gso_type;
	bool is_ipv4;
@@ -1617,7 +1620,7 @@ static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
		wil_ipa_set_bcast_sring_id(wil, sring_id);
	}

	wil_tx_data_init(txdata);
	wil_tx_data_init(wil, txdata);
	ring->size = size;
	ring->is_rx = false;
	rc = wil_ring_alloc_desc_ring(wil, ring, true);