Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 18235664 authored by Rajkumar Manoharan, committed by Kalle Valo
Browse files

ath10k: cleanup amsdu processing for rx indication



Make amsdu handlers (i.e amsdu_pop and rx_h_handler) common to both
rx_ind and frag_ind htt events. It is sufficient to hold rx_ring lock
for amsdu_pop alone and no need to hold it until the packets are
delivered to mac80211. This helps to reduce rx_lock contention as well.

Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent 6b61d663
Loading
Loading
Loading
Loading
+41 −59
Original line number Diff line number Diff line
@@ -1528,36 +1528,23 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
	__skb_queue_purge(amsdu);
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	static struct ieee80211_rx_status rx_status;
	struct sk_buff_head amsdu;
	int num_mpdu_ranges;
	int i, ret, mpdu_count = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;
	int ret;

	while (mpdu_count--) {
	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
@@ -1565,54 +1552,51 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
			break;
		return ret;
	}

		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
	ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
	ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
	}
	ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);

	tasklet_schedule(&htt->rx_replenish_task);
	return 0;
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;

	__skb_queue_head_init(&amsdu);
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);
	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	tasklet_schedule(&htt->rx_replenish_task);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	while (mpdu_count--) {
		if (ath10k_htt_rx_handle_amsdu(htt) < 0)
			break;
	}

	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	tasklet_schedule(&htt->rx_replenish_task);
}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
{
	ath10k_htt_rx_handle_amsdu(htt);

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
@@ -2331,7 +2315,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		ath10k_htt_rx_frag_handler(htt);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
@@ -2472,9 +2456,7 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)

	while ((skb = __skb_dequeue(&rx_q))) {
		resp = (struct htt_resp *)skb->data;
		spin_lock_bh(&htt->rx_ring.lock);
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		spin_unlock_bh(&htt->rx_ring.lock);
		dev_kfree_skb_any(skb);
	}