Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8a7f9fd8 authored by Brian Norris, committed by Kalle Valo
Browse files

mwifiex: don't disable hardirqs; just softirqs



main_proc_lock and int_lock (in mwifiex_adapter) are the only spinlocks
used in hardirq contexts. The rest are only in task or softirq contexts.

Convert every other lock from *_irq{save,restore}() variants to _bh()
variants.

This is a mechanical transformation of all spinlock usage in mwifiex
using the following:

Step 1:
I ran this nasty sed script:

    sed -i -E '/spin_lock_irqsave|spin_unlock_irqrestore/ {
      /main_proc_lock|int_lock/! {
        s:(spin_(un|)lock)_irq(save|restore):\1_bh: ;
        # Join broken lines.
        :a /;$/! {
          N;
          s/\s*\n\s*//;
          ba
        }
        /,.*\);$/ s:,.*\):\):
      }
    }' drivers/net/wireless/marvell/mwifiex/*

Step 2:
Manually delete the flags / ra_list_flags args from:

  mwifiex_send_single_packet()
  mwifiex_11n_aggregate_pkt()
  mwifiex_send_processed_packet()

which are now unused.

Step 3:
Apply this semantic patch (coccinelle) to remove the unused 'flags'
variables:

// <smpl>
@@
type T;
identifier i;
@@

(
extern T i;
|
- T i;
  ... when != i
)
// </smpl>

(Usage is something like this:

  make coccicheck COCCI=./patch.cocci MODE=patch M=drivers/net/wireless/marvell/mwifiex/

although this skips *.h files for some reason, so I had to massage
stuff.)

Testing: I've played with a variety of stress tests, including download
stress tests on the same APs which caught regressions with commit
5188d545 ("mwifiex: restructure rx_reorder_tbl_lock usage"). I've
primarily tested on Marvell 8997 / PCIe, although I've given 8897 / SDIO
a quick spin as well.

Signed-off-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent ce2e942e
Loading
Loading
Loading
Loading
+21 −32
Original line number Diff line number Diff line
@@ -84,17 +84,15 @@ mwifiex_get_ba_status(struct mwifiex_private *priv,
		      enum mwifiex_ba_status ba_status)
{
	struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
	spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
	list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
		if (tx_ba_tsr_tbl->ba_status == ba_status) {
			spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
					       flags);
			spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
			return tx_ba_tsr_tbl;
		}
	}
	spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
	return NULL;
}

@@ -516,13 +514,12 @@ void mwifiex_11n_delete_all_tx_ba_stream_tbl(struct mwifiex_private *priv)
{
	int i;
	struct mwifiex_tx_ba_stream_tbl *del_tbl_ptr, *tmp_node;
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
	spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
				 &priv->tx_ba_stream_tbl_ptr, list)
		mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, del_tbl_ptr);
	spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);

	INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);

@@ -539,18 +536,16 @@ struct mwifiex_tx_ba_stream_tbl *
mwifiex_get_ba_tbl(struct mwifiex_private *priv, int tid, u8 *ra)
{
	struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
	spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
	list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
		if (ether_addr_equal_unaligned(tx_ba_tsr_tbl->ra, ra) &&
		    tx_ba_tsr_tbl->tid == tid) {
			spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
					       flags);
			spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
			return tx_ba_tsr_tbl;
		}
	}
	spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
	return NULL;
}

@@ -563,7 +558,6 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
{
	struct mwifiex_tx_ba_stream_tbl *new_node;
	struct mwifiex_ra_list_tbl *ra_list;
	unsigned long flags;
	int tid_down;

	if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
@@ -584,9 +578,9 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
		new_node->ba_status = ba_status;
		memcpy(new_node->ra, ra, ETH_ALEN);

		spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
		spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
		list_add_tail(&new_node->list, &priv->tx_ba_stream_tbl_ptr);
		spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
		spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
	}
}

@@ -599,7 +593,6 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
	u32 tx_win_size = priv->add_ba_param.tx_win_size;
	static u8 dialog_tok;
	int ret;
	unsigned long flags;
	u16 block_ack_param_set;

	mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid);
@@ -612,10 +605,10 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
	    memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
		struct mwifiex_sta_node *sta_ptr;

		spin_lock_irqsave(&priv->sta_list_spinlock, flags);
		spin_lock_bh(&priv->sta_list_spinlock);
		sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
		if (!sta_ptr) {
			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
			spin_unlock_bh(&priv->sta_list_spinlock);
			mwifiex_dbg(priv->adapter, ERROR,
				    "BA setup with unknown TDLS peer %pM!\n",
				    peer_mac);
@@ -623,7 +616,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
		}
		if (sta_ptr->is_11ac_enabled)
			tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
		spin_unlock_bh(&priv->sta_list_spinlock);
	}

	block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) |
@@ -687,9 +680,8 @@ int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
{
	struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);
	list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
		if (rx_reor_tbl_ptr->tid == tid) {
			dev_dbg(priv->adapter->dev,
@@ -700,7 +692,7 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
		}
	}
exit:
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
}

/*
@@ -729,9 +721,8 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
	struct mwifiex_ds_rx_reorder_tbl *rx_reo_tbl = buf;
	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr;
	int count = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);
	list_for_each_entry(rx_reorder_tbl_ptr, &priv->rx_reorder_tbl_ptr,
			    list) {
		rx_reo_tbl->tid = (u16) rx_reorder_tbl_ptr->tid;
@@ -750,7 +741,7 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
		if (count >= MWIFIEX_MAX_RX_BASTREAM_SUPPORTED)
			break;
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);

	return count;
}
@@ -764,9 +755,8 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
	struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
	struct mwifiex_ds_tx_ba_stream_tbl *rx_reo_tbl = buf;
	int count = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
	spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
	list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
		rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
		mwifiex_dbg(priv->adapter, DATA, "data: %s tid=%d\n",
@@ -778,7 +768,7 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
		if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED)
			break;
	}
	spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);

	return count;
}
@@ -790,16 +780,15 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
{
	struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
	unsigned long flags;

	if (!ra)
		return;

	spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
	spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
	list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list)
		if (!memcmp(tbl->ra, ra, ETH_ALEN))
			mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
	spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);

	return;
}
+2 −3
Original line number Diff line number Diff line
@@ -147,11 +147,10 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
	int tid;
	u8 ret = false;
	struct mwifiex_tx_ba_stream_tbl *tx_tbl;
	unsigned long flags;

	tid = priv->aggr_prio_tbl[ptr_tid].ampdu_user;

	spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
	spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
	list_for_each_entry(tx_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
		if (tid > priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user) {
			tid = priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user;
@@ -160,7 +159,7 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
			ret = true;
		}
	}
	spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);

	return ret;
}
+10 −16
Original line number Diff line number Diff line
@@ -155,7 +155,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
int
mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
			  struct mwifiex_ra_list_tbl *pra_list,
			  int ptrindex, unsigned long ra_list_flags)
			  int ptrindex)
			  __releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_adapter *adapter = priv->adapter;
@@ -168,8 +168,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,

	skb_src = skb_peek(&pra_list->skb_head);
	if (!skb_src) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return 0;
	}

@@ -177,8 +176,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
	skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
					       GFP_ATOMIC);
	if (!skb_aggr) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return -1;
	}

@@ -208,17 +206,15 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
		pra_list->total_pkt_count--;
		atomic_dec(&priv->wmm.tx_pkts_queued);
		aggr_num++;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);

		mwifiex_write_data_complete(adapter, skb_src, 0, 0);

		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			return -1;
		}

@@ -232,7 +228,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,

	} while (skb_src);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	/* Last AMSDU packet does not need padding */
	skb_trim(skb_aggr, skb_aggr->len - pad);
@@ -265,10 +261,9 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
	}
	switch (ret) {
	case -EBUSY:
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
		spin_lock_bh(&priv->wmm.ra_list_spinlock);
		if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb_aggr, 1, -1);
			return -1;
		}
@@ -286,8 +281,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
		atomic_inc(&priv->wmm.tx_pkts_queued);

		tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
		break;
	case -1:
+1 −1
Original line number Diff line number Diff line
@@ -27,7 +27,7 @@ int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
				struct sk_buff *skb);
int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr,
			      int ptr_index, unsigned long flags)
			      int ptr_index)
			      __releases(&priv->wmm.ra_list_spinlock);

#endif /* !_MWIFIEX_11N_AGGR_H_ */
+36 −50
Original line number Diff line number Diff line
@@ -113,10 +113,9 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
	struct sk_buff_head list;
	struct sk_buff *skb;
	int pkt_to_send, i;
	unsigned long flags;

	__skb_queue_head_init(&list);
	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);

	pkt_to_send = (start_win > tbl->start_win) ?
		      min((start_win - tbl->start_win), tbl->win_size) :
@@ -140,7 +139,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
	}

	tbl->start_win = start_win;
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);

	while ((skb = __skb_dequeue(&list)))
		mwifiex_11n_dispatch_pkt(priv, skb);
@@ -161,10 +160,9 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
	struct sk_buff_head list;
	struct sk_buff *skb;
	int i, j, xchg;
	unsigned long flags;

	__skb_queue_head_init(&list);
	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);

	for (i = 0; i < tbl->win_size; ++i) {
		if (!tbl->rx_reorder_ptr[i])
@@ -187,7 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
	}
	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);

	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);

	while ((skb = __skb_dequeue(&list)))
		mwifiex_11n_dispatch_pkt(priv, skb);
@@ -203,19 +201,18 @@ static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
			     struct mwifiex_rx_reorder_tbl *tbl)
{
	unsigned long flags;
	int start_win;

	if (!tbl)
		return;

	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
	spin_lock_bh(&priv->adapter->rx_proc_lock);
	priv->adapter->rx_locked = true;
	if (priv->adapter->rx_processing) {
		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
		spin_unlock_bh(&priv->adapter->rx_proc_lock);
		flush_workqueue(priv->adapter->rx_workqueue);
	} else {
		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
		spin_unlock_bh(&priv->adapter->rx_proc_lock);
	}

	start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
@@ -224,16 +221,16 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
	del_timer_sync(&tbl->timer_context.timer);
	tbl->timer_context.timer_is_set = false;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);
	list_del(&tbl->list);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);

	kfree(tbl->rx_reorder_ptr);
	kfree(tbl);

	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
	spin_lock_bh(&priv->adapter->rx_proc_lock);
	priv->adapter->rx_locked = false;
	spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
	spin_unlock_bh(&priv->adapter->rx_proc_lock);

}

@@ -245,17 +242,15 @@ struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);
	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			spin_unlock_bh(&priv->rx_reorder_tbl_lock);
			return tbl;
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);

	return NULL;
}
@@ -266,21 +261,19 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *tbl, *tmp;
	unsigned long flags;

	if (!ta)
		return;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);
	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			spin_unlock_bh(&priv->rx_reorder_tbl_lock);
			mwifiex_del_rx_reorder_entry(priv, tbl);
			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
			spin_lock_bh(&priv->rx_reorder_tbl_lock);
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);

	return;
}
@@ -294,18 +287,16 @@ mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
	struct mwifiex_private *priv = ctx->priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);
	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			spin_unlock_bh(&priv->rx_reorder_tbl_lock);
			return i;
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);

	return -1;
}
@@ -353,7 +344,6 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
	int i;
	struct mwifiex_rx_reorder_tbl *tbl, *new_node;
	u16 last_seq = 0;
	unsigned long flags;
	struct mwifiex_sta_node *node;

	/*
@@ -377,7 +367,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
	new_node->init_win = seq_num;
	new_node->flags = 0;

	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
	spin_lock_bh(&priv->sta_list_spinlock);
	if (mwifiex_queuing_ra_based(priv)) {
		if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
			node = mwifiex_get_sta_entry(priv, ta);
@@ -391,7 +381,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
		else
			last_seq = priv->rx_seq[tid];
	}
	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
	spin_unlock_bh(&priv->sta_list_spinlock);

	mwifiex_dbg(priv->adapter, INFO,
		    "info: last_seq=%d start_win=%d\n",
@@ -423,9 +413,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
	for (i = 0; i < win_size; ++i)
		new_node->rx_reorder_ptr[i] = NULL;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);
	list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
}

static void
@@ -481,18 +471,17 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
	u32 rx_win_size = priv->add_ba_param.rx_win_size;
	u8 tid;
	int win_size;
	unsigned long flags;
	uint16_t block_ack_param_set;

	if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
	    ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
	    priv->adapter->is_hw_11ac_capable &&
	    memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
		spin_lock_irqsave(&priv->sta_list_spinlock, flags);
		spin_lock_bh(&priv->sta_list_spinlock);
		sta_ptr = mwifiex_get_sta_entry(priv,
						cmd_addba_req->peer_mac_addr);
		if (!sta_ptr) {
			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
			spin_unlock_bh(&priv->sta_list_spinlock);
			mwifiex_dbg(priv->adapter, ERROR,
				    "BA setup with unknown TDLS peer %pM!\n",
				    cmd_addba_req->peer_mac_addr);
@@ -500,7 +489,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
		}
		if (sta_ptr->is_11ac_enabled)
			rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
		spin_unlock_bh(&priv->sta_list_spinlock);
	}

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
@@ -687,7 +676,6 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
	struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 cleanup_rx_reorder_tbl;
	unsigned long flags;
	int tid_down;

	if (type == TYPE_DELBA_RECEIVE)
@@ -721,9 +709,9 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
			ra_list->amsdu_in_ampdu = false;
			ra_list->ba_status = BA_SETUP_NONE;
		}
		spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
		spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
		mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
		spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
		spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
	}
}

@@ -809,17 +797,16 @@ void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
{
	struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	spin_lock_bh(&priv->rx_reorder_tbl_lock);
	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
				 &priv->rx_reorder_tbl_ptr, list) {
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		spin_unlock_bh(&priv->rx_reorder_tbl_lock);
		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
		spin_lock_bh(&priv->rx_reorder_tbl_lock);
	}
	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
	spin_unlock_bh(&priv->rx_reorder_tbl_lock);

	mwifiex_reset_11n_rx_seq_num(priv);
}
@@ -831,7 +818,6 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
{
	struct mwifiex_private *priv;
	struct mwifiex_rx_reorder_tbl *tbl;
	unsigned long lock_flags;
	int i;

	for (i = 0; i < adapter->priv_num; i++) {
@@ -839,10 +825,10 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
		if (!priv)
			continue;

		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
		spin_lock_bh(&priv->rx_reorder_tbl_lock);
		list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
			tbl->flags = flags;
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
		spin_unlock_bh(&priv->rx_reorder_tbl_lock);
	}

	return;
Loading