Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 24afba76 authored by Liad Kaufman's avatar Liad Kaufman Committed by Emmanuel Grumbach
Browse files

iwlwifi: mvm: support bss dynamic alloc/dealloc of queues



"DQA" is shorthand for "dynamic queue allocation". This
enables on-demand allocation of queues per RA/TID rather than
statically allocating per vif, thus allowing more efficient
use of the available hardware queues.

Please refer to the DOC section this patch adds to sta.h to
see a more in-depth explanation of this feature.

There are many things to take into consideration when working
in DQA mode, and this patch is only one in a series. Note that
default operation mode is non-DQA mode, unless the FW
indicates that it supports DQA mode.

This patch enables support of DQA for a station connected to
an AP, and works in a non-aggregated mode.

When a frame for an unused RA/TID arrives at the driver, it
isn't TXed immediately, but is deferred until a suitable
queue has been allocated for it; it is then TXed by a worker
that both allocates the queues and TXes the deferred traffic.

When a STA is removed, its queues go back into the queue
pools for reuse as needed.

Signed-off-by: default avatarLiad Kaufman <liad.kaufman@intel.com>
Signed-off-by: default avatarEmmanuel Grumbach <emmanuel.grumbach@intel.com>
parent 7ec54716
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		return -EIO;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
	if (ret)
		return ret;
	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
+21 −1
Original line number Diff line number Diff line
@@ -80,12 +80,32 @@
#include "fw-api-stats.h"
#include "fw-api-tof.h"

/* Tx queue numbers */
/* Tx queue numbers for non-DQA mode */
enum {
	/* queue used for off-channel (e.g. scan/ROC) frames */
	IWL_MVM_OFFCHANNEL_QUEUE = 8,
	/* queue used for commands sent to the firmware */
	IWL_MVM_CMD_QUEUE = 9,
};

/*
 * DQA queue numbers
 *
 * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
 *	Each MGMT queue is mapped to a single STA
 *	MGMT frames are frames that return true on ieee80211_is_mgmt()
 * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
 * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
 *	DATA frames are intended for !ieee80211_is_mgmt() frames, but if
 *	the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
 *	as well
 * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
 *
 * NOTE(review): queue 9 is deliberately excluded from both pools — it is
 * IWL_MVM_CMD_QUEUE, which is still used for firmware commands in DQA mode.
 */
enum iwl_mvm_dqa_txq {
	IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
	IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
	IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
	IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
};

enum iwl_mvm_tx_fifo {
	IWL_MVM_TX_FIFO_BK = 0,
	IWL_MVM_TX_FIFO_BE,
+19 −2
Original line number Diff line number Diff line
@@ -425,12 +425,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
		return 0;
	}

	/* Find available queues, and allocate them to the ACs */
	/*
	 * Find available queues, and allocate them to the ACs. When in
	 * DQA-mode they aren't really used, and this is done only so the
	 * mac80211 ieee80211_check_queues() function won't fail
	 */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
		if (!iwl_mvm_is_dqa_supported(mvm) &&
		    queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate queue\n");
			ret = -EIO;
			goto exit_fail;
@@ -495,6 +500,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
		/* fall through */
	default:
		/* If DQA is supported - queues will be enabled when needed */
		if (iwl_mvm_is_dqa_supported(mvm))
			break;

		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
					      vif->hw_queue[ac],
@@ -523,6 +532,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
				    IWL_MAX_TID_COUNT, 0);
		/* fall through */
	default:
		/*
		 * If DQA is supported - queues were already disabled, since in
		 * DQA-mode the queues are a property of the STA and not of the
		 * vif, and at this point the STA was already deleted
		 */
		if (iwl_mvm_is_dqa_supported(mvm))
			break;

		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
					    vif->hw_queue[ac],
+49 −0
Original line number Diff line number Diff line
@@ -992,6 +992,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
	iwl_mvm_reset_phy_ctxts(mvm);
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
	memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -1178,6 +1179,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)

	flush_work(&mvm->d0i3_exit_work);
	flush_work(&mvm->async_handlers_wk);
	flush_work(&mvm->add_stream_wk);
	cancel_delayed_work_sync(&mvm->fw_dump_wk);
	iwl_mvm_free_fw_dump_desc(mvm);

@@ -2382,6 +2384,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
				    peer_addr, action);
}

static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
					     struct iwl_mvm_sta *mvm_sta)
{
	struct iwl_mvm_tid_data *tid_data;
	struct sk_buff *skb;
	int i;

	spin_lock_bh(&mvm_sta->lock);
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		tid_data = &mvm_sta->tid_data[i];
		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
			ieee80211_free_txskb(mvm->hw, skb);
	}
	spin_unlock_bh(&mvm_sta->lock);
}

static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
@@ -2402,6 +2420,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
	/* if a STA is being removed, reuse its ID */
	flush_work(&mvm->sta_drained_wk);

	/*
	 * If we are in a STA removal flow and in DQA mode:
	 *
	 * This is after the sync_rcu part, so the queues have already been
	 * flushed. No more TXs on their way in mac80211's path, and no more in
	 * the queues.
	 * Also, we won't be getting any new TX frames for this station.
	 * What we might have are deferred TX frames that need to be taken care
	 * of.
	 *
	 * Drop any still-queued deferred-frame before removing the STA, and
	 * make sure the worker is no longer handling frames for this STA.
	 */
	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST &&
	    iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
		flush_work(&mvm->add_stream_wk);

		/*
		 * No need to make sure deferred TX indication is off since the
		 * worker will already remove it if it was on
		 */
	}

	mutex_lock(&mvm->mutex);
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
@@ -3738,6 +3783,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
	if (!vif || vif->type != NL80211_IFTYPE_STATION)
		return;

	/* Make sure we're done with the deferred traffic before flushing */
	if (iwl_mvm_is_dqa_supported(mvm))
		flush_work(&mvm->add_stream_wk);

	mutex_lock(&mvm->mutex);
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

+7 −0
Original line number Diff line number Diff line
@@ -665,10 +665,16 @@ struct iwl_mvm {
		/* Map to HW queue */
		u32 hw_queue_to_mac80211;
		u8 hw_queue_refcount;
		/*
		 * This is to mark that queue is reserved for a STA but not yet
		 * allocated. This is needed to make sure we have at least one
		 * available queue to use when adding a new STA
		 */
		bool setup_reserved;
		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
	} queue_info[IWL_MAX_HW_QUEUES];
	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
	struct work_struct add_stream_wk; /* To add streams to queues */
	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];

	const char *nvm_file_name;
@@ -688,6 +694,7 @@ struct iwl_mvm {
	struct iwl_rx_phy_info last_phy_info;
	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
	struct work_struct sta_drained_wk;
	unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
	unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
	atomic_t pending_frames[IWL_MVM_STATION_COUNT];
	u32 tfd_drained[IWL_MVM_STATION_COUNT];
Loading