Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5888a40c authored by Luca Coelho, committed by Emmanuel Grumbach
Browse files

iwlwifi: mvm: let any command flag be passed to iwl_mvm_flush_tx_path()



Instead of only allowing the caller to decide whether the CMD_ASYNC
flag is set, let it pass the entire flags bitmask.  This allows more
flexibility and will be needed when we call this function in the
suspend flow (where other flags are needed).

Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
parent c84af35d
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -85,7 +85,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
	IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
	IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);


	mutex_lock(&mvm->mutex);
	mutex_lock(&mvm->mutex);
	ret =  iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count;
	ret =  iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
	mutex_unlock(&mvm->mutex);
	mutex_unlock(&mvm->mutex);


	return ret;
	return ret;
+2 −2
Original line number Original line Diff line number Diff line
@@ -1781,7 +1781,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
		 * Flush them here.
		 * Flush them here.
		 */
		 */
		mutex_lock(&mvm->mutex);
		mutex_lock(&mvm->mutex);
		iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
		iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
		mutex_unlock(&mvm->mutex);
		mutex_unlock(&mvm->mutex);


		/*
		/*
@@ -3924,7 +3924,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
	}
	}


	if (drop) {
	if (drop) {
		if (iwl_mvm_flush_tx_path(mvm, msk, true))
		if (iwl_mvm_flush_tx_path(mvm, msk, 0))
			IWL_ERR(mvm, "flush request fail\n");
			IWL_ERR(mvm, "flush request fail\n");
		mutex_unlock(&mvm->mutex);
		mutex_unlock(&mvm->mutex);
	} else {
	} else {
+1 −1
Original line number Original line Diff line number Diff line
@@ -1031,7 +1031,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
#else
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);


static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+2 −2
Original line number Original line Diff line number Diff line
@@ -501,7 +501,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		if (ret)
		if (ret)
			return ret;
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
		if (ret)
			return ret;
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
@@ -1155,7 +1155,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,


	if (old_state >= IWL_AGG_ON) {
	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
					      mvmsta->tfd_queue_msk);
+1 −1
Original line number Original line Diff line number Diff line
@@ -129,7 +129,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
	 * issue as it will have to complete before the next command is
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 * executed, and a new time event means a new command.
	 */
	 */
	iwl_mvm_flush_tx_path(mvm, queues, false);
	iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
}
}


static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
Loading