Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 86271e46 authored by Felix Fietkau, committed by John W. Linville
Browse files

ath9k: fix the .flush driver op implementation



This patch simplifies the flush op and reuses ath_drain_all_txq for
flushing out pending frames if necessary. It also uses a global timeout
of 200ms instead of the per-queue 60ms timeout.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 0d51cccc
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -189,7 +189,6 @@ struct ath_txq {
 	u32 axq_ampdu_depth;
 	bool stopped;
 	bool axq_tx_inprogress;
-	bool txq_flush_inprogress;
 	struct list_head axq_acq;
 	struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
 	struct list_head txq_fifo_pending;
+21 −35
Original line number Diff line number Diff line
@@ -2128,56 +2128,42 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
 
 static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 {
-#define ATH_FLUSH_TIMEOUT	60 /* ms */
 	struct ath_softc *sc = hw->priv;
-	struct ath_txq *txq = NULL;
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-	int i, j, npend = 0;
+	int timeout = 200; /* ms */
+	int i, j;
 
 	ath9k_ps_wakeup(sc);
 	mutex_lock(&sc->mutex);
 
 	cancel_delayed_work_sync(&sc->tx_complete_work);
 
-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-		if (!ATH_TXQ_SETUP(sc, i))
-			continue;
-		txq = &sc->tx.txq[i];
+	if (drop)
+		timeout = 1;
 
-		if (!drop) {
-			for (j = 0; j < ATH_FLUSH_TIMEOUT; j++) {
-				if (!ath9k_has_pending_frames(sc, txq))
-					break;
-				usleep_range(1000, 2000);
-			}
-		}
+	for (j = 0; j < timeout; j++) {
+		int npend = 0;
 
-		if (drop || ath9k_has_pending_frames(sc, txq)) {
-			ath_dbg(common, ATH_DBG_QUEUE, "Drop frames from hw queue:%d\n",
-				txq->axq_qnum);
-			spin_lock_bh(&txq->axq_lock);
-			txq->txq_flush_inprogress = true;
-			spin_unlock_bh(&txq->axq_lock);
+		if (j)
+			usleep_range(1000, 2000);
 
-			ath9k_ps_wakeup(sc);
-			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
-			npend = ath9k_hw_numtxpending(ah, txq->axq_qnum);
-			ath9k_ps_restore(sc);
-			if (npend)
-				break;
+		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+			if (!ATH_TXQ_SETUP(sc, i))
+				continue;
 
-			ath_draintxq(sc, txq, false);
-			txq->txq_flush_inprogress = false;
-		}
-	}
+			npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
+		}
 
-	if (npend) {
+		if (!npend)
+		    goto out;
+	}
+
+	if (!ath_drain_all_txq(sc, false))
 		ath_reset(sc, false);
-		txq->txq_flush_inprogress = false;
-	}
 
+out:
 	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
 	mutex_unlock(&sc->mutex);
 	ath9k_ps_restore(sc);
 }
 
 struct ieee80211_ops ath9k_ops = {
+13 −15
Original line number Diff line number Diff line
@@ -2012,8 +2012,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 		spin_lock_bh(&txq->axq_lock);
 		if (list_empty(&txq->axq_q)) {
 			txq->axq_link = NULL;
-			if (sc->sc_flags & SC_OP_TXAGGR &&
-			    !txq->txq_flush_inprogress)
+			if (sc->sc_flags & SC_OP_TXAGGR)
 				ath_txq_schedule(sc, txq);
 			spin_unlock_bh(&txq->axq_lock);
 			break;
@@ -2094,7 +2093,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 
 		spin_lock_bh(&txq->axq_lock);
 
-		if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
+		if (sc->sc_flags & SC_OP_TXAGGR)
 			ath_txq_schedule(sc, txq);
 		spin_unlock_bh(&txq->axq_lock);
 	}
@@ -2265,7 +2264,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
 
 		spin_lock_bh(&txq->axq_lock);
 
-		if (!txq->txq_flush_inprogress) {
 		if (!list_empty(&txq->txq_fifo_pending)) {
 			INIT_LIST_HEAD(&bf_head);
 			bf = list_first_entry(&txq->txq_fifo_pending,
@@ -2276,7 +2274,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
 			ath_tx_txqaddbuf(sc, txq, &bf_head);
 		} else if (sc->sc_flags & SC_OP_TXAGGR)
 			ath_txq_schedule(sc, txq);
-		}
+
 		spin_unlock_bh(&txq->axq_lock);
 	}
 }