Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 88046b2c authored by Felix Fietkau
Browse files

mt76: add support for reporting tx status with skb



MT76x2/MT76x0 has somewhat unreliable tx status reporting, and for that
reason the driver currently does not report per-skb tx ack status at all.
This breaks things like client idle polling, which relies on the tx ack
status of a transmitted nullfunc frame.

This patch adds code to report skb-attached tx status if requested by
mac80211 or the rate control module. Since tx status is polled from a
simple FIFO register, the code needs to account for the possibility of
tx status events getting lost.

The code keeps a list of skbs for which tx status is required and passes
them to mac80211 once tx status has been filled in and the DMA queue is
done with it.
If a tx status event is not received after one second, the status rates
are cleared, and a successful ACK is indicated to avoid spurious disassoc
during assoc or client polling.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 59b55d06
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -258,6 +258,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		return -ENOMEM;
	}

	skb->prev = skb->next = NULL;
	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+3 −0
Original line number Diff line number Diff line
@@ -285,6 +285,7 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
	spin_lock_init(&dev->cc_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);
	skb_queue_head_init(&dev->status_list);

	return dev;
}
@@ -326,6 +327,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
	ieee80211_hw_set(hw, TX_FRAG_LIST);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

@@ -357,6 +359,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	mt76_tx_status_flush(dev, NULL);
	ieee80211_unregister_hw(hw);
	mt76_tx_free(dev);
}
+48 −0
Original line number Diff line number Diff line
@@ -195,6 +195,8 @@ struct mt76_wcid {
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;

	u8 packet_id;
};

struct mt76_txq {
@@ -233,6 +235,22 @@ struct mt76_rx_tid {
	struct sk_buff *reorder_buf[];
};

/* Progress flags for per-skb tx status tracking (stored in mt76_tx_cb.flags). */
#define MT_TX_CB_DMA_DONE		BIT(0)	/* DMA queue is done with the skb */
#define MT_TX_CB_TXS_DONE		BIT(1)	/* tx status has been filled in */
#define MT_TX_CB_TXS_FAILED		BIT(2)	/* no status event arrived in time */

/* Packet id space used to match hardware tx status reports to queued skbs. */
#define MT_PACKET_ID_MASK		GENMASK(7, 0)
#define MT_PACKET_ID_NO_ACK		MT_PACKET_ID_MASK	/* frame sent without ACK request */

/* Stop waiting for a tx status event after one second (see commit message). */
#define MT_TX_STATUS_SKB_TIMEOUT	HZ

/*
 * Driver-private tx status state kept in the skb's ieee80211_tx_info
 * status_driver_data area; accessed via mt76_tx_skb_cb().
 */
struct mt76_tx_cb {
	unsigned long jiffies;	/* timestamp for the status timeout check — presumably set at queue time; confirm at call site */
	u8 wcid;		/* station wcid index this skb belongs to */
	u8 pktid;		/* packet id matched against status events */
	u8 flags;		/* MT_TX_CB_* progress flags */
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
@@ -400,6 +418,7 @@ struct mt76_dev {
	const struct mt76_queue_ops *queue_ops;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];

@@ -594,6 +613,13 @@ wcid_to_sta(struct mt76_wcid *wcid)
	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta);
@@ -624,6 +650,28 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);
int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);

/*
 * Periodic scan of the pending tx status list (called from the mac work,
 * see mt76x02_mac_work). Walking the list via mt76_tx_status_skb_get()
 * with a NULL wcid and pktid 0 presumably completes entries that hit
 * MT_TX_STATUS_SKB_TIMEOUT — confirm against the skb_get implementation.
 */
static inline void
mt76_tx_status_check(struct mt76_dev *dev)
{
	spin_lock_bh(&dev->status_list.lock);
	mt76_tx_status_skb_get(dev, NULL, 0);
	spin_unlock_bh(&dev->status_list.lock);
}

/*
 * Flush pending tx status skbs for one station, or for all stations when
 * wcid is NULL (used on unregister). pktid == -1 apparently selects every
 * entry rather than a specific packet id — confirm in mt76_tx_status_skb_get.
 */
static inline void
mt76_tx_status_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	spin_lock_bh(&dev->status_list.lock);
	mt76_tx_status_skb_get(dev, wcid, -1);
	spin_unlock_bh(&dev->status_list.lock);
}

struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

+0 −1
Original line number Diff line number Diff line
@@ -139,7 +139,6 @@ void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
int mt76x02_insert_hdr_pad(struct sk_buff *skb);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			  struct sk_buff *skb);
+40 −34
Original line number Diff line number Diff line
@@ -324,8 +324,6 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
	else
		txwi->wcid = 0xff;

	txwi->pktid = 1;

	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);
		ccmp_pn[0] = pn;
@@ -371,8 +369,6 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		txwi->pktid |= MT_TXWI_PKTID_PROBE;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

@@ -425,9 +421,6 @@ mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->pktid & MT_TXWI_PKTID_PROBE)
		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;
@@ -442,11 +435,19 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			    struct mt76x02_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct ieee80211_tx_status status = {
		.info = &info
	};
	struct mt76_wcid *wcid = NULL;
	struct mt76x02_sta *msta = NULL;
	struct mt76_dev *mdev = &dev->mt76;

	if (stat->pktid == MT_PACKET_ID_NO_ACK)
		return;

	rcu_read_lock();
	spin_lock_bh(&mdev->status_list.lock);

	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

@@ -454,11 +455,19 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
		void *priv;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta,
		status.sta = container_of(priv, struct ieee80211_sta,
					  drv_priv);
	}

	if (msta && stat->aggr) {
	if (wcid) {
		if (stat->pktid)
			status.skb = mt76_tx_status_skb_get(mdev, wcid,
							    stat->pktid);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}

	if (msta && stat->aggr && !status.skb) {
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
@@ -472,20 +481,24 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			goto out;
		}

		mt76x02_mac_fill_tx_status(dev, &info, &msta->status,
		mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
					   msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		mt76x02_mac_fill_tx_status(dev, &info, stat, 1);
		mt76x02_mac_fill_tx_status(dev, status.info, stat, 1);
		*update = 1;
	}

	ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
	if (status.skb)
		mt76_tx_status_skb_done(mdev, status.skb);
	else
		ieee80211_tx_status_ext(mt76_hw(dev), &status);

out:
	spin_unlock_bh(&mdev->status_list.lock);
	rcu_read_unlock();
}

@@ -707,32 +720,23 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
	}
}

/*
 * Pre-patch tx completion path (removed by this commit): record the txwi's
 * wcid/pktid into the skb's mt76x02_tx_info so the status poller can match
 * a later tx status event, then hand the skb to mt76x02_tx_complete().
 */
static void
mt76x02_mac_queue_txdone(struct mt76x02_dev *dev, struct sk_buff *skb,
			 void *txwi_ptr)
{
	struct mt76x02_tx_info *txi = mt76x02_skb_tx_info(skb);
	struct mt76x02_txwi *txwi = txwi_ptr;

	/* Drain any status events already available in the FIFO */
	mt76x02_mac_poll_tx_status(dev, false);

	txi->tries = 0;
	txi->jiffies = jiffies;	/* timestamp for status matching/timeout */
	txi->wcid = txwi->wcid;
	txi->pktid = txwi->pktid;
	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
	mt76x02_tx_complete(&dev->mt76, skb);
}

/*
 * DMA completion callback for tx skbs. NOTE(review): the diff rendering
 * merged the removed pre-patch branch ("if (e->txwi) ... else") with the
 * added post-patch guard, leaving duplicate control flow; this is the
 * coherent post-patch version.
 *
 * Frames without a txwi (internal/beacon-style buffers) are simply freed.
 * Otherwise, poll the tx status FIFO and pass the skb to the common
 * mt76_tx_complete_skb() path, which handles status reporting.
 */
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
			     struct mt76_queue_entry *e, bool flush)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76x02_txwi *txwi;

	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	/* Drain any status events already available in the FIFO */
	mt76x02_mac_poll_tx_status(dev, false);

	txwi = (struct mt76x02_txwi *) &e->txwi->txwi;
	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);

	mt76_tx_complete_skb(mdev, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);

@@ -817,6 +821,8 @@ void mt76x02_mac_work(struct work_struct *work)
	if (!dev->beacon_mask)
		mt76x02_check_mac_err(dev);

	mt76_tx_status_check(&dev->mt76);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
				     MT_CALIBRATE_INTERVAL);
}
Loading