Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 65879b92 authored by Rakesh Pillai
Browse files

ath10k: dma unmap mgmt tx buffer if wmi cmd send fails



WCN3990 sends mgmt frames by reference via WMI. If
the wmi command send fails, the frame is not being
dma unmapped.

Fix the missing DMA unmapping of the mgmt tx frame when
sending the WMI command fails. Add a separate wmi-tlv op
for mgmt tx via reference, which accepts the DMA-mapped
address that is sent to the firmware.

CRs-Fixed: 2181836
Change-Id: I3f036023ac0ecbd845d842b649be882fd17619e5
Signed-off-by: Rakesh Pillai <pillair@codeaurora.org>
parent 091cb27f
Loading
Loading
Loading
Loading
+21 −5
Original line number Diff line number Diff line
@@ -3704,6 +3704,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	for (;;) {
@@ -3711,6 +3712,20 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
		if (!skb)
			break;

		if (QCA_REV_WCN3990(ar)) {
			paddr = dma_map_single(ar->dev, skb->data,
					       skb->len, DMA_TO_DEVICE);
			if (!paddr)
				continue;
			ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
			if (ret) {
				ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
					    ret);
				dma_unmap_single(ar->dev, paddr, skb->len,
						 DMA_FROM_DEVICE);
				ieee80211_free_txskb(ar->hw, skb);
			}
		} else {
			ret = ath10k_wmi_mgmt_tx(ar, skb);
			if (ret) {
				ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
@@ -3719,6 +3734,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
			}
		}
	}
}

static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
{
+27 −7
Original line number Diff line number Diff line
@@ -127,6 +127,9 @@ struct wmi_ops {
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
@@ -390,13 +393,34 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

/* Transmit a management frame by reference via WMI.
 *
 * @ar:    ath10k device context
 * @msdu:  management frame to send; caller owns the buffer and is
 *         responsible for DMA mapping/unmapping it
 * @paddr: DMA address of the frame data, passed through to the
 *         firmware command
 *
 * Returns 0 on success, -EOPNOTSUPP when the firmware interface does
 * not provide a mgmt-tx-by-reference op, or a negative errno from
 * command generation or send.
 */
static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *cmd_skb;

	/* Only firmware variants that take frames by reference
	 * (e.g. WCN3990) implement this op.
	 */
	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	cmd_skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(cmd_skb))
		return PTR_ERR(cmd_skb);

	return ath10k_wmi_cmd_send(ar, cmd_skb,
				   ar->wmi.cmd->mgmt_tx_send_cmdid);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;
	u32 mgmt_tx_cmdid;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;
@@ -405,12 +429,8 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (QCA_REV_WCN3990(ar))
		mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid;
	else
		mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid;

	ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid);
	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

+8 −16
Original line number Diff line number Diff line
@@ -2504,21 +2504,20 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
}

static struct sk_buff *
ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
				   dma_addr_t paddr)
{
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
	struct wmi_tlv_mgmt_tx_cmd *cmd;
	struct wmi_tlv *tlv;
	struct ieee80211_hdr *hdr;
	struct ath10k_vif *arvif;
	u32 buf_len = msdu->len;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 vdev_id;
	void *ptr;
	int len;
	u32 buf_len = (msdu->len < WMI_TX_DL_FRM_LEN) ? msdu->len :
					 WMI_TX_DL_FRM_LEN;
	u16 fc;
	struct ath10k_vif *arvif;
	dma_addr_t mgmt_frame_dma;
	u32 vdev_id;

	hdr = (struct ieee80211_hdr *)msdu->data;
	fc = le16_to_cpu(hdr->frame_control);
@@ -2559,14 +2558,7 @@ ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
	cmd->hdr.chanfreq = 0;
	cmd->hdr.buf_len = __cpu_to_le32(buf_len);
	cmd->hdr.frame_len = __cpu_to_le32(msdu->len);
	mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
					msdu->len, DMA_TO_DEVICE);
	if (!mgmt_frame_dma)
		return ERR_PTR(-ENOMEM);

	cmd->hdr.paddr_lo = (uint32_t)(mgmt_frame_dma & 0xffffffff);
	cmd->hdr.paddr_hi  = (uint32_t)(upper_32_bits(mgmt_frame_dma) &
						HTT_WCN3990_PADDR_MASK);
	cmd->hdr.paddr = __cpu_to_le64(paddr);
	cmd->data_len = buf_len;
	cmd->data_tag = 0x11;

@@ -3796,7 +3788,7 @@ static const struct wmi_ops wmi_tlv_ops = {
	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
	.gen_mgmt_tx =  ath10k_wmi_tlv_op_gen_mgmt_tx,
	.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
+1 −2
Original line number Diff line number Diff line
@@ -1729,8 +1729,7 @@ struct wmi_tlv_mgmt_tx_hdr {
	__le32 vdev_id;
	__le32 desc_id;
	__le32 chanfreq;
	__le32 paddr_lo;
	__le32 paddr_hi;
	__le64 paddr;
	__le32 frame_len;
	__le32 buf_len;
} __packed;