Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0b286aa1 authored by Manikanta Pubbisetty's avatar Manikanta Pubbisetty Committed by Gerrit - the friendly Code Review server
Browse files

qcacmn: defer reo queue descriptor free

In some RX backpressure cases, we see the HW accessing REO
queue descriptors of a deleted peer(after the queue descriptors
are unmapped/freed), this is leading to SMMU faults. There are
cases where the HW is accessing the stale REO queue descriptors
~12 seconds after the queue descriptors were freed.

In order to avoid the problem, the HW team has suggested deferring
the unmapping/freeing of REO queue descriptors. Add the logic for
the same.

Change-Id: I5b1fb966dc75b963ccc9d22c40272c8d1d8d6026
CRs-Fixed: 2939223
parent 1c69cdfc
Loading
Loading
Loading
Loading
+56 −0
Original line number Diff line number Diff line
@@ -4775,6 +4775,59 @@ static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}

#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/*
 * dp_reo_desc_deferred_freelist_create() - set up the deferred REO qdesc
 *                                          free list and its lock
 * @soc: Datapath soc handle
 *
 * The init flag is raised last so enqueuers never observe a
 * half-constructed list.
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	soc->reo_desc_deferred_freelist_init = true;
}

/*
 * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
 *                                           free the leftover REO QDESCs
 * @psoc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	soc->reo_desc_deferred_freelist_init = false;
	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
}
#else
/* Stubs used when deferred REO qdesc destroy is compiled out: creation
 * and teardown of the deferred free list become no-ops.
 */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}

static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */

/*
 * dp_soc_reset_txrx_ring_map() - reset tx ring map
 * @soc: DP SOC handle
@@ -4855,6 +4908,7 @@ static void dp_soc_deinit(void *txrx_soc)
	dp_soc_reset_txrx_ring_map(soc);

	dp_reo_desc_freelist_destroy(soc);
	dp_reo_desc_deferred_freelist_destroy(soc);

	DEINIT_RX_HW_STATS_LOCK(soc);

@@ -12449,6 +12503,8 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	dp_reo_desc_deferred_freelist_create(soc);

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
+129 −0
Original line number Diff line number Diff line
@@ -95,6 +95,32 @@ dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
	evt->type = type;
}

#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/*
 * dp_rx_reo_qdesc_deferred_evt_add() - record a deferred-free qdesc event
 *                                      in the REO qdesc history ring
 * @desc: deferred freelist node whose qdesc is being logged
 * @type: event type to record
 *
 * Return: void
 */
static inline void
dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
				 enum reo_qdesc_event_type type)
{
	struct reo_qdesc_event *entry;
	uint32_t slot;

	/* Advance the shared ring index; REO_QDESC_HISTORY_SIZE is a
	 * power of two, so the mask wraps the slot into range.
	 */
	slot = (++reo_qdesc_history_idx) & (REO_QDESC_HISTORY_SIZE - 1);
	entry = &reo_qdesc_history[slot];

	qdf_mem_copy(entry->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
	entry->qdesc_addr = desc->hw_qdesc_paddr;
	entry->ts = qdf_get_log_timestamp();
	entry->type = type;
}

/* Log a deferred qdesc free into the history as a REO_QDESC_FREE event */
#define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)

/* Snapshot the peer MAC from the reo_desc_list_node into the deferred node */
#define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
	qdf_mem_copy(desc->peer_mac, freedesc->peer_mac, QDF_MAC_ADDR_SIZE)
#endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */

#define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
	qdf_mem_copy(freedesc->peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE)

@@ -103,12 +129,17 @@ dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,

#define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)

#else
/* History logging compiled out: all qdesc event/MAC macros are no-ops.
 * NOTE(review): the matching #ifdef is outside this hunk — presumably
 * REO_QDESC_HISTORY; confirm against the full file.
 */
#define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)

#define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)

#define DP_RX_REO_QDESC_FREE_EVT(free_desc)

#define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)

#define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
#endif

static inline void
@@ -2319,6 +2350,95 @@ QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/*
 * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
 *                                    the deferred list
 * @soc: Datapath soc handle
 * @free_desc: REO DESC reference that needs to be freed
 *
 * Return: true if enqueued, else false
 */
static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
					   struct reo_desc_list_node *freedesc)
{
	struct reo_desc_deferred_freelist_node *desc;

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return false;

	desc = qdf_mem_malloc(sizeof(*desc));
	if (!desc)
		return false;

	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
	desc->hw_qdesc_vaddr_unaligned =
			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
	desc->free_ts = qdf_get_system_timestamp();
	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	if (!soc->reo_desc_deferred_freelist_init) {
		qdf_mem_free(desc);
		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
		return false;
	}
	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
			     (qdf_list_node_t *)desc);
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	return true;
}

/*
 * dp_reo_desc_defer_free() - unmap/free REO QDESCs on the deferred list
 *                            that have aged past REO_DESC_DEFERRED_FREE_MS
 * @soc: Datapath soc handle
 *
 * Entries are queued in arrival order, so the scan stops at the first
 * entry that is still too young.
 *
 * Return: void
 */
static void dp_reo_desc_defer_free(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);

	/* Age is computed by subtraction so the test stays correct even
	 * if the millisecond counter wraps; the previous form
	 * (curr_ts > free_ts + MS) would stall freeing across a wrap.
	 */
	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
	       ((curr_ts - desc->free_ts) > REO_DESC_DEFERRED_FREE_MS)) {
		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
				      (qdf_list_node_t **)&desc);

		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);

		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);

		curr_ts = qdf_get_system_timestamp();
	}

	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
}
#else
/* Stubs used when deferred REO qdesc destroy is compiled out: nothing is
 * ever deferred, so enqueue reports failure (caller frees immediately)
 * and the reaper is a no-op.
 */
static inline bool
dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
			       struct reo_desc_list_node *freedesc)
{
	return false;
}

/* Marked inline for consistency with the enqueue stub above */
static inline void dp_reo_desc_defer_free(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */

/*
 * dp_reo_desc_free() - Callback free reo descriptor memory after
 * HW cache flush
@@ -2350,6 +2470,12 @@ static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
		  curr_ts,
		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);

	/* REO desc is enqueued to be freed at a later point
	 * in time, just free the freedesc alone and return
	 */
	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
		goto out;

	DP_RX_REO_QDESC_FREE_EVT(freedesc);

	qdf_mem_unmap_nbytes_single(soc->osdev,
@@ -2357,6 +2483,7 @@ static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
		QDF_DMA_BIDIRECTIONAL,
		rx_tid->hw_qdesc_alloc_size);
	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
out:
	qdf_mem_free(freedesc);
}

@@ -2822,6 +2949,8 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
		}
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);

	dp_reo_desc_defer_free(soc);
}

/*
+22 −0
Original line number Diff line number Diff line
@@ -832,6 +832,22 @@ struct reo_desc_list_node {
#endif
};

#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/* Upper bound on deferred-free nodes queued at once */
#define REO_DESC_DEFERRED_FREELIST_SIZE 256
/* Minimum age (ms) a queue descriptor must reach before it is actually
 * unmapped/freed: 30 s, comfortably past the ~12 s stale HW accesses
 * described in the commit message.
 */
#define REO_DESC_DEFERRED_FREE_MS 30000

/* One REO queue descriptor awaiting deferred unmap/free */
struct reo_desc_deferred_freelist_node {
	qdf_list_node_t node;            /* linkage in reo_desc_deferred_freelist */
	unsigned long free_ts;           /* enqueue timestamp in ms */
	void *hw_qdesc_vaddr_unaligned;  /* CPU address to qdf_mem_free() */
	qdf_dma_addr_t hw_qdesc_paddr;   /* DMA address to unmap */
	uint32_t hw_qdesc_alloc_size;    /* mapped size in bytes */
#ifdef REO_QDESC_HISTORY
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];  /* owning peer, for event history */
#endif /* REO_QDESC_HISTORY */
};
#endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * struct reo_cmd_event_record: Elements to record for each reo command
@@ -1847,6 +1863,12 @@ struct dp_soc {
	/* Dp runtime refcount */
	qdf_atomic_t dp_runtime_refcount;
#endif

#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
	qdf_list_t reo_desc_deferred_freelist;
	qdf_spinlock_t reo_desc_deferred_freelist_lock;
	bool reo_desc_deferred_freelist_init;
#endif
};

#ifdef IPA_OFFLOAD