Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da1063f7 authored by Linux Build Service Account
Browse files

Merge 13cf7adc on remote branch

Change-Id: I2b879338736848ae17655df8c3bda022ca7a14b4
parents b3fcc401 13cf7adc
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -426,6 +426,7 @@ static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
 * 2.4GHz band uses lmac_id = 1
 * 5GHz/6GHz band uses lmac_id=0
 */
#define DP_INVALID_LMAC_ID	(-1)
#define DP_MON_INVALID_LMAC_ID	(-1)
#define DP_MON_2G_LMAC_ID	1
#define DP_MON_5G_LMAC_ID	0
+41 −0
Original line number Diff line number Diff line
@@ -207,6 +207,28 @@ static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
	return ret;
}

#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
/**
 * dp_ipa_set_reo_ctx_mapping_lock_required() - set the per-REO-ring flag
 *	that tells the ring-access path whether the IPA rx-buffer map lock
 *	must be taken
 * @soc: DP soc handle
 * @lock_required: value written to every REO destination ring's flag
 *
 * Each flag is updated while holding that ring's SRNG lock so a concurrent
 * accessor of the ring observes a consistent value.
 */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
	int i;

	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
		hal_ring_handle_t hdl = soc->reo_dest_ring[i].hal_srng;

		hal_srng_lock(hdl);
		soc->ipa_reo_ctx_lock_required[i] = lock_required;
		hal_srng_unlock(hdl);
	}
}
#else
/* QCA_OL_DP_SRNG_LOCK_LESS_ACCESS build: SRNG access takes no locks, so
 * there is no per-ring flag to maintain — compiled as a no-op.
 */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
}

#endif

#ifdef RX_DESC_MULTI_PAGE_ALLOC
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
@@ -226,7 +248,9 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
	qdf_spin_lock_bh(&rx_pool->lock);
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
@@ -256,7 +280,9 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						 rx_pool->buf_size, create);
	}
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	qdf_spin_unlock_bh(&rx_pool->lock);
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);

	return QDF_STATUS_SUCCESS;
}
@@ -276,7 +302,9 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
	qdf_spin_lock_bh(&rx_pool->lock);
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	for (i = 0; i < rx_pool->pool_size; i++) {
		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
		    rx_pool->array[i].rx_desc.unmapped)
@@ -300,7 +328,9 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						 rx_pool->buf_size, create);
	}
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	qdf_spin_unlock_bh(&rx_pool->lock);
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);

	return QDF_STATUS_SUCCESS;
}
@@ -1381,6 +1411,9 @@ QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,

	soc->ipa_first_tx_db_access = true;

	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
	soc->ipa_rx_buf_map_lock_initialized = true;

	return QDF_STATUS_SUCCESS;
}

@@ -1596,6 +1629,9 @@ QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,

	soc->ipa_first_tx_db_access = true;

	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
	soc->ipa_rx_buf_map_lock_initialized = true;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
		  __func__,
@@ -1732,6 +1768,11 @@ QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		status = QDF_STATUS_E_FAILURE;
	}

	if (soc->ipa_rx_buf_map_lock_initialized) {
		qdf_spinlock_destroy(&soc->ipa_rx_buf_map_lock);
		soc->ipa_rx_buf_map_lock_initialized = false;
	}

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (qdf_unlikely(!pdev)) {
		dp_err_rl("Invalid pdev for pdev_id %d", pdev_id);
+81 −0
Original line number Diff line number Diff line
@@ -283,6 +283,65 @@ QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
	struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
/**
 * dp_ipa_rx_buf_smmu_mapping_lock() - take the IPA rx-buffer map lock
 * @soc: DP soc handle
 *
 * No-op until the lock has been created (ipa_rx_buf_map_lock_initialized).
 */
static inline void
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
{
	if (!soc->ipa_rx_buf_map_lock_initialized)
		return;

	qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock);
}

/**
 * dp_ipa_rx_buf_smmu_mapping_unlock() - release the IPA rx-buffer map lock
 * @soc: DP soc handle
 *
 * No-op until the lock has been created (ipa_rx_buf_map_lock_initialized).
 */
static inline void
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
{
	if (!soc->ipa_rx_buf_map_lock_initialized)
		return;

	qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock);
}

/**
 * dp_ipa_reo_ctx_buf_mapping_lock() - take the IPA rx-buffer map lock from
 *	a REO ring context, when that ring's flag says it is required
 * @soc: DP soc handle
 * @reo_ring_num: index into ipa_reo_ctx_lock_required[]
 */
static inline void
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
				uint32_t reo_ring_num)
{
	if (soc->ipa_reo_ctx_lock_required[reo_ring_num])
		qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock);
}

/**
 * dp_ipa_reo_ctx_buf_mapping_unlock() - release the IPA rx-buffer map lock
 *	from a REO ring context, when that ring's flag says it was taken
 * @soc: DP soc handle
 * @reo_ring_num: index into ipa_reo_ctx_lock_required[]
 */
static inline void
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
				  uint32_t reo_ring_num)
{
	if (soc->ipa_reo_ctx_lock_required[reo_ring_num])
		qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock);
}
#else

/* Lock-less SRNG access build: mapping lock is not used — no-op. */
static inline void
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
{
}

/* Lock-less SRNG access build: mapping lock is not used — no-op. */
static inline void
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
{
}

/* Lock-less SRNG access build: mapping lock is not used — no-op. */
static inline void
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
				uint32_t reo_ring_num)
{
}

/* Lock-less SRNG access build: mapping lock is not used — no-op. */
static inline void
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
				  uint32_t reo_ring_num)
{
}
#endif

#else
static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
@@ -308,6 +367,28 @@ static inline QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
	return QDF_STATUS_SUCCESS;
}

/* Stub for builds where the feature guard above is not enabled — no-op. */
static inline void
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
{
}

/* Stub for builds where the feature guard above is not enabled — no-op. */
static inline void
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
{
}

/* Stub for builds where the feature guard above is not enabled — no-op. */
static inline void
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
				uint32_t reo_ring_num)
{
}

/* Stub for builds where the feature guard above is not enabled — no-op. */
static inline void
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
				  uint32_t reo_ring_num)
{
}

static inline qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc,
						       qdf_nbuf_t nbuf)
{
+49 −32
Original line number Diff line number Diff line
@@ -2624,6 +2624,7 @@ static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
	if (soc->intr_mode == DP_INTR_POLL) {
		qdf_timer_free(&soc->int_timer);
	} else {
		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
	}

@@ -3367,13 +3368,15 @@ bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
 *
 * @tx_ring_num: Tx ring number
 * @tx_ipa_ring_sz: Return param only updated for IPA.
 * @soc_cfg_ctx: dp soc cfg context
 *
 * Return: None
 */
static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz)
static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
		*tx_ipa_ring_sz = WLAN_CFG_IPA_TX_RING_SIZE;
		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
}

/**
@@ -3381,14 +3384,17 @@ static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz)
 *
 * @tx_comp_ring_num: Tx comp ring number
 * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
 * @soc_cfg_ctx: dp soc cfg context
 *
 * Return: None
 */
static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz)
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
		*tx_comp_ipa_ring_sz = WLAN_CFG_IPA_TX_COMP_RING_SIZE;
		*tx_comp_ipa_ring_sz =
				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
}
#else
static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
@@ -3522,12 +3528,14 @@ static bool dp_reo_remap_config(struct dp_soc *soc,
	return true;
}

static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz)
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}

static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz)
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
#endif /* IPA_OFFLOAD */
@@ -3660,7 +3668,7 @@ static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
	int cached = 0;

	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
	dp_ipa_get_tx_ring_size(index, &tx_ring_size);
	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);

	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
			  tx_ring_size, cached)) {
@@ -3669,7 +3677,7 @@ static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
	}

	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size);
	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
	/* Enable cached TCL desc if NSS offload is disabled */
	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
		cached = WLAN_CFG_DST_RING_CACHED_DESC;
@@ -4169,7 +4177,7 @@ static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
{
	soc->tx_hw_desc_history = dp_context_alloc_mem(
			soc, DP_TX_HW_DESC_HIST_TYPE,
			sizeof(struct dp_tx_hw_desc_evt));
			sizeof(*soc->tx_hw_desc_history));
	if (soc->tx_hw_desc_history)
		soc->tx_hw_desc_history->index = 0;
}
@@ -11466,6 +11474,29 @@ static struct cdp_mscs_ops dp_ops_mscs = {
#endif

#ifdef FEATURE_RUNTIME_PM
/**
 * dp_flush_ring_hptp() - update a ring's shadow HP/TP registers when a
 *	flush event is pending
 * @soc: DP soc context
 * @hal_srng: ring handle to flush; may be NULL
 *
 * Clears HAL_SRNG_FLUSH_EVENT and, if it was set, runs an empty ring
 * access cycle (which takes and releases the ring lock) so the HP/TP
 * shadow registers get written, then records the flush timestamp.
 */
static
void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
{
	if (!hal_srng)
		return;

	if (!hal_srng_get_clear_event(hal_srng, HAL_SRNG_FLUSH_EVENT))
		return;

	/* Acquire the lock; the access cycle itself flushes HP/TP */
	hal_srng_access_start(soc->hal_soc, hal_srng);
	hal_srng_access_end(soc->hal_soc, hal_srng);

	hal_srng_set_flush_last_ts(hal_srng);
	dp_debug("flushed");
}

/**
 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
 * @soc_hdl: Datapath soc handle
@@ -11479,6 +11510,7 @@ static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	uint8_t i;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
@@ -11490,6 +11522,14 @@ static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Abort suspend due to pending TX packets"));

		/* perform a force flush if tx is pending */
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
					   HAL_SRNG_FLUSH_EVENT);
			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
		}

		return QDF_STATUS_E_AGAIN;
	}

@@ -11507,29 +11547,6 @@ static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_flush_ring_hptp() - Update ring shadow
 *			  register HP/TP address when runtime
 *                        resume
 * @soc: DP soc context
 * @hal_srng: ring handle to flush; may be NULL
 *
 * Return: None
 */
static
void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
{
	/* Only act when a flush event was pending; the call also clears it */
	if (hal_srng && hal_srng_get_clear_event(hal_srng,
						 HAL_SRNG_FLUSH_EVENT)) {
		/* Acquire the lock */
		hal_srng_access_start(soc->hal_soc, hal_srng);

		hal_srng_access_end(soc->hal_soc, hal_srng);

		hal_srng_set_flush_last_ts(hal_srng);
		dp_debug("flushed");
	}
}

#define DP_FLUSH_WAIT_CNT 10
#define DP_RUNTIME_SUSPEND_WAIT_MS 10
/**
+17 −1
Original line number Diff line number Diff line
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
@@ -548,6 +548,9 @@ static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
	txrx_ast_free_cb cb = ast->callback;
	void *cookie = ast->cookie;

	dp_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
		 QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);

	/* Call the callbacks to free up the cookie */
	if (cb) {
		ast->callback = NULL;
@@ -576,6 +579,8 @@ static void dp_peer_ast_hash_detach(struct dp_soc *soc)
	if (!soc->ast_hash.bins)
		return;

	dp_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);

	qdf_spin_lock_bh(&soc->ast_lock);
	for (index = 0; index <= soc->ast_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
@@ -653,6 +658,9 @@ void dp_peer_ast_hash_remove(struct dp_soc *soc,
	/* Check if tail is not empty before delete*/
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	dp_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
		 ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
@@ -1228,6 +1236,10 @@ void dp_peer_free_ast_entry(struct dp_soc *soc,
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */
	dp_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
		 ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
		 QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));

	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

@@ -1312,6 +1324,10 @@ void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
		return;
	}

	dp_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
		 (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
		 QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));

	ast_entry->delete_in_progress = true;

	/* In teardown del ast is called after setting logical delete state
Loading