
Commit 64bc95c7 authored by Linux Build Service Account

Merge 7cb07734 on remote branch

Change-Id: I0ff2996c3d3fd762cdf9332c74b742fd6a9d391d
parents f1375a38 7cb07734
+3 −0
/*
 * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
@@ -288,6 +289,7 @@ enum cdp_host_txrx_stats {
	TXRX_SOC_INTERRUPT_STATS = 12,
	TXRX_SOC_FSE_STATS = 13,
	TXRX_HAL_REG_WRITE_STATS = 14,
	TXRX_SOC_REO_HW_DESC_DUMP = 15,
	TXRX_HOST_STATS_MAX,
};

@@ -2251,6 +2253,7 @@ enum cdp_dp_cfg {
	cfg_dp_tso_enable,
	cfg_dp_lro_enable,
	cfg_dp_gro_enable,
	cfg_dp_sg_enable,
	cfg_dp_tx_flow_start_queue_offset,
	cfg_dp_tx_flow_stop_queue_threshold,
	cfg_dp_ipa_uc_tx_buf_size,
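
Editor's note: the two hunks above extend the public cdp enums, adding TXRX_SOC_REO_HW_DESC_DUMP as a new host-stats id (explicitly assigned 15, just before TXRX_HOST_STATS_MAX) and cfg_dp_sg_enable as a new config key alongside the existing tso/lro/gro entries. A minimal, hedged sketch of a cdp client referencing the new host-stats id; the helper name below is hypothetical and not part of this commit.

static inline bool example_is_reo_hw_desc_dump(enum cdp_host_txrx_stats type)
{
	/* TXRX_SOC_REO_HW_DESC_DUMP (15) selects the new REO HW descriptor
	 * dump handled in dp_print_host_stats() further down this change. */
	return type == TXRX_SOC_REO_HW_DESC_DUMP;
}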
+1 −0
@@ -314,6 +314,7 @@ struct txrx_pdev_cfg_param_t {
	bool gro_enable;
	bool tso_enable;
	bool lro_enable;
	bool sg_enable;
	bool enable_data_stall_detection;
	bool enable_flow_steering;
	bool disable_intra_bss_fwd;
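
The new sg_enable flag sits next to the existing gro/tso/lro booleans in txrx_pdev_cfg_param_t. A hedged sketch of how a driver layer might populate it from an ini-derived value; the helper name and its parameter are illustrative only, not part of this change.

static void example_fill_pdev_cfg(struct txrx_pdev_cfg_param_t *cfg,
				  bool ini_sg_enable)
{
	/* Carry the scatter-gather TX capability into the pdev config block,
	 * mirroring how the neighbouring gro/tso/lro flags are filled. */
	cfg->sg_enable = ini_sg_enable;
}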
+9 −0
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
@@ -347,6 +348,7 @@ const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
};

/* MCL specific functions */
@@ -7918,6 +7920,10 @@ dp_print_host_stats(struct dp_vdev *vdev,
		hal_dump_reg_write_stats(pdev->soc->hal_soc);
		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
		break;
	case TXRX_SOC_REO_HW_DESC_DUMP:
		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
					 vdev->vdev_id);
		break;
	default:
		dp_info("Wrong Input For TxRx Host Stats");
		dp_txrx_stats_help();
@@ -10169,6 +10175,9 @@ static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
	case cfg_dp_gro_enable:
		value = dpsoc->wlan_cfg_ctx->gro_enabled;
		break;
	case cfg_dp_sg_enable:
		value = dpsoc->wlan_cfg_ctx->sg_enabled;
		break;
	case cfg_dp_tx_flow_start_queue_offset:
		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
		break;
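
With the case above, dp_get_cfg() answers cfg_dp_sg_enable from wlan_cfg_ctx->sg_enabled. A hedged read-side sketch, assuming the usual cdp_cfg_get() wrapper that dispatches into dp_get_cfg(); the helper name is the editor's, not part of this commit.

static bool example_soc_sg_enabled(ol_txrx_soc_handle soc)
{
	/* Resolves to wlan_cfg_ctx->sg_enabled through the cdp config
	 * accessor; non-zero means scatter-gather TX is enabled. */
	return !!cdp_cfg_get(soc, cfg_dp_sg_enable);
}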
+84 −0
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
@@ -2014,6 +2015,7 @@ QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}
	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;

	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
@@ -3893,3 +3895,85 @@ void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,

	dp_peer_unref_delete(peer);
}

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	if (!rx_tid)
		return;

	if (reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_SUCCESS) {
		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
			  reo_status->rx_queue_status.header.status);
		return;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
}

void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer)
{
	int i;
	struct dp_rx_tid *rx_tid;
	struct hal_reo_cmd_params params;

	if (!peer) {
		dp_err_rl("Peer is NULL");
		return;
	}

	for (i = 0; i < DP_MAX_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		if (!rx_tid)
			continue;
		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (rx_tid->hw_qdesc_vaddr_aligned) {
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.fl_cache_params.flush_no_inval = 0;
			if (QDF_STATUS_SUCCESS !=
				dp_reo_send_cmd(
					soc, CMD_FLUSH_CACHE,
					&params, dp_dump_rx_reo_queue_info,
					(void *)rx_tid)) {
				dp_err_rl("cache flush send failed tid %d",
					  rx_tid->tid);
				qdf_spin_unlock_bh(&rx_tid->tid_lock);
				break;
			}
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
}

void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
	struct dp_peer *peer = NULL;

	if (!vdev) {
		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
		return;
	}

	peer = vdev->vap_bss_peer;

	if (!peer) {
		dp_err_rl("Peer is NULL");
		return;
	}
	dp_send_cache_flush_for_rx_tid(soc, peer);
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
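
Taken together, these additions wire the debug path end to end: dp_get_rx_reo_queue_info() resolves the vdev's BSS peer, dp_send_cache_flush_for_rx_tid() issues CMD_FLUSH_CACHE for every TID that has an aligned queue descriptor, and dp_dump_rx_reo_queue_info() runs later as the REO status callback, dumping the descriptor once it is coherent in DDR. A hedged caller sketch built only on the signatures shown above; the wrapper name is hypothetical.

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
static void example_dump_reo_queues(struct cdp_soc_t *cdp_soc, uint8_t vdev_id)
{
	/* Kicks off the flush-then-dump sequence; the descriptor contents are
	 * printed asynchronously from the dp_dump_rx_reo_queue_info() callback
	 * once the REO flush command reports HAL_REO_CMD_SUCCESS. */
	dp_get_rx_reo_queue_info(cdp_soc, vdev_id);
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */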
+56 −0
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
@@ -22,6 +23,10 @@
#include <qdf_lock.h>
#include "dp_types.h"

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
@@ -338,4 +343,55 @@ dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);

#else /* DUMP_REO_QUEUE_INFO_IN_DDR */

static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
#endif /* _DP_PEER_H_ */
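
A design note on the header above: the #else branch supplies a static inline no-op stub for dp_get_rx_reo_queue_info(), which is why the new TXRX_SOC_REO_HW_DESC_DUMP case in dp_print_host_stats() can call it without a local #ifdef. A hedged illustration of a call site relying on that stub; the function name is the editor's.

static void example_unconditional_call(struct cdp_soc_t *cdp_soc,
				       uint8_t vdev_id)
{
	/* Compiles either way: the real implementation when
	 * DUMP_REO_QUEUE_INFO_IN_DDR is defined, the empty inline stub
	 * otherwise. */
	dp_get_rx_reo_queue_info(cdp_soc, vdev_id);
}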