Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3aab12cd authored by Linux Build Service Account's avatar Linux Build Service Account
Browse files

Merge 2b13d7fd on remote branch

Change-Id: I0de10bd6bec6a3d00569ffd255c0add7e82c291e
parents a29c57b4 2b13d7fd
Loading
Loading
Loading
Loading
+22 −0
Original line number Diff line number Diff line
@@ -92,4 +92,26 @@ static inline void cdp_process_wow_ack_rsp(ol_txrx_soc_handle soc,
		return soc->ops->bus_ops->process_wow_ack_rsp(soc, pdev_id);
}

/**
 * cdp_process_target_suspend_req() - Process target suspend request
 * @soc: data path soc handle
 * @pdev_id: id of dp pdev handle
 *
 * Complete the datapath specific work before target suspend
 *
 * Return: None
 */
static inline void cdp_process_target_suspend_req(ol_txrx_soc_handle soc,
						  uint8_t pdev_id)
{
	if (!soc || !soc->ops || !soc->ops->bus_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/* The op is optional; silently skip when not registered */
	if (!soc->ops->bus_ops->process_target_suspend_req)
		return;

	soc->ops->bus_ops->process_target_suspend_req(soc, pdev_id);
}
#endif /* _CDP_TXRX_BUS_H_ */
+3 −0
Original line number Diff line number Diff line
@@ -1570,11 +1570,14 @@ struct cdp_tx_delay_ops {
 * @bus_suspend: handler for bus suspend
 * @bus_resume: handler for bus resume
 * @process_wow_ack_rsp: handler for wow ack response
 * @process_target_suspend_req: handler for target suspend request
 */
struct cdp_bus_ops {
	/* Suspend the bus-facing datapath for the given pdev */
	QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	/* Resume the bus-facing datapath for the given pdev */
	QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	/* Handle the firmware WoW ack response for the given pdev */
	void (*process_wow_ack_rsp)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	/* Finish datapath-specific work before the target is suspended */
	void (*process_target_suspend_req)(struct cdp_soc_t *soc_hdl,
					   uint8_t pdev_id);
};
#endif

+34 −2
Original line number Diff line number Diff line
@@ -5403,8 +5403,6 @@ static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,

	pdev = vdev->pdev;

	soc->vdev_id_map[vdev->vdev_id] = NULL;

	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
				     vdev->vap_self_peer->mac_addr.raw, 0);
@@ -5420,6 +5418,12 @@ static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);

	dp_rx_vdev_detach(vdev);
	/*
	 * move it after dp_rx_vdev_detach(),
	 * as the call back done in dp_rx_vdev_detach()
	 * still need to get vdev pointer by vdev_id.
	 */
	soc->vdev_id_map[vdev->vdev_id] = NULL;
	/*
	 * Use peer_ref_mutex while accessing peer_list, in case
	 * a peer is in the process of being removed from the list.
@@ -10765,10 +10769,38 @@ static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
	}
}

/**
 * dp_process_target_suspend_req() - process target suspend request
 * @soc_hdl: datapath soc handle
 * @pdev_id: data path pdev handle id
 *
 * Return: none
 */
static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id)
{
	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *dp_pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(dp_soc, pdev_id);
	bool reap_timer_active;

	if (qdf_unlikely(!dp_pdev)) {
		dp_err("pdev is NULL");
		return;
	}

	reap_timer_active =
		((dp_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
		 dp_is_enable_reap_timer_non_pkt(dp_pdev)) &&
		dp_soc->reap_timer_init;

	/* Stop monitor reap timer and reap any pending frames in ring */
	if (reap_timer_active) {
		qdf_timer_sync_cancel(&dp_soc->mon_reap_timer);
		dp_service_mon_rings(dp_soc, DP_MON_REAP_BUDGET);
	}
}

/* Bus-related CDP ops table exported by the datapath to the common layer */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
#endif

+39 −1
Original line number Diff line number Diff line
@@ -1847,6 +1847,7 @@ static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
	unsigned long curr_ts = qdf_get_system_timestamp();

	if ((reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
@@ -1859,7 +1860,8 @@ static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
			  freedesc->rx_tid.tid);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
		  "%s:%lu hw_qdesc_paddr: %pK, tid:%d", __func__,
		  curr_ts,
		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
	qdf_mem_unmap_nbytes_single(soc->osdev,
		rx_tid->hw_qdesc_paddr,
@@ -2088,6 +2090,23 @@ static void dp_reo_desc_clean_up(struct dp_soc *soc,
			     (qdf_list_node_t *)desc);
}

/*
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to
 * the cmd ring in order to avoid a REO hang
 *
 * @list_size: REO desc list size to be cleaned; clamped in place to
 *	       REO_DESC_FREELIST_SIZE when it exceeds that limit
 */
static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
{
	unsigned long curr_ts = qdf_get_system_timestamp();

	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
		/* %u: *list_size is uint32_t, %d mismatched the type */
		dp_err_log("%lu:freedesc number %u in freelist",
			   curr_ts, *list_size);
		/* limit the batch queue size */
		*list_size = REO_DESC_FREELIST_SIZE;
	}
}
#else
/*
 * dp_reo_desc_clean_up() - If send cmd to REO inorder to flush
@@ -2107,6 +2126,16 @@ static void dp_reo_desc_clean_up(struct dp_soc *soc,
		dp_reo_desc_free(soc, (void *)desc, reo_status);
	}
}

/*
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to
 * the cmd ring in order to avoid a REO hang
 *
 * @list_size: REO desc list size to be cleaned
 *
 * No-op in this build configuration; the list size is left unchanged.
 */
static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
{
}
#endif

/*
@@ -2175,6 +2204,7 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
		return;
	} else if (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) {
@@ -2196,6 +2226,14 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
	qdf_list_insert_back_size(&soc->reo_desc_freelist,
		(qdf_list_node_t *)freedesc, &list_size);

	/* On the MCL path, descriptors are added back to reo_desc_freelist
	 * when a REO FLUSH fails. The number of REO queues pending in the
	 * free list can then grow beyond the REO_CMD_RING max size, which
	 * floods the REO CMD ring and can put the REO HW in an unexpected
	 * state. So the number of REO cmds issued in one batch operation
	 * must be limited.
	 */
	dp_reo_limit_clean_batch_sz(&list_size);

	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
		((list_size >= REO_DESC_FREELIST_SIZE) ||
+15 −4
Original line number Diff line number Diff line
@@ -148,14 +148,16 @@ QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @func_name: name of the caller function
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				struct dp_srng *dp_rxdma_srng,
				struct rx_desc_pool *rx_desc_pool,
				uint32_t num_req_buffers,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail)
				union dp_rx_desc_list_elem_t **tail,
				const char *func_name)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
@@ -284,7 +286,8 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;

		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
					   func_name, RX_DESC_REPLENISHED);
		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
				 (unsigned long long)paddr,
@@ -1773,7 +1776,10 @@ void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
			   l2_hdr_offset);

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		vdev->osif_rx(vdev->osif_vdev, nbuf);
		qdf_nbuf_set_exc_frame(nbuf, 1);
		if (QDF_STATUS_SUCCESS !=
		    vdev->osif_rx(vdev->osif_vdev, nbuf))
			goto deliver_fail;
		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
		return;
	}
@@ -2695,6 +2701,10 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,

			dp_rx_desc_prep(&desc_list->rx_desc, nbuf);
			desc_list->rx_desc.in_use = 1;
			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
						   __func__,
						   RX_DESC_REPLENISHED);

			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
						     desc_list->rx_desc.cookie,
@@ -2872,6 +2882,7 @@ bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
	qdf_nbuf_pull_head(nbuf, skip_len);

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
				       nbuf, NULL);
		return true;
Loading