Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5c6b2ee5 authored by Ananya Gupta's avatar Ananya Gupta Committed by Madan Koyyalamudi
Browse files

qcacmn: Send EAPOL pkt to stack in rx_err path

Send only EAPOL packets to the stack, and drop any other packet
found in the rx error path when either the REO or RXDMA push
reason is set to ROUTE type.

Change-Id: If1f43426adf21f7d00f17d369cd7fde7f7f85866
CRs-Fixed: 3114312
parent 339a6e5c
Loading
Loading
Loading
Loading
+142 −1
Original line number Diff line number Diff line
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
@@ -1624,6 +1625,124 @@ void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
	return;
}

/**
 * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack
 *                            Free any other packet which comes in
 *                            this path.
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @peer: peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @err_src: which block routed the frame here (REO or RXDMA);
 *           used only to pick the per-source drop counter
 *
 * Called when a frame lands on the WBM error ring with push reason
 * ROUTE. Only an EAPOL/WAPI frame whose destination MAC matches the
 * vdev's own MAC address is indicated to the stack as an exception
 * frame; every other packet is counted in the matching
 * reo2rel/rxdma2rel route-drop statistic and freed.
 *
 * Return: None
 */
static void
dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
		    struct dp_peer *peer, uint8_t *rx_tlv_hdr,
		    enum hal_rx_wbm_error_source err_src)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct hal_rx_msdu_metadata msdu_metadata;
	bool is_eapol;

	/*
	 * Recover the MSDU length and L3 header padding from the rx
	 * TLVs that hardware prepends to the payload; total on-buffer
	 * length is payload + pad + TLV area.
	 */
	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		/* Drop if the computed length fails the sanity check */
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf, clamped to the rx buffer size */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		/* TLV header is expected to sit at the buffer start */
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		/*
		 * NOTE(review): only asserts; on builds where qdf_assert
		 * is compiled out, processing continues with this nbuf —
		 * confirm that is intended.
		 */
		qdf_assert(0);
	}

	/* No peer context to deliver through — drop */
	if (!peer)
		goto drop_nbuf;

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  RX_PKT_TLVS_LEN));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	/*
	 * Indicate EAPOL frame to stack only when vap mac address
	 * matches the destination address.
	 */
	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		/* Ethernet header is now at the front after pull_head */
		qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		if (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
				QDF_MAC_ADDR_SIZE) == 0) {
			DP_STATS_INC_PKT(vdev, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata.
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);
			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));
			/* Mark as exception so the stack path treats it so */
			qdf_nbuf_set_exc_frame(nbuf, 1);
			qdf_nbuf_set_next(nbuf, NULL);

			dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);

			/* Delivered: ownership passed to the stack path */
			return;
		}
	}

drop_nbuf:

	/* Count the drop against whichever block routed the frame */
	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_REO);
	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);

	qdf_nbuf_free(nbuf);
}

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
/**
 * dp_rx_link_cookie_check() - Validate link desc cookie
@@ -2466,6 +2585,17 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
						   wbm_err_info.reo_err_code);
					qdf_nbuf_free(nbuf);
				}
			} else if (wbm_err_info.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_REO);
			} else {
				/* should not enter here */
				dp_alert("invalid reo push reason %u",
					 wbm_err_info.reo_psh_rsn);
				qdf_nbuf_free(nbuf);
				qdf_assert_always(0);
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
@@ -2530,6 +2660,17 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			} else if (wbm_err_info.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_RXDMA);
			} else {
				/* should not enter here */
				dp_alert("invalid rxdma push reason %u",
					 wbm_err_info.rxdma_psh_rsn);
				qdf_nbuf_free(nbuf);
				qdf_assert_always(0);
			}
		} else {
			/* Should not come here */
+4 −0
Original line number Diff line number Diff line
@@ -6652,6 +6652,10 @@ dp_print_soc_rx_stats(struct dp_soc *soc)
		       soc->stats.rx.err.reo_cmd_send_fail);

	DP_PRINT_STATS("Rx BAR frames:%d", soc->stats.rx.bar_frame);
	DP_PRINT_STATS("Rxdma2rel route drop:%d",
		       soc->stats.rx.rxdma2rel_route_drop);
	DP_PRINT_STATS("Reo2rel route drop:%d",
		       soc->stats.rx.reo2rel_route_drop);
}

#ifdef FEATURE_TSO_STATS
+4 −0
Original line number Diff line number Diff line
@@ -972,6 +972,10 @@ struct dp_soc_stats {
		uint32_t msdu_scatter_wait_break;
		/* Number of bar frames received */
		uint32_t bar_frame;
		/* Number of frames routed from rxdma */
		uint32_t rxdma2rel_route_drop;
		/* Number of frames routed from reo*/
		uint32_t reo2rel_route_drop;

		struct {
			/* Invalid RBM error count */