Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e6aa9514 authored by Yeshwanth Sriram Guntuka's avatar Yeshwanth Sriram Guntuka
Browse files

qcacmn: Decrement peer ref cnt after dp_rx_deliver_to_stack

The issue scenario is that a valid peer is fetched from
the peer_id in dp_rx_process and the peer ref count is
released prior to invoking dp_rx_deliver_to_stack. In
parallel, the peer is freed in a different context. This
results in a use-after-free within dp_rx_check_delivery_to_stack,
since the stale peer is dereferenced to update stats.

The fix is to decrement the peer ref count only after dp_rx_deliver_to_stack has completed.

Change-Id: I145247f7795f926faba66c05927fdae0599f0cad
CRs-Fixed: 2720396
parent adffa0e2
Loading
Loading
Loading
Loading
+10 −10
Original line number Diff line number Diff line
@@ -2216,7 +2216,13 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
			tid = qdf_nbuf_get_tid_val(nbuf);

		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (qdf_unlikely(!peer)) {
			peer = dp_peer_find_by_id(soc, peer_id);
		} else if (peer && peer->peer_ids[0] != peer_id) {
			dp_peer_unref_del_find_by_id(peer);
			peer = dp_peer_find_by_id(soc, peer_id);
		}

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
@@ -2241,7 +2247,6 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

@@ -2329,7 +2334,6 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
		} else {
@@ -2351,7 +2355,6 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
		}
@@ -2365,7 +2368,6 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

@@ -2378,7 +2380,6 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

@@ -2407,7 +2408,6 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,

				qdf_nbuf_free(nbuf);
				nbuf = next;
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
@@ -2434,7 +2434,6 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
				qdf_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
			/* WDS Source Port Learning */
@@ -2453,7 +2452,6 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
							nbuf,
							msdu_metadata)) {
					nbuf = next;
					dp_peer_unref_del_find_by_id(peer);
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
@@ -2469,7 +2467,6 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,

		tid_stats->delivered_to_stack++;
		nbuf = next;
		dp_peer_unref_del_find_by_id(peer);
	}

	if (qdf_likely(deliver_list_head)) {
@@ -2488,6 +2485,9 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
		}
	}

	if (qdf_likely(peer))
		dp_peer_unref_del_find_by_id(peer);

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =