Commit a21a3f1f authored by Subash Abhinov Kasiviswanathan

drivers: rmnet_shs: Remove rmnet ep access



The rmnet driver allocates the rmnet_endpoint structures that
rmnet_shs was using to keep track of the endpoints it needed. However,
the rmnet driver frees this memory before endpoint unregistration, so
this leaves a potential race condition where the wq can run after the
memory is freed.
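
In outline, the bad ordering looks like the following minimal
userspace sketch (stub types only, not the actual driver code):

#include <stdlib.h>

/* Stub types; the real definitions live in the rmnet core. */
struct net_device { int ifindex; };
struct rmnet_endpoint { struct net_device *egress_dev; };
struct rmnet_shs_wq_ep_s { struct rmnet_endpoint *ep; };

/* The rmnet core frees the endpoint during teardown... */
static void rmnet_core_teardown(struct rmnet_endpoint *ep)
{
	free(ep);
}

/* ...but the rmnet_shs workqueue can still run afterwards and
 * dereference the stale pointer. */
static void shs_wq_update(struct rmnet_shs_wq_ep_s *shs_ep)
{
	struct net_device *dev = shs_ep->ep->egress_dev; /* use-after-free */
	(void)dev;
}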

Change is to instead use the net_device references we keep track of
from the netdevice notifier callback and to no longer use the
rmnet_endpoints allocated by the rmnet driver. Rmnet_shs was only using
the netdev references within rmnet_endpoint, so no impact should be
expected.
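
Concretely (paraphrasing the rmnet_shs_wq.h hunk below), the wq
endpoint entry drops the core-owned pointer and keeps only the
net_device; entries are now allocated in rmnet_shs_wq_set_ep_active()
and freed in rmnet_shs_wq_reset_ep_active(), so rmnet_shs owns their
lifetime:

/* Before: ep points into memory the rmnet core frees on its own. */
struct rmnet_shs_wq_ep_s {
	struct list_head ep_list_id;
	struct rmnet_endpoint *ep;	/* owned and freed by rmnet core */
	struct net_device *netdev;
	/* ... */
};

/* After: only the net_device reference remains; the field keeps the
 * old name. */
struct rmnet_shs_wq_ep_s {
	struct list_head ep_list_id;
	struct net_device *ep;
	/* ... */
};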

This use-after-free would cause the following crash signature:

<6> Unable to handle kernel paging request at virtual address 00005000
<6> Mem abort info:
<6>   Exception class = DABT (current EL), IL = 32 bits
<6>   SET = 0, FnV = 0
<6>   EA = 0, S1PTW = 0
<6>   FSC = 5
<6> Data abort info:
<6>   ISV = 0, ISS = 0x00000005
<6>   CM = 0, WnR = 0
<6> user pgtable: 4k pages, 39-bit VAs, pgd = 0000000070b0b425
<6> Internal error: Oops: 96000005 [#1] PREEMPT SMP
<6> Workqueue: rmnet_shs_wq rmnet_shs_wq_process_wq [rmnet_shs]
<6> task: 00000000deaad59d task.stack: 00000000053e0949
<2> pc : rmnet_shs_wq_update_ep_rps_msk+0x3c/0xd8 [rmnet_shs]
<2> lr : rmnet_shs_wq_update_ep_rps_msk+0x28/0xd8 [rmnet_shs]
<2> Call trace:
<2>  rmnet_shs_wq_update_ep_rps_msk+0x3c/0xd8 [rmnet_shs]
<2>  rmnet_shs_wq_update_stats+0x98/0x928 [rmnet_shs]
<2>  rmnet_shs_wq_process_wq+0x10c/0x248 [rmnet_shs]
<2>  process_one_work+0x1f0/0x458
<2>  worker_thread+0x2ec/0x450
<2>  kthread+0x11c/0x130
<2>  ret_from_fork+0x10/0x1c

CRs-Fixed: 2541604
Change-Id: I7026f2564c463f4ca989af97572e2a8fe5652087
Acked-by: Raul Martinez <mraul@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent 1ff99175
+4 −3
@@ -164,7 +164,8 @@ static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
 			if (ret)
 				pr_err("%s(): rmnet ps_ind registration fail\n",
 				       __func__);
 
+			rmnet_shs_update_cfg_mask();
 			rmnet_shs_wq_refresh_new_flow_list();
 			rmnet_shs_cfg.is_reg_dl_mrk_ind = 1;
 			trace_rmnet_shs_high(RMNET_SHS_MODULE,
 					     RMNET_SHS_MODULE_INIT_WQ,
+1 −1
@@ -1354,7 +1354,7 @@ void rmnet_shs_init(struct net_device *dev, struct net_device *vnd)
 		map_len = 0;
 	} else {
 		map_mask = rmnet_shs_mask_from_map(map);
-		map_len = rmnet_shs_get_mask_len(rmnet_shs_cfg.map_mask);
+		map_len = rmnet_shs_get_mask_len(map_mask);
 	}
 
 	rmnet_shs_cfg.port = rmnet_get_port(dev);
+27 −82
@@ -168,24 +168,16 @@ static struct list_head rmnet_shs_wq_ep_tbl =
  */
 void rmnet_shs_wq_ep_tbl_add(struct rmnet_shs_wq_ep_s *ep)
 {
-	unsigned long flags;
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_EP_TBL, RMNET_SHS_WQ_EP_TBL_ADD,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
-	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_add(&ep->ep_list_id, &rmnet_shs_wq_ep_tbl);
-	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 }
 
 void rmnet_shs_wq_ep_tbl_remove(struct rmnet_shs_wq_ep_s *ep)
 {
-	unsigned long flags;
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_EP_TBL, RMNET_SHS_WQ_EP_TBL_DEL,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
-
-	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_del_init(&ep->ep_list_id);
-	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
-
 }
 
 /* Helper functions to add and remove entries to the table
@@ -415,7 +407,7 @@ void rmnet_shs_wq_update_hstat_rps_msk(struct rmnet_shs_wq_hstat_s *hstat_p)
 	/*Map RPS mask from the endpoint associated with this flow*/
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 
-		if (ep && (node_p->dev == ep->ep->egress_dev)) {
+		if (ep && (node_p->dev == ep->ep)) {
 			hstat_p->rps_config_msk = ep->rps_config_msk;
 			hstat_p->def_core_msk = ep->default_core_msk;
 			hstat_p->pri_core_msk = ep->pri_core_msk;
@@ -556,7 +548,7 @@ static void rmnet_shs_wq_refresh_dl_mrkr_stats(void)
 	tbl_p->dl_mrk_last_rx_bytes = tbl_p->dl_mrk_rx_bytes;
 	tbl_p->dl_mrk_last_rx_pkts = tbl_p->dl_mrk_rx_pkts;
 
-	port = rmnet_get_port(rmnet_shs_delayed_wq->netdev);
+	port = rmnet_shs_cfg.port;
 	if (!port) {
 		rmnet_shs_crit_err[RMNET_SHS_WQ_GET_RMNET_PORT_ERR]++;
 		return;
@@ -735,7 +727,7 @@ void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
 		hstat_p = node_p->hstats;
 
 		if ((hstat_p->suggested_cpu == old_cpu) &&
-		    (node_p->dev == ep->ep->egress_dev)) {
+		    (node_p->dev == ep->ep)) {
 
 			trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
 				RMNET_SHS_WQ_FLOW_STATS_SUGGEST_NEW_CPU,
@@ -762,24 +754,6 @@ u64 rmnet_shs_wq_get_max_pps_among_cores(u32 core_msk)
 	return max_pps;
 }
 
-u32 rmnet_shs_wq_get_dev_rps_msk(struct net_device *dev)
-{
-	u32 dev_rps_msk = 0;
-	struct rmnet_shs_wq_ep_s *ep = NULL;
-
-	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
-		if (!ep)
-			continue;
-
-		if (!ep->is_ep_active)
-			continue;
-
-		if (ep->ep->egress_dev == dev)
-			dev_rps_msk = ep->rps_config_msk;
-	}
-	return dev_rps_msk;
-}
-
 /* Returns the least utilized core from a core mask
  * In order of priority
  *    1) Returns leftmost core with no flows (Fully Idle)
@@ -1102,7 +1076,7 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
 		if (!ep->is_ep_active)
 			continue;
 
-		if (ep->ep->egress_dev == dev) {
+		if (ep->ep == dev) {
 			is_match_found = 1;
 			break;
 		}
@@ -1152,7 +1126,7 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
 		if (!ep->is_ep_active)
 			continue;
 
-		if (ep->ep->egress_dev == dev) {
+		if (ep->ep == dev) {
 			is_match_found = 1;
 			break;
 		}
@@ -1254,18 +1228,18 @@ void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
 	struct rps_map *map;
 	u8 len = 0;
 
-	if (!ep || !ep->ep || !ep->ep->egress_dev) {
+	if (!ep || !ep->ep ) {
 		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
 		return;
 	}
 
 	rcu_read_lock();
-	if (!ep->ep || !ep->ep->egress_dev) {
+	if (!ep->ep) {
 		pr_info(" rmnet_shs invalid state %p", ep->ep);
 		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
 		return;
 	}
-	map = rcu_dereference(ep->ep->egress_dev->_rx->rps_map);
+	map = rcu_dereference(ep->ep->_rx->rps_map);
 
 	ep->rps_config_msk = 0;
 	if (map != NULL) {
@@ -1281,20 +1255,23 @@ void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
 void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
 {
 	struct rmnet_shs_wq_ep_s *ep = NULL;
+	struct rmnet_shs_wq_ep_s *tmp = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
-	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
+	list_for_each_entry_safe(ep, tmp, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 		if (!ep)
 			continue;
 
-		if (ep->netdev == dev){
+		if (ep->ep == dev){
 			ep->is_ep_active = 0;
-			ep->netdev = NULL;
+			rmnet_shs_wq_ep_tbl_remove(ep);
+			kfree(ep);
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
+
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 }
 
 void rmnet_shs_wq_set_ep_active(struct net_device *dev)
@@ -1303,16 +1280,21 @@ void rmnet_shs_wq_set_ep_active(struct net_device *dev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
-	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
-		if (!ep)
-			continue;
-
-		if (ep->ep->egress_dev == dev){
-			ep->is_ep_active = 1;
-			ep->netdev = dev;
+	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+
+	if (!ep) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_ALLOC_EP_TBL_ERR]++;
+		spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
+		return;
+	}
-	}
+	ep->ep = dev;
+	ep->is_ep_active = 1;
+
+	INIT_LIST_HEAD(&ep->ep_list_id);
+	rmnet_shs_wq_update_ep_rps_msk(ep);
+	rmnet_shs_wq_ep_tbl_add(ep);
 
 	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 }
 
@@ -1443,34 +1425,6 @@ void rmnet_shs_wq_exit(void)
 				   0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 }
 
-void rmnet_shs_wq_gather_rmnet_ep(struct net_device *dev)
-{
-	u8 mux_id;
-	struct rmnet_port *port;
-	struct rmnet_endpoint *ep;
-	struct rmnet_shs_wq_ep_s *ep_wq;
-
-	port = rmnet_get_port(dev);
-
-	for (mux_id = 1; mux_id < 255; mux_id++) {
-		ep = rmnet_get_endpoint(port, mux_id);
-		if (!ep)
-			continue;
-
-		trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EP_TBL,
-					RMNET_SHS_WQ_EP_TBL_INIT,
-					0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
-		ep_wq = kzalloc(sizeof(*ep_wq), GFP_ATOMIC);
-		if (!ep_wq) {
-			rmnet_shs_crit_err[RMNET_SHS_WQ_ALLOC_EP_TBL_ERR]++;
-			return;
-		}
-		INIT_LIST_HEAD(&ep_wq->ep_list_id);
-		ep_wq->ep = ep;
-		rmnet_shs_wq_update_ep_rps_msk(ep_wq);
-		rmnet_shs_wq_ep_tbl_add(ep_wq);
-	}
-}
 void rmnet_shs_wq_init_cpu_rx_flow_tbl(void)
 {
 	u8 cpu_num;
@@ -1527,21 +1481,12 @@ void rmnet_shs_wq_init(struct net_device *dev)
 		return;
 	}
 
-	rmnet_shs_delayed_wq->netdev = dev;
-	rmnet_shs_wq_gather_rmnet_ep(dev);
-
 	/*All hstat nodes allocated during Wq init will be held for ever*/
 	rmnet_shs_wq_hstat_alloc_nodes(RMNET_SHS_MIN_HSTAT_NODES_REQD, 1);
 	rmnet_shs_wq_init_cpu_rx_flow_tbl();
 	INIT_DEFERRABLE_WORK(&rmnet_shs_delayed_wq->wq,
 			     rmnet_shs_wq_process_wq);
 
-	/* During initialization, we can start workqueue without a delay
-	 * to initialize all meta data and pre allocated memory
-	 * for hash stats, if required
-	 */
-	queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq, 0);
-
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_END,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 }
+4 −4
@@ -32,8 +32,7 @@
 
 struct rmnet_shs_wq_ep_s {
 	struct list_head ep_list_id;
-	struct rmnet_endpoint *ep;
-	struct net_device *netdev;
+	struct net_device *ep;
 	int  new_lo_core[MAX_CPUS];
 	int  new_hi_core[MAX_CPUS];
 	u16 default_core_msk;
@@ -133,7 +132,6 @@ struct rmnet_shs_wq_rx_flow_s {
 
 struct rmnet_shs_delay_wq_s {
 	struct delayed_work wq;
-	struct net_device *netdev;
 };
 
 
@@ -214,6 +212,8 @@ void rmnet_shs_wq_exit(void);
 void rmnet_shs_wq_restart(void);
 void rmnet_shs_wq_pause(void);
 
+void rmnet_shs_update_cfg_mask(void);
+
 u64 rmnet_shs_wq_get_max_pps_among_cores(u32 core_msk);
 void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p);
 int rmnet_shs_wq_get_least_utilized_core(u16 core_msk);
@@ -221,9 +221,9 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev);
 int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev);
 u64 rmnet_shs_wq_get_max_allowed_pps(u16 cpu);
 void rmnet_shs_wq_inc_cpu_flow(u16 cpu);
-u32 rmnet_shs_wq_get_dev_rps_msk(struct net_device *dev);
 void rmnet_shs_wq_dec_cpu_flow(u16 cpu);
+void rmnet_shs_hstat_tbl_delete(void);
 void rmnet_shs_wq_set_ep_active(struct net_device *dev);
 void rmnet_shs_wq_reset_ep_active(struct net_device *dev);
 void rmnet_shs_wq_refresh_new_flow_list(void);
 #endif /*_RMNET_SHS_WQ_H_*/