Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit de10cbac authored by Subash Abhinov Kasiviswanathan's avatar Subash Abhinov Kasiviswanathan
Browse files

drivers: rmnet_shs: Fix ep down race condition



Previously, when a vnd would go down, we would clear its struct
in netdev_notifier_cb context. However, if rmnet_shs_wq is running,
it can get into a state where the struct has been cleared but has
already passed error checking, resulting in a use-after-clear error
of our own internal ep structs.

This error can cause the following kernel crash:

<6> Unable to handle kernel NULL pointer dereference at virtual address 00000328
<6> Mem abort info:
<6>   Exception class = DABT (current EL), IL = 32 bits
<6>   SET = 0, FnV = 0
<6>   EA = 0, S1PTW = 0
<6>   FSC = 5
<6> Data abort info:
<6>   ISV = 0, ISS = 0x00000005
<6>   CM = 0, WnR = 0
<6> user pgtable: 4k pages, 39-bit VAs, pgd = 0000000071c11f76
<6> [0000000000000328] *pgd=0000000000000000, *pud=0000000000000000
<6> Internal error: Oops: 96000005 [#1] PREEMPT SMP
<2> pc : rmnet_shs_wq_update_ep_rps_msk+0x24/0xb0 [rmnet_shs]
<2> lr : rmnet_shs_wq_update_ep_rps_msk+0x1c/0xb0 [rmnet_shs]
<2> Call trace:
<2>  rmnet_shs_wq_update_ep_rps_msk+0x24/0xb0 [rmnet_shs]
<2>  rmnet_shs_wq_refresh_ep_masks+0x3c/0x54 [rmnet_shs]
<2>  rmnet_shs_wq_process_wq+0x140/0x83c [rmnet_shs]
<2>  process_one_work+0x1e0/0x410
<2>  worker_thread+0x27c/0x38c
<2>  kthread+0x12c/0x13c
<2>  ret_from_fork+0x10/0x18
<6> Code: b40003b3 94000900 f9400a68 f9400508 (f9419508)

Change-Id: Ic1529b0e2645df08432c1ba22821db68d1c58951
Acked-by: default avatarRaul Martinez <mraul@qti.qualcomm.com>
Signed-off-by: default avatarSubash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent bc79c266
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
@@ -41,11 +41,9 @@ static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,

static struct notifier_block rmnet_shs_dev_notifier __read_mostly = {
	.notifier_call = rmnet_shs_dev_notify_cb,
	.priority = 2,
};

static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
				    unsigned long event, void *data);

static int rmnet_vnd_total;
/* Enable smart hashing capability upon call to initialize module*/
int __init rmnet_shs_module_init(void)
+19 −8
Original line number Diff line number Diff line
@@ -149,8 +149,8 @@ unsigned long long rmnet_shs_flow_rx_pps[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_rx_pps, ullong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_flow_rx_pps, "SHS stamp pkt enq rate per flow");

static spinlock_t rmnet_shs_wq_splock;
static DEFINE_SPINLOCK(rmnet_shs_hstat_tbl_lock);
static DEFINE_SPINLOCK(rmnet_shs_ep_lock);

static time_t rmnet_shs_wq_tnsec;
static struct workqueue_struct *rmnet_shs_wq;
@@ -726,7 +726,7 @@ void rmnet_shs_wq_update_cpu_rx_tbl(struct rmnet_shs_wq_hstat_s *hstat_p)

}

static void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
					      struct rmnet_shs_wq_ep_s *ep)
{
	struct rmnet_shs_skbn_s *node_p;
@@ -785,7 +785,6 @@ u32 rmnet_shs_wq_get_dev_rps_msk(struct net_device *dev)
		if (ep->ep->egress_dev == dev)
			dev_rps_msk = ep->rps_config_msk;
	}

	return dev_rps_msk;
}

@@ -1250,13 +1249,14 @@ void rmnet_shs_wq_cleanup_hash_tbl(u8 force_clean)

void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
{
	u8 len = 0;
	struct rps_map *map;
	u8 len = 0;

	if (!ep) {
	if (!ep || !ep->ep || !ep->ep->egress_dev) {
		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
		return;
	}

	rcu_read_lock();
	map = rcu_dereference(ep->ep->egress_dev->_rx->rps_map);
	ep->rps_config_msk = 0;
@@ -1265,6 +1265,7 @@ void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
			ep->rps_config_msk |= (1 << map->cpus[len]);
	}
	rcu_read_unlock();

	ep->default_core_msk = ep->rps_config_msk & 0x0F;
	ep->pri_core_msk = ep->rps_config_msk & 0xF0;
}
@@ -1272,7 +1273,9 @@ void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
{
	struct rmnet_shs_wq_ep_s *ep = NULL;
	unsigned long flags;

	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
		if (!ep)
			continue;
@@ -1282,13 +1285,16 @@ void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
			ep->netdev = NULL;
		}
	}
	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);

}

void rmnet_shs_wq_set_ep_active(struct net_device *dev)
{
	struct rmnet_shs_wq_ep_s *ep = NULL;
	unsigned long flags;

	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
		if (!ep)
			continue;
@@ -1299,7 +1305,7 @@ void rmnet_shs_wq_set_ep_active(struct net_device *dev)

		}
	}

	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
}

void rmnet_shs_wq_refresh_ep_masks(void)
@@ -1336,7 +1342,7 @@ void rmnet_shs_update_cfg_mask(void)
	rmnet_shs_cfg.map_len = rmnet_shs_get_mask_len(mask);
}

static void rmnet_shs_wq_update_stats(void)
void rmnet_shs_wq_update_stats(void)
{
	struct timespec time;
	struct rmnet_shs_wq_hstat_s *hnode = NULL;
@@ -1369,10 +1375,16 @@ static void rmnet_shs_wq_update_stats(void)

void rmnet_shs_wq_process_wq(struct work_struct *work)
{
	unsigned long flags;

	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
				RMNET_SHS_WQ_PROCESS_WQ_START,
				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);

	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
	rmnet_shs_wq_update_stats();
	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);

	queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
					rmnet_shs_wq_frequency);

@@ -1491,7 +1503,6 @@ void rmnet_shs_wq_init(struct net_device *dev)

	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_START,
				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
	spin_lock_init(&rmnet_shs_wq_splock);
	rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq",
					WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!rmnet_shs_wq) {