Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d6e8c40 authored by Subash Abhinov Kasiviswanathan's avatar Subash Abhinov Kasiviswanathan
Browse files

drivers: shs: Change allocation context of shs allocations within spin_lock



The allocation of the shs memory for cpu and flow level stats is
done in atomic context because the invocation of spin_lock_irqsave
disables preemption. Fixes the following:

230.251419:   <6> sleeping function called from invalid context at mm/slab.h:422
230.277265:   <6> in_atomic(): 1, irqs_disabled(): 128, pid: 62, name: kworker/6:0
230.277267:   <2> INFO: lockdep is turned off.
230.284504:   <2> irq event stamp: 90
230.284514:   <2> hardirqs last  enabled at (89): [<ffffff9ddee82594>] _raw_spin_unlock_irq+0x34/0x68
230.284517:   <2> hardirqs last disabled at (90): [<ffffff9ddee7b5a8>] __schedule+0x138/0x1128
230.284524:   <2> softirqs last  enabled at (0): [<ffffff9ddd8b7f24>] copy_process+0x60c/0x1c28
230.284525:   <2> softirqs last disabled at (0): [<0000000000000000>]           (null)
230.284526:   <4> Preemption disabled at:
230.284535:   <2> [<ffffff9d7fa2b63c>] rmnet_shs_wq_process_wq+0x18c/0x350 [rmnet_shs]
230.288129:   <6> ------------[ cut here ]------------
230.292958:   <6> at kernel/sched/core.c:6786!
230.305980:   <6> Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
230.358297:   <6> Process kworker/6:0 (pid: 62, stack limit = 0xffffff80083c0000)
230.365454:   <6> CPU: 6 PID: 62 Comm: kworker/6:0 Tainted: G S         O      4.19.81+ #1
230.379937:   <6> Workqueue: rmnet_shs_wq rmnet_shs_wq_process_wq [rmnet_shs]
230.386741:   <2> pc : ___might_sleep+0x204/0x208
230.401745:   <2> lr : ___might_sleep+0x204/0x208
230.598414:   <2> Call trace:
230.598419:   <2>  ___might_sleep+0x204/0x208
230.598420:   <2>  __might_sleep+0x50/0x88
230.598423:   <2>  kmem_cache_alloc_trace+0x74/0x420
230.598430:   <2>  rmnet_shs_wq_cpu_caps_list_add+0x64/0x118 [rmnet_shs]
230.598433:   <2>  rmnet_shs_wq_update_stats+0x4dc/0xea0 [rmnet_shs]
230.598435:   <2>  rmnet_shs_wq_process_wq+0x194/0x350 [rmnet_shs]
230.598438:   <2>  process_one_work+0x328/0x6b0
230.598439:   <2>  worker_thread+0x330/0x4d0
230.598441:   <2>  kthread+0x128/0x138
230.598443:   <2>  ret_from_fork+0x10/0x1c

Also fixes structure padding of shared mem structs, which was causing memcpy overrun.

CRs-Fixed: 2570479
Change-Id: Ia58b0bee544afb030353ad1d3cd45d8c16a94f75
Signed-off-by: default avatarSubash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent ceaa8bcb
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -68,7 +68,7 @@ int rmnet_shs_genl_send_int_to_userspace(struct genl_info *info, int val)
	void *msg_head;
	int rc;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

@@ -109,7 +109,7 @@ int rmnet_shs_genl_send_int_to_userspace_no_info(int val)
		return -1;
	}

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

@@ -148,7 +148,7 @@ int rmnet_shs_genl_send_msg_to_userspace(void)
	int val = rmnet_shs_genl_seqnum++;

	rm_err("SHS_GNL: Trying to send msg %d\n", val);
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

+3 −3
Original line number Diff line number Diff line
@@ -255,7 +255,7 @@ void rmnet_shs_wq_ssflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
		return;
	}

	ssflow_node = kzalloc(sizeof(*ssflow_node), GFP_KERNEL);
	ssflow_node = kzalloc(sizeof(*ssflow_node), GFP_ATOMIC);
	if (ssflow_node != NULL) {
		ssflow_node->avg_pps = hnode->avg_pps;
		ssflow_node->cpu_num = hnode->current_cpu;
@@ -304,7 +304,7 @@ void rmnet_shs_wq_gflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
	}

	if (!rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
		gflow_node = kzalloc(sizeof(*gflow_node), GFP_KERNEL);
		gflow_node = kzalloc(sizeof(*gflow_node), GFP_ATOMIC);
		if (gflow_node != NULL) {
			gflow_node->avg_pps = hnode->avg_pps;
			gflow_node->cpu_num = hnode->current_cpu;
@@ -361,7 +361,7 @@ void rmnet_shs_wq_cpu_caps_list_add(
	pps_uthresh = rmnet_shs_cpu_rx_max_pps_thresh[cpu_node->cpu_num];
	pps_lthresh = rmnet_shs_cpu_rx_min_pps_thresh[cpu_node->cpu_num];

	cap_node = kzalloc(sizeof(*cap_node), GFP_KERNEL);
	cap_node = kzalloc(sizeof(*cap_node), GFP_ATOMIC);
	if (cap_node == NULL) {
		rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
		return;
+3 −3
Original line number Diff line number Diff line
@@ -26,14 +26,14 @@

#define RMNET_SHS_MAX_USRFLOWS (128)

struct rmnet_shs_wq_cpu_cap_usr_s {
struct __attribute__((__packed__)) rmnet_shs_wq_cpu_cap_usr_s {
	u64 pps_capacity;
	u64 avg_pps_capacity;
	u64 bps_capacity;
	u16 cpu_num;
};

struct rmnet_shs_wq_gflows_usr_s {
struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
	u64 rx_pps;
	u64 avg_pps;
	u64 rx_bps;
@@ -41,7 +41,7 @@ struct rmnet_shs_wq_gflows_usr_s {
	u16 cpu_num;
};

struct rmnet_shs_wq_ssflows_usr_s {
struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
	u64 rx_pps;
	u64 avg_pps;
	u64 rx_bps;