
Commit 514a4d4e authored by Subash Abhinov Kasiviswanathan, committed by Gerrit - the friendly Code Review server

drivers: rmnet_shs: Fix DEBUG_LIST/spinlock error



Previously, during SSR (subsystem restart) the nodes were not
cleaned up on reinit; the old nodes would then be cleaned up
incorrectly, as reinit had already invalidated their contents.
This change correctly cleans up the old nodes during SSR.

A spinlock deadlock was occurring between the shs workqueue and
the rx context when spin_lock_irqsave() was not being used.
Change all spin_lock() calls to spin_lock_irqsave().
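
[Editor's note] The deadlock pattern being fixed is worth spelling out: if a lock is taken with plain spin_lock() in workqueue (process) context and the same lock is also taken from the rx (softirq/IRQ) path, an interrupt arriving on the same CPU while the workqueue holds the lock leaves the rx side spinning forever on a lock its own CPU already owns. Below is a minimal sketch of the corrected idiom; the names example_lock, example_list, and example_wq_add are illustrative and not part of this driver.

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);	/* shared with the rx path */
static LIST_HEAD(example_list);

static void example_wq_add(struct list_head *node)
{
	unsigned long flags;

	/* spin_lock_irqsave() disables local interrupts while the
	 * lock is held, so the rx path cannot preempt this section
	 * on the same CPU and spin on a lock this CPU already owns. */
	spin_lock_irqsave(&example_lock, flags);
	list_add(node, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);
}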

CRs-Fixed: 2340672
Change-Id: I3bc3708e8f900738b1835a0d164ff69c138846ca
Acked-by: Raul Martinez <mraul@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent 8c35db1d
+43 −22
@@ -25,6 +25,10 @@ MODULE_LICENSE("GPL v2");
 #define RMNET_SHS_BYTE_TO_BIT(x) ((x)*8)
 #define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
 #define RMNET_SHS_WQ_DELAY_TICKS  10
+
+#define PERIODIC_CLEAN 0
+/* FORCE_CLEAN should only be used during module de-init. */
+#define FORCE_CLEAN 1
 /* Time to wait (in time ticks) before re-triggering the workqueue
  *	1   tick  = 10 ms (Maximum possible resolution)
  *	100 ticks = 1 second
@@ -153,21 +157,23 @@ static struct list_head rmnet_shs_wq_ep_tbl =
  */
 void rmnet_shs_wq_ep_tbl_add(struct rmnet_shs_wq_ep_s *ep)
 {
+	unsigned long flags;
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_EP_TBL, RMNET_SHS_WQ_EP_TBL_ADD,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
-	spin_lock(&rmnet_shs_hstat_tbl_lock);
+	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_add(&ep->ep_list_id, &rmnet_shs_wq_ep_tbl);
-	spin_unlock(&rmnet_shs_hstat_tbl_lock);
+	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 }
 
 void rmnet_shs_wq_ep_tbl_remove(struct rmnet_shs_wq_ep_s *ep)
 {
+	unsigned long flags;
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_EP_TBL, RMNET_SHS_WQ_EP_TBL_DEL,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
 
-	spin_lock(&rmnet_shs_hstat_tbl_lock);
+	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_del_init(&ep->ep_list_id);
-	spin_unlock(&rmnet_shs_hstat_tbl_lock);
+	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 
 }
 
@@ -176,23 +182,27 @@ void rmnet_shs_wq_ep_tbl_remove(struct rmnet_shs_wq_ep_s *ep)
  */
 void rmnet_shs_wq_hstat_tbl_add(struct rmnet_shs_wq_hstat_s *hnode)
 {
+	unsigned long flags;
+
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_HSTAT_TBL,
 			       RMNET_SHS_WQ_HSTAT_TBL_ADD,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, hnode, NULL);
-	spin_lock(&rmnet_shs_hstat_tbl_lock);
+	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_add(&hnode->hstat_node_id, &rmnet_shs_wq_hstat_tbl);
-	spin_unlock(&rmnet_shs_hstat_tbl_lock);
+	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 }
 
 void rmnet_shs_wq_hstat_tbl_remove(struct rmnet_shs_wq_hstat_s *hnode)
 {
+	unsigned long flags;
+
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_HSTAT_TBL,
 			       RMNET_SHS_WQ_HSTAT_TBL_DEL,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, hnode, NULL);
 
-	spin_lock(&rmnet_shs_hstat_tbl_lock);
+	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_del_init(&hnode->hstat_node_id);
-	spin_unlock(&rmnet_shs_hstat_tbl_lock);
+	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 
 }
 
@@ -202,37 +212,43 @@ void rmnet_shs_wq_hstat_tbl_remove(struct rmnet_shs_wq_hstat_s *hnode)
  */
 void rmnet_shs_wq_cpu_list_remove(struct rmnet_shs_wq_hstat_s *hnode)
 {
+	unsigned long flags;
+
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_HSTAT_TBL,
 			    RMNET_SHS_WQ_CPU_HSTAT_TBL_DEL,
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, hnode, NULL);
-	spin_lock(&rmnet_shs_hstat_tbl_lock);
+	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_del_init(&hnode->cpu_node_id);
-	spin_unlock(&rmnet_shs_hstat_tbl_lock);
+	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 
 }
 
 void rmnet_shs_wq_cpu_list_add(struct rmnet_shs_wq_hstat_s *hnode,
 			    struct list_head *head)
 {
+	unsigned long flags;
+
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_HSTAT_TBL,
 			    RMNET_SHS_WQ_CPU_HSTAT_TBL_ADD,
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, hnode, NULL);
 
-	spin_lock(&rmnet_shs_hstat_tbl_lock);
+	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_add(&hnode->cpu_node_id, head);
-	spin_unlock(&rmnet_shs_hstat_tbl_lock);
+	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 }
 
 void rmnet_shs_wq_cpu_list_move(struct rmnet_shs_wq_hstat_s *hnode,
 			     struct list_head *head)
 {
+	unsigned long flags;
+
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_HSTAT_TBL,
 			    RMNET_SHS_WQ_CPU_HSTAT_TBL_MOVE,
 			    hnode->current_cpu,
 			    0xDEF, 0xDEF, 0xDEF, hnode, NULL);
-	spin_lock(&rmnet_shs_hstat_tbl_lock);
+	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_move(&hnode->cpu_node_id, head);
-	spin_unlock(&rmnet_shs_hstat_tbl_lock);
+	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 
 }
 
@@ -300,8 +316,9 @@ struct rmnet_shs_wq_hstat_s *rmnet_shs_wq_get_new_hstat_node(void)
 {
 	struct rmnet_shs_wq_hstat_s *hnode;
 	struct rmnet_shs_wq_hstat_s *ret_node = NULL;
+	unsigned long flags;
 
-	spin_lock(&rmnet_shs_hstat_tbl_lock);
+	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_for_each_entry(hnode, &rmnet_shs_wq_hstat_tbl, hstat_node_id) {
 		if (hnode == NULL)
 			continue;
@@ -313,7 +330,7 @@ struct rmnet_shs_wq_hstat_s *rmnet_shs_wq_get_new_hstat_node(void)
 			break;
 		}
 	}
-	spin_unlock(&rmnet_shs_hstat_tbl_lock);
+	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 
 	if (ret_node) {
 		trace_rmnet_shs_wq_low(RMNET_SHS_WQ_HSTAT_TBL,
@@ -1119,7 +1136,7 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
 	return cpu_assigned;
 }
 
-void rmnet_shs_wq_cleanup_hash_tbl(void)
+void rmnet_shs_wq_cleanup_hash_tbl(u8 force_clean)
 {
 	struct rmnet_shs_skbn_s *node_p;
 	time_t tns2s;
@@ -1133,9 +1150,12 @@ void rmnet_shs_wq_cleanup_hash_tbl(void)
 		if (hnode == NULL)
 			continue;
 
+		if (hnode->node == NULL)
+			continue;
+
 		node_p = hnode->node;
 		tns2s = RMNET_SHS_NSEC_TO_SEC(hnode->inactive_duration);
-		if (tns2s > rmnet_shs_max_flow_inactivity_sec) {
+		if (tns2s > rmnet_shs_max_flow_inactivity_sec || force_clean) {
 
 			trace_rmnet_shs_wq_low(RMNET_SHS_WQ_FLOW_STATS,
 			    RMNET_SHS_WQ_FLOW_STATS_FLOW_INACTIVE_TIMEOUT,
@@ -1149,11 +1169,12 @@ void rmnet_shs_wq_cleanup_hash_tbl(void)
 				kfree(node_p);
 			}
 			rmnet_shs_wq_cpu_list_remove(hnode);
-			if (hnode->is_perm == 0) {
+			if (hnode->is_perm == 0 || force_clean) {
 				rmnet_shs_wq_hstat_tbl_remove(hnode);
 				kfree(hnode);
-			} else
+			} else {
 				rmnet_shs_wq_hstat_reset_node(hnode);
+			}
 			spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 		}
 	}
@@ -1250,7 +1271,7 @@ static void rmnet_shs_wq_update_stats(void)
 	rmnet_shs_wq_eval_suggested_cpu();
 	rmnet_shs_wq_refresh_new_flow_list();
 	/*Invoke after both the locks are released*/
-	rmnet_shs_wq_cleanup_hash_tbl();
+	rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
 }
 
 void rmnet_shs_wq_process_wq(struct work_struct *work)
@@ -1304,8 +1325,8 @@ void rmnet_shs_wq_exit(void)
 
 	rmnet_shs_delayed_wq = NULL;
 	rmnet_shs_wq = NULL;
+	rmnet_shs_wq_cleanup_hash_tbl(FORCE_CLEAN);
 	rmnet_shs_wq_clean_ep_tbl();
 
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EXIT, RMNET_SHS_WQ_EXIT_END,
 				   0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 }
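
[Editor's note] On the DEBUG_LIST half of the subject line: with CONFIG_DEBUG_LIST enabled, the kernel validates list_head prev/next pointers on every add and delete, so touching nodes whose contents a reinit has invalidated produces "list_add corruption" / "list_del corruption" warnings. The FORCE_CLEAN pass at module exit frees every node (including is_perm ones) before a reinit can see them, and the patch consistently uses list_del_init() rather than list_del(). A small illustrative sketch of why that choice matters; example_remove is a hypothetical name, not part of this driver:

#include <linux/list.h>

static void example_remove(struct list_head *node)
{
	/* list_del() poisons node->next/prev, so a second delete of
	 * a stale node trips CONFIG_DEBUG_LIST. list_del_init()
	 * instead leaves the node pointing at itself, so deleting
	 * it again later is a safe no-op. */
	list_del_init(node);
}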