Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c390153e authored by Subash Abhinov Kasiviswanathan's avatar Subash Abhinov Kasiviswanathan
Browse files

drivers: rmnet: shs: Snapshot of data.lnx.5.1



Snapshot of shs driver on data.lnx.5.1 up to the following
change id.

drivers: rmnet: shs: Unrevert Deadlock fix
I1307d82ffa12d0cc1115baa25a19df8ada924e89

Change-Id: I868f2fff8a90d1e99860803c994cee0f69af60b2
Acked-by: Raul Martinez <mraul@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent af399a16
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@ enum rmnet_shs_crit_err_e {
	RMNET_SHS_WQ_INVALID_PTR_ERR,
	RMNET_SHS_WQ_NODE_MALLOC_ERR,
	RMNET_SHS_WQ_NL_SOCKET_ERR,
	RMNET_SHS_CPU_FLOWS_BNDS_ERR,
	RMNET_SHS_CRIT_ERR_MAX
};

+15 −5
Original line number Diff line number Diff line
/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,8 @@
#define GET_CTIMER(CPU) rmnet_shs_cfg.core_flush[CPU].core_timer

#define SKB_FLUSH 0
#define INCREMENT 1
#define DECREMENT 0
/* Local Definitions and Declarations */
DEFINE_SPINLOCK(rmnet_shs_ht_splock);
DEFINE_HASHTABLE(RMNET_SHS_HT, RMNET_SHS_HT_SIZE);
@@ -125,13 +127,21 @@ unsigned int rmnet_shs_cpu_max_coresum[MAX_CPUS];
module_param_array(rmnet_shs_cpu_max_coresum, uint, 0, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_max_coresum, "Max coresum seen of each core");

/* Bounds-checked adjustment of the per-CPU flow counter.
 *
 * @map_cpu: index of the CPU whose flow count is changing
 * @inc:     true to increment (INCREMENT), false to decrement (DECREMENT)
 *
 * An out-of-range CPU index is recorded in the critical-error counters
 * (RMNET_SHS_CPU_FLOWS_BNDS_ERR) instead of writing past the end of
 * cpu_num_flows[].
 */
static void rmnet_shs_change_cpu_num_flows(u16 map_cpu, bool inc)
{
	if (map_cpu >= MAX_CPUS) {
		rmnet_shs_crit_err[RMNET_SHS_CPU_FLOWS_BNDS_ERR]++;
		return;
	}

	/* Plain if/else rather than a side-effecting ternary expression,
	 * per kernel coding-style expectations.
	 */
	if (inc)
		cpu_num_flows[map_cpu]++;
	else
		cpu_num_flows[map_cpu]--;
}

/* Detach a flow node from its per-CPU list and drop that CPU's flow
 * count through the bounds-checked helper.
 *
 * @node: flow node to remove; node->map_cpu identifies the owning CPU.
 *
 * NOTE(review): the flattened diff left both the pre-patch raw
 * "cpu_num_flows[node->map_cpu]--" and the replacement helper call in
 * the body, which would decrement the counter twice. Only the
 * bounds-checked helper call is kept, matching the patch's intent.
 */
void rmnet_shs_cpu_node_remove(struct rmnet_shs_skbn_s *node)
{
	SHS_TRACE_LOW(RMNET_SHS_CPU_NODE, RMNET_SHS_CPU_NODE_FUNC_REMOVE,
			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);

	list_del_init(&node->node_id);
	rmnet_shs_change_cpu_num_flows(node->map_cpu, DECREMENT);
}

@@ -142,7 +152,7 @@ void rmnet_shs_cpu_node_add(struct rmnet_shs_skbn_s *node,
			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);

	list_add(&node->node_id, hd);
	cpu_num_flows[node->map_cpu]++;
	rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
}

void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
@@ -152,8 +162,8 @@ void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);

	list_move(&node->node_id, hd);
	cpu_num_flows[node->map_cpu]++;
	cpu_num_flows[oldcpu]--;
	rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
	rmnet_shs_change_cpu_num_flows((u16) oldcpu, DECREMENT);
}

static void rmnet_shs_cpu_ooo(u8 cpu, int count)
+51 −16
Original line number Diff line number Diff line
@@ -32,23 +32,19 @@ MODULE_LICENSE("GPL v2");
#define RMNET_SHS_FILTER_FLOW_RATE 100

#define PERIODIC_CLEAN 0
/* FORCE_CLEAN should only used during module de-ini.*/
/* FORCE_CLEAN should only used during module de-init.*/
#define FORCE_CLEAN 1
/* Time to wait (in time ticks) before re-triggering the workqueue
 *	1   tick  = 10 ms (Maximum possible resolution)
 *	100 ticks = 1 second
 */

/* Local Definitions and Declarations */
unsigned int rmnet_shs_cpu_prio_dur __read_mostly = 3;
module_param(rmnet_shs_cpu_prio_dur, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration(ticks)");
MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration (wq intervals)");

#define PRIO_BACKOFF ((!rmnet_shs_cpu_prio_dur) ? 2 : rmnet_shs_cpu_prio_dur)

unsigned int rmnet_shs_wq_frequency __read_mostly = RMNET_SHS_WQ_DELAY_TICKS;
module_param(rmnet_shs_wq_frequency, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_wq_frequency, "Priodicity of Wq trigger(in ticks)");
unsigned int rmnet_shs_wq_interval_ms __read_mostly = RMNET_SHS_WQ_INTERVAL_MS;
module_param(rmnet_shs_wq_interval_ms, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_wq_interval_ms, "Interval between wq runs (ms)");

unsigned long rmnet_shs_max_flow_inactivity_sec __read_mostly =
						RMNET_SHS_MAX_SKB_INACTIVE_TSEC;
@@ -91,7 +87,7 @@ module_param_array(rmnet_shs_cpu_rx_flows, uint, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_flows, "Num flows processed per core");

unsigned int rmnet_shs_cpu_rx_filter_flows[MAX_CPUS];
module_param_array(rmnet_shs_cpu_rx_filter_flows, uint, 0, 0644);
module_param_array(rmnet_shs_cpu_rx_filter_flows, uint, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_filter_flows, "Num filtered flows per core");

unsigned long long rmnet_shs_cpu_rx_bytes[MAX_CPUS];
@@ -183,8 +179,7 @@ static struct rmnet_shs_wq_rx_flow_s rmnet_shs_rx_flow_tbl;
static struct list_head rmnet_shs_wq_hstat_tbl =
				LIST_HEAD_INIT(rmnet_shs_wq_hstat_tbl);
static int rmnet_shs_flow_dbg_stats_idx_cnt;
static struct list_head rmnet_shs_wq_ep_tbl =
				LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);
struct list_head rmnet_shs_wq_ep_tbl = LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);

/* Helper functions to add and remove entries to the table
 * that maintains a list of all endpoints (vnd's) available on this device.
@@ -544,6 +539,17 @@ void rmnet_shs_wq_update_hstat_rps_msk(struct rmnet_shs_wq_hstat_s *hstat_p)
			hstat_p->rps_config_msk = ep->rps_config_msk;
			hstat_p->def_core_msk = ep->default_core_msk;
			hstat_p->pri_core_msk = ep->pri_core_msk;

			/* Update ep tput stats while we're here */
			if (hstat_p->skb_tport_proto == IPPROTO_TCP) {
				rm_err("SHS_UDP: adding TCP bps %lu to ep_total %lu ep name %s",
				       hstat_p->rx_bps, ep->tcp_rx_bps, node_p->dev->name);
				ep->tcp_rx_bps += hstat_p->rx_bps;
			} else if (hstat_p->skb_tport_proto == IPPROTO_UDP) {
				rm_err("SHS_UDP: adding UDP rx_bps %lu to ep_total %lu ep name %s",
				       hstat_p->rx_bps, ep->udp_rx_bps, node_p->dev->name);
				ep->udp_rx_bps += hstat_p->rx_bps;
			}
			break;
		}
	}
@@ -1240,6 +1246,7 @@ int rmnet_shs_wq_check_cpu_move_for_ep(u16 current_cpu, u16 dest_cpu,
int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
				  u32 sugg_type)
{
	unsigned long flags;
	struct rmnet_shs_wq_ep_s *ep;

	if (cur_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
@@ -1251,6 +1258,7 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
	 * on it if is online, rps mask, isolation, etc. then make
	 * suggestion to change the cpu for the flow by passing its hash
	 */
	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
		if (!ep)
			continue;
@@ -1272,9 +1280,13 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
			rm_err("SHS_FDESC: >> flow 0x%x was suggested to"
			       " move from cpu[%d] to cpu[%d] sugg_type [%d]",
			       hash_to_move, cur_cpu, dest_cpu, sugg_type);

			spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
			return 1;
		}
	}

	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
	return 0;
}

@@ -1283,8 +1295,10 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
{
	struct rmnet_shs_skbn_s *node_p;
	struct rmnet_shs_wq_hstat_s *hstat_p;
	unsigned long ht_flags;
	u16 bkt;

	spin_lock_irqsave(&rmnet_shs_ht_splock, ht_flags);
	hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
		if (!node_p)
			continue;
@@ -1306,8 +1320,10 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
				0xDEF, 0xDEF, hstat_p, NULL);

		node_p->hstats->segment_enable = seg_enable;
		spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
		return 1;
	}
	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);

	rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
	       hash_to_set, seg_enable);
@@ -1452,6 +1468,7 @@ void rmnet_shs_wq_eval_cpus_caps_and_flows(struct list_head *cpu_caps,
	rmnet_shs_wq_mem_update_cached_cpu_caps(cpu_caps);
	rmnet_shs_wq_mem_update_cached_sorted_gold_flows(gold_flows);
	rmnet_shs_wq_mem_update_cached_sorted_ss_flows(ss_flows);
	rmnet_shs_wq_mem_update_cached_netdevs();

	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_RESP_INT);

@@ -1614,12 +1631,14 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
	int cpu_assigned = -1;
	u8 is_match_found = 0;
	struct rmnet_shs_wq_ep_s *ep = NULL;
	unsigned long flags;

	if (!dev) {
		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
		return cpu_assigned;
	}

	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
		if (!ep)
			continue;
@@ -1635,6 +1654,7 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)

	if (!is_match_found) {
		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
		spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
		return cpu_assigned;
	}

@@ -1652,6 +1672,7 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
	/* Increment CPU assignment idx to be ready for next flow assignment*/
	if ((cpu_assigned >= 0) || ((ep->new_lo_idx + 1) >= ep->new_lo_max))
		ep->new_lo_idx = ((ep->new_lo_idx + 1) % ep->new_lo_max);
	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);

	return cpu_assigned;
}
@@ -1663,12 +1684,14 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
	u8 hi_idx;
	u8 hi_max;
	u8 is_match_found = 0;
	unsigned long flags;

	if (!dev) {
		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
		return cpu_assigned;
	}

	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
		if (!ep)
			continue;
@@ -1684,6 +1707,7 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)

	if (!is_match_found) {
		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
		spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
		return cpu_assigned;
	}

@@ -1700,6 +1724,7 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
	/* Increment CPU assignment idx to be ready for next flow assignment*/
	if (cpu_assigned >= 0)
		ep->new_hi_idx = ((hi_idx + 1) % hi_max);
	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);

	return cpu_assigned;
}
@@ -1874,6 +1899,11 @@ void rmnet_shs_wq_refresh_ep_masks(void)
		if (!ep->is_ep_active)
			continue;
		rmnet_shs_wq_update_ep_rps_msk(ep);

		/* These tput totals get re-added as we go through each flow */
		ep->udp_rx_bps = 0;
		ep->tcp_rx_bps = 0;

	}
}

@@ -1993,15 +2023,13 @@ void rmnet_shs_wq_update_stats(void)
	}

	rmnet_shs_wq_refresh_new_flow_list();
	/*Invoke after both the locks are released*/
	rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
	rmnet_shs_wq_debug_print_flows();
	rmnet_shs_wq_filter();
}

void rmnet_shs_wq_process_wq(struct work_struct *work)
{
	unsigned long flags;
	unsigned long jiffies;

	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
				RMNET_SHS_WQ_PROCESS_WQ_START,
@@ -2011,8 +2039,14 @@ void rmnet_shs_wq_process_wq(struct work_struct *work)
	rmnet_shs_wq_update_stats();
	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);

        /*Invoke after both the locks are released*/
        rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
        rmnet_shs_wq_debug_print_flows();

	jiffies = msecs_to_jiffies(rmnet_shs_wq_interval_ms);

	queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
					rmnet_shs_wq_frequency);
			   jiffies);

	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
				RMNET_SHS_WQ_PROCESS_WQ_END,
@@ -2046,6 +2080,7 @@ void rmnet_shs_wq_exit(void)
		return;

	rmnet_shs_wq_mem_deinit();
	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_WQ_EXIT);

	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EXIT, RMNET_SHS_WQ_EXIT_START,
				   0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+7 −2
Original line number Diff line number Diff line
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -31,14 +31,18 @@
#define RMNET_SHS_NSEC_TO_SEC(x) ((x)/1000000000)
#define RMNET_SHS_BYTE_TO_BIT(x) ((x)*8)
#define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
#define RMNET_SHS_WQ_DELAY_TICKS  10
#define RMNET_SHS_WQ_INTERVAL_MS  100

extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS]__read_mostly;
extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS]__read_mostly;

extern struct list_head rmnet_shs_wq_ep_tbl;

/* stores wq and end point details */

struct rmnet_shs_wq_ep_s {
	u64 tcp_rx_bps;
	u64 udp_rx_bps;
	struct list_head ep_list_id;
	struct net_device *ep;
	int  new_lo_core[MAX_CPUS];
@@ -161,6 +165,7 @@ struct rmnet_shs_wq_cpu_cap_s {
	struct list_head cpu_cap_list;
	u64 pps_capacity;
	u64 avg_pps_capacity;
	u64 bps;
	u16 cpu_num;
};

+3 −1
Original line number Diff line number Diff line
/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -349,6 +349,8 @@ int rmnet_shs_wq_genl_deinit(void)
{
	int ret;

	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_WQ_EXIT);

	ret = genl_unregister_family(&rmnet_shs_genl_family);
	if(ret != 0){
		rm_err("SHS_GNL: unregister family failed: %i\n",ret);
Loading