Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b56e5265 authored by Satya Durga Srinivasu Prabhala
Browse files

sched: clean-up unused/duplicate functions & variables



Remove unused/duplicate functions & variables for better
readability.

Change-Id: Ifd91080585f516c4d8290fa4bb4f8dbaddd27d98
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent a9f61565
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
@@ -33,9 +33,6 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_walt_init_task_load_pct;
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
+3 −20
Original line number Diff line number Diff line
@@ -940,10 +940,7 @@ TRACE_EVENT(sched_load_rt_rq,
);

#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sched_ravg_window;
extern unsigned int walt_disabled;
#endif

/*
@@ -973,7 +970,6 @@ TRACE_EVENT(sched_load_avg_cpu,
		__entry->util_avg_walt  = div64_ul(cpu_rq(cpu)->prev_runnable_sum,
					  sched_ravg_window >> SCHED_CAPACITY_SHIFT);

		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
		__entry->util_avg       = __entry->util_avg_walt;
#endif
	),
@@ -1002,8 +998,6 @@ TRACE_EVENT(sched_load_se,
		__field(	unsigned long,	load			      )
		__field(	unsigned long,	rbl_load		      )
		__field(	unsigned long,	util			      )
		__field(	unsigned long,	util_pelt		      )
		__field(	u32,		util_walt		      )
	),

	TP_fast_assign(
@@ -1020,22 +1014,11 @@ TRACE_EVENT(sched_load_se,
		__entry->load = se->avg.load_avg;
		__entry->rbl_load = se->avg.runnable_load_avg;
		__entry->util = se->avg.util_avg;
		__entry->util_pelt  = __entry->util;
		__entry->util_walt  = 0;
#ifdef CONFIG_SCHED_WALT
		if (!se->my_q) {
			struct task_struct *p = container_of(se, struct task_struct, se);
			__entry->util_walt = p->ravg.demand / (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
			if (!walt_disabled && sysctl_sched_use_walt_task_util)
				__entry->util = __entry->util_walt;
		}
#endif
	),

	TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu util_pelt=%lu util_walt=%u",
	TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu",
		  __entry->cpu, __get_str(path), __entry->comm, __entry->pid,
		  __entry->load, __entry->rbl_load, __entry->util,
		  __entry->util_pelt, __entry->util_walt)
		  __entry->load, __entry->rbl_load, __entry->util)
);

/*
+1 −52
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */

#ifdef CONFIG_SCHED_WALT
@@ -491,57 +491,6 @@ TRACE_EVENT(sched_load_balance_skip_tasks,
		__entry->affinity, __entry->task_util, __entry->h_load)
);

/*
 * sched_cpu_load - event class capturing a per-CPU load snapshot.
 *
 * Records, for one runqueue: the CPU id, the caller-supplied idle flag,
 * nr_running, the WALT big-task count, the CPU's load scale factor and
 * capacity, the WALT scaled cumulative runnable average, the caller-supplied
 * IRQ load and power cost, the runqueue C-state, and the cluster D-state.
 * NOTE(review): shown as removed code in this commit's diff — the commit
 * deletes this event class as unused.
 */
DECLARE_EVENT_CLASS(sched_cpu_load,

	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),

	TP_ARGS(rq, idle, irqload, power_cost),

	TP_STRUCT__entry(
		__field(unsigned int, cpu)
		__field(unsigned int, idle)
		__field(unsigned int, nr_running)
		__field(unsigned int, nr_big_tasks)
		__field(unsigned int, load_scale_factor)
		__field(unsigned int, capacity)
		__field(u64,	      cumulative_runnable_avg)
		__field(u64,	      irqload)
		__field(unsigned int, max_freq)
		__field(unsigned int, power_cost)
		__field(int,	      cstate)
		__field(int,	      dstate)
	),

	TP_fast_assign(
		__entry->cpu			= rq->cpu;
		__entry->idle			= idle;
		__entry->nr_running		= rq->nr_running;
		/* WALT per-rq statistics */
		__entry->nr_big_tasks		= rq->walt_stats.nr_big_tasks;
		__entry->load_scale_factor	=
						cpu_load_scale_factor(rq->cpu);
		__entry->capacity		= cpu_capacity(rq->cpu);
		__entry->cumulative_runnable_avg =
				rq->walt_stats.cumulative_runnable_avg_scaled;
		__entry->irqload		= irqload;
		__entry->max_freq		= cpu_max_freq(rq->cpu);
		__entry->power_cost		= power_cost;
		/* CPU idle C-state and cluster D-state at trace time */
		__entry->cstate			= rq->cstate;
		__entry->dstate			= rq->cluster->dstate;
	),

	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d",
		__entry->cpu, __entry->idle, __entry->nr_running,
		__entry->nr_big_tasks, __entry->load_scale_factor,
		__entry->capacity, __entry->cumulative_runnable_avg,
		__entry->irqload, __entry->max_freq, __entry->power_cost,
		__entry->cstate, __entry->dstate)
);

/*
 * Concrete tracepoint instantiating the sched_cpu_load class.
 * The _lb suffix presumably marks the load-balance call site — confirm
 * against the emitting code, which is not visible in this diff.
 */
DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
	TP_ARGS(rq, idle, irqload, power_cost)
);

TRACE_EVENT(sched_load_to_gov,

	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
+4 −4
Original line number Diff line number Diff line
@@ -127,7 +127,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
static inline bool use_pelt(void)
{
#ifdef CONFIG_SCHED_WALT
	return (!sysctl_sched_use_walt_cpu_util || walt_disabled);
	return false;
#else
	return true;
#endif
@@ -180,7 +180,7 @@ static void sugov_track_cycles(struct sugov_policy *sg_policy,
{
	u64 delta_ns, cycles;

	if (unlikely(!sysctl_sched_use_walt_cpu_util))
	if (use_pelt())
		return;

	/* Track cycles in current window */
@@ -198,7 +198,7 @@ static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
	u64 last_ws = sg_policy->last_ws;
	unsigned int avg_freq;

	if (unlikely(!sysctl_sched_use_walt_cpu_util))
	if (use_pelt())
		return;

	BUG_ON(curr_ws < last_ws);
@@ -571,7 +571,7 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
	unsigned long cpu_util = sg_cpu->util;
	bool is_hiload;

	if (unlikely(!sysctl_sched_use_walt_cpu_util))
	if (use_pelt())
		return;

	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
+3 −8
Original line number Diff line number Diff line
@@ -135,8 +135,6 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);

#ifdef CONFIG_SCHED_WALT
unsigned int sysctl_sched_use_walt_cpu_util = 1;
unsigned int sysctl_sched_use_walt_task_util = 1;
__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload =
						(10 * NSEC_PER_MSEC);
#endif
@@ -3708,7 +3706,6 @@ static inline unsigned long _task_util_est(struct task_struct *p)
static inline unsigned long task_util_est(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
	if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
	return p->ravg.demand_scaled;
#endif
	return max(task_util(p), _task_util_est(p));
@@ -6554,8 +6551,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
	 * utilization from cpu utilization. Instead just use
	 * cpu_util for this case.
	 */
	if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util) &&
						p->state == TASK_WAKING)
	if (p->state == TASK_WAKING)
		return cpu_util(cpu);
#endif

@@ -7437,8 +7433,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
	}

#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
						 p->state == TASK_WAKING)
	if (p->state == TASK_WAKING)
		delta = task_util(p);
#endif
	if (task_placement_boost_enabled(p) || need_idle || boosted ||
Loading