
Commit eb8492ac authored by Pavankumar Kondeti

Merge remote-tracking branch 'dev/msm-4.9-sched' into msm-4.9



* origin/dev/msm-4.9-sched:
  sched: Fix compilation issues with schedutil for !SCHED_WALT
  sched/fair: Bring sched_smp_overlap_capacity out of WALT
  sched/fair: Fix is_packing_eligible() for !SCHED_WALT
  sched: Fix compilation issue in task_tick_fair() for !SCHED_WALT
  sched: Move sched_boost defines out of SCHED_WALT
  sched: Fix a compilation issue in find_best_target() for !SCHED_WALT
  sched: Fix a compilation issue when WALT is disabled
  sched: Add a stub for walt_cpu_high_irqload() for !SCHED_WALT
  sched/tune: Fix compilation issue when WALT is disabled
  sched: Define a stub function for sched_irqload when WALT is disabled
  sched: Get sched_task_util trace point working for !SCHED_WALT

Change-Id: I7b12496da6ddcd6518e6d5123ddf4337e2298dca
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parents 34e1614a b1bd5e38
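
Most of the pulled-in fixes share one pattern: a helper that only exists (or only makes sense) when CONFIG_SCHED_WALT is enabled gets a trivial inline stub in the !SCHED_WALT branch, so common scheduler code can call it unconditionally and still build with WALT disabled. A minimal sketch of that shape, with a made-up helper name purely for illustration (the real stubs added in the diffs below include is_reserved(), find_rtg_target() and walt_update_misfit_task()):

/*
 * Illustrative sketch only: walt_example_load() is not a real function,
 * but the #ifdef/#else shape matches the stubs added in this merge.
 */
#ifdef CONFIG_SCHED_WALT
/* Real version: free to use WALT-only state (rq->cluster, p->ravg, ...). */
extern unsigned int walt_example_load(int cpu);
#else
/* !SCHED_WALT stub: keeps callers compiling and folds away to a constant. */
static inline unsigned int walt_example_load(int cpu)
{
	return 0;
}
#endif

Callers then stay free of #ifdefs; with WALT disabled the compiler sees a constant and drops the dependent code.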
+3 −5
@@ -714,10 +714,10 @@ TRACE_EVENT(sched_task_util,
 
 	TP_PROTO(struct task_struct *p, int next_cpu, int backup_cpu,
 		 int target_cpu, bool sync, bool need_idle,
-		 bool placement_boost, int rtg_cpu),
+		 bool placement_boost, int rtg_cpu, u64 start_t),
 
 	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle,
-		placement_boost, rtg_cpu),
+		placement_boost, rtg_cpu, start_t),
 
 	TP_STRUCT__entry(
 		__field(int, pid			)
@@ -746,9 +746,7 @@ TRACE_EVENT(sched_task_util,
 		__entry->need_idle		= need_idle;
 		__entry->placement_boost	= placement_boost;
 		__entry->rtg_cpu		= rtg_cpu;
-		__entry->latency		= p->ravg.mark_start ?
-						  ktime_get_ns() -
-						  p->ravg.mark_start : 0;
+		__entry->latency		= (sched_clock() - start_t);
 	),
 
 	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d placement_boost=%d rtg_cpu=%d latency=%llu",
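
The two hunks above are why the tracepoint grew a start_t argument: the old latency expression dereferenced p->ravg.mark_start, and the ravg accounting only exists in the task struct when CONFIG_SCHED_WALT is enabled. The caller now samples a start timestamp with sched_clock() and passes it in, and it only pays for that read when the tracepoint is active, via the trace_sched_task_util_enabled() static-key helper that TRACE_EVENT generates. A minimal caller-side sketch of that guard, with simplified variable names (it is the same shape the select_energy_cpu_brute() change below takes):

	u64 start_t = 0;

	/* Read sched_clock() only when the tracepoint is enabled. */
	if (trace_sched_task_util_enabled())
		start_t = sched_clock();

	/* ... task placement decision ... */

	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
			      need_idle, placement_boost, rtg_cpu, start_t);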
+57 −20
@@ -6814,12 +6814,33 @@ struct find_best_target_env {
 	bool avoid_prev_cpu;
 };
 
+#ifdef CONFIG_SCHED_WALT
+static unsigned long cpu_estimated_capacity(int cpu, struct task_struct *p)
+{
+	unsigned long tutil, estimated_capacity;
+
+	if (task_in_cum_window_demand(cpu_rq(cpu), p))
+		tutil = 0;
+	else
+		tutil = task_util(p);
+
+	estimated_capacity = cpu_util_cum(cpu, tutil);
+
+	return estimated_capacity;
+}
+#else
+static unsigned long cpu_estimated_capacity(int cpu, struct task_struct *p)
+{
+	return cpu_util_wake(cpu, p);
+}
+#endif
+
 static bool is_packing_eligible(struct task_struct *p, int target_cpu,
 				struct find_best_target_env *fbt_env,
 				unsigned int target_cpus_count,
 				int best_idle_cstate)
 {
-	unsigned long tutil, estimated_capacity;
+	unsigned long estimated_capacity;
 
 	if (fbt_env->placement_boost || fbt_env->need_idle)
 		return false;
@@ -6830,12 +6851,7 @@ static bool is_packing_eligible(struct task_struct *p, int target_cpu,
 	if (target_cpus_count != 1)
 		return true;
 
-	if (task_in_cum_window_demand(cpu_rq(target_cpu), p))
-		tutil = 0;
-	else
-		tutil = task_util(p);
-
-	estimated_capacity = cpu_util_cum(target_cpu, tutil);
+	estimated_capacity = cpu_estimated_capacity(target_cpu, p);
 	estimated_capacity = add_capacity_margin(estimated_capacity,
 						 target_cpu);
 
@@ -6874,6 +6890,7 @@ static int start_cpu(bool boosted)
 	return walt_start_cpu(start_cpu);
 }
 
+unsigned int sched_smp_overlap_capacity;
 static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 				   bool boosted, bool prefer_idle,
 				   struct find_best_target_env *fbt_env)
@@ -7273,6 +7290,7 @@ bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
 	       task_fits_max(p, cpu);
 }
 
+#ifdef CONFIG_SCHED_WALT
 static inline struct cpumask *find_rtg_target(struct task_struct *p)
 {
 	struct related_thread_group *grp;
@@ -7293,6 +7311,12 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
 
 	return rtg_target;
 }
+#else
+static inline struct cpumask *find_rtg_target(struct task_struct *p)
+{
+	return NULL;
+}
+#endif
 
 static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 {
@@ -7303,6 +7327,10 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 	int next_cpu = -1;
 	struct cpumask *rtg_target = find_rtg_target(p);
 	struct find_best_target_env fbt_env;
+	u64 start_t = 0;
+
+	if (trace_sched_task_util_enabled())
+		start_t = sched_clock();
 
 	schedstat_inc(p->se.statistics.nr_wakeups_secb_attempts);
 	schedstat_inc(this_rq()->eas_stats.secb_attempts);
@@ -7427,7 +7455,8 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 unlock:
 	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
 			      fbt_env.need_idle, fbt_env.placement_boost,
-			      rtg_target ? cpumask_first(rtg_target) : -1);
+			      rtg_target ? cpumask_first(rtg_target) : -1,
+			      start_t);
 	rcu_read_unlock();
 	return target_cpu;
 }
@@ -10871,6 +10900,24 @@ static void rq_offline_fair(struct rq *rq)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_WALT
+static inline void
+walt_update_misfit_task(struct rq *rq, struct task_struct *curr)
+{
+	bool misfit = rq->misfit_task;
+
+	if (curr->misfit != misfit) {
+		walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
+		curr->misfit = misfit;
+	}
+}
+#else
+static inline void
+walt_update_misfit_task(struct rq *rq, struct task_struct *curr)
+{
+}
+#endif
+
 /*
  * scheduler tick hitting a task of our scheduling class:
  */
@@ -10878,10 +10925,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &curr->se;
-#ifdef CONFIG_SMP
-	bool old_misfit = curr->misfit;
-	bool misfit;
-#endif
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
@@ -10897,15 +10940,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 		trace_sched_overutilized(true);
 	}
 
-	misfit = !task_fits_max(curr, rq->cpu);
-	rq->misfit_task = misfit;
-
-	if (old_misfit != misfit) {
-		walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
-		curr->misfit = misfit;
-	}
+	rq->misfit_task = !task_fits_max(curr, rq->cpu);
 #endif
-
+	walt_update_misfit_task(rq, curr);
 }
 
 /*
+13 −5
@@ -1112,6 +1112,11 @@ enum sched_boost_policy {
 	SCHED_BOOST_ON_ALL,
 };
 
+#define NO_BOOST 0
+#define FULL_THROTTLE_BOOST 1
+#define CONSERVATIVE_BOOST 2
+#define RESTRAINED_BOOST 3
+
 /*
  * Returns the rq capacity of any rq in a group. This does not play
  * well with groups where rq capacity can change independently.
@@ -1910,6 +1915,9 @@ cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
 	return cpu_util_freq_pelt(cpu);
 }
 
+#define sched_ravg_window TICK_NSEC
+#define sysctl_sched_use_walt_cpu_util 0
+
 #endif /* CONFIG_SCHED_WALT */
 
 extern unsigned long
@@ -2367,11 +2375,6 @@ extern void set_preferred_cluster(struct related_thread_group *grp);
 extern void add_new_task_to_grp(struct task_struct *new);
 extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
 
-#define NO_BOOST 0
-#define FULL_THROTTLE_BOOST 1
-#define CONSERVATIVE_BOOST 2
-#define RESTRAINED_BOOST 3
-
 static inline int cpu_capacity(int cpu)
 {
 	return cpu_rq(cpu)->cluster->capacity;
@@ -2789,6 +2792,11 @@ static inline int cpu_max_power_cost(int cpu)
 
 static inline void clear_walt_request(int cpu) { }
 
+static inline int is_reserved(int cpu)
+{
+	return 0;
+}
+
 static inline int got_boost_kick(void)
 {
 	return 0;
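
The fallback defines in the -1910 hunk above are what the schedutil fix in the commit list relies on: with CONFIG_SCHED_WALT disabled, sched_ravg_window collapses to TICK_NSEC and sysctl_sched_use_walt_cpu_util becomes the constant 0, so shared code that consults them still compiles and the WALT-only branches turn into dead code. A hypothetical consumer, not taken from this merge, just to show how the constants behave on a !SCHED_WALT build:

/* Hypothetical helpers for illustration only. */
static inline u64 example_window_ns(void)
{
	/* One WALT accounting window; with WALT off this is simply one tick. */
	return sched_ravg_window;
}

static inline bool example_use_walt_util(void)
{
	/* Constant 0 with WALT off, so WALT-only paths compile out. */
	return sysctl_sched_use_walt_cpu_util;
}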
+4 −0
@@ -752,6 +752,10 @@ static void schedtune_attach(struct cgroup_taskset *tset)
 	cgroup_taskset_for_each(task, css, tset)
 		sync_cgroup_colocation(task, colocate);
 
 }
+#else
+static void schedtune_attach(struct cgroup_taskset *tset)
+{
+}
 #endif
 
+0 −1
@@ -2193,7 +2193,6 @@ static void sort_clusters(void)
 
 int __read_mostly min_power_cpu;
 
-unsigned int sched_smp_overlap_capacity;
 void walt_sched_energy_populated_callback(void)
 {
 	struct sched_cluster *cluster;