Commit 55529000 authored by Kyle Yan, committed by Gerrit - the friendly Code Review server

Merge "sched: Refactor HMP code to allow easier rebasing" into msm-4.8

parents 3a17e49d d056dbcb
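
The refactor itself is mechanical: each #ifdef CONFIG_SCHED_HMP block is moved verbatim to one fixed spot per file (the tail of the first file in this diff, the area just after the includes in the scheduling-class files further down), so future rebases onto newer upstream kernels touch the HMP code in a single predictable place instead of producing conflicts scattered through each class. Builds without CONFIG_SCHED_HMP keep empty stubs with identical signatures, so callers need no guards of their own. A minimal stand-alone sketch of that stub pattern follows; the file and helper names are placeholders, not taken from this commit.

/*
 * hmp_stub_pattern.c - illustration only; CONFIG_SCHED_HMP and the helper
 * names here are placeholders, not code from this commit.
 */
#include <stdio.h>

#ifdef CONFIG_SCHED_HMP
/* All HMP-specific helpers sit in one contiguous guarded block. */
static void inc_hmp_stats(int *stats, int load) { *stats += load; }
static void dec_hmp_stats(int *stats, int load) { *stats -= load; }
#else
/*
 * !HMP builds get empty stubs with identical signatures, so the generic
 * code below needs no #ifdefs of its own.
 */
static void inc_hmp_stats(int *stats, int load) { (void)stats; (void)load; }
static void dec_hmp_stats(int *stats, int load) { (void)stats; (void)load; }
#endif

int main(void)
{
	int stats = 0;

	/* Generic path: the calls are unconditional in both configurations. */
	inc_hmp_stats(&stats, 5);
	dec_hmp_stats(&stats, 2);
	printf("stats = %d\n", stats); /* 3 with -DCONFIG_SCHED_HMP, else 0 */
	return 0;
}

Keeping the real bodies and the no-op stubs side by side in one guarded block is what makes later rebases cheap: upstream churn in the surrounding generic code no longer splits the HMP hunks apart.
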
+38 −38
@@ -2320,44 +2320,6 @@ void __dl_clear_params(struct task_struct *p)
	dl_se->dl_yielded = 0;
}

#ifdef CONFIG_SCHED_HMP
/*
 * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
 *
 * Stop accounting (exiting) task's future cpu usage
 *
 * We need this so that reset_all_windows_stats() can function correctly.
 * reset_all_window_stats() depends on do_each_thread/for_each_thread task
 * iterators to reset *all* task's statistics. Exiting tasks however become
 * invisible to those iterators. sched_exit() is called on a exiting task prior
 * to being removed from task_list, which will let reset_all_window_stats()
 * function correctly.
 */
void sched_exit(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;
	u64 wallclock;

	sched_set_group_id(p, 0);

	rq = task_rq_lock(p, &rf);

	/* rq->curr == p */
	wallclock = sched_ktime_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	dequeue_task(rq, p, 0);
	reset_task_stats(p);
	p->ravg.mark_start = wallclock;
	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
	free_task_load_ptrs(p);

	enqueue_task(rq, p, 0);
	clear_ed_task(p, rq);
	task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_SCHED_HMP */

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
@@ -9464,3 +9426,41 @@ const u32 sched_prio_to_wmult[40] = {
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};

#ifdef CONFIG_SCHED_HMP
/*
 * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
 *
 * Stop accounting (exiting) task's future cpu usage
 *
 * We need this so that reset_all_windows_stats() can function correctly.
 * reset_all_window_stats() depends on do_each_thread/for_each_thread task
 * iterators to reset *all* task's statistics. Exiting tasks however become
 * invisible to those iterators. sched_exit() is called on a exiting task prior
 * to being removed from task_list, which will let reset_all_window_stats()
 * function correctly.
 */
void sched_exit(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;
	u64 wallclock;

	sched_set_group_id(p, 0);

	rq = task_rq_lock(p, &rf);

	/* rq->curr == p */
	wallclock = sched_ktime_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	dequeue_task(rq, p, 0);
	reset_task_stats(p);
	p->ravg.mark_start = wallclock;
	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
	free_task_load_ptrs(p);

	enqueue_task(rq, p, 0);
	clear_ed_task(p, rq);
	task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_SCHED_HMP */
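
The block comment above spells out the ordering constraint: reset_all_window_stats() relies on the do_each_thread/for_each_thread iterators, so a task that has already left the task list can never have its statistics reset by that walk. sched_exit() therefore resets the exiting task's stats itself, under the runqueue lock, and stamps EXITING_TASK_MARKER into its demand history so that later accounting ignores it. A small stand-alone illustration of the same idea follows, using made-up names rather than kernel APIs.

/*
 * exit_marker_demo.c - made-up names; this shows the general shape of the
 * problem, not the kernel's implementation.
 */
#include <stdio.h>
#include <string.h>

#define NTASKS		4
#define EXITING_MARKER	0xffffffffu

struct fake_task {
	int live;		 /* still reachable by the "all tasks" walk? */
	unsigned int history[3]; /* stand-in for ravg.sum_history */
};

static struct fake_task tasks[NTASKS];

/* Stand-in for reset_all_window_stats(): only visits live entries. */
static void reset_all(void)
{
	for (int i = 0; i < NTASKS; i++)
		if (tasks[i].live)
			memset(tasks[i].history, 0, sizeof(tasks[i].history));
}

/* Stand-in for sched_exit(): finalize our own stats, then leave the list. */
static void task_exit(struct fake_task *t)
{
	memset(t->history, 0, sizeof(t->history));
	t->history[0] = EXITING_MARKER; /* later accounting skips this task */
	t->live = 0;
}

int main(void)
{
	for (int i = 0; i < NTASKS; i++) {
		tasks[i].live = 1;
		tasks[i].history[0] = 100 + i;
	}

	task_exit(&tasks[2]); /* exits before the global reset runs */
	reset_all();          /* never sees tasks[2] again */

	printf("exited task history[0] = %#x\n", tasks[2].history[0]);
	return 0;
}
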
+35 −35
@@ -18,6 +18,41 @@

#include <linux/slab.h>

#ifdef CONFIG_SCHED_HMP

static void
inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
{
	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
}

static void
dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
{
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}

static void
fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
			 u32 new_task_load, u32 new_pred_demand)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);
	s64 pred_demand_delta = PRED_DEMAND_DELTA;

	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
				      pred_demand_delta);
}

#else	/* CONFIG_SCHED_HMP */

static inline void
inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }

static inline void
dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }

#endif	/* CONFIG_SCHED_HMP */

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
@@ -820,41 +855,6 @@ static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

#ifdef CONFIG_SCHED_HMP

static void
inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
{
	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
}

static void
dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
{
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}

static void
fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
			 u32 new_task_load, u32 new_pred_demand)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);
	s64 pred_demand_delta = PRED_DEMAND_DELTA;

	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
				      pred_demand_delta);
}

#else	/* CONFIG_SCHED_HMP */

static inline void
inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }

static inline void
dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }

#endif	/* CONFIG_SCHED_HMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
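
Each scheduling class carries the same three thin wrappers: inc_hmp_sched_stats_*() folds a task's load into the runqueue's hmp_stats on enqueue, dec_hmp_sched_stats_*() removes it on dequeue, and fixup_hmp_sched_stats_*() applies signed deltas when a queued task's demand or predicted demand is re-estimated in place. A rough sketch of that accounting shape follows; the struct fields and arithmetic are illustrative assumptions, not the kernel's implementation.

/*
 * runnable_avg_demo.c - sketch of the inc/dec/fixup accounting shape;
 * field and function names are illustrative, not the kernel's.
 */
#include <stdio.h>

struct hmp_stats {
	long long cumulative_runnable_avg; /* summed demand of queued tasks */
	long long pred_demands_sum;        /* summed predicted demand       */
};

static void stats_inc(struct hmp_stats *s, unsigned int demand, unsigned int pred)
{
	s->cumulative_runnable_avg += demand;
	s->pred_demands_sum += pred;
}

static void stats_dec(struct hmp_stats *s, unsigned int demand, unsigned int pred)
{
	s->cumulative_runnable_avg -= demand;
	s->pred_demands_sum -= pred;
}

/*
 * Re-estimation in place: apply signed deltas rather than dec + inc, so the
 * totals never transiently drop while the task stays on the runqueue.
 */
static void stats_fixup(struct hmp_stats *s, long long demand_delta,
			long long pred_delta)
{
	s->cumulative_runnable_avg += demand_delta;
	s->pred_demands_sum += pred_delta;
}

int main(void)
{
	struct hmp_stats s = { 0, 0 };

	stats_inc(&s, 300, 280);               /* task enqueued           */
	stats_fixup(&s, 450 - 300, 400 - 280); /* its demand re-estimated */
	stats_dec(&s, 450, 400);               /* task dequeued           */

	printf("%lld %lld\n", s.cumulative_runnable_avg, s.pred_demands_sum);
	return 0;
}
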
+6237 −6210
File changed; diff not shown because the file size exceeds the preview limit.

+52 −51
@@ -9,6 +9,58 @@
#include <linux/irq_work.h>
#include <trace/events/sched.h>

#ifdef CONFIG_SCHED_HMP

static void
inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
{
	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
}

static void
dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
{
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}

static void
fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
			 u32 new_task_load, u32 new_pred_demand)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);
	s64 pred_demand_delta = PRED_DEMAND_DELTA;

	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
				      pred_demand_delta);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	int target;

	rcu_read_lock();
	target = find_lowest_rq(p);
	if (target != -1)
		cpu = target;
	rcu_read_unlock();

	return cpu;
}
#endif /* CONFIG_SMP */
#else  /* CONFIG_SCHED_HMP */

static inline void
inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }

static inline void
dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }

#endif	/* CONFIG_SCHED_HMP */

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
@@ -1197,41 +1249,6 @@ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SCHED_HMP

static void
inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
{
	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
}

static void
dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
{
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}

static void
fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
			 u32 new_task_load, u32 new_pred_demand)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);
	s64 pred_demand_delta = PRED_DEMAND_DELTA;

	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
				      pred_demand_delta);
}

#else	/* CONFIG_SCHED_HMP */

static inline void
inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }

static inline void
dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }

#endif	/* CONFIG_SCHED_HMP */

static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
@@ -1467,22 +1484,6 @@ static void yield_task_rt(struct rq *rq)
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

#ifdef CONFIG_SCHED_HMP
static int
select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	int target;

	rcu_read_lock();
	target = find_lowest_rq(p);
	if (target != -1)
		cpu = target;
	rcu_read_unlock();

	return cpu;
}
#endif /* CONFIG_SCHED_HMP */

static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
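
This last hunk is truncated before the call site, but the HMP variant shown above is simple: it reuses find_lowest_rq() to pick a target CPU and keeps the caller's cpu whenever the search returns -1. Presumably select_task_rq_rt() hands off to this variant early in HMP builds, though that part of the function is not visible here. A small sketch of the fallback behaviour follows; the names and the dispatch shape are assumptions, not lines from this diff.

/*
 * rt_hmp_placement_demo.c - fallback behaviour of the HMP placement path;
 * names and the surrounding dispatch are assumptions, not this diff.
 */
#include <stdio.h>

/* Stand-in for find_lowest_rq(): a CPU id, or -1 when no candidate exists. */
static int find_lowest_cpu(int have_candidate)
{
	return have_candidate ? 3 : -1;
}

/*
 * Mirrors select_task_rq_rt_hmp(): keep the caller's cpu unless the search
 * produced a usable target.
 */
static int select_cpu_hmp(int prev_cpu, int have_candidate)
{
	int target = find_lowest_cpu(have_candidate);

	if (target != -1)
		prev_cpu = target;
	return prev_cpu;
}

int main(void)
{
	printf("candidate found: cpu %d\n", select_cpu_hmp(1, 1)); /* -> 3 */
	printf("no candidate:    cpu %d\n", select_cpu_hmp(1, 0)); /* -> 1 */
	return 0;
}
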