
Commit c0687276 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "sched/fair: Add bias towards previous CPU for high wakeup rate tasks"

parents a539131c 7cc02920
+1 −0
@@ -1768,6 +1768,7 @@ struct task_struct {
 	struct sched_entity se;
 	struct sched_rt_entity rt;
 	u64 last_sleep_ts;
+	u64 last_cpu_selected_ts;
 #ifdef CONFIG_SCHED_WALT
 	struct ravg ravg;
 	/*
+1 −0
@@ -2330,6 +2330,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
 	p->last_sleep_ts		= 0;
+	p->last_cpu_selected_ts		= 0;
 
 	INIT_LIST_HEAD(&p->se.group_node);
 
+36 −0
@@ -7296,6 +7296,39 @@ bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
 	       task_fits_max(p, cpu);
 }
 
+#define SCHED_SELECT_PREV_CPU_NSEC	2000000
+#define SCHED_FORCE_CPU_SELECTION_NSEC	20000000
+
+static inline bool
+bias_to_prev_cpu(struct task_struct *p, struct cpumask *rtg_target)
+{
+	int prev_cpu = task_cpu(p);
+#ifdef CONFIG_SCHED_WALT
+	u64 ms = p->ravg.mark_start;
+#else
+	u64 ms = sched_clock();
+#endif
+
+	if (cpu_isolated(prev_cpu) || !idle_cpu(prev_cpu))
+		return false;
+
+	if (!ms)
+		return false;
+
+	if (ms - p->last_cpu_selected_ts >= SCHED_SELECT_PREV_CPU_NSEC) {
+		p->last_cpu_selected_ts = ms;
+		return false;
+	}
+
+	if (ms - p->last_sleep_ts >= SCHED_SELECT_PREV_CPU_NSEC)
+		return false;
+
+	if (rtg_target && !cpumask_test_cpu(prev_cpu, rtg_target))
+		return false;
+
+	return true;
+}
+
 #ifdef CONFIG_SCHED_WALT
 static inline struct cpumask *find_rtg_target(struct task_struct *p)
 {
@@ -7374,6 +7407,9 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 		}
 	}
 
+	if (bias_to_prev_cpu(p, rtg_target))
+		return prev_cpu;
+
 	rcu_read_lock();
 
 	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
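The hunks above are the heart of the change: on a wakeup handled by select_energy_cpu_brute(), the new bias_to_prev_cpu() helper short-circuits the energy-aware CPU search and keeps the task on its previous CPU when that CPU is idle and not isolated, fits the rtg_target mask if one applies, and less than SCHED_SELECT_PREV_CPU_NSEC (2 ms) has elapsed both since last_cpu_selected_ts was refreshed and since the task last went to sleep. The 2 ms windows confine the shortcut to tasks that sleep and wake very frequently, which matches the "high wakeup rate tasks" in the commit title. The following is a minimal standalone sketch, not part of the commit, that models only the timing checks; the idle/isolation and rtg_target tests and the last_cpu_selected_ts refresh on the expiry path are left out, and all names here are made up for illustration.

/* Standalone sketch, not kernel code: models the 2 ms wakeup windows
 * that bias_to_prev_cpu() checks. Types and names are illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_SELECT_PREV_CPU_NSEC 2000000ULL	/* 2 ms, as in the patch */

struct sketch_task {
	uint64_t now_ns;		/* stand-in for ravg.mark_start / sched_clock() */
	uint64_t last_cpu_selected_ts;	/* last time the fast path was refreshed */
	uint64_t last_sleep_ts;		/* when the task last went to sleep */
};

static bool within_bias_window(const struct sketch_task *p)
{
	if (!p->now_ns)
		return false;
	/* The task must have been placed (or refreshed) recently... */
	if (p->now_ns - p->last_cpu_selected_ts >= SKETCH_SELECT_PREV_CPU_NSEC)
		return false;
	/* ...and must not have slept for long. */
	if (p->now_ns - p->last_sleep_ts >= SKETCH_SELECT_PREV_CPU_NSEC)
		return false;
	return true;
}

int main(void)
{
	/* Wakes 0.5 ms after sleeping and 1 ms after its last placement:
	 * both inside the 2 ms window, so the previous CPU would be kept. */
	struct sketch_task t = {
		.now_ns = 10000000,		/* 10 ms */
		.last_cpu_selected_ts = 9000000,	/* 9 ms */
		.last_sleep_ts = 9500000,	/* 9.5 ms */
	};

	printf("bias to previous CPU: %s\n",
	       within_bias_window(&t) ? "yes" : "no");
	return 0;
}

With these inputs the sketch prints "yes"; push either timestamp more than 2 ms into the past and the fast path would be skipped in favour of the full energy-aware search.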
+2 −0
@@ -823,6 +823,8 @@ struct rq {
 	int prev_top;
 	int curr_top;
 	bool notif_pending;
+	u64 last_cc_update;
+	u64 cycles;
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+26 −7
@@ -301,10 +301,27 @@ int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
 	return 0;
 }
 
-static void update_task_cpu_cycles(struct task_struct *p, int cpu)
+/*
+ * Assumes rq_lock is held and wallclock was recorded in the same critical
+ * section as this function's invocation.
+ */
+static inline u64 read_cycle_counter(int cpu, u64 wallclock)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->last_cc_update != wallclock) {
+		rq->cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+		rq->last_cc_update = wallclock;
+	}
+
+	return rq->cycles;
+}
+
+static void update_task_cpu_cycles(struct task_struct *p, int cpu,
+				   u64 wallclock)
 {
 	if (use_cycle_counter)
-		p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+		p->cpu_cycles = read_cycle_counter(cpu, wallclock);
 }
 
 void clear_ed_task(struct task_struct *p, struct rq *rq)
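The new read_cycle_counter() helper caches the callback result per runqueue: rq->cycles holds the last sample and rq->last_cc_update the wallclock it was taken at, so callers that pass the same wallclock within one rq-locked section trigger only a single get_cpu_cycle_counter() call; that is why every update_task_cpu_cycles() caller in the hunks below now threads a wallclock down to the helper. Here is a minimal userspace sketch of that memoization pattern, with made-up names, not kernel code:

/* Standalone sketch, not kernel code: memoize an expensive per-CPU read
 * keyed on a caller-supplied wallclock, as read_cycle_counter() does. */
#include <stdint.h>
#include <stdio.h>

struct sketch_rq {
	uint64_t last_cc_update;	/* wallclock of the cached sample */
	uint64_t cycles;		/* cached counter value */
};

static unsigned int hw_reads;	/* how often the "hardware" was touched */

/* Stand-in for cpu_cycle_counter_cb.get_cpu_cycle_counter() */
static uint64_t sketch_get_cpu_cycle_counter(int cpu)
{
	hw_reads++;
	return 1000ULL * hw_reads + (unsigned int)cpu;
}

static uint64_t sketch_read_cycle_counter(struct sketch_rq *rq, int cpu,
					  uint64_t wallclock)
{
	/* Re-read only when the wallclock has moved on; repeated calls with
	 * the same wallclock share one sample. */
	if (rq->last_cc_update != wallclock) {
		rq->cycles = sketch_get_cpu_cycle_counter(cpu);
		rq->last_cc_update = wallclock;
	}
	return rq->cycles;
}

int main(void)
{
	struct sketch_rq rq = { 0, 0 };

	sketch_read_cycle_counter(&rq, 0, 100);	/* reads the counter */
	sketch_read_cycle_counter(&rq, 0, 100);	/* served from the cache */
	sketch_read_cycle_counter(&rq, 0, 200);	/* new wallclock, reads again */

	printf("hardware reads: %u\n", hw_reads);	/* prints 2 */
	return 0;
}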
@@ -348,7 +365,7 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
 	if (is_idle_task(curr)) {
 		/* We're here without rq->lock held, IRQ disabled */
 		raw_spin_lock(&rq->lock);
-		update_task_cpu_cycles(curr, cpu);
+		update_task_cpu_cycles(curr, cpu, ktime_get_ns());
 		raw_spin_unlock(&rq->lock);
 	}
 }
@@ -757,7 +774,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
 	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
 			 wallclock, 0);
 
-	update_task_cpu_cycles(p, new_cpu);
+	update_task_cpu_cycles(p, new_cpu, wallclock);
 
 	/*
 	 * When a task is migrating during the wakeup, adjust
@@ -1839,7 +1856,7 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 		return;
 	}
 
-	cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+	cur_cycles = read_cycle_counter(cpu, wallclock);
 
 	/*
 	 * If current task is idle task and irqtime == 0 CPU was
@@ -1904,7 +1921,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 	old_window_start = update_window_start(rq, wallclock, event);
 
 	if (!p->ravg.mark_start) {
-		update_task_cpu_cycles(p, cpu_of(rq));
+		update_task_cpu_cycles(p, cpu_of(rq), wallclock);
 		goto done;
 	}
 
@@ -2035,7 +2052,7 @@ void mark_task_starting(struct task_struct *p)
 	p->ravg.mark_start = p->last_wake_ts = wallclock;
 	p->last_enqueued_ts = wallclock;
 	p->last_switch_out_ts = 0;
-	update_task_cpu_cycles(p, cpu_of(rq));
+	update_task_cpu_cycles(p, cpu_of(rq), wallclock);
 }
 
 static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
@@ -3255,6 +3272,8 @@ void walt_sched_init(struct rq *rq)
 	rq->curr_table = 0;
 	rq->prev_top = 0;
 	rq->curr_top = 0;
+	rq->last_cc_update = 0;
+	rq->cycles = 0;
 	for (j = 0; j < NUM_TRACKED_WINDOWS; j++) {
 		memset(&rq->load_subs[j], 0,
 				sizeof(struct load_subtractions));