Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4a1ca3b2 authored by qctecmdr Service and committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/fair: Add bias towards previous CPU for high wakeup rate tasks"

parents 1b3ce1cf d660893f
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -705,6 +705,7 @@ struct task_struct {
	struct sched_entity		se;
	struct sched_rt_entity		rt;
	u64 last_sleep_ts;
	u64 last_cpu_selected_ts;
#ifdef CONFIG_SCHED_WALT
	struct ravg ravg;
	/*
+4 −0
Original line number Diff line number Diff line
@@ -2276,6 +2276,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
	p->se.nr_migrations		= 0;
	p->se.vruntime			= 0;
	p->last_sleep_ts		= 0;
	p->last_cpu_selected_ts		= 0;

	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -3118,6 +3120,8 @@ unsigned long long task_sched_runtime(struct task_struct *p)
	return ns;
}

/*
 * Multiplier applied to utilization when deriving a CPU frequency request.
 * NOTE(review): assumes the consumer scales by this value and divides by
 * 1024 (SCHED_CAPACITY_SCALE), i.e. util * 1280/1024 ≈ 1.25x, which leaves
 * roughly 20% headroom — confirm at the use site.
 */
unsigned int capacity_margin_freq = 1280; /* ~20% margin */

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
+75 −5
Original line number Diff line number Diff line
@@ -7003,6 +7003,38 @@ struct find_best_target_env {
	bool need_idle;
};

/*
 * is_packing_eligible() - may task @p be packed onto @target_cpu instead of
 * being sent to an idle CPU?
 *
 * Returns false whenever the placement explicitly wants an idle CPU (boost
 * or need_idle) or no idle candidate exists; returns true when more than one
 * CPU is active, or when the single active CPU still has capacity headroom
 * for the task after applying the capacity margin.
 */
static bool is_packing_eligible(struct task_struct *p, int target_cpu,
				struct find_best_target_env *fbt_env,
				unsigned int target_cpus_count,
				int best_idle_cstate)
{
	unsigned long extra_util, projected;

	/* Boosted placement or an explicit idle request rules out packing. */
	if (fbt_env->need_idle || fbt_env->placement_boost)
		return false;

	/* No idle CPU was found, so there is no alternative to weigh. */
	if (best_idle_cstate == -1)
		return false;

	/* With other active CPUs available, packing is always acceptable. */
	if (target_cpus_count != 1)
		return true;

	/*
	 * When the task's demand is already accounted in the target's
	 * cumulative window demand, adding it again would double count.
	 */
	extra_util = task_in_cum_window_demand(cpu_rq(target_cpu), p) ?
			0 : task_util(p);

	projected = add_capacity_margin(cpu_util_cum(target_cpu, extra_util),
					target_cpu);

	/*
	 * If there is only one active CPU and it is already above its current
	 * capacity, avoid placing additional task on the CPU.
	 */
	return projected <= capacity_curr_of(target_cpu);
}

static inline bool skip_sg(struct task_struct *p, struct sched_group *sg,
			   struct cpumask *rtg_target)
{
@@ -7047,6 +7079,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
	int target_cpu = -1;
	int cpu, i;
	unsigned long spare_cap;
	unsigned int active_cpus_count = 0;

	*backup_cpu = -1;

@@ -7270,6 +7303,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
			 * capacity.
			 */

			active_cpus_count++;

			/* Favor CPUs with maximum spare capacity */
			if ((capacity_orig - new_util) < target_max_spare_cap)
				continue;
@@ -7290,12 +7325,11 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,

	} while (sg = sg->next, sg != sd->groups);

	if (fbt_env->need_idle || fbt_env->placement_boost) {
		if (best_idle_cpu != -1) {
	if (best_idle_cpu != -1 && !is_packing_eligible(p, target_cpu, fbt_env,
					active_cpus_count, best_idle_cstate)) {
		target_cpu = best_idle_cpu;
		best_idle_cpu = -1;
	}
	}

	/*
	 * For non latency sensitive tasks, cases B and C in the previous loop,
@@ -7503,6 +7537,39 @@ static inline int wake_to_idle(struct task_struct *p)
		 (p->flags & PF_WAKE_UP_IDLE);
}

/* Window (ns) within which a recently-selected, recently-slept task keeps
 * its previous CPU (2ms). */
#define SCHED_SELECT_PREV_CPU_NSEC	2000000
/* NOTE(review): not referenced in the visible code — presumably forces a
 * full CPU selection after 20ms; confirm against the rest of the file. */
#define SCHED_FORCE_CPU_SELECTION_NSEC	20000000

/*
 * bias_to_prev_cpu() - fast-path placement check: return true when waking
 * task @p should simply go back to the CPU it last ran on, skipping the
 * full energy-aware search.
 *
 * All of the following must hold:
 *  - the previous CPU is not isolated and is currently idle;
 *  - a non-zero timestamp source is available (WALT window mark_start under
 *    CONFIG_SCHED_WALT, otherwise sched_clock());
 *  - @p was CPU-selected within the last 2ms (otherwise the selection
 *    window is restarted and the slow path runs);
 *  - @p went to sleep within the last 2ms, i.e. it has a high wakeup rate;
 *  - when @rtg_target is given, the previous CPU belongs to that mask.
 */
static inline bool
bias_to_prev_cpu(struct task_struct *p, struct cpumask *rtg_target)
{
	int prev_cpu = task_cpu(p);
#ifdef CONFIG_SCHED_WALT
	u64 ms = p->ravg.mark_start;
#else
	u64 ms = sched_clock();
#endif

	if (cpu_isolated(prev_cpu) || !idle_cpu(prev_cpu))
		return false;

	/* No timestamp recorded yet; the recency windows can't be checked. */
	if (!ms)
		return false;

	/*
	 * Selection window expired: restart it and fall back to the full
	 * search.  This write is the function's only side effect.
	 */
	if (ms - p->last_cpu_selected_ts >= SCHED_SELECT_PREV_CPU_NSEC) {
		p->last_cpu_selected_ts = ms;
		return false;
	}

	if (ms - p->last_sleep_ts >= SCHED_SELECT_PREV_CPU_NSEC)
		return false;

	if (rtg_target && !cpumask_test_cpu(prev_cpu, rtg_target))
		return false;

	return true;
}

#ifdef CONFIG_SCHED_WALT
static inline struct cpumask *find_rtg_target(struct task_struct *p)
{
@@ -7603,6 +7670,9 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
		prefer_idle = sched_feat(EAS_PREFER_IDLE) ?
				(schedtune_prefer_idle(p) > 0) : 0;

		if (bias_to_prev_cpu(p, rtg_target))
			return prev_cpu;

		eenv->max_cpu_count = EAS_CPU_BKP + 1;

		fbt_env.rtg_target = rtg_target;
+2 −0
Original line number Diff line number Diff line
@@ -877,6 +877,8 @@ struct rq {
	int prev_top;
	int curr_top;
	bool notif_pending;
	u64 last_cc_update;
	u64 cycles;
#endif /* CONFIG_SCHED_WALT */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+26 −7
Original line number Diff line number Diff line
@@ -291,10 +291,27 @@ update_window_start(struct rq *rq, u64 wallclock, int event)
	return old_window_start;
}

/*
 * Assumes rq_lock is held and wallclock was recorded in the same critical
 * section as this function's invocation.
 *
 * Returns the CPU cycle counter for @cpu, re-reading the hardware counter
 * at most once per distinct @wallclock value: the result is cached in
 * rq->cycles, keyed by rq->last_cc_update, so multiple callers within one
 * critical section share a single (expensive) counter read.
 */
static inline u64 read_cycle_counter(int cpu, u64 wallclock)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->last_cc_update != wallclock) {
		rq->cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
		rq->last_cc_update = wallclock;
	}

	return rq->cycles;
}

/*
 * Refresh @p's snapshot of the cycle counter for @cpu via the per-rq cache
 * in read_cycle_counter().  No-op when the platform has not registered a
 * cycle-counter callback (use_cycle_counter is false).
 */
static void update_task_cpu_cycles(struct task_struct *p, int cpu,
				   u64 wallclock)
{
	if (use_cycle_counter)
		p->cpu_cycles = read_cycle_counter(cpu, wallclock);
}

void clear_ed_task(struct task_struct *p, struct rq *rq)
@@ -338,7 +355,7 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
	if (is_idle_task(curr)) {
		/* We're here without rq->lock held, IRQ disabled */
		raw_spin_lock(&rq->lock);
		update_task_cpu_cycles(curr, cpu);
		update_task_cpu_cycles(curr, cpu, ktime_get_ns());
		raw_spin_unlock(&rq->lock);
	}
}
@@ -747,7 +764,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
			 wallclock, 0);

	update_task_cpu_cycles(p, new_cpu);
	update_task_cpu_cycles(p, new_cpu, wallclock);

	/*
	 * When a task is migrating during the wakeup, adjust
@@ -1831,7 +1848,7 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
		return;
	}

	cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
	cur_cycles = read_cycle_counter(cpu, wallclock);

	/*
	 * If current task is idle task and irqtime == 0 CPU was
@@ -1896,7 +1913,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
	old_window_start = update_window_start(rq, wallclock, event);

	if (!p->ravg.mark_start) {
		update_task_cpu_cycles(p, cpu_of(rq));
		update_task_cpu_cycles(p, cpu_of(rq), wallclock);
		goto done;
	}

@@ -2019,7 +2036,7 @@ void mark_task_starting(struct task_struct *p)
	p->ravg.mark_start = p->last_wake_ts = wallclock;
	p->last_enqueued_ts = wallclock;
	p->last_switch_out_ts = 0;
	update_task_cpu_cycles(p, cpu_of(rq));
	update_task_cpu_cycles(p, cpu_of(rq), wallclock);
}

static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
@@ -3203,6 +3220,8 @@ void walt_sched_init(struct rq *rq)
	rq->curr_table = 0;
	rq->prev_top = 0;
	rq->curr_top = 0;
	rq->last_cc_update = 0;
	rq->cycles = 0;
	for (j = 0; j < NUM_TRACKED_WINDOWS; j++) {
		memset(&rq->load_subs[j], 0,
				sizeof(struct load_subtractions));