Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4a1aae64 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "hrtimer: reprogram remote CPU's timer hardware"

parents 14d89ee3 301bc715
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -1485,6 +1485,17 @@ overcommitted scenario. See notes on sched_spill_nr_run and sched_spill_load for
how overcommitment threshold is defined and also notes on
'sched_upmigrate_min_nice' tunable.

** 7.26 power_aware_timer_migration

Appears at: /proc/sys/kernel/power_aware_timer_migration

Default value: 0

This tunable helps save power by offloading timer activity to CPUs of lower
power cost (the power cluster). Both high-res and low-res timers that are not
pinned to a single CPU are enqueued on the online CPU of the power cluster that
is in the shallowest C-state. The current CPU is preferred in case of a tie.

=========================
8. HMP SCHEDULER TRACE POINTS
=========================
+1 −0
Original line number Diff line number Diff line
@@ -67,6 +67,7 @@ extern unsigned int sysctl_sched_downmigrate_pct;
extern int sysctl_sched_upmigrate_min_nice;
extern unsigned int sysctl_sched_powerband_limit_pct;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_power_aware_timer_migration;

#ifdef CONFIG_SCHED_QHMP
extern unsigned int sysctl_sched_min_runtime;
+69 −2
Original line number Diff line number Diff line
@@ -648,6 +648,64 @@ void resched_cpu(int cpu)

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON

#ifdef CONFIG_SCHED_HMP

/* Sysctl knob (/proc/sys/kernel/power_aware_timer_migration), 0 by default. */
__read_mostly unsigned int sysctl_power_aware_timer_migration;

/*
 * Pick a power-friendly CPU to queue an unpinned timer on: the online
 * CPU in the cheapest power band that currently sits in the shallowest
 * C-state, preferring the running CPU on an exact tie.
 *
 * Returns nr_cpu_ids (an invalid CPU number) when the tunable is off,
 * so the caller can fall back to its normal target selection.
 */
static int _get_nohz_timer_target_hmp(void)
{
	int cpu, target = smp_processor_id();
	int best_cost = INT_MAX, best_cstate = INT_MAX;

	if (!sysctl_power_aware_timer_migration)
		return nr_cpu_ids;

	rcu_read_lock();

	for_each_online_cpu(cpu) {
		int cost = power_cost(cpu, 0);
		int cstate = cpu_rq(cpu)->cstate;

		if (!power_delta_exceeded(cost, best_cost)) {
			/*
			 * Comparable power cost: take the shallower
			 * C-state; on an exact tie keep the current CPU.
			 */
			if (cstate < best_cstate ||
			    (cstate == best_cstate &&
			     cpu == smp_processor_id())) {
				target = cpu;
				best_cstate = cstate;
			}
			continue;
		}

		/* Significantly different cost: only a cheaper CPU wins. */
		if (cost <= best_cost) {
			target = cpu;
			best_cost = cost;
			best_cstate = cstate;
		}
	}
	rcu_read_unlock();

	return target;
}

#else

static int _get_nohz_timer_target_hmp(void)
{
	/*
	 * Stub for !CONFIG_SCHED_HMP builds. sched_enable_hmp is 0 in that
	 * configuration, so the caller is not expected to reach this path;
	 * return the current CPU as a safe default if it ever does.
	 */
	return raw_smp_processor_id();
}

#endif

/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu.  This is good for power-savings.
@@ -659,10 +717,19 @@ void resched_cpu(int cpu)
int get_nohz_timer_target(int pinned)
{
	int cpu = smp_processor_id();
	int i;
	int i, lower_power_cpu;
	struct sched_domain *sd;

	if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
	if (pinned || !get_sysctl_timer_migration())
		return cpu;

	if (sched_enable_hmp) {
		lower_power_cpu =  _get_nohz_timer_target_hmp();
		if (lower_power_cpu < nr_cpu_ids)
			return lower_power_cpu;
	}

	if (!idle_cpu(cpu))
		return cpu;

	rcu_read_lock();
+69 −2
Original line number Diff line number Diff line
@@ -648,6 +648,64 @@ void resched_cpu(int cpu)

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON

#ifdef CONFIG_SCHED_HMP

/* Tunable: /proc/sys/kernel/power_aware_timer_migration, default 0 (off). */
__read_mostly unsigned int sysctl_power_aware_timer_migration;

/*
 * Return the CPU found in shallowest C-state in least power-cost
 * cluster. Prefer the current CPU when there is a tie.
 *
 * Returns nr_cpu_ids (an invalid CPU number) when the tunable is
 * disabled, telling the caller to fall back to its default selection.
 */
static int _get_nohz_timer_target_hmp(void)
{
	int i, best_cpu = smp_processor_id();
	int min_cost = INT_MAX, min_cstate = INT_MAX;

	if (!sysctl_power_aware_timer_migration)
		return nr_cpu_ids;

	rcu_read_lock();

	for_each_online_cpu(i) {
		struct rq *rq = cpu_rq(i);
		/*
		 * NOTE(review): the sibling copy of this function calls
		 * power_cost(i, 0) — opposite argument order. Confirm this
		 * matches this file's power_cost() signature.
		 */
		int cpu_cost = power_cost(0, i);
		int cstate = rq->cstate;

		/*
		 * Cost differs by more than the allowed band: switch to
		 * this CPU only if it is the cheaper one.
		 */
		if (power_delta_exceeded(cpu_cost, min_cost)) {
			if (cpu_cost > min_cost)
				continue;

			best_cpu = i;
			min_cost = cpu_cost;
			min_cstate = cstate;
			continue;
		}

		/*
		 * Comparable power cost: prefer the shallowest C-state,
		 * breaking an exact tie in favour of the current CPU.
		 */
		if (cstate < min_cstate || (i == smp_processor_id() &&
					cstate == min_cstate)) {
			best_cpu = i;
			min_cstate = cstate;
		}
	}
	rcu_read_unlock();

	return best_cpu;
}

#else

static int _get_nohz_timer_target_hmp(void)
{
	/*
	 * Fallback for kernels built without CONFIG_SCHED_HMP: with
	 * sched_enable_hmp = 0 the caller should never take this path,
	 * so simply report the current CPU.
	 */
	return raw_smp_processor_id();
}

#endif

/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu.  This is good for power-savings.
@@ -659,10 +717,19 @@ void resched_cpu(int cpu)
int get_nohz_timer_target(int pinned)
{
	int cpu = smp_processor_id();
	int i;
	int i, lower_power_cpu;
	struct sched_domain *sd;

	if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
	if (pinned || !get_sysctl_timer_migration())
		return cpu;

	if (sched_enable_hmp) {
		lower_power_cpu =  _get_nohz_timer_target_hmp();
		if (lower_power_cpu < nr_cpu_ids)
			return lower_power_cpu;
	}

	if (!idle_cpu(cpu))
		return cpu;

	rcu_read_lock();
+1 −0
Original line number Diff line number Diff line
@@ -934,6 +934,7 @@ extern void sched_account_irqtime(int cpu, struct task_struct *curr,
unsigned int cpu_temp(int cpu);
extern unsigned int nr_eligible_big_tasks(int cpu);
extern void update_up_down_migrate(void);
extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);

/*
 * 'load' is in reference to "best cpu" at its best frequency.
Loading