Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dd8171dd authored by Pavankumar Kondeti, committed by Sai Harshini Nimmala
Browse files

sched/walt: Improve the scheduler



This change is for general scheduler improvement.

Change-Id: I1aa4c378452c936a443a17e77834d1191b7b0563
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Sai Harshini Nimmala <snimmala@codeaurora.org>
parent a0c2b0d2
Loading
Loading
Loading
Loading
+31 −5
Original line number Diff line number Diff line
@@ -123,6 +123,7 @@ __read_mostly unsigned int sysctl_sched_window_stats_policy =

/* Window size (in ns) */
__read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
/*
 * Requested (pending) window size. When it differs from sched_ravg_window,
 * walt_irq_work applies it while all rq locks are held and then re-derives
 * the window-dependent tunables via walt_tunables_fixup().
 */
__read_mostly unsigned int new_sched_ravg_window = MIN_SCHED_RAVG_WINDOW;

/*
 * A after-boot constant divisor for cpu_util_freq_walt() to apply the load
@@ -3219,6 +3220,13 @@ u64 get_rtgb_active_time(void)
	return 0;
}

/* Forward declaration; walt_init_window_dep() is defined later in this file. */
static void walt_init_window_dep(void);
/*
 * Re-derive values that depend on sched_ravg_window. Called from
 * walt_irq_work() immediately after sched_ravg_window is switched to
 * new_sched_ravg_window, while every rq lock is still held.
 */
static void walt_tunables_fixup(void)
{
	walt_update_group_thresholds();
	walt_init_window_dep();
}

/*
 * Runs in hard-irq context. This should ideally run just after the latest
 * window roll-over.
@@ -3324,6 +3332,20 @@ void walt_irq_work(struct irq_work *irq_work)
		}
	}

	/*
	 * If the window change request is in pending, good place to
	 * change sched_ravg_window since all rq locks are acquired.
	 */
	if (!is_migration) {
		if (sched_ravg_window != new_sched_ravg_window) {
			printk_deferred("ALERT: changing window size from %u to %u\n",
					sched_ravg_window,
					new_sched_ravg_window);
			sched_ravg_window = new_sched_ravg_window;
			walt_tunables_fixup();
		}
	}

	for_each_cpu(cpu, cpu_possible_mask)
		raw_spin_unlock(&cpu_rq(cpu)->lock);

@@ -3436,12 +3458,8 @@ int walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
	return ret;
}

static void walt_init_once(void)
static void walt_init_window_dep(void)
{
	init_irq_work(&walt_migration_irq_work, walt_irq_work);
	init_irq_work(&walt_cpufreq_irq_work, walt_irq_work);
	walt_rotate_work_init();

	walt_cpu_util_freq_divisor =
	    (sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;
	walt_scale_demand_divisor = sched_ravg_window >> SCHED_CAPACITY_SHIFT;
@@ -3453,6 +3471,14 @@ static void walt_init_once(void)
		scale_demand(sched_init_task_load_windows);
}

/*
 * One-time WALT initialization: set up the irq_work handlers (both the
 * migration and cpufreq irq_works dispatch to walt_irq_work), the big-task
 * rotation worker, and the window-size-dependent tunables. The
 * window-dependent portion lives in walt_init_window_dep() so it can also
 * be re-run by walt_tunables_fixup() when sched_ravg_window changes.
 */
static void walt_init_once(void)
{
	init_irq_work(&walt_migration_irq_work, walt_irq_work);
	init_irq_work(&walt_cpufreq_irq_work, walt_irq_work);
	walt_rotate_work_init();
	walt_init_window_dep();
}

void walt_sched_init_rq(struct rq *rq)
{
	int j;