Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6b519ff7 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "Revert "sched: fix single cpu running issue""

parents 87c161fe 97b4c065
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -340,7 +340,6 @@ static void __init reset_cpu_topology(void)
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
		cpumask_set_cpu(cpu, &cpu_topo->core_possible_sibling);
	}
}

+4 −5
Original line number Diff line number Diff line
@@ -31,9 +31,9 @@ extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_cstate_aware;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_user_hint;
extern const int sched_user_hint_max;
extern unsigned int sysctl_sched_cpu_high_irqload;
@@ -70,6 +70,9 @@ sched_ravg_window_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp,
			 loff_t *ppos);

extern int sched_updown_migrate_handler(struct ctl_table *table,
					int write, void __user *buffer,
					size_t *lenp, loff_t *ppos);
#endif

#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_DEBUG_PREEMPT)
@@ -131,10 +134,6 @@ extern int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

extern int sched_updown_migrate_handler(struct ctl_table *table,
					int write, void __user *buffer,
					size_t *lenp, loff_t *ppos);

extern int sysctl_numa_balancing(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);
+0 −149
Original line number Diff line number Diff line
@@ -6875,155 +6875,6 @@ void ia64_set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}

#endif

#ifdef CONFIG_PROC_SYSCTL
/*
 * Count the CPU clusters in the system by walking the possible-sibling
 * masks cluster by cluster, then report the number of capacity margin
 * levels: one fewer than the cluster count, since the highest-capacity
 * cluster has nothing to up-migrate to.
 */
static int find_capacity_margin_levels(void)
{
	int first_cpu = 0;
	int nr_clusters = 0;

	while (first_cpu < num_possible_cpus()) {
		first_cpu += cpumask_weight(
				topology_possible_sibling_cpumask(first_cpu));
		nr_clusters++;
	}

	return nr_clusters - 1;
}

/*
 * Propagate the per-cluster sysctl up-migrate margins into the per-CPU
 * sched_capacity_margin_up[] array.  With only one margin level every
 * possible CPU takes the single sysctl value; with several levels each
 * cluster receives its own value (the last cluster needs none, as there
 * is no cluster above it to up-migrate to).
 */
static void sched_update_up_migrate_values(int cap_margin_levels,
				const struct cpumask *cluster_cpus[])
{
	int level, cpu;

	if (cap_margin_levels <= 1) {
		for_each_possible_cpu(cpu)
			sched_capacity_margin_up[cpu] =
				sysctl_sched_capacity_margin_up[0];
		return;
	}

	for (level = 0; level < cap_margin_levels; level++) {
		if (!cluster_cpus[level])
			continue;
		for_each_cpu(cpu, cluster_cpus[level])
			sched_capacity_margin_up[cpu] =
				sysctl_sched_capacity_margin_up[level];
	}
}

/*
 * Propagate the per-cluster sysctl down-migrate margins into the per-CPU
 * sched_capacity_margin_down[] array.  Mirrors the up-migrate update:
 * a single level applies one value system-wide, otherwise each cluster
 * gets its own value — the last cluster is skipped because nothing ever
 * down-migrates into it.
 */
static void sched_update_down_migrate_values(int cap_margin_levels,
				const struct cpumask *cluster_cpus[])
{
	int level, cpu;

	if (cap_margin_levels <= 1) {
		for_each_possible_cpu(cpu)
			sched_capacity_margin_down[cpu] =
				sysctl_sched_capacity_margin_down[0];
		return;
	}

	for (level = 0; level < cap_margin_levels; level++) {
		if (!cluster_cpus[level])
			continue;
		for_each_cpu(cpu, cluster_cpus[level])
			sched_capacity_margin_down[cpu] =
				sysctl_sched_capacity_margin_down[level];
	}
}

/*
 * Rebuild the cluster -> cpumask lookup table from the possible-sibling
 * topology, then push the freshly written sysctl values out to the
 * per-CPU margin arrays.  Which array is updated depends on whether the
 * written table entry was the up- or the down-migrate sysctl.
 */
static void sched_update_updown_migrate_values(unsigned int *data,
					      int cap_margin_levels)
{
	static const struct cpumask *cluster_cpus[MAX_CLUSTERS];
	int idx = 0;
	int cpu = 0;

	while (idx < MAX_CLUSTERS && cpu < num_possible_cpus()) {
		const struct cpumask *mask =
			topology_possible_sibling_cpumask(cpu);

		cluster_cpus[idx++] = mask;
		cpu += cpumask_weight(mask);
	}

	if (data == &sysctl_sched_capacity_margin_up[0])
		sched_update_up_migrate_values(cap_margin_levels, cluster_cpus);
	else
		sched_update_down_migrate_values(cap_margin_levels,
						 cluster_cpus);
}

/*
 * sysctl handler for sched_capacity_margin_up/down.
 *
 * Serialized by a function-local mutex.  The number of margin levels
 * (clusters - 1) is computed lazily and cached; table->maxlen is resized
 * to match so the parser accepts exactly that many values.  On a write,
 * the previous values are saved first so they can be restored if parsing
 * fails or if any up-migrate margin exceeds its paired down-migrate
 * margin (the two tables must stay consistent).  A successful write is
 * then propagated to the per-CPU margin arrays.
 *
 * Returns 0 on success or a negative errno.
 */
int sched_updown_migrate_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret, i;
	unsigned int *data = (unsigned int *)table->data;
	unsigned int *old_val;
	static DEFINE_MUTEX(mutex);
	static int cap_margin_levels = -1;

	mutex_lock(&mutex);

	/* Lazily (re)compute the level count and size the table to it. */
	if (cap_margin_levels == -1 ||
		table->maxlen != (sizeof(unsigned int) * cap_margin_levels)) {
		cap_margin_levels = find_capacity_margin_levels();
		table->maxlen = sizeof(unsigned int) * cap_margin_levels;
	}

	/* Single-cluster systems have no migration margins to tune. */
	if (cap_margin_levels <= 0) {
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* Reads need no rollback bookkeeping — hand off directly. */
	if (!write) {
		ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
		goto unlock_mutex;
	}

	/*
	 * Cache the old values so that they can be restored
	 * if either the write fails (for example out of range values)
	 * or the downmigrate and upmigrate are not in sync.
	 */
	old_val = kzalloc(table->maxlen, GFP_KERNEL);
	if (!old_val) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	memcpy(old_val, data, table->maxlen);

	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);

	/* Parse failure: roll the sysctl values back untouched. */
	if (ret) {
		memcpy(data, old_val, table->maxlen);
		goto free_old_val;
	}

	/* Reject writes where an up margin exceeds its down margin. */
	for (i = 0; i < cap_margin_levels; i++) {
		if (sysctl_sched_capacity_margin_up[i] >
				sysctl_sched_capacity_margin_down[i]) {
			memcpy(data, old_val, table->maxlen);
			ret = -EINVAL;
			goto free_old_val;
		}
	}

	/* Accepted: fan the new values out to the per-CPU arrays. */
	sched_update_updown_migrate_values(data, cap_margin_levels);

free_old_val:
	kfree(old_val);
unlock_mutex:
	mutex_unlock(&mutex);

	return ret;
}
#endif

#ifdef CONFIG_CGROUP_SCHED
+0 −6
Original line number Diff line number Diff line
@@ -165,12 +165,6 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 * (default: ~20%)
 */
unsigned int capacity_margin				= 1280;

/* Migration margins */
unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS] = {
			[0 ... MAX_MARGIN_LEVELS-1] = 1078}; /* ~5% margin */
unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS] = {
			[0 ... MAX_MARGIN_LEVELS-1] = 1205}; /* ~15% margin */
unsigned int sched_capacity_margin_up[NR_CPUS] = {
			[0 ... NR_CPUS-1] = 1078}; /* ~5% margin */
unsigned int sched_capacity_margin_down[NR_CPUS] = {
+1 −7
Original line number Diff line number Diff line
@@ -2861,8 +2861,6 @@ static inline void restore_cgroup_boost_settings(void) { }

extern int alloc_related_thread_groups(void);

extern unsigned long all_cluster_ids[];

extern void check_for_migration(struct rq *rq, struct task_struct *p);

static inline int is_reserved(int cpu)
@@ -2944,11 +2942,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)

/*
 * Return true when @cluster's first CPU is a minimum-capacity CPU.
 *
 * NOTE(review): the rendered diff contained both the pre- and post-revert
 * bodies fused together, leaving an unreachable second return; collapsed
 * here to the single post-revert form shown as the added line.
 */
static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
{
	return is_min_capacity_cpu(cluster_first_cpu(cluster));
}

#else	/* CONFIG_SCHED_WALT */
Loading