Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 05c7d0a1 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: Fix deadlock between cpu hotplug and upmigrate change"

parents 804ccaca 927d2c14
Loading
Loading
Loading
Loading
+20 −9
Original line number Original line Diff line number Diff line
@@ -3805,7 +3805,21 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	int ret;
	int ret;
	unsigned int old_val;
	unsigned int old_val;
	unsigned int *data = (unsigned int *)table->data;
	unsigned int *data = (unsigned int *)table->data;
	int update_min_nice = 0;
	int update_task_count = 0;

	if (!sched_enable_hmp)
		return 0;

	/*
	 * The policy mutex is acquired with cpu_hotplug.lock
	 * held from cpu_up()->cpufreq_governor_interactive()->
	 * sched_set_window(). So enforce the same order here.
	 */
	if (write && (data == &sysctl_sched_upmigrate_pct ||
	    data == (unsigned int *)&sysctl_sched_upmigrate_min_nice)) {
		update_task_count = 1;
		get_online_cpus();
	}


	mutex_lock(&policy_mutex);
	mutex_lock(&policy_mutex);


@@ -3813,7 +3827,7 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,


	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);


	if (ret || !write || !sched_enable_hmp)
	if (ret || !write)
		goto done;
		goto done;


	if (write && (old_val == *data))
	if (write && (old_val == *data))
@@ -3828,7 +3842,6 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
			ret = -EINVAL;
			ret = -EINVAL;
			goto done;
			goto done;
		}
		}
		update_min_nice = 1;
	} else if (data != &sysctl_sched_select_prev_cpu_us) {
	} else if (data != &sysctl_sched_select_prev_cpu_us) {
		/*
		/*
		 * all tunables other than min_nice and prev_cpu_us are
		 * all tunables other than min_nice and prev_cpu_us are
@@ -3850,19 +3863,17 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	 * includes taking runqueue lock of all online cpus and re-initializing
	 * includes taking runqueue lock of all online cpus and re-initializing
	 * their big counter values based on changed criteria.
	 * their big counter values based on changed criteria.
	 */
	 */
	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
	if (update_task_count)
		get_online_cpus();
		pre_big_task_count_change(cpu_online_mask);
		pre_big_task_count_change(cpu_online_mask);
	}


	set_hmp_defaults();
	set_hmp_defaults();


	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
	if (update_task_count)
		post_big_task_count_change(cpu_online_mask);
		post_big_task_count_change(cpu_online_mask);
		put_online_cpus();
	}


done:
done:
	if (update_task_count)
		put_online_cpus();
	mutex_unlock(&policy_mutex);
	mutex_unlock(&policy_mutex);
	return ret;
	return ret;
}
}
+20 −10
Original line number Original line Diff line number Diff line
@@ -3883,7 +3883,22 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	int ret;
	int ret;
	unsigned int old_val;
	unsigned int old_val;
	unsigned int *data = (unsigned int *)table->data;
	unsigned int *data = (unsigned int *)table->data;
	int update_min_nice = 0;
	int update_task_count = 0;

	if (!sched_enable_hmp)
		return 0;

	/*
	 * The policy mutex is acquired with cpu_hotplug.lock
	 * held from cpu_up()->cpufreq_governor_interactive()->
	 * sched_set_window(). So enforce the same order here.
	 */
	if (write && (data == &sysctl_sched_upmigrate_pct ||
	    data == &sysctl_sched_small_task_pct ||
	    data == (unsigned int *)&sysctl_sched_upmigrate_min_nice)) {
		update_task_count = 1;
		get_online_cpus();
	}


	mutex_lock(&policy_mutex);
	mutex_lock(&policy_mutex);


@@ -3908,7 +3923,6 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
			ret = -EINVAL;
			ret = -EINVAL;
			goto done;
			goto done;
		}
		}
		update_min_nice = 1;
	} else {
	} else {
		/* all tunables other than min_nice are in percentage */
		/* all tunables other than min_nice are in percentage */
		if (sysctl_sched_downmigrate_pct >
		if (sysctl_sched_downmigrate_pct >
@@ -3927,21 +3941,17 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	 * includes taking runqueue lock of all online cpus and re-initializing
	 * includes taking runqueue lock of all online cpus and re-initializing
	 * their big/small counter values based on changed criteria.
	 * their big/small counter values based on changed criteria.
	 */
	 */
	if ((data == &sysctl_sched_upmigrate_pct ||
	if (update_task_count)
	     data == &sysctl_sched_small_task_pct || update_min_nice)) {
		get_online_cpus();
		pre_big_small_task_count_change(cpu_online_mask);
		pre_big_small_task_count_change(cpu_online_mask);
	}


	set_hmp_defaults();
	set_hmp_defaults();


	if ((data == &sysctl_sched_upmigrate_pct ||
	if (update_task_count)
	     data == &sysctl_sched_small_task_pct || update_min_nice)) {
		post_big_small_task_count_change(cpu_online_mask);
		post_big_small_task_count_change(cpu_online_mask);
		put_online_cpus();
	}


done:
done:
	if (update_task_count)
		put_online_cpus();
	mutex_unlock(&policy_mutex);
	mutex_unlock(&policy_mutex);
	return ret;
	return ret;
}
}