Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 927d2c14 authored by Pavankumar Kondeti
Browse files

sched: Fix deadlock between cpu hotplug and upmigrate change



There is a circular dependency between cpu_hotplug.lock and
HMP scheduler policy mutex. Prevent this by enforcing the
same lock order.

Here CPU0 and CPU2 are governed by different cpufreq policies.

----------------                        --------------------
    CPU 0                                          CPU 2
---------------                         --------------------

proc_sys_call_handler()                 cpu_up()

                                        --> acquired cpu_hotplug.lock

sched_hmp_proc_update_handler()         cpufreq_cpu_callback()

--> acquired policy_mutex

                                        cpufreq_governor_interactive()

get_online_cpus()                       sched_set_window()

--> waiting for cpu_hotplug.lock        --> waiting for policy_mutex

Change-Id: I39efc394f4f00815b72adc975021fdb16fe6e30a
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 0112f3c5
Loading
Loading
Loading
Loading
+20 −9
Original line number Diff line number Diff line
@@ -3805,7 +3805,21 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	int ret;
	unsigned int old_val;
	unsigned int *data = (unsigned int *)table->data;
	int update_min_nice = 0;
	int update_task_count = 0;

	if (!sched_enable_hmp)
		return 0;

	/*
	 * The policy mutex is acquired with cpu_hotplug.lock
	 * held from cpu_up()->cpufreq_governor_interactive()->
	 * sched_set_window(). So enforce the same order here.
	 */
	if (write && (data == &sysctl_sched_upmigrate_pct ||
	    data == (unsigned int *)&sysctl_sched_upmigrate_min_nice)) {
		update_task_count = 1;
		get_online_cpus();
	}

	mutex_lock(&policy_mutex);

@@ -3813,7 +3827,7 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write || !sched_enable_hmp)
	if (ret || !write)
		goto done;

	if (write && (old_val == *data))
@@ -3828,7 +3842,6 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
			ret = -EINVAL;
			goto done;
		}
		update_min_nice = 1;
	} else if (data != &sysctl_sched_select_prev_cpu_us) {
		/*
		 * all tunables other than min_nice and prev_cpu_us are
@@ -3850,19 +3863,17 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	includes taking runqueue lock of all online cpus and re-initializing
	 * their big counter values based on changed criteria.
	 */
	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
		get_online_cpus();
	if (update_task_count)
		pre_big_task_count_change(cpu_online_mask);
	}

	set_hmp_defaults();

	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
	if (update_task_count)
		post_big_task_count_change(cpu_online_mask);
		put_online_cpus();
	}

done:
	if (update_task_count)
		put_online_cpus();
	mutex_unlock(&policy_mutex);
	return ret;
}
+20 −10
Original line number Diff line number Diff line
@@ -3883,7 +3883,22 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	int ret;
	unsigned int old_val;
	unsigned int *data = (unsigned int *)table->data;
	int update_min_nice = 0;
	int update_task_count = 0;

	if (!sched_enable_hmp)
		return 0;

	/*
	 * The policy mutex is acquired with cpu_hotplug.lock
	 * held from cpu_up()->cpufreq_governor_interactive()->
	 * sched_set_window(). So enforce the same order here.
	 */
	if (write && (data == &sysctl_sched_upmigrate_pct ||
	    data == &sysctl_sched_small_task_pct ||
	    data == (unsigned int *)&sysctl_sched_upmigrate_min_nice)) {
		update_task_count = 1;
		get_online_cpus();
	}

	mutex_lock(&policy_mutex);

@@ -3908,7 +3923,6 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
			ret = -EINVAL;
			goto done;
		}
		update_min_nice = 1;
	} else {
		/* all tunables other than min_nice are in percentage */
		if (sysctl_sched_downmigrate_pct >
@@ -3927,21 +3941,17 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
	includes taking runqueue lock of all online cpus and re-initializing
	 * their big/small counter values based on changed criteria.
	 */
	if ((data == &sysctl_sched_upmigrate_pct ||
	     data == &sysctl_sched_small_task_pct || update_min_nice)) {
		get_online_cpus();
	if (update_task_count)
		pre_big_small_task_count_change(cpu_online_mask);
	}

	set_hmp_defaults();

	if ((data == &sysctl_sched_upmigrate_pct ||
	     data == &sysctl_sched_small_task_pct || update_min_nice)) {
	if (update_task_count)
		post_big_small_task_count_change(cpu_online_mask);
		put_online_cpus();
	}

done:
	if (update_task_count)
		put_online_cpus();
	mutex_unlock(&policy_mutex);
	return ret;
}