kernel/sched/fair.c (+20 −9)

@@ -3805,7 +3805,21 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
 	int ret;
 	unsigned int old_val;
 	unsigned int *data = (unsigned int *)table->data;
-	int update_min_nice = 0;
+	int update_task_count = 0;
+
+	if (!sched_enable_hmp)
+		return 0;
+
+	/*
+	 * The policy mutex is acquired with cpu_hotplug.lock
+	 * held from cpu_up()->cpufreq_governor_interactive()->
+	 * sched_set_window(). So enforce the same order here.
+	 */
+	if (write && (data == &sysctl_sched_upmigrate_pct ||
+	    data == (unsigned int *)&sysctl_sched_upmigrate_min_nice)) {
+		update_task_count = 1;
+		get_online_cpus();
+	}
 
 	mutex_lock(&policy_mutex);
@@ -3813,7 +3827,7 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
-	if (ret || !write || !sched_enable_hmp)
+	if (ret || !write)
 		goto done;
 
 	if (write && (old_val == *data))
@@ -3828,7 +3842,6 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
 			ret = -EINVAL;
 			goto done;
 		}
-		update_min_nice = 1;
 	} else if (data != &sysctl_sched_select_prev_cpu_us) {
 		/*
 		 * all tunables other than min_nice and prev_cpu_us are
@@ -3850,19 +3863,17 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
 	 * includes taking runqueue lock of all online cpus and re-initiatizing
 	 * their big counter values based on changed criteria.
 	 */
-	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
-		get_online_cpus();
+	if (update_task_count)
 		pre_big_task_count_change(cpu_online_mask);
-	}
 
 	set_hmp_defaults();
 
-	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
+	if (update_task_count)
 		post_big_task_count_change(cpu_online_mask);
-		put_online_cpus();
-	}
 
 done:
+	if (update_task_count)
+		put_online_cpus();
 	mutex_unlock(&policy_mutex);
 
 	return ret;
 }

kernel/sched/qhmp_fair.c (+20 −10)

@@ -3883,7 +3883,22 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
 	int ret;
 	unsigned int old_val;
 	unsigned int *data = (unsigned int *)table->data;
-	int update_min_nice = 0;
+	int update_task_count = 0;
+
+	if (!sched_enable_hmp)
+		return 0;
+
+	/*
+	 * The policy mutex is acquired with cpu_hotplug.lock
+	 * held from cpu_up()->cpufreq_governor_interactive()->
+	 * sched_set_window(). So enforce the same order here.
+	 */
+	if (write && (data == &sysctl_sched_upmigrate_pct ||
+	    data == &sysctl_sched_small_task_pct ||
+	    data == (unsigned int *)&sysctl_sched_upmigrate_min_nice)) {
+		update_task_count = 1;
+		get_online_cpus();
+	}
 
 	mutex_lock(&policy_mutex);
@@ -3908,7 +3923,6 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
 			ret = -EINVAL;
 			goto done;
 		}
-		update_min_nice = 1;
 	} else {
 		/* all tunables other than min_nice are in percentage */
 		if (sysctl_sched_downmigrate_pct >
@@ -3927,21 +3941,17 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
 	 * includes taking runqueue lock of all online cpus and re-initiatizing
 	 * their big/small counter values based on changed criteria.
 	 */
-	if ((data == &sysctl_sched_upmigrate_pct ||
-	    data == &sysctl_sched_small_task_pct ||
-	    update_min_nice)) {
-		get_online_cpus();
+	if (update_task_count)
 		pre_big_small_task_count_change(cpu_online_mask);
-	}
 
 	set_hmp_defaults();
 
-	if ((data == &sysctl_sched_upmigrate_pct ||
-	    data == &sysctl_sched_small_task_pct ||
-	    update_min_nice)) {
+	if (update_task_count)
 		post_big_small_task_count_change(cpu_online_mask);
-		put_online_cpus();
-	}
 
 done:
+	if (update_task_count)
+		put_online_cpus();
 	mutex_unlock(&policy_mutex);
 
 	return ret;
 }
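The patch fixes an ABBA lock-ordering inversion. The hotplug path, cpu_up()->cpufreq_governor_interactive()->sched_set_window(), acquires cpu_hotplug.lock first and policy_mutex second, while the old sysctl handler took policy_mutex first and called get_online_cpus() only around the big-task recount. Run concurrently, each path could hold one lock while waiting for the other. The fix hoists get_online_cpus() ahead of mutex_lock(&policy_mutex) for the tunables that trigger a recount, defers put_online_cpus() until the done: path, and turns the !sched_enable_hmp test into an early return so neither lock is taken when HMP is disabled.

Below is a minimal userspace sketch of the ordering rule the patch enforces; it is an illustration, not the kernel code. pthread mutexes stand in for cpu_hotplug.lock and policy_mutex, and the function names hotplug_path and sysctl_path are invented for the example.

/*
 * Userspace analogue of the ABBA inversion removed by this patch.
 * hotplug_lock stands in for cpu_hotplug.lock (get_online_cpus())
 * and policy_lock for policy_mutex; names here are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t policy_lock  = PTHREAD_MUTEX_INITIALIZER;

/* cpu_up() path: hotplug lock is already held, then the policy mutex. */
static void *hotplug_path(void *arg)
{
	pthread_mutex_lock(&hotplug_lock);	/* A */
	pthread_mutex_lock(&policy_lock);	/* then B */
	puts("hotplug path: A -> B");
	pthread_mutex_unlock(&policy_lock);
	pthread_mutex_unlock(&hotplug_lock);
	return NULL;
}

/*
 * Patched sysctl path: take the hotplug lock first, matching the order
 * above. The pre-patch code took policy_lock first and hotplug_lock
 * inside it (B -> A), which can deadlock against hotplug_path().
 */
static void *sysctl_path(void *arg)
{
	pthread_mutex_lock(&hotplug_lock);	/* A: was get_online_cpus() */
	pthread_mutex_lock(&policy_lock);	/* then B: policy_mutex */
	puts("sysctl path:  A -> B");
	pthread_mutex_unlock(&policy_lock);
	pthread_mutex_unlock(&hotplug_lock);	/* release in reverse order */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, hotplug_path, NULL);
	pthread_create(&t2, NULL, sysctl_path, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Because both threads acquire the locks in the same A -> B order, neither can hold B while waiting for A; swapping the two lock calls in sysctl_path() reproduces the pre-patch inversion and can hang the pair.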