Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d35fc8ec authored by Joel Fernandes (Google)'s avatar Joel Fernandes (Google) Committed by Lingutla Chandrasekhar
Browse files

ANDROID: sched/fair: Fix incorrect usage of RCU in CPU select path



The RCU read lock needs to be acquired before accessing schedtune; if not,
it's possible we may access stale/corrupted data. Move up the
rcu_read_lock to fix the issue.

Issue found with lockdep, which fires a warning on boot:
[    0.132198]
<kernel_src>include/linux/cgroup.h:455
suspicious rcu_dereference_check() usage!
[    0.132203] "other info that might help us debug this:"
[    0.132211] rcu_scheduler_active = 1, debug_locks = 0
[    0.132219] 3 locks held by swapper/0/1:
[    0.132225] #0:  (cpu_hotplug.dep_map){.+.+.+}, at: [<ffffff80080a7ee4>]
[    0.132261] #1:  (smpboot_threads_lock){+.+.+.}, at: [<ffffff80080d5f04>]
[    0.132282] #2:  (&p->pi_lock){......}, at: [<ffffff80080e05fc>] try_to_wake_up+0x4c/0x598
[    0.132298] stack backtrace:
[    0.132307] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.9.84-g6cedde4aedec
[    0.132314] Hardware name: MSM sdm845 C1 EVT v1.0 (DT)
[    0.132322] Call trace:
[    0.132337] [<ffffff8008089678>] dump_backtrace+0x0/0x3c0
[    0.132344] [<ffffff800808966c>] show_stack+0x20/0x2c
[    0.132357] [<ffffff800847fbf0>] dump_stack+0xdc/0x12c
[    0.132367] [<ffffff800812261c>] lockdep_rcu_suspicious+0x13c/0x154
[    0.132377] [<ffffff80081148e4>] task_sched_boost+0x88/0xa0
[    0.132386] [<ffffff80080f738c>] select_energy_cpu_brute+0x284/0x23a4
[    0.132390] [<ffffff80080f4bdc>] select_task_rq_fair+0x390/0x1434
[    0.132395] [<ffffff80080e089c>] try_to_wake_up+0x2ec/0x598
[    0.132403] [<ffffff80080de1fc>] wake_up_process+0x24/0x30
[    0.132412] [<ffffff80080d0434>] __kthread_create_on_node+0xbc/0x1a8
[    0.132418] [<ffffff80080d0348>] kthread_create_on_node+0xa4/0xd4
[    0.132424] [<ffffff80080d066c>] kthread_create_on_cpu+0x40/0xe4
[    0.132432] [<ffffff80080d5cf0>] __smpboot_create_thread+0x8c/0x10c
[    0.132437] [<ffffff80080d5f50>] smpboot_register_percpu_thread_cpumask+0x90/0x14c
[    0.132450] [<ffffff8009a09744>] spawn_ksoftirqd+0x40/0x5c
[    0.132456] [<ffffff8008083900>] do_one_initcall+0x118/0x1d8
[    0.132466] [<ffffff8009a00ef0>] kernel_init_freeable+0x12c/0x290
[    0.132479] [<ffffff8009157b80>] kernel_init+0x14/0x220

Bug: 110360156
Change-Id: Ie908090225e491fde6e0c0c836e3e986b262ca85
Signed-off-by: default avatarJoel Fernandes (Google) <joel@joelfernandes.org>
[clingutla@codeaurora.org: Moved up RCU readlock to 'select_task_rq_fair' to
 protect total function.]
Signed-off-by: default avatarLingutla Chandrasekhar <clingutla@codeaurora.org>
parent 0c4a0666
Loading
Loading
Loading
Loading
+13 −13
Original line number Diff line number Diff line
@@ -7451,12 +7451,10 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
		goto out;
	}

	rcu_read_lock();

	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
	if (!sd) {
		target_cpu = prev_cpu;
		goto unlock;
		goto out;
	}

	sync_entity_load_avg(&p->se);
@@ -7466,14 +7464,14 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
				    &fbt_env);
	if (next_cpu == -1) {
		target_cpu = prev_cpu;
		goto unlock;
		goto out;
	}

	if (fbt_env.placement_boost || fbt_env.need_idle ||
			fbt_env.avoid_prev_cpu || (rtg_target &&
			!cpumask_test_cpu(prev_cpu, rtg_target))) {
		target_cpu = next_cpu;
		goto unlock;
		goto out;
	}

	/* Unconditionally prefer IDLE CPUs for boosted/prefer_idle tasks */
@@ -7481,7 +7479,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
		schedstat_inc(p->se.statistics.nr_wakeups_secb_idle_bt);
		schedstat_inc(this_rq()->eas_stats.secb_idle_bt);
		target_cpu = next_cpu;
		goto unlock;
		goto out;
	}

	target_cpu = prev_cpu;
@@ -7515,7 +7513,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
			schedstat_inc(p->se.statistics.nr_wakeups_secb_insuff_cap);
			schedstat_inc(this_rq()->eas_stats.secb_insuff_cap);
			target_cpu = next_cpu;
			goto unlock;
			goto out;
		}

		/* Check if EAS_CPU_NXT is a more energy efficient CPU */
@@ -7523,20 +7521,18 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
			schedstat_inc(p->se.statistics.nr_wakeups_secb_nrg_sav);
			schedstat_inc(this_rq()->eas_stats.secb_nrg_sav);
			target_cpu = eenv.cpu[eenv.next_idx].cpu_id;
			goto unlock;
			goto out;
		}

		schedstat_inc(p->se.statistics.nr_wakeups_secb_no_nrg_sav);
		schedstat_inc(this_rq()->eas_stats.secb_no_nrg_sav);
		target_cpu = prev_cpu;
		goto unlock;
		goto out;
	}

	schedstat_inc(p->se.statistics.nr_wakeups_secb_count);
	schedstat_inc(this_rq()->eas_stats.secb_count);

unlock:
	rcu_read_unlock();
out:
	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
			      fbt_env.need_idle, fastpath,
@@ -7572,8 +7568,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
			cpumask_test_cpu(cpu, tsk_cpus_allowed(p)));
	}

	if (energy_aware())
		return select_energy_cpu_brute(p, prev_cpu, sync);
	if (energy_aware()) {
		rcu_read_lock();
		new_cpu = select_energy_cpu_brute(p, prev_cpu, sync);
		rcu_read_unlock();
		return new_cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, tmp) {