
Commit d0ff1c04 authored by Pavankumar Kondeti

sched: Disable interrupts while holding related_thread_group_lock



There is a potential deadlock if interrupts are left enabled while the
related_thread_group_lock is held. Prevent this by disabling interrupts
across every section that takes the lock.

----------------                              --------------------
     CPU 0                                          CPU 1
----------------                              --------------------

check_for_migration()                         cgroup_file_write(p)

check_for_freq_change()                       cgroup_attach_task(p)

send_notification()                           schedtune_attach(p)

read_lock(&related_thread_group_lock)         sched_set_group_id(p)

                                              raw_spin_lock_irqsave(
                                                  &p->pi_lock, flags)

                                              write_lock_irqsave(
                                                  &related_thread_group_lock)

                                              waiting on CPU#0

raw_spin_lock_irqsave(&rq->lock, flags)

raw_spin_unlock_irqrestore(&rq->lock, flags)

--> interrupt()

----> ttwu(p)

-------> waiting for p's pi_lock on CPU#1
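
The trace reduces to a classic circular wait once the interrupt is folded
in: CPU 0 holds the read lock while its interrupt handler needs p's
pi_lock; CPU 1 holds p's pi_lock while its writer waits for the read lock
to drain. As a rough userspace analogy (not the kernel code), the sketch
below reproduces the hang: a pthread rwlock stands in for
related_thread_group_lock, a mutex stands in for p->pi_lock, and cpu0's
mutex acquisition models the ttwu() issued from interrupt context while
the read lock is still held.

/* Userspace sketch of the circular wait above -- NOT the kernel code.
 * Build: gcc -pthread deadlock.c; run under timeout(1) -- it never exits.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t group_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;

static void *cpu0(void *unused)
{
	(void)unused;
	pthread_rwlock_rdlock(&group_lock);	/* send_notification() */
	sleep(1);				/* let cpu1 grab pi_lock */
	puts("cpu0: read lock held, 'interrupt' now wants pi_lock");
	pthread_mutex_lock(&pi_lock);		/* ttwu(p): blocks forever */
	puts("cpu0: never printed");
	return NULL;
}

static void *cpu1(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&pi_lock);		/* sched_set_group_id(p) */
	sleep(1);				/* let cpu0 grab the read lock */
	puts("cpu1: pi_lock held, writer now wants group_lock");
	pthread_rwlock_wrlock(&group_lock);	/* blocks behind cpu0's reader */
	puts("cpu1: never printed");
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, cpu0, NULL);
	pthread_create(&t1, NULL, cpu1, NULL);
	pthread_join(t0, NULL);			/* neither join returns */
	pthread_join(t1, NULL);
	return 0;
}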

Change-Id: I6f0f8f742d6e1b3ff735dcbeabd54ef101329cdf
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 841264c5
+10 −7
@@ -1767,20 +1767,20 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups)
 		if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
 			return 0;
 	} else {
-		read_lock(&related_thread_group_lock);
+		read_lock_irqsave(&related_thread_group_lock, flags);
 		/*
 		 * Protect from concurrent update of rq->prev_runnable_sum and
 		 * group cpu load
 		 */
-		raw_spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock(&rq->lock);
 		if (check_groups)
 			_group_load_in_cpu(cpu_of(rq), &group_load, NULL);
 
 		new_load = rq->prev_runnable_sum + group_load;
 		new_load = freq_policy_load(rq, new_load);
 
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		read_unlock(&related_thread_group_lock);
+		raw_spin_unlock(&rq->lock);
+		read_unlock_irqrestore(&related_thread_group_lock, flags);
 
 		cur_freq = load_to_freq(rq, rq->old_busy_time);
 		freq_required = load_to_freq(rq, new_load);
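
This first hunk moves the interrupt disabling into the outer lock:
read_lock_irqsave() turns interrupts off for the whole read-side section,
so the nested rq->lock no longer needs its own irqsave and becomes a
plain raw_spin_lock(). A loose userspace analogy of that pattern
(hypothetical helper names, not kernel API; blocking signals with
pthread_sigmask() stands in for local_irq_save()):

/* Build: gcc -pthread irqsave.c */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_rwlock_t group_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

/* "read_lock_irqsave": mask signals first, then take the read lock. */
static void read_lock_sigsave(pthread_rwlock_t *lock, sigset_t *flags)
{
	sigset_t all;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, flags);   /* local_irq_save(flags) */
	pthread_rwlock_rdlock(lock);
}

/* "read_unlock_irqrestore": drop the lock, then restore the old mask. */
static void read_unlock_sigrestore(pthread_rwlock_t *lock,
				   const sigset_t *flags)
{
	pthread_rwlock_unlock(lock);
	pthread_sigmask(SIG_SETMASK, flags, NULL); /* local_irq_restore(flags) */
}

int main(void)
{
	sigset_t flags;

	read_lock_sigsave(&group_lock, &flags);
	/* "Interrupts" are already off, so the nested lock is a plain
	 * acquisition, mirroring raw_spin_lock(&rq->lock) above. */
	pthread_mutex_lock(&rq_lock);
	puts("nested critical section; no signal can preempt this thread");
	pthread_mutex_unlock(&rq_lock);
	read_unlock_sigrestore(&group_lock, &flags);
	return 0;
}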
@@ -3202,14 +3202,16 @@ void sched_get_cpus_busy(struct sched_load *busy,
 	if (unlikely(cpus == 0))
 		return;
 
+	local_irq_save(flags);
+
+	read_lock(&related_thread_group_lock);
+
 	/*
 	 * This function could be called in timer context, and the
 	 * current task may have been executing for a long time. Ensure
 	 * that the window stats are current by doing an update.
 	 */
-	read_lock(&related_thread_group_lock);
 
-	local_irq_save(flags);
 	for_each_cpu(cpu, query_cpus)
 		raw_spin_lock(&cpu_rq(cpu)->lock);
@@ -3309,10 +3311,11 @@ skip_early:
 
 	for_each_cpu(cpu, query_cpus)
 		raw_spin_unlock(&(cpu_rq(cpu))->lock);
-	local_irq_restore(flags);
 
 	read_unlock(&related_thread_group_lock);
 
+	local_irq_restore(flags);
+
 	i = 0;
 	for_each_cpu(cpu, query_cpus) {
 		rq = cpu_rq(cpu);
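
The remaining two hunks fix both ends of sched_get_cpus_busy():
interrupts now go off before the read lock is taken and come back on only
after it is dropped, leaving no window in which related_thread_group_lock
is held with interrupts enabled. A minimal sketch of the resulting lock
nesting, reusing the same userspace stand-ins as above (signal mask for
IRQ state, one mutex per "CPU" for rq->lock); everything is released in
exact reverse order of acquisition:

/* Build: gcc -pthread nesting.c */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

#define NCPU 4

static pthread_rwlock_t group_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t rq_lock[NCPU];

int main(void)
{
	sigset_t flags, all;
	int cpu;

	for (cpu = 0; cpu < NCPU; cpu++)
		pthread_mutex_init(&rq_lock[cpu], NULL);

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, &flags);   /* local_irq_save(flags) */
	pthread_rwlock_rdlock(&group_lock);    /* read_lock(&related_...) */
	for (cpu = 0; cpu < NCPU; cpu++)       /* for_each_cpu: rq->lock */
		pthread_mutex_lock(&rq_lock[cpu]);

	puts("window stats sampled with all locks held and 'interrupts' off");

	for (cpu = 0; cpu < NCPU; cpu++)       /* release rq locks first */
		pthread_mutex_unlock(&rq_lock[cpu]);
	pthread_rwlock_unlock(&group_lock);    /* read_unlock(&related_...) */
	pthread_sigmask(SIG_SETMASK, &flags, NULL); /* local_irq_restore(flags) */
	return 0;
}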