Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c9d7be1 authored by Pavankumar Kondeti
Browse files

hrtimer: Don't drop the base lock when migration during isolation



The current code drops the base lock and waits for the running
hrtimer's expiry event to be processed on the isolated CPU.
This leaves a window, where the running hrtimer can migrate
to a different CPU or even get freed. The pinned hrtimers that
are maintained in a temporary list can also get freed while
the lock is dropped. The only reason for waiting for the running
hrtimer is to make sure that this hrtimer is migrated away from
the isolated CPU. This is a problem only if this hrtimer gets
rearmed from its callback. As the possibility of this race is
very rare, it is better to have this limitation instead of fixing
the above mentioned bugs with more intrusive changes.

Change-Id: I14ba67cacb321d8b561195935592bb9979996a27
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent efc62c2e
Loading
Loading
Loading
Loading
+14 −35
Original line number Diff line number Diff line
@@ -49,7 +49,6 @@
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/delay.h>

#include <asm/uaccess.h>

@@ -1579,41 +1578,22 @@ int hrtimers_prepare_cpu(unsigned int cpu)
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
				 struct hrtimer_cpu_base *new_base,
				 unsigned int i, bool wait,
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base,
				 bool remove_pinned)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;
	struct timerqueue_head pinned;
	int is_pinned;
	struct hrtimer_clock_base *old_c_base = &old_base->clock_base[i];
	struct hrtimer_clock_base *new_c_base = &new_base->clock_base[i];
	bool is_hotplug = !cpu_online(old_base->cpu_base->cpu);

	timerqueue_init_head(&pinned);

	while ((node = timerqueue_getnext(&old_c_base->active))) {
	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		if (wait) {
			/* Ensure timers are done running before continuing */
			while (hrtimer_callback_running(timer)) {
				raw_spin_unlock(&old_base->lock);
				raw_spin_unlock(&new_base->lock);
				cpu_relax();
				/*
				 * cpu_relax may just be a barrier. Grant the
				 * run_hrtimer_list code some time to obtain
				 * the spinlock.
				 */
				udelay(1);
				raw_spin_lock(&new_base->lock);
				raw_spin_lock_nested(&old_base->lock,
							SINGLE_DEPTH_NESTING);
			}
		} else {
		if (is_hotplug)
			BUG_ON(hrtimer_callback_running(timer));
		}
		debug_deactivate(timer);

		/*
@@ -1621,7 +1601,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_c_base, HRTIMER_STATE_ENQUEUED, 0);
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);

		is_pinned = timer->state & HRTIMER_STATE_PINNED;
		if (!remove_pinned && is_pinned) {
@@ -1629,7 +1609,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
			continue;
		}

		timer->base = new_c_base;
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
@@ -1638,7 +1618,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_c_base);
		enqueue_hrtimer(timer, new_base);
	}

	/* Re-queue pinned timers for non-hotplug usecase */
@@ -1646,12 +1626,11 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
		timer = container_of(node, struct hrtimer, node);

		timerqueue_del(&pinned, &timer->node);
		enqueue_hrtimer(timer, old_c_base);
		enqueue_hrtimer(timer, old_base);
	}
}

static void
__migrate_hrtimers(unsigned int scpu, bool wait, bool remove_pinned)
static void __migrate_hrtimers(unsigned int scpu, bool remove_pinned)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	unsigned long flags;
@@ -1668,8 +1647,8 @@ __migrate_hrtimers(unsigned int scpu, bool wait, bool remove_pinned)
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(old_base, new_base, i, wait,
								remove_pinned);
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i], remove_pinned);
	}

	raw_spin_unlock(&old_base->lock);
@@ -1685,13 +1664,13 @@ int hrtimers_dead_cpu(unsigned int scpu)
	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	__migrate_hrtimers(scpu, false, true);
	__migrate_hrtimers(scpu, true);
	return 0;
}

void hrtimer_quiesce_cpu(void *cpup)
{
	__migrate_hrtimers(*(int *)cpup, true, false);
	__migrate_hrtimers(*(int *)cpup, false);
}

#endif /* CONFIG_HOTPLUG_CPU */