Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3832c3d3 authored by Pavankumar Kondeti's avatar Pavankumar Kondeti Committed by Satya Durga Srinivasu Prabhala
Browse files

hrtimer: Don't drop the base lock when migration during isolation



The current code drops the base lock and waits for the running
hrtimer's expiry event to be processed on the isolated CPU.
This leaves a window, where the running hrtimer can migrate
to a different CPU or even get freed. The pinned hrtimers that
are maintained in a temporary list can also get freed while
the lock is dropped. The only reason for waiting for the running
hrtimer is to make sure that this hrtimer is migrated away from
the isolated CPU. This is a problem only if this hrtimer gets
rearmed from its callback. As the possibility of this race is
very rare, it is better to have this limitation instead of fixing
the above mentioned bugs with more intrusive changes.

Change-Id: I49bd79ced8d593a084ded1aefed648d5f9e83199
Signed-off-by: default avatarPavankumar Kondeti <pkondeti@codeaurora.org>
[markivx: Forward port to 4.14, fix header include merge conflict]
Signed-off-by: default avatarVikram Mulukutla <markivx@codeaurora.org>
[satyap@codeaurora.org: Port to 4.19 and fix merge conflict]
Signed-off-by: default avatarSatya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 5fadc772
Loading
Loading
Loading
Loading
+14 −35
Original line number Diff line number Diff line
@@ -51,7 +51,6 @@
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>
#include <linux/delay.h>

#include <linux/uaccess.h>

@@ -1833,41 +1832,22 @@ int hrtimers_prepare_cpu(unsigned int cpu)
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
				 struct hrtimer_cpu_base *new_base,
				 unsigned int i, bool wait,
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base,
				 bool remove_pinned)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;
	struct timerqueue_head pinned;
	int is_pinned;
	struct hrtimer_clock_base *old_c_base = &old_base->clock_base[i];
	struct hrtimer_clock_base *new_c_base = &new_base->clock_base[i];
	bool is_hotplug = !cpu_online(old_base->cpu_base->cpu);

	timerqueue_init_head(&pinned);

	while ((node = timerqueue_getnext(&old_c_base->active))) {
	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		if (wait) {
			/* Ensure timers are done running before continuing */
			while (hrtimer_callback_running(timer)) {
				raw_spin_unlock(&old_base->lock);
				raw_spin_unlock(&new_base->lock);
				cpu_relax();
				/*
				 * cpu_relax may just be a barrier. Grant the
				 * run_hrtimer_list code some time to obtain
				 * the spinlock.
				 */
				udelay(1);
				raw_spin_lock(&new_base->lock);
				raw_spin_lock_nested(&old_base->lock,
							SINGLE_DEPTH_NESTING);
			}
		} else {
		if (is_hotplug)
			BUG_ON(hrtimer_callback_running(timer));
		}
		debug_deactivate(timer);

		/*
@@ -1875,7 +1855,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_c_base, HRTIMER_STATE_ENQUEUED, 0);
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);

		is_pinned = timer->state & HRTIMER_STATE_PINNED;
		if (!remove_pinned && is_pinned) {
@@ -1883,7 +1863,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
			continue;
		}

		timer->base = new_c_base;
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
@@ -1892,7 +1872,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_c_base, HRTIMER_MODE_ABS);
		enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
	}

	/* Re-queue pinned timers for non-hotplug usecase */
@@ -1900,12 +1880,11 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
		timer = container_of(node, struct hrtimer, node);

		timerqueue_del(&pinned, &timer->node);
		enqueue_hrtimer(timer, old_c_base, HRTIMER_MODE_ABS);
		enqueue_hrtimer(timer, old_base, HRTIMER_MODE_ABS);
	}
}

static void
__migrate_hrtimers(unsigned int scpu, bool wait, bool remove_pinned)
static void __migrate_hrtimers(unsigned int scpu, bool remove_pinned)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	unsigned long flags;
@@ -1928,8 +1907,8 @@ __migrate_hrtimers(unsigned int scpu, bool wait, bool remove_pinned)
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(old_base, new_base, i, wait,
								remove_pinned);
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i], remove_pinned);
	}

	/*
@@ -1952,13 +1931,13 @@ int hrtimers_dead_cpu(unsigned int scpu)
	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	__migrate_hrtimers(scpu, false, true);
	__migrate_hrtimers(scpu, true);
	return 0;
}

void hrtimer_quiesce_cpu(void *cpup)
{
	__migrate_hrtimers(*(int *)cpup, true, false);
	__migrate_hrtimers(*(int *)cpup, false);
}

#endif /* CONFIG_HOTPLUG_CPU */