
Commit c458b1d1 authored by Anna-Maria Gleixner, committed by Ingo Molnar

hrtimer: Prepare handling of hard and softirq based hrtimers



The softirq based hrtimer can utilize most of the existing hrtimer
functions, but needs to operate on a different data set.

Add an 'active_mask' parameter to the affected functions so that either the
hard or the soft bases can be selected. Fix up the existing callers and hand
in the HRTIMER_ACTIVE_HARD mask.
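
To illustrate the scheme (an editorial sketch, not kernel code): the
caller-supplied mask is ANDed against the bitmap of armed clock bases, so a
single code path can serve either the hard-irq or the softirq timers. The
macro names below mirror the ones the patch adds; the program itself and its
select_bases() helper are hypothetical.

#include <stdio.h>

/* Illustrative stand-ins for the macros this patch adds. */
#define MASK_SHIFT	4U				/* index of the first soft base */
#define ACTIVE_HARD	((1U << MASK_SHIFT) - 1)	/* 0x0f: bases 0..3 */
#define ACTIVE_SOFT	(ACTIVE_HARD << MASK_SHIFT)	/* 0xf0: bases 4..7 */

/* The same selection step the patched kernel functions perform. */
static unsigned int select_bases(unsigned int active_bases, unsigned int active_mask)
{
	return active_bases & active_mask;
}

int main(void)
{
	unsigned int active_bases = 0x13;	/* bases 0, 1 (hard) and 4 (soft) armed */

	printf("hard walk sees 0x%02x\n", select_bases(active_bases, ACTIVE_HARD)); /* 0x03 */
	printf("soft walk sees 0x%02x\n", select_bases(active_bases, ACTIVE_SOFT)); /* 0x10 */
	return 0;
}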

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: keescook@chromium.org
Link: http://lkml.kernel.org/r/20171221104205.7269-28-anna-maria@linutronix.de


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 98ecadd4
@@ -59,6 +59,15 @@
 
 #include "tick-internal.h"
 
+/*
+ * Masks for selecting the soft and hard context timers from
+ * cpu_base->active
+ */
+#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
+#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
+#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
 /*
  * The timer bases:
  *
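
Assuming the base enumeration from the parent commit, where the four soft
bases follow the four hard ones so that HRTIMER_BASE_MONOTONIC_SOFT == 4,
the masks work out as checked below. This compile-time sketch is editorial,
not part of the patch; the names are stand-ins.

/* Stand-in: HRTIMER_BASE_MONOTONIC_SOFT == 4 after the parent commit. */
enum { MONOTONIC_SOFT = 4 };

#define SHIFT	(MONOTONIC_SOFT)
#define HARD	((1U << SHIFT) - 1)	/* (1 << 4) - 1 = 0x0f, bases 0..3 */
#define SOFT	(HARD << SHIFT)		/* 0x0f << 4    = 0xf0, bases 4..7 */
#define ALL	(SOFT | HARD)		/* 0xf0 | 0x0f  = 0xff, all bases  */

_Static_assert(HARD == 0x0fU && SOFT == 0xf0U && ALL == 0xffU, "mask layout");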
@@ -507,13 +516,24 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
 	return expires_next;
 }
 
-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+/*
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+ * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
+ *
+ * @active_mask must be one of:
+ *  - HRTIMER_ACTIVE_ALL,
+ *  - HRTIMER_ACTIVE_SOFT, or
+ *  - HRTIMER_ACTIVE_HARD.
+ */
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
+					unsigned int active_mask)
 {
-	unsigned int active = cpu_base->active_bases;
+	unsigned int active;
 	ktime_t expires_next = KTIME_MAX;
 
 	cpu_base->next_timer = NULL;
+
+	active = cpu_base->active_bases & active_mask;
 	expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
 
 	return expires_next;
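
A standalone model of the reworked scan (illustrative only; the kernel walks
timerqueue nodes per base rather than a flat array of expiry times): only
bases whose bit survives 'active_bases & active_mask' can contribute to the
earliest expiry. The next_event() helper is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX	INT64_MAX

/* Earliest expiry among the bases selected by active_mask. */
static int64_t next_event(const int64_t expires[8], unsigned int active_bases,
			  unsigned int active_mask)
{
	unsigned int active = active_bases & active_mask;
	int64_t expires_next = KTIME_MAX;

	for (unsigned int i = 0; i < 8; i++)
		if ((active & (1U << i)) && expires[i] < expires_next)
			expires_next = expires[i];
	return expires_next;
}

int main(void)
{
	int64_t expires[8];
	unsigned int armed = (1U << 0) | (1U << 4);	/* one hard, one soft timer */

	for (int i = 0; i < 8; i++)
		expires[i] = KTIME_MAX;
	expires[0] = 900;	/* hard base 0 */
	expires[4] = 300;	/* soft base 4 */

	printf("hard: %lld\n", (long long)next_event(expires, armed, 0x0fU)); /* 900 */
	printf("soft: %lld\n", (long long)next_event(expires, armed, 0xf0U)); /* 300 */
	return 0;
}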
@@ -553,7 +573,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
 	ktime_t expires_next;
 
-	expires_next = __hrtimer_get_next_event(cpu_base);
+	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 
 	if (skip_equal && expires_next == cpu_base->expires_next)
 		return;
@@ -1074,7 +1094,7 @@ u64 hrtimer_get_next_event(void)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!__hrtimer_hres_active(cpu_base))
-		expires = __hrtimer_get_next_event(cpu_base);
+		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1248,10 +1268,10 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 }
 
 static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
-				 unsigned long flags)
+				 unsigned long flags, unsigned int active_mask)
 {
 	struct hrtimer_clock_base *base;
-	unsigned int active = cpu_base->active_bases;
+	unsigned int active = cpu_base->active_bases & active_mask;
 
 	for_each_active_base(base, cpu_base, active) {
 		struct timerqueue_node *node;
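
The for_each_active_base() iterator itself needs no change; it simply
consumes whatever bitmap it is handed. A simplified model of such a walk
follows (editorial sketch; the GCC/Clang builtin __builtin_ctz stands in
for the kernel's find-first-set helpers, and run_queues() is hypothetical).

#include <stdio.h>

/* Visit each selected base, lowest index first. */
static void run_queues(unsigned int active_bases, unsigned int active_mask)
{
	unsigned int active = active_bases & active_mask;

	while (active) {
		unsigned int idx = (unsigned int)__builtin_ctz(active);

		printf("expiring timers on clock base %u\n", idx);
		active &= active - 1;	/* clear the bit just visited */
	}
}

int main(void)
{
	/* Bases 1 (hard) and 5 (soft) have queued timers. */
	run_queues((1U << 1) | (1U << 5), 0x0fU);	/* hard walk: base 1 only */
	run_queues((1U << 1) | (1U << 5), 0xf0U);	/* soft walk: base 5 only */
	return 0;
}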
@@ -1314,10 +1334,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 */
 	cpu_base->expires_next = KTIME_MAX;
 
-	__hrtimer_run_queues(cpu_base, now, flags);
+	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
 
 	/* Reevaluate the clock bases for the next expiry */
-	expires_next = __hrtimer_get_next_event(cpu_base);
+	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 	/*
 	 * Store the new expiry value so the migration code can verify
 	 * against it.
@@ -1421,7 +1441,7 @@ void hrtimer_run_queues(void)
 
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	now = hrtimer_update_base(cpu_base);
-	__hrtimer_run_queues(cpu_base, now, flags);
+	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 }
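
Note that every call site still hands in HRTIMER_ACTIVE_HARD, so this patch
changes no behaviour on its own; the softirq expiry path that passes
HRTIMER_ACTIVE_SOFT is introduced by a later patch in this series.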