Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a28dc9ed authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "timer: make deferrable cpu unbound timers really not bound to a cpu"

parents 16a4b732 a40f7521
Loading
Loading
Loading
Loading
+84 −29
Original line number Diff line number Diff line
@@ -91,6 +91,9 @@ struct tvec_base {
struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
#ifdef CONFIG_SMP
static struct tvec_base *tvec_base_deferral = &boot_tvec_bases;
#endif

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
@@ -622,7 +625,14 @@ static inline void debug_assert_init(struct timer_list *timer)
static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
	struct tvec_base *base;

#ifdef CONFIG_SMP
	if (flags & TIMER_DEFERRABLE)
		base = tvec_base_deferral;
	else
#endif
		base = __raw_get_cpu_var(tvec_bases);

	timer->entry.next = NULL;
	timer->base = (void *)((unsigned long)base | flags);
@@ -740,6 +750,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,

	debug_activate(timer, expires);

#ifdef CONFIG_SMP
	if (base != tvec_base_deferral) {
#endif
		 cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
@@ -751,10 +764,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
		if (base != new_base) {
			/*
			 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler yet has not finished. This also guarantees that
		 * the timer is serialized wrt itself.
			 * However we can't change timer's base while it is
			 * running, otherwise del_timer_sync() can't detect that
			 * the timer's handler yet has not finished. This also
			 * guarantees that the timer is serialized wrt itself.
			 */
			if (likely(base->running_timer != timer)) {
				/* See the comment in lock_timer_base() */
@@ -765,6 +778,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
				timer_set_base(timer, base);
			}
		}
#ifdef CONFIG_SMP
	}
#endif

	timer->expires = expires;
	internal_add_timer(base, timer);
@@ -1138,15 +1154,20 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 * @try: try and just return if base's lock already acquired.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
static inline void __run_timers(struct tvec_base *base, bool try)
{
	struct timer_list *timer;

	if (!try)
		spin_lock_irq(&base->lock);
	else if (!spin_trylock_irq(&base->lock))
		return;

	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
@@ -1373,8 +1394,17 @@ static void run_timer_softirq(struct softirq_action *h)

	hrtimer_run_pending();

#ifdef CONFIG_SMP
	if (time_after_eq(jiffies, tvec_base_deferral->timer_jiffies))
		/*
		 * if other cpu is handling cpu unbound deferrable timer base,
		 * current cpu doesn't need to handle it so pass try=true.
		 */
		__run_timers(tvec_base_deferral, true);
#endif

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
		__run_timers(base, false);
}

/*
@@ -1510,7 +1540,7 @@ static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];
	static char __cpuinitdata tvec_base_done[NR_CPUS + 1];

	if (!tvec_base_done[cpu]) {
		static char boot_done;
@@ -1519,9 +1549,14 @@ static int __cpuinit init_timers_cpu(int cpu)
			/*
			 * The APs use this path later in boot
			 */
			if (cpu != NR_CPUS)
				base = kmalloc_node(sizeof(*base),
						    GFP_KERNEL | __GFP_ZERO,
						    cpu_to_node(cpu));
			else
				base = kmalloc(sizeof(*base),
					       GFP_KERNEL | __GFP_ZERO);

			if (!base)
				return -ENOMEM;

@@ -1531,7 +1566,12 @@ static int __cpuinit init_timers_cpu(int cpu)
				kfree(base);
				return -ENOMEM;
			}
			if (cpu != NR_CPUS)
				per_cpu(tvec_bases, cpu) = base;
#ifdef CONFIG_SMP
			else
				tvec_base_deferral = base;
#endif
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
@@ -1545,7 +1585,12 @@ static int __cpuinit init_timers_cpu(int cpu)
		spin_lock_init(&base->lock);
		tvec_base_done[cpu] = 1;
	} else {
		if (cpu != NR_CPUS)
			base = per_cpu(tvec_bases, cpu);
#ifdef CONFIG_SMP
		else
			base = tvec_base_deferral;
#endif
	}


@@ -1653,6 +1698,16 @@ void __init init_timers(void)
	init_timer_stats();

	BUG_ON(err != NOTIFY_OK);

#ifdef CONFIG_SMP
	/*
	 * initialize cpu unbound deferrable timer base only when CONFIG_SMP.
	 * UP kernel handles the timers with cpu 0 timer base.
	 */
	err = init_timers_cpu(NR_CPUS);
	BUG_ON(err);
#endif

	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}