Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 47dcd356 authored by Olof Johansson's avatar Olof Johansson
Browse files

Merge tag 'remove-local-timers' of...

Merge tag 'remove-local-timers' of git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm into next/cleanup

From Stephen Boyd:

Now that we have a generic arch hook for broadcast we can remove the
local timer API entirely. Doing so will reduce code in ARM core, reduce
the architecture dependencies of our timer drivers, and simplify the code
because we no longer go through an architecture layer that is essentially
a hotplug notifier.

* tag 'remove-local-timers' of git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm:
  ARM: smp: Remove local timer API
  clocksource: time-armada-370-xp: Divorce from local timer API
  clocksource: time-armada-370-xp: Fix sparse warning
  ARM: msm: Divorce msm_timer from local timer API
  ARM: PRIMA2: Divorce timer-marco from local timer API
  ARM: EXYNOS4: Divorce mct from local timer API
  ARM: OMAP2+: Divorce from local timer API
  ARM: smp_twd: Divorce smp_twd from local timer API
  ARM: smp: Remove duplicate dummy timer implementation

Resolved a large number of conflicts due to __cpuinit cleanups, etc.

Signed-off-by: Olof Johansson <olof@lixom.net>
parents 3b2f64d0 060fd304
Loading
Loading
Loading
Loading
+1 −11
Original line number Diff line number Diff line
@@ -645,7 +645,7 @@ config ARCH_SHMOBILE
	select CLKDEV_LOOKUP
	select GENERIC_CLOCKEVENTS
	select HAVE_ARM_SCU if SMP
	select HAVE_ARM_TWD if LOCAL_TIMERS
	select HAVE_ARM_TWD if SMP
	select HAVE_CLK
	select HAVE_MACH_CLKDEV
	select HAVE_SMP
@@ -1584,16 +1584,6 @@ config ARM_PSCI
	  0022A ("Power State Coordination Interface System Software on
	  ARM processors").

config LOCAL_TIMERS
	bool "Use local timer interrupts"
	depends on SMP
	default y
	help
	  Enable support for local timers on SMP platforms, rather then the
	  legacy IPI broadcast method.  Local timers allows the system
	  accounting to be spread across the timer interval, preventing a
	  "thundering herd" at every timer tick.

# The GPIO number here must be sorted by descending number. In case of
# a multiplatform kernel, we just want the highest value required by the
# selected platforms.

arch/arm/include/asm/localtimer.h

deleted 100644 → 0
+0 −34
Original line number Diff line number Diff line
/*
 *  arch/arm/include/asm/localtimer.h
 *
 *  Copyright (C) 2004-2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_LOCALTIMER_H
#define __ASM_ARM_LOCALTIMER_H

#include <linux/errno.h>

/* Forward declaration only: this header passes pointers around and never
 * dereferences the struct, so the full definition is not needed here. */
struct clock_event_device;

/*
 * Hooks a per-CPU local timer driver provides.
 * setup: configure and register the clock event device for the calling CPU;
 *        returns 0 on success, non-zero to make the core fall back to the
 *        broadcast dummy timer.
 * stop:  shut the device down when its CPU is taken offline (hotplug).
 */
struct local_timer_ops {
	int  (*setup)(struct clock_event_device *);
	void (*stop)(struct clock_event_device *);
};

#ifdef CONFIG_LOCAL_TIMERS
/*
 * Register a local timer driver
 */
int local_timer_register(struct local_timer_ops *);
#else
/* Stub when local timers are compiled out: report "no such device" so
 * callers fall back to broadcast ticks. */
static inline int local_timer_register(struct local_timer_ops *ops)
{
	return -ENXIO;
}
#endif

#endif
+0 −87
Original line number Diff line number Diff line
@@ -41,7 +41,6 @@
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
@@ -146,8 +145,6 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
@@ -190,11 +187,6 @@ int __cpu_disable(void)
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
@@ -316,8 +308,6 @@ static void smp_store_cpu_info(unsigned int cpuid)
	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
@@ -372,11 +362,6 @@ asmlinkage void secondary_start_kernel(void)
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

@@ -422,12 +407,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
@@ -504,11 +483,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
@@ -516,67 +490,6 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif

/*
 * No-op set_mode hook for the dummy broadcast clock event device:
 * the device is a placeholder, so mode transitions are ignored.
 */
static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

/*
 * Configure and register @evt as a low-rating dummy clock event device.
 * Used as the fallback when no local timer driver is registered (or its
 * setup fails), so the CPU receives ticks via broadcast instead.
 */
static void broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 100;	/* low rating: any real device outranks it */
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

/* The single registered local timer driver, or NULL if none. */
static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
/*
 * Register @ops as the system's local timer driver.
 * Returns -ENXIO on non-SMP hardware or when SMP was disabled at boot
 * (setup_max_cpus == 0), -EBUSY if a driver is already registered,
 * 0 on success.
 */
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

/*
 * Set up the per-CPU clock event device for the calling CPU: try the
 * registered local timer driver first; if none is registered or its
 * setup fails, fall back to the dummy broadcast device.
 */
static void percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	/* Only a real local timer driver needs an explicit stop;
	 * the dummy broadcast device has nothing to shut down. */
	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
+43 −21
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -24,7 +25,6 @@

#include <asm/smp_plat.h>
#include <asm/smp_twd.h>
#include <asm/localtimer.h>

/* set up by the platform code */
static void __iomem *twd_base;
@@ -33,7 +33,7 @@ static struct clk *twd_clk;
static unsigned long twd_timer_rate;
static DEFINE_PER_CPU(bool, percpu_setup_called);

static struct clock_event_device __percpu **twd_evt;
static struct clock_event_device __percpu *twd_evt;
static int twd_ppi;

static void twd_set_mode(enum clock_event_mode mode,
@@ -90,8 +90,10 @@ static int twd_timer_ack(void)
	return 0;
}

static void twd_timer_stop(struct clock_event_device *clk)
static void twd_timer_stop(void)
{
	struct clock_event_device *clk = __this_cpu_ptr(twd_evt);

	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
	disable_percpu_irq(clk->irq);
}
@@ -106,7 +108,7 @@ static void twd_update_frequency(void *new_rate)
{
	twd_timer_rate = *((unsigned long *) new_rate);

	clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
	clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate);
}

static int twd_rate_change(struct notifier_block *nb,
@@ -132,7 +134,7 @@ static struct notifier_block twd_clk_nb = {

static int twd_clk_init(void)
{
	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
	if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
		return clk_notifier_register(twd_clk, &twd_clk_nb);

	return 0;
@@ -151,7 +153,7 @@ static void twd_update_frequency(void *data)
{
	twd_timer_rate = clk_get_rate(twd_clk);

	clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
	clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate);
}

static int twd_cpufreq_transition(struct notifier_block *nb,
@@ -177,7 +179,7 @@ static struct notifier_block twd_cpufreq_nb = {

static int twd_cpufreq_init(void)
{
	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
	if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
		return cpufreq_register_notifier(&twd_cpufreq_nb,
			CPUFREQ_TRANSITION_NOTIFIER);

@@ -228,7 +230,7 @@ static void twd_calibrate_rate(void)

static irqreturn_t twd_handler(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
	struct clock_event_device *evt = dev_id;

	if (twd_timer_ack()) {
		evt->event_handler(evt);
@@ -265,9 +267,9 @@ static void twd_get_clock(struct device_node *np)
/*
 * Setup the local clock events for a CPU.
 */
static int twd_timer_setup(struct clock_event_device *clk)
static void twd_timer_setup(void)
{
	struct clock_event_device **this_cpu_clk;
	struct clock_event_device *clk = __this_cpu_ptr(twd_evt);
	int cpu = smp_processor_id();

	/*
@@ -276,9 +278,9 @@ static int twd_timer_setup(struct clock_event_device *clk)
	 */
	if (per_cpu(percpu_setup_called, cpu)) {
		__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
		clockevents_register_device(*__this_cpu_ptr(twd_evt));
		clockevents_register_device(clk);
		enable_percpu_irq(clk->irq, 0);
		return 0;
		return;
	}
	per_cpu(percpu_setup_called, cpu) = true;

@@ -297,27 +299,37 @@ static int twd_timer_setup(struct clock_event_device *clk)
	clk->set_mode = twd_set_mode;
	clk->set_next_event = twd_set_next_event;
	clk->irq = twd_ppi;

	this_cpu_clk = __this_cpu_ptr(twd_evt);
	*this_cpu_clk = clk;
	clk->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(clk, twd_timer_rate,
					0xf, 0xffffffff);
	enable_percpu_irq(clk->irq, 0);
}

	return 0;
/*
 * CPU hotplug callback replacing the old local timer API hooks: bring
 * the TWD up when a CPU is starting and shut it down when a CPU is
 * dying.  Both helpers operate on this-CPU state (they use
 * __this_cpu_ptr), so the notification runs on the affected CPU.
 */
static int twd_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		twd_timer_setup();
		break;
	case CPU_DYING:
		twd_timer_stop();
		break;
	}

	return NOTIFY_OK;
}

static struct local_timer_ops twd_lt_ops = {
	.setup	= twd_timer_setup,
	.stop	= twd_timer_stop,
static struct notifier_block twd_timer_cpu_nb = {
	.notifier_call = twd_timer_cpu_notify,
};

static int __init twd_local_timer_common_register(struct device_node *np)
{
	int err;

	twd_evt = alloc_percpu(struct clock_event_device *);
	twd_evt = alloc_percpu(struct clock_event_device);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
@@ -329,12 +341,22 @@ static int __init twd_local_timer_common_register(struct device_node *np)
		goto out_free;
	}

	err = local_timer_register(&twd_lt_ops);
	err = register_cpu_notifier(&twd_timer_cpu_nb);
	if (err)
		goto out_irq;

	twd_get_clock(np);

	/*
	 * Immediately configure the timer on the boot CPU, unless we need
	 * jiffies to be incrementing to calibrate the rate in which case
	 * setup the timer in late_time_init.
	 */
	if (twd_timer_rate)
		twd_timer_setup();
	else
		late_time_init = twd_timer_setup;

	return 0;

out_irq:
+1 −1
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@ config ARCH_HIGHBANK
	select CPU_V7
	select GENERIC_CLOCKEVENTS
	select HAVE_ARM_SCU
	select HAVE_ARM_TWD if LOCAL_TIMERS
	select HAVE_ARM_TWD if SMP
	select HAVE_SMP
	select MAILBOX
	select PL320_MBOX
Loading