
Commit 5c622029 authored by Olof Johansson

Merge branch 'for_3.6/pm/coupled-cpuidle' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm into late2/pm

* 'for_3.6/pm/coupled-cpuidle' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm:
  ARM: OMAP4: CPUidle: Open broadcast clock-event device.
  ARM: OMAP4: CPUidle: add synchronization for coupled idle states
  ARM: OMAP4: CPUidle: Use coupled cpuidle states to implement SMP cpuidle.
  ARM: OMAP: timer: allow gp timer clock-event to be used on both cpus
parents 28a33cbc b93d70ae
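
Background for the diffs below: before this series, OMAP4 could only reach its deeper C-states after CPU1 had been hotplugged out. The coupled-cpuidle core (drivers/cpuidle/coupled.c) removes that restriction: a state flagged CPUIDLE_FLAG_COUPLED, on a device whose coupled_cpus mask names its partners, is entered by all coupled CPUs together, and cpuidle_coupled_parallel_barrier() lets the enter handlers rendezvous or abort as a group. A minimal sketch of that pattern, with hypothetical platform hooks (this is not the OMAP4 code, which follows below):

#include <linux/atomic.h>
#include <linux/cpuidle.h>

static atomic_t abort_barrier;

/* Runs concurrently on every CPU in dev->coupled_cpus. */
static int plat_enter_coupled(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	/* Platform-specific low power entry would go here. */

	/*
	 * Rendezvous: no CPU returns to the governor until all of
	 * them have passed this point, whether the state was hit
	 * or the attempt was aborted.
	 */
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);

	return index;
}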
arch/arm/mach-omap2/Kconfig  +1 −0
@@ -55,6 +55,7 @@ config ARCH_OMAP4
 	select PM_OPP if PM
 	select USB_ARCH_HAS_EHCI if USB_SUPPORT
 	select ARM_CPU_SUSPEND if PM
+	select ARCH_NEEDS_CPU_IDLE_COUPLED
 
 comment "OMAP Core Type"
 	depends on ARCH_OMAP2
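
Selecting ARCH_NEEDS_CPU_IDLE_COUPLED is what pulls the coupled-state core into the build; without it the coupled helpers are not compiled at all. Roughly, in the 3.6-era <linux/cpuidle.h> (treat the exact guard shown here as an assumption):

/* drivers/cpuidle/coupled.c is only built, and this helper only
 * exists, when the platform selects ARCH_NEEDS_CPU_IDLE_COUPLED. */
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#endif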
arch/arm/mach-omap2/cpuidle44xx.c  +97 −44
@@ -21,6 +21,7 @@
#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"

#ifdef CONFIG_CPU_IDLE

@@ -49,10 +50,14 @@ static struct omap4_idle_statedata omap4_idle_data[] = {
 	},
 };
 
-static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
+static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
+static struct clockdomain *cpu_clkdm[NR_CPUS];
+
+static atomic_t abort_barrier;
+static bool cpu_done[NR_CPUS];
 
 /**
- * omap4_enter_idle - Programs OMAP4 to enter the specified state
+ * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
  * @dev: cpuidle device
  * @drv: cpuidle driver
  * @index: the index of state to be entered
@@ -61,40 +66,58 @@ static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
  * specified low power state selected by the governor.
  * Returns the amount of time spent in the low power state.
  */
-static int omap4_enter_idle(struct cpuidle_device *dev,
+static int omap4_enter_idle_simple(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
 {
+	local_fiq_disable();
+	omap_do_wfi();
+	local_fiq_enable();
+
+	return index;
+}
+
+static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+			int index)
+{
 	struct omap4_idle_statedata *cx = &omap4_idle_data[index];
-	u32 cpu1_state;
 	int cpu_id = smp_processor_id();
 
 	local_fiq_disable();
 
 	/*
-	 * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state.
+	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
 	 * This is necessary to honour hardware recommondation
 	 * of triggeing all the possible low power modes once CPU1 is
 	 * out of coherency and in OFF mode.
-	 * Update dev->last_state so that governor stats reflects right
-	 * data.
 	 */
-	cpu1_state = pwrdm_read_pwrst(cpu1_pd);
-	if (cpu1_state != PWRDM_POWER_OFF) {
-		index = drv->safe_state_index;
-		cx = &omap4_idle_data[index];
+	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
+			cpu_relax();
+
+			/*
+			 * CPU1 could have already entered & exited idle
+			 * without hitting off because of a wakeup
+			 * or a failed attempt to hit off mode.  Check for
+			 * that here, otherwise we could spin forever
+			 * waiting for CPU1 off.
+			 */
+			if (cpu_done[1])
+			    goto fail;
+
+		}
 	}
 
-	if (index > 0)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
 
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP and per CPU interrupt context is saved.
 	 */
-	if (cx->cpu_state == PWRDM_POWER_OFF)
-		cpu_pm_enter();
+	cpu_pm_enter();
 
-	pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-	omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+	if (dev->cpu == 0) {
+		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

@@ -105,15 +128,21 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
-	if ((cx->mpu_state == PWRDM_POWER_RET) &&
-		(cx->mpu_logic_state == PWRDM_POWER_OFF))
-			cpu_cluster_pm_enter();
+		if ((cx->mpu_state == PWRDM_POWER_RET) &&
+			(cx->mpu_logic_state == PWRDM_POWER_OFF))
+				cpu_cluster_pm_enter();
+	}
 
 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+	cpu_done[dev->cpu] = true;
 
+	/* Wakeup CPU1 only if it is not offlined */
+	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+		clkdm_wakeup(cpu_clkdm[1]);
+		clkdm_allow_idle(cpu_clkdm[1]);
+	}
+
 	/*
 	 * Call idle CPU PM exit notifier chain to restore
-	 * VFP and per CPU IRQ context. Only CPU0 state is
-	 * considered since CPU1 is managed by CPU hotplug.
+	 * VFP and per CPU IRQ context.
 	 */
-	if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
-		cpu_pm_exit();
+	cpu_pm_exit();
 
 	/*
@@ -123,9 +152,12 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
 	if (omap4_mpuss_read_prev_context_state())
 		cpu_cluster_pm_exit();
 
-	if (index > 0)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
 
+fail:
+	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+	cpu_done[dev->cpu] = false;
+
 	local_fiq_enable();
 
 	return index;
@@ -143,7 +175,7 @@ struct cpuidle_driver omap4_idle_driver = {
 			.exit_latency = 2 + 2,
 			.target_residency = 5,
 			.flags = CPUIDLE_FLAG_TIME_VALID,
-			.enter = omap4_enter_idle,
+			.enter = omap4_enter_idle_simple,
 			.name = "C1",
 			.desc = "MPUSS ON"
 		},
@@ -151,8 +183,8 @@ struct cpuidle_driver omap4_idle_driver = {
 			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
 			.exit_latency = 328 + 440,
 			.target_residency = 960,
-			.flags = CPUIDLE_FLAG_TIME_VALID,
-			.enter = omap4_enter_idle,
+			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+			.enter = omap4_enter_idle_coupled,
 			.name = "C2",
 			.desc = "MPUSS CSWR",
 		},
@@ -160,8 +192,8 @@ struct cpuidle_driver omap4_idle_driver = {
 			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
 			.exit_latency = 460 + 518,
 			.target_residency = 1100,
-			.flags = CPUIDLE_FLAG_TIME_VALID,
-			.enter = omap4_enter_idle,
+			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+			.enter = omap4_enter_idle_coupled,
 			.name = "C3",
 			.desc = "MPUSS OSWR",
 		},
@@ -170,6 +202,16 @@ struct cpuidle_driver omap4_idle_driver = {
 	.safe_state_index = 0,
 };
 
+/*
+ * For each cpu, setup the broadcast timer because local timers
+ * stops for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+	int cpu = smp_processor_id();
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
 /**
  * omap4_idle_init - Init routine for OMAP4 idle
  *
@@ -182,20 +224,31 @@ int __init omap4_idle_init(void)
 	unsigned int cpu_id = 0;
 
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
-	cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
-	cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
-	if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
+	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
+	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
+	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
 		return -ENODEV;
 
+	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
+	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
+	if (!cpu_clkdm[0] || !cpu_clkdm[1])
+		return -ENODEV;
+
+	/* Configure the broadcast timer on each cpu */
+	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
-	dev = &per_cpu(omap4_idle_dev, cpu_id);
-	dev->cpu = cpu_id;
+	for_each_cpu(cpu_id, cpu_online_mask) {
+		dev = &per_cpu(omap4_idle_dev, cpu_id);
+		dev->cpu = cpu_id;
+		dev->coupled_cpus = *cpu_online_mask;
 
-	cpuidle_register_driver(&omap4_idle_driver);
+		cpuidle_register_driver(&omap4_idle_driver);
 
-	if (cpuidle_register_device(dev)) {
-		pr_err("%s: CPUidle register device failed\n", __func__);
-		return -EIO;
+		if (cpuidle_register_device(dev)) {
+			pr_err("%s: CPUidle register failed\n", __func__);
+			return -EIO;
+		}
 	}
 
 	return 0;
 }
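
The ordering rule encoded above, that CPU0 may not start the MPUSS transition until CPU1's power domain reads OFF, is handled with a spin-wait that has an escape hatch: if CPU1 already entered and exited idle without reaching OFF, cpu_done[1] breaks the loop so CPU0 cannot spin forever. Distilled into a standalone sketch, where partner_pwrst() and partner_done are hypothetical stand-ins for pwrdm_read_pwrst(cpu_pd[1]) and cpu_done[1]:

#include <asm/processor.h>	/* cpu_relax() */
#include <linux/types.h>

#define PWR_OFF	0

static bool partner_done;	/* set once the partner finishes or gives up */

static int partner_pwrst(void);	/* hypothetical: read partner power state */

/* Wait for the partner CPU to reach OFF, but bail out if it already
 * came and went; otherwise this loop could spin forever. */
static int wait_for_partner_off(void)
{
	while (partner_pwrst() != PWR_OFF) {
		cpu_relax();
		if (partner_done)
			return -EAGAIN;	/* caller falls back to a shallow state */
	}
	return 0;
}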
arch/arm/mach-omap2/timer.c  +3 −1
@@ -135,6 +135,7 @@ static struct clock_event_device clockevent_gpt = {
 	.name		= "gp_timer",
 	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
+	.rating		= 300,
 	.set_next_event	= omap2_gp_timer_set_next_event,
 	.set_mode	= omap2_gp_timer_set_mode,
 };
@@ -228,7 +229,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
 		clockevent_delta2ns(3, &clockevent_gpt);
 		/* Timer internal resynch latency. */
 
-	clockevent_gpt.cpumask = cpumask_of(0);
+	clockevent_gpt.cpumask = cpu_possible_mask;
+	clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev);
 	clockevents_register_device(&clockevent_gpt);
 
 	pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n",