
Commit 4c89aad9 authored by Tony Lindgren

Merge branch 'for_3.3/pm/omap4-mpuss' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm into omap4
parents deee6d53 ff819da4

arch/arm/mach-omap2/Kconfig +21 −0
@@ -353,6 +353,27 @@ config OMAP3_SDRC_AC_TIMING
 	  wish to say no.  Selecting yes without understanding what is
 	  going on could result in system crashes;
 
+config OMAP4_ERRATA_I688
+	bool "OMAP4 errata: Async Bridge Corruption"
+	depends on ARCH_OMAP4
+	select ARCH_HAS_BARRIERS
+	help
+	  If a data is stalled inside asynchronous bridge because of back
+	  pressure, it may be accepted multiple times, creating pointer
+	  misalignment that will corrupt next transfers on that data path
+	  until next reset of the system (No recovery procedure once the
+	  issue is hit, the path remains consistently broken). Async bridge
+	  can be found on path between MPU to EMIF and MPU to L3 interconnect.
+	  This situation can happen only when the idle is initiated by a
+	  Master Request Disconnection (which is trigged by software when
+	  executing WFI on CPU).
+	  The work-around for this errata needs all the initiators connected
+	  through async bridge must ensure that data path is properly drained
+	  before issuing WFI. This condition will be met if one Strongly ordered
+	  access is performed to the target right before executing the WFI.
+	  In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained.
+	  IO barrier ensure that there is no synchronisation loss on initiators
+	  operating on both interconnect port simultaneously.
 endmenu
 
 endif
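
The help text above boils down to one software rule: issue a single strongly-ordered access to the affected target immediately before WFI so the MPU-side T2ASYNC FIFOs are drained before the Master Request Disconnection. The sketch below only illustrates that pattern; the names io_so_mem, omap_io_drain() and omap_wfi_errata_i688() are made up for this example, and it assumes io_so_mem was already ioremapped with strongly-ordered attributes. It is not how this branch implements the workaround.

/*
 * Illustrative sketch of the errata i688 rule, not code from this merge.
 * Assumes io_so_mem points at a strongly-ordered mapping of a location
 * behind the MPU->EMIF / MPU->L3 async bridges.
 */
#include <linux/io.h>

static void __iomem *io_so_mem;		/* assumed SO mapping of the target */

static inline void omap_io_drain(void)
{
	/* One strongly-ordered access drains the L3/DDR T2ASYNC FIFOs */
	writel_relaxed(0x0, io_so_mem);
}

static inline void omap_wfi_errata_i688(void)
{
	omap_io_drain();
	/* Only now is the WFI-triggered Master Request Disconnection safe */
	__asm__ __volatile__ ("wfi" : : : "memory");
}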

arch/arm/mach-omap2/Makefile +10 −6
@@ -11,10 +11,11 @@ hwmod-common = omap_hwmod.o \
 					  omap_hwmod_common_data.o
 clock-common				= clock.o clock_common_data.o \
 					  clkt_dpll.o clkt_clksel.o
+secure-common                          = omap-smc.o omap-secure.o
 
-obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
-obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common)
-obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common)
+obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common)
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
@@ -24,11 +25,13 @@ obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
 obj-$(CONFIG_SMP)			+= omap-smp.o omap-headsmp.o
 obj-$(CONFIG_LOCAL_TIMERS)		+= timer-mpu.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= omap-hotplug.o
-obj-$(CONFIG_ARCH_OMAP4)		+= omap44xx-smc.o omap4-common.o
+obj-$(CONFIG_ARCH_OMAP4)		+= omap4-common.o omap-wakeupgen.o \
+					   sleep44xx.o
 
 plus_sec := $(call as-instr,.arch_extension sec,+sec)
 AFLAGS_omap-headsmp.o			:=-Wa,-march=armv7-a$(plus_sec)
-AFLAGS_omap44xx-smc.o			:=-Wa,-march=armv7-a$(plus_sec)
+AFLAGS_omap-smc.o			:=-Wa,-march=armv7-a$(plus_sec)
+AFLAGS_sleep44xx.o			:=-Wa,-march=armv7-a$(plus_sec)
 
 # Functions loaded to SRAM
 obj-$(CONFIG_SOC_OMAP2420)		+= sram242x.o
@@ -62,7 +65,8 @@ obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
 obj-$(CONFIG_ARCH_OMAP2)		+= sleep24xx.o
 obj-$(CONFIG_ARCH_OMAP3)		+= pm34xx.o sleep34xx.o \
 					   cpuidle34xx.o
-obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o omap-mpuss-lowpower.o \
+					   cpuidle44xx.o
 obj-$(CONFIG_PM_DEBUG)			+= pm-debug.o
 obj-$(CONFIG_OMAP_SMARTREFLEX)          += sr_device.o smartreflex.o
 obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3)	+= smartreflex-class3.o

arch/arm/mach-omap2/common.h +53 −11
@@ -24,9 +24,11 @@
 
 #ifndef __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
 #define __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
+#ifndef __ASSEMBLER__
 
 #include <linux/delay.h>
 #include <plat/common.h>
+#include <asm/proc-fns.h>
 
 #ifdef CONFIG_SOC_OMAP2420
 extern void omap242x_map_common_io(void);
@@ -156,23 +158,23 @@ void omap3_intc_resume_idle(void);
 void omap2_intc_handle_irq(struct pt_regs *regs);
 void omap3_intc_handle_irq(struct pt_regs *regs);
 
-/*
- * wfi used in low power code. Directly opcode is used instead
- * of instruction to avoid mulit-omap build break
- */
-#ifdef CONFIG_THUMB2_KERNEL
-#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory")
-#else
-#define do_wfi()			\
-		__asm__ __volatile__ (".word	0xe320f003" : : : "memory")
+#ifdef CONFIG_CACHE_L2X0
+extern void __iomem *omap4_get_l2cache_base(void);
 #endif
 
-#ifdef CONFIG_CACHE_L2X0
-extern void __iomem *l2cache_base;
+#ifdef CONFIG_SMP
+extern void __iomem *omap4_get_scu_base(void);
+#else
+static inline void __iomem *omap4_get_scu_base(void)
+{
+	return NULL;
+}
 #endif
 
 extern void __init gic_init_irq(void);
 extern void omap_smc1(u32 fn, u32 arg);
+extern void __iomem *omap4_get_sar_ram_base(void);
+extern void omap_do_wfi(void);
 
 #ifdef CONFIG_SMP
 /* Needed for secondary core boot */
@@ -182,4 +184,44 @@ extern void omap_auxcoreboot_addr(u32 cpu_addr);
 extern u32 omap_read_auxcoreboot0(void);
 #endif
 
+#if defined(CONFIG_SMP) && defined(CONFIG_PM)
+extern int omap4_mpuss_init(void);
+extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
+extern int omap4_finish_suspend(unsigned long cpu_state);
+extern void omap4_cpu_resume(void);
+extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_mpuss_read_prev_context_state(void);
+#else
+static inline int omap4_enter_lowpower(unsigned int cpu,
+					unsigned int power_state)
+{
+	cpu_do_idle();
+	return 0;
+}
+
+static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
+{
+	cpu_do_idle();
+	return 0;
+}
+
+static inline int omap4_mpuss_init(void)
+{
+	return 0;
+}
+
+static inline int omap4_finish_suspend(unsigned long cpu_state)
+{
+	return 0;
+}
+
+static inline void omap4_cpu_resume(void)
+{}
+
+static inline u32 omap4_mpuss_read_prev_context_state(void)
+{
+	return 0;
+}
+#endif
+#endif /* __ASSEMBLER__ */
 #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
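
The stub block in the last hunk exists so callers never need #ifdef CONFIG_SMP / CONFIG_PM guards: on a non-SMP or non-PM build the inline fallbacks still idle the CPU through cpu_do_idle(). A hypothetical caller is sketched below, assuming powerdomain.h provides PWRDM_POWER_OFF; the function name is illustrative and not part of this merge.

#include "common.h"
#include "powerdomain.h"	/* assumed, for PWRDM_POWER_OFF */

static void example_cpu_idle_path(unsigned int cpu)
{
	/*
	 * With CONFIG_SMP && CONFIG_PM this enters the MPUSS low-power
	 * state machine and may power the CPU domain off; otherwise the
	 * inline stub above simply executes WFI via cpu_do_idle().
	 */
	omap4_enter_lowpower(cpu, PWRDM_POWER_OFF);
}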

arch/arm/mach-omap2/cpuidle34xx.c +15 −0
@@ -25,6 +25,7 @@
 #include <linux/sched.h>
 #include <linux/cpuidle.h>
 #include <linux/export.h>
+#include <linux/cpu_pm.h>
 
 #include <plat/prcm.h>
 #include <plat/irqs.h>
@@ -124,9 +125,23 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
 		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
 	}
 
+	/*
+	 * Call idle CPU PM enter notifier chain so that
+	 * VFP context is saved.
+	 */
+	if (mpu_state == PWRDM_POWER_OFF)
+		cpu_pm_enter();
+
 	/* Execute ARM wfi */
 	omap_sram_idle();
 
+	/*
+	 * Call idle CPU PM enter notifier chain to restore
+	 * VFP context.
+	 */
+	if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
+		cpu_pm_exit();
+
 	/* Re-allow idle for C1 */
 	if (index == 0) {
 		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
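
cpu_pm_enter() and cpu_pm_exit() only walk a notifier chain; the actual saving and restoring is done by whoever registered on it (the VFP code is the consumer the new comments refer to). A minimal consumer sketch follows, with placeholder comments where a real driver would save and restore its per-CPU context; the example_* names are invented for this illustration.

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		/* placeholder: save per-CPU register context here */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* placeholder: restore per-CPU register context here */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

static int __init example_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&example_cpu_pm_nb);
}
core_initcall(example_cpu_pm_init);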

arch/arm/mach-omap2/cpuidle44xx.c (new file) +245 −0
/*
 * OMAP4 CPU idle Routines
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/clockchips.h>

#include <asm/proc-fns.h>

#include "common.h"
#include "pm.h"
#include "prm.h"

#ifdef CONFIG_CPU_IDLE

/* Machine specific information to be recorded in the C-state driver_data */
struct omap4_idle_statedata {
	u32 cpu_state;
	u32 mpu_logic_state;
	u32 mpu_state;
	u8 valid;
};

static struct cpuidle_params cpuidle_params_table[] = {
	/* C1 - CPU0 ON + CPU1 ON + MPU ON */
	{.exit_latency = 2 + 2 , .target_residency = 5, .valid = 1},
	/* C2- CPU0 OFF + CPU1 OFF + MPU CSWR */
	{.exit_latency = 328 + 440 , .target_residency = 960, .valid = 1},
	/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
	{.exit_latency = 460 + 518 , .target_residency = 1100, .valid = 1},
};

#define OMAP4_NUM_STATES ARRAY_SIZE(cpuidle_params_table)

struct omap4_idle_statedata omap4_idle_data[OMAP4_NUM_STATES];
static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;

/**
 * omap4_enter_idle - Programs OMAP4 to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the amount of time spent in the low power state.
 */
static int omap4_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct omap4_idle_statedata *cx =
			cpuidle_get_statedata(&dev->states_usage[index]);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 cpu1_state;
	int idle_time;
	int new_state_idx;
	int cpu_id = smp_processor_id();

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	/*
	 * CPU0 has to stay ON (i.e. in C1) until CPU1 is in OFF state.
	 * This is necessary to honour the hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 * Update dev->last_state so that governor stats reflects right
	 * data.
	 */
	cpu1_state = pwrdm_read_pwrst(cpu1_pd);
	if (cpu1_state != PWRDM_POWER_OFF) {
		new_state_idx = drv->safe_state_index;
		cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]);
	}

	if (index > 0)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	if (cx->cpu_state == PWRDM_POWER_OFF)
		cpu_pm_enter();

	pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
	omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

	/*
	 * Call idle CPU cluster PM enter notifier chain
	 * to save GIC and wakeupgen context.
	 */
	if ((cx->mpu_state == PWRDM_POWER_RET) &&
		(cx->mpu_logic_state == PWRDM_POWER_OFF))
			cpu_cluster_pm_enter();

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context. Only CPU0 state is
	 * considered since CPU1 is managed by CPU hotplug.
	 */
	if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
		cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (omap4_mpuss_read_prev_context_state())
		cpu_cluster_pm_exit();

	if (index > 0)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);

	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
								USEC_PER_SEC;

	/* Update cpuidle counters */
	dev->last_residency = idle_time;

	return index;
}

DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);

struct cpuidle_driver omap4_idle_driver = {
	.name =		"omap4_idle",
	.owner =	THIS_MODULE,
};

static inline void _fill_cstate(struct cpuidle_driver *drv,
					int idx, const char *descr)
{
	struct cpuidle_state *state = &drv->states[idx];

	state->exit_latency	= cpuidle_params_table[idx].exit_latency;
	state->target_residency	= cpuidle_params_table[idx].target_residency;
	state->flags		= CPUIDLE_FLAG_TIME_VALID;
	state->enter		= omap4_enter_idle;
	sprintf(state->name, "C%d", idx + 1);
	strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
}

static inline struct omap4_idle_statedata *_fill_cstate_usage(
					struct cpuidle_device *dev,
					int idx)
{
	struct omap4_idle_statedata *cx = &omap4_idle_data[idx];
	struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];

	cx->valid		= cpuidle_params_table[idx].valid;
	cpuidle_set_statedata(state_usage, cx);

	return cx;
}



/**
 * omap4_idle_init - Init routine for OMAP4 idle
 *
 * Registers the OMAP4 specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
	struct omap4_idle_statedata *cx;
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv = &omap4_idle_driver;
	unsigned int cpu_id = 0;

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
	cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
	if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
		return -ENODEV;


	drv->safe_state_index = -1;
	dev = &per_cpu(omap4_idle_dev, cpu_id);
	dev->cpu = cpu_id;

	/* C1 - CPU0 ON + CPU1 ON + MPU ON */
	_fill_cstate(drv, 0, "MPUSS ON");
	drv->safe_state_index = 0;
	cx = _fill_cstate_usage(dev, 0);
	cx->valid = 1;	/* C1 is always valid */
	cx->cpu_state = PWRDM_POWER_ON;
	cx->mpu_state = PWRDM_POWER_ON;
	cx->mpu_logic_state = PWRDM_POWER_RET;

	/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
	_fill_cstate(drv, 1, "MPUSS CSWR");
	cx = _fill_cstate_usage(dev, 1);
	cx->cpu_state = PWRDM_POWER_OFF;
	cx->mpu_state = PWRDM_POWER_RET;
	cx->mpu_logic_state = PWRDM_POWER_RET;

	/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
	_fill_cstate(drv, 2, "MPUSS OSWR");
	cx = _fill_cstate_usage(dev, 2);
	cx->cpu_state = PWRDM_POWER_OFF;
	cx->mpu_state = PWRDM_POWER_RET;
	cx->mpu_logic_state = PWRDM_POWER_OFF;

	drv->state_count = OMAP4_NUM_STATES;
	cpuidle_register_driver(&omap4_idle_driver);

	dev->state_count = OMAP4_NUM_STATES;
	if (cpuidle_register_device(dev)) {
		pr_err("%s: CPUidle register device failed\n", __func__);
		return -EIO;
	}

	return 0;
}
#else
int __init omap4_idle_init(void)
{
	return 0;
}
#endif /* CONFIG_CPU_IDLE */
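
The exit_latency and target_residency values in cpuidle_params_table are consumed by the cpuidle governor rather than by this file. The sketch below is a simplified stand-in for that selection logic, not the menu governor's real code: a state deeper than C1 is only worth entering when the predicted idle time covers its target_residency and the wakeup-latency budget covers its exit_latency. pick_deepest_state() is an invented name for illustration.

#include <linux/cpuidle.h>

/*
 * Simplified illustration of how a governor could use the numbers in
 * cpuidle_params_table above; not a real kernel function.
 */
static int pick_deepest_state(struct cpuidle_driver *drv,
			      unsigned int predicted_us,
			      unsigned int latency_req_us)
{
	int i, best = 0;	/* index 0 (C1) is always allowed */

	for (i = 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		/* Not worth the entry/exit cost for the expected idle time */
		if (s->target_residency > predicted_us)
			break;
		/* Wakeup latency would exceed the latency budget */
		if (s->exit_latency > latency_req_us)
			break;
		best = i;
	}
	return best;
}

With the table above and a relaxed latency budget, an expected idle of 1000 us would select C2 (target residency 960 us) but not C3 (1100 us).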