
Commit 5e94c6e3 authored by Santosh Shilimkar, committed by Kevin Hilman

ARM: OMAP4: PM: Add L2X0 cache lowpower support



When the MPUSS hits off-mode, the L2 cache contents are lost. This patch
adds the necessary L2X0 maintenance operations and context restoration
to the low-power code.
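
In practice the save side amounts to copying two PL310 control registers into SAR RAM, the small always-on memory that survives MPUSS off-mode. A minimal C sketch of that idea follows, using stand-in MMIO accessors in place of the kernel's __raw_readl()/__raw_writel(); the helper names and signatures here are illustrative only:

#include <stdint.h>

#define L2X0_AUX_CTRL			0x104	/* PL310 auxiliary control */
#define L2X0_PREFETCH_CTRL		0xf60	/* PL310 prefetch control */
#define L2X0_AUXCTRL_OFFSET		0xd1c	/* slots in retained SAR RAM */
#define L2X0_PREFETCH_CTRL_OFFSET	0xd20

/* Stand-in MMIO accessors (the kernel uses __raw_readl/__raw_writel). */
static inline uint32_t mmio_read(volatile uint8_t *base, uint32_t off)
{
	return *(volatile uint32_t *)(base + off);
}

static inline void mmio_write(volatile uint8_t *base, uint32_t off, uint32_t v)
{
	*(volatile uint32_t *)(base + off) = v;
}

/*
 * Stash the L2X0 control context in SAR RAM, which survives MPUSS
 * off-mode, so the wakeup path can reprogram the controller (via the
 * secure monitor) before turning the cache back on.
 */
static void save_l2x0_context(volatile uint8_t *l2x0, volatile uint8_t *sar)
{
	mmio_write(sar, L2X0_AUXCTRL_OFFSET,
		   mmio_read(l2x0, L2X0_AUX_CTRL));
	mmio_write(sar, L2X0_PREFETCH_CTRL_OFFSET,
		   mmio_read(l2x0, L2X0_PREFETCH_CTRL));
}

Saving the raw register values, rather than re-deriving them at wakeup, keeps the resume-side assembly to a handful of loads and SMC calls.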

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Jean Pihet <j-pihet@ti.com>
Reviewed-by: Kevin Hilman <khilman@ti.com>
Tested-by: Vishwanath BS <vishwanath.bs@ti.com>
Signed-off-by: Kevin Hilman <khilman@ti.com>
parent 0f3cf2ec
+5 −0
@@ -37,8 +37,13 @@
 /* Secure Monitor mode APIs */
 #define OMAP4_MON_SCU_PWR_INDEX		0x108
+#define OMAP4_MON_L2X0_DBG_CTRL_INDEX	0x100
+#define OMAP4_MON_L2X0_CTRL_INDEX	0x102
+#define OMAP4_MON_L2X0_AUXCTRL_INDEX	0x109
+#define OMAP4_MON_L2X0_PREFETCH_INDEX	0x113
 
 /* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_L2_POR_INDEX		0x23
 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX	0x25
 
 #ifndef __ASSEMBLER__
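
On OMAP4 the PL310 control registers are writable only from the secure side, so each index above names a secure monitor (or, on HS devices, PPA) service. The low-power assembly in this commit loads the index into r12 and the argument into r0, then traps into the monitor via the DO_SMC macro. A rough C rendering of that calling convention, with a hypothetical wrapper name and an illustrative clobber list:

#include <stdint.h>

#define OMAP4_MON_L2X0_CTRL_INDEX	0x102

/*
 * Hypothetical wrapper mirroring the DO_SMC convention used in the
 * low-power assembly: service index in r12, argument in r0.
 * Build with the ARM security extensions enabled (e.g. -march=armv7-a
 * plus the "sec" extension, depending on toolchain).
 */
static void __attribute__((noinline))
omap4_secure_call(uint32_t index, uint32_t arg)
{
	register uint32_t r0 asm("r0") = arg;
	register uint32_t ip asm("r12") = index;

	asm volatile(
		"dsb\n\t"	/* drain outstanding writes first */
		"smc	#0"
		: "+r" (r0)
		: "r" (ip)
		: "r1", "r2", "r3", "memory");
}

/* e.g. re-enable the PL310 through the monitor: */
/* omap4_secure_call(OMAP4_MON_L2X0_CTRL_INDEX, 1); */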
+40 −1
@@ -49,6 +49,7 @@
 #include <asm/system.h>
 #include <asm/pgalloc.h>
 #include <asm/suspend.h>
+#include <asm/hardware/cache-l2x0.h>
 
 #include <plat/omap44xx.h>
 
@@ -63,10 +64,12 @@ struct omap4_cpu_pm_info {
 	struct powerdomain *pwrdm;
 	void __iomem *scu_sar_addr;
 	void __iomem *wkup_sar_addr;
+	void __iomem *l2x0_sar_addr;
 };
 
 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
 static struct powerdomain *mpuss_pd;
+static void __iomem *sar_base;
 
 /*
  * Program the wakeup routine address for the CPU0 and CPU1
@@ -135,6 +138,36 @@ static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
 	__raw_writel(scu_pwr_st, pm_info->scu_sar_addr);
 }
 
+/*
+ * Store the CPU cluster state for L2X0 low power operations.
+ */
+static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
+{
+	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+	__raw_writel(save_state, pm_info->l2x0_sar_addr);
+}
+
+/*
+ * Save the L2X0 AUXCTRL and POR values to SAR memory. They are
+ * used in every MPUSS OFF restore path.
+ */
+#ifdef CONFIG_CACHE_L2X0
+static void save_l2x0_context(void)
+{
+	u32 val;
+	void __iomem *l2x0_base = omap4_get_l2cache_base();
+
+	val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
+	__raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
+	val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
+	__raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
+}
+#else
+static void save_l2x0_context(void)
+{}
+#endif
+
 /**
  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
  * The purpose of this function is to manage low power programming
@@ -182,6 +215,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
 	set_cpu_next_pwrst(cpu, power_state);
 	set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume));
 	scu_pwrst_prepare(cpu, power_state);
+	l2x0_pwrst_prepare(cpu, save_state);
 
 	/*
 	 * Call low level function with targeted low power state.
@@ -239,17 +273,19 @@ int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
 int __init omap4_mpuss_init(void)
 {
 	struct omap4_cpu_pm_info *pm_info;
-	void __iomem *sar_base = omap4_get_sar_ram_base();
 
 	if (omap_rev() == OMAP4430_REV_ES1_0) {
 		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
 		return -ENODEV;
 	}
 
+	sar_base = omap4_get_sar_ram_base();
+
 	/* Initialise per CPU PM information */
 	pm_info = &per_cpu(omap4_pm_info, 0x0);
 	pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
 	pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
+	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
 	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
 	if (!pm_info->pwrdm) {
 		pr_err("Lookup failed for CPU0 pwrdm\n");
@@ -265,6 +301,7 @@ int __init omap4_mpuss_init(void)
 	pm_info = &per_cpu(omap4_pm_info, 0x1);
 	pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
 	pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
 	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
 	if (!pm_info->pwrdm) {
 		pr_err("Lookup failed for CPU1 pwrdm\n");
@@ -290,6 +327,8 @@ int __init omap4_mpuss_init(void)
 	else
 		__raw_writel(0, sar_base + OMAP_TYPE_OFFSET);
 
+	save_l2x0_context();
+
 	return 0;
 }
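
The value that l2x0_pwrst_prepare() stores is the save_state computed by omap4_enter_lowpower(); the suspend assembly later compares it against 3 and only performs the full L2 clean when the whole MPUSS is headed to off-mode. The encoding used by the MPUSS low-power code of this era is roughly the following; the enum and its names are invented here for illustration:

/* Context-loss levels recorded per CPU in SAR RAM (illustrative names;
 * the kernel passes these around as plain integers). */
enum mpuss_save_state {
	MPUSS_SAVE_NOTHING	= 0,	/* nothing lost: CPUx only in WFI */
	MPUSS_SAVE_CSWR		= 1,	/* CPUx L1 and logic lost: MPUSS CSWR */
	MPUSS_SAVE_OSWR		= 2,	/* ... plus GIC lost: MPUSS OSWR */
	MPUSS_SAVE_OFF		= 3,	/* ... plus L2 lost: MPUSS OFF */
};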


+4 −0
@@ -23,6 +23,10 @@
 #define SCU_OFFSET0				0xd00
 #define SCU_OFFSET1				0xd04
 #define OMAP_TYPE_OFFSET			0xd10
+#define L2X0_SAVE_OFFSET0			0xd14
+#define L2X0_SAVE_OFFSET1			0xd18
+#define L2X0_AUXCTRL_OFFSET			0xd1c
+#define L2X0_PREFETCH_CTRL_OFFSET		0xd20
 
 /* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */
 #define CPU0_WAKEUP_NS_PA_ADDR_OFFSET		0xa04
+95 −0
@@ -32,6 +32,9 @@
 ppa_zero_params:
 	.word		0x0
 
+ppa_por_params:
+	.word		1, 0
+
 /*
  * =============================
  * == CPU suspend finisher ==
@@ -132,6 +135,54 @@ skip_scu_gp_set:
 	mcrne	p15, 0, r0, c1, c0, 1
 	isb
 	dsb
+#ifdef CONFIG_CACHE_L2X0
+	/*
+	 * Clean and invalidate the L2 cache.
+	 * The common cache-l2x0.c functions can't be used here since
+	 * they take spinlocks. We are out of coherency here with the
+	 * data cache disabled, and the spinlock implementation uses
+	 * exclusive load/store instructions, which can fail without
+	 * the data cache enabled. OMAP4 hardware doesn't implement an
+	 * external exclusive monitor to overcome this, so taking the
+	 * lock here could deadlock the CPU.
+	 */
+	bl	omap4_get_sar_ram_base
+	mov	r8, r0
+	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
+	ands	r5, r5, #0x0f
+	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
+	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
+	cmp	r0, #3
+	bne	do_WFI
+#ifdef CONFIG_PL310_ERRATA_727915
+	mov	r0, #0x03
+	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
+	DO_SMC
+#endif
+	bl	omap4_get_l2cache_base
+	mov	r2, r0
+	ldr	r0, =0xffff
+	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
+wait:
+	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
+	ldr	r1, =0xffff
+	ands	r0, r0, r1
+	bne	wait
+#ifdef CONFIG_PL310_ERRATA_727915
+	mov	r0, #0x00
+	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
+	DO_SMC
+#endif
+l2x_sync:
+	bl	omap4_get_l2cache_base
+	mov	r2, r0
+	mov	r0, #0x0
+	str	r0, [r2, #L2X0_CACHE_SYNC]
+sync:
+	ldr	r0, [r2, #L2X0_CACHE_SYNC]
+	ands	r0, r0, #0x1
+	bne	sync
+#endif
 
 do_WFI:
 	bl	omap_do_wfi
@@ -225,6 +276,50 @@ enable_smp_bit:
 	mcreq	p15, 0, r0, c1, c0, 1
 	isb
 skip_ns_smp_enable:
+#ifdef CONFIG_CACHE_L2X0
+	/*
+	 * Restore the L2 AUXCTRL and enable the L2 cache.
+	 * OMAP4_MON_L2X0_AUXCTRL_INDEX: program the L2X0 AUXCTRL.
+	 * OMAP4_MON_L2X0_CTRL_INDEX: enable the L2 using the L2X0 CTRL
+	 * register; r0 contains the value to be programmed.
+	 * The L2 cache has already been invalidated by ROM code as
+	 * part of the MPUSS OFF wakeup path.
+	 */
+	ldr	r2, =OMAP44XX_L2CACHE_BASE
+	ldr	r0, [r2, #L2X0_CTRL]
+	and	r0, #0x0f
+	cmp	r0, #1
+	beq	skip_l2en			@ Skip if already enabled
+	ldr	r3, =OMAP44XX_SAR_RAM_BASE
+	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
+	cmp	r1, #0x1			@ Check for HS device
+	bne	set_gp_por
+	ldr	r0, =OMAP4_PPA_L2_POR_INDEX
+	ldr	r1, =OMAP44XX_SAR_RAM_BASE
+	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
+	adr	r3, ppa_por_params
+	str	r4, [r3, #0x04]
+	mov	r1, #0x0			@ Process ID
+	mov	r2, #0x4			@ Flag
+	mov	r6, #0xff
+	mov	r12, #0x00			@ Secure Service ID
+	DO_SMC
+	b	set_aux_ctrl
+set_gp_por:
+	ldr	r1, =OMAP44XX_SAR_RAM_BASE
+	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
+	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
+	DO_SMC
+set_aux_ctrl:
+	ldr	r1, =OMAP44XX_SAR_RAM_BASE
+	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
+	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
+	DO_SMC
+	mov	r0, #0x1
+	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
+	DO_SMC
+skip_l2en:
+#endif
 
 	b	cpu_resume			@ Jump to generic resume
 ENDPROC(omap4_cpu_resume)
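
For reference, the clean-and-invalidate sequence that the wait:/l2x_sync:/sync: loops implement above maps onto the C sketch below, with stand-in accessor names. It is deliberately open-coded, mirroring the comment in the suspend path: the spinlocked cache-l2x0.c helpers must not be called once the data cache is off.

#include <stdint.h>

#define L2X0_CLEAN_INV_WAY	0x7fc	/* PL310 clean & invalidate by way */
#define L2X0_CACHE_SYNC		0x730	/* PL310 cache sync (drain) */

static inline uint32_t rd32(volatile uint8_t *b, uint32_t off)
{
	return *(volatile uint32_t *)(b + off);
}

static inline void wr32(volatile uint8_t *b, uint32_t off, uint32_t v)
{
	*(volatile uint32_t *)(b + off) = v;
}

/*
 * Kick a background clean & invalidate of all 16 ways, wait for the
 * way bits to clear, then issue a cache sync and wait for it to drain,
 * exactly as the assembly loops above do.
 */
static void l2x0_clean_inv_all_nolock(volatile uint8_t *l2x0)
{
	wr32(l2x0, L2X0_CLEAN_INV_WAY, 0xffff);
	while (rd32(l2x0, L2X0_CLEAN_INV_WAY) & 0xffff)
		;	/* a set bit means that way is still busy */

	wr32(l2x0, L2X0_CACHE_SYNC, 0);
	while (rd32(l2x0, L2X0_CACHE_SYNC) & 0x1)
		;	/* bit 0 stays set while the sync is pending */
}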