Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 13eae144 authored by Nicolas Pitre
Browse files

ARM: vexpress/dcscb: add CPU use counts to the power up/down API implementation



It is possible for a CPU to be told to power up before it managed
to power itself down.  Solve this race with a usage count to deal
with this possibility as mandated by the MCPM API definition.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Pawel Moll <pawel.moll@arm.com>
parent 1e904e1b
Loading
Loading
Loading
Loading
+59 −15
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@
static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void __iomem *dcscb_base;
static int dcscb_use_count[4][2];

static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
{
@@ -60,6 +61,8 @@ static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
	local_irq_disable();
	arch_spin_lock(&dcscb_lock);

	dcscb_use_count[cpu][cluster]++;
	if (dcscb_use_count[cpu][cluster] == 1) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		if (rst_hold & (1 << 8)) {
			/* remove cluster reset and add individual CPU's reset */
@@ -68,6 +71,17 @@ static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
		}
		rst_hold &= ~(cpumask | (cpumask << 4));
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&dcscb_lock);
	local_irq_enable();
@@ -77,7 +91,8 @@ static int dcscb_power_up(unsigned int cpu, unsigned int cluster)

static void dcscb_power_down(void)
{
	unsigned int mpidr, cpu, cluster, rst_hold, cpumask, last_man;
	unsigned int mpidr, cpu, cluster, rst_hold, cpumask;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
@@ -88,13 +103,26 @@ static void dcscb_power_down(void)
	BUG_ON(cpu >= 4 || cluster >= 2);

	arch_spin_lock(&dcscb_lock);
	dcscb_use_count[cpu][cluster]--;
	if (dcscb_use_count[cpu][cluster] == 0) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		rst_hold |= cpumask;
	if (((rst_hold | (rst_hold >> 4)) & 0xf) == 0xf)
		if (((rst_hold | (rst_hold >> 4)) & 0xf) == 0xf) {
			rst_hold |= (1 << 8);
			last_man = true;
		}
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();
	arch_spin_unlock(&dcscb_lock);
	last_man = (rst_hold & (1 << 8));

	/*
	 * Now let's clean our L1 cache and shut ourself down.
@@ -122,6 +150,7 @@ static void dcscb_power_down(void)

	/* Now we are prepared for power-down, do it: */
	dsb();
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
@@ -132,6 +161,19 @@ static const struct mcpm_platform_ops dcscb_power_ops = {
	.power_down	= dcscb_power_down,
};

/*
 * Seed the use-count table for the CPU this init code is running on.
 *
 * MCPM bring-up happens with exactly one CPU already alive (the boot
 * CPU), so its slot must start at 1; all other slots stay at their
 * zero-initialized "down" state.
 */
static void __init dcscb_usage_count_init(void)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	/* The use-count array is sized [4][2]; anything larger is a bug. */
	BUG_ON(cpu >= 4 || cluster >= 2);

	dcscb_use_count[cpu][cluster] = 1;
}

static int __init dcscb_init(void)
{
	struct device_node *node;
@@ -144,6 +186,8 @@ static int __init dcscb_init(void)
	if (!dcscb_base)
		return -EADDRNOTAVAIL;

	dcscb_usage_count_init();

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (ret) {
		iounmap(dcscb_base);