Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8f08e867 authored by Mahesh Sivasubramanian, committed by Gerrit — the friendly Code Review server
Browse files

ARM64: smp: Prevent cluster LPM modes when pending IPIs on cluster CPUs



LPM modes can fail if there is a pending IPI interrupt at the GIC CPU
interface. In some use cases, frequent failure of LPM modes can
cause power and performance degradation. Hence, prevent cluster
low power modes when there is a pending IPI on any of the cluster's CPUs.

Change-Id: Id8a0ac24e4867ef824e0a6f11d989f1e1a2b0e93
Signed-off-by: Mahesh Sivasubramanian <msivasub@codeaurora.org>
Signed-off-by: Murali Nalajala <mnalajal@codeaurora.org>
[satyap: trivial merge conflict resolution]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 2d6d73f1
Loading
Loading
Loading
Loading
+29 −8
Original line number Diff line number Diff line
@@ -468,6 +468,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
DEFINE_PER_CPU(bool, pending_ipi);
static void smp_cross_call_common(const struct cpumask *cpumask,
						unsigned int func)
{
	unsigned int cpu;

	for_each_cpu(cpu, cpumask)
		per_cpu(pending_ipi, cpu) = true;

	__smp_cross_call(cpumask, func);
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
@@ -520,31 +531,32 @@ u64 smp_irq_stat_cpu(unsigned int cpu)

/*
 * Send IPI_CALL_FUNC to every CPU in @mask.  Routed through
 * smp_cross_call_common() so the per-cpu pending_ipi flag is set,
 * which prevents cluster low power modes while the IPI is in flight.
 *
 * Fix: the diff residue left both the old smp_cross_call() line and
 * the new smp_cross_call_common() line in place, which would raise
 * the IPI twice; only the common wrapper call is kept.
 */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call_common(mask, IPI_CALL_FUNC);
}

/*
 * Send IPI_WAKEUP to every CPU in @mask via the common wrapper so
 * pending_ipi is flagged before the cross call is raised.
 *
 * Fix: dropped the stale pre-change smp_cross_call() line that the
 * diff rendering left next to the replacement call (double send).
 */
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call_common(mask, IPI_WAKEUP);
}

/*
 * Send a single-CPU call-function IPI to @cpu via the common wrapper
 * so pending_ipi is flagged first.  The patch also switches this path
 * to IPI_CALL_FUNC_SINGLE (the ARM single-target IPI type).
 *
 * Fix: removed the leftover old smp_cross_call(..., IPI_CALL_FUNC)
 * line that the diff rendering kept alongside the new call.
 */
void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

#ifdef CONFIG_IRQ_WORK
/*
 * Raise the irq_work IPI on the current CPU, but only when the
 * platform actually has an interrupt wired up for it.
 *
 * Fix: the diff residue left the old smp_cross_call() line under the
 * if() and the new wrapped call after it, making the second call
 * unconditional; only the guarded smp_cross_call_common() remains.
 */
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call_common(cpumask_of(smp_processor_id()),
				IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/*
 * Broadcast a timer tick (IPI_TIMER) to the CPUs in @mask, via the
 * common wrapper so pending_ipi is flagged on each target.
 *
 * Fix: removed the duplicate pre-change smp_cross_call() line left
 * by the unified-diff rendering.
 */
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call_common(mask, IPI_TIMER);
}
#endif

@@ -665,7 +677,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

/*
 * Kick @cpu with IPI_RESCHEDULE through the common wrapper so the
 * pending_ipi flag blocks cluster LPM entry until it is handled.
 *
 * Fix: removed the stale old smp_cross_call() line duplicated by the
 * diff rendering (would have sent the IPI twice).
 */
void smp_send_reschedule(int cpu)
{
	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
@@ -676,7 +688,7 @@ void smp_send_stop(void)
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);
		smp_cross_call_common(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
@@ -749,7 +761,16 @@ core_initcall(register_cpufreq_notifier);

/*
 * Raise the CPU-backtrace pseudo-NMI on the CPUs in @mask.
 *
 * Fix: dropped the leftover pre-change smp_cross_call() line that the
 * diff rendering kept above the new body (double send).
 */
static void raise_nmi(cpumask_t *mask)
{
	/*
	 * Generate the backtrace directly if we are running in a calling
	 * context that is not preemptible by the backtrace IPI. Note
	 * that nmi_cpu_backtrace() automatically removes the current cpu
	 * from mask.
	 */
	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
		nmi_cpu_backtrace(NULL);

	smp_cross_call_common(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+19 −8
Original line number Diff line number Diff line
@@ -610,6 +610,18 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif
void (*__smp_cross_call)(const struct cpumask *, unsigned int);
DEFINE_PER_CPU(bool, pending_ipi);

/*
 * Common front end for raising IPIs on arm64: record, for each
 * destination CPU, that an IPI is now in flight (pending_ipi), then
 * invoke the platform cross-call hook registered through
 * set_smp_cross_call().  The flag is cleared in handle_IPI() and is
 * consulted to keep a cluster out of low power modes while one of
 * its CPUs has an IPI outstanding.
 */
void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
{
	unsigned int dest;

	/* Mark the IPI pending on every targeted CPU first. */
	for_each_cpu(dest, cpumask)
		per_cpu(pending_ipi, dest) = true;

	/* Now raise the IPI via the registered backend. */
	__smp_cross_call(cpumask, func);
}

/*
 * Enumerate the possible CPU set from the device tree and build the
@@ -756,8 +768,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
@@ -807,18 +817,18 @@ u64 smp_irq_stat_cpu(unsigned int cpu)

/*
 * Send IPI_CALL_FUNC to every CPU in @mask via the common wrapper so
 * pending_ipi is flagged before the cross call is raised.
 *
 * Fix: removed the duplicate pre-change smp_cross_call() line left
 * in place by the unified-diff rendering.
 */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call_common(mask, IPI_CALL_FUNC);
}

/*
 * Send a call-function IPI to a single @cpu via the common wrapper.
 * Note that on arm64 the single-CPU path still uses IPI_CALL_FUNC
 * (unlike the 32-bit side, which has IPI_CALL_FUNC_SINGLE).
 *
 * Fix: removed the stale old smp_cross_call() line duplicated by the
 * diff rendering.
 */
void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
/*
 * Send IPI_WAKEUP to the CPUs in @mask (ACPI parking protocol boot
 * path) via the common wrapper so pending_ipi is flagged.
 *
 * Fix: removed the duplicate pre-change smp_cross_call() line left
 * by the unified-diff rendering.
 */
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call_common(mask, IPI_WAKEUP);
}
#endif

@@ -945,19 +955,20 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	per_cpu(pending_ipi, cpu) = false;
	set_irq_regs(old_regs);
}

/*
 * Kick @cpu with IPI_RESCHEDULE through the common wrapper so the
 * pending_ipi flag blocks cluster LPM entry until the IPI is handled.
 * Rescheduling an offline CPU is a hard bug, hence the BUG_ON.
 *
 * Fix: removed the stale old smp_cross_call() line duplicated by the
 * diff rendering.
 */
void smp_send_reschedule(int cpu)
{
	BUG_ON(cpu_is_offline(cpu));
	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/*
 * Broadcast a timer tick (IPI_TIMER) to the CPUs in @mask via the
 * common wrapper so pending_ipi is flagged on each target.
 *
 * Fix: removed the duplicate pre-change smp_cross_call() line left
 * by the unified-diff rendering.
 */
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call_common(mask, IPI_TIMER);
}
#endif

@@ -973,7 +984,7 @@ void smp_send_stop(void)

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
		smp_cross_call_common(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */