Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4b490f2e authored by Mahesh Sivasubramanian, committed by Lina Iyer
Browse files

ARM64: smp: Prevent cluster LPM modes when pending IPIs on cluster CPUs



LPM modes can fail if there is a pending IPI interrupt at the GIC CPU
interface. In some use cases, frequent failure of LPM modes can cause
power and performance degradation. Hence, prevent cluster low power
modes when there is a pending IPI on any of the cluster's CPUs.

This patch has been combined with the following patches:
	- 'commit c3d3af5dce1fb52 ("ARM: smp: Remove use of
	  IPI_CALL_FUNC_SINGLE")'
	- 'commit 0a061eca16bf0d8 ("ARM: kernel: smp: Clear Pending IPI
	  flag after handling")'

Change-Id: Id8a0ac24e4867ef824e0a6f11d989f1e1a2b0e93
Signed-off-by: Mahesh Sivasubramanian <msivasub@codeaurora.org>
Signed-off-by: Murali Nalajala <mnalajal@codeaurora.org>
[satyap: trivial merge conflict resolution]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 99a3b027
Loading
Loading
Loading
Loading
+32 −8
Original line number Diff line number Diff line
@@ -498,6 +498,18 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
	__smp_cross_call(target, ipinr);
}

DEFINE_PER_CPU(bool, pending_ipi);
static void smp_cross_call_common(const struct cpumask *cpumask,
						unsigned int func)
{
	unsigned int cpu;

	for_each_cpu(cpu, cpumask)
		per_cpu(pending_ipi, cpu) = true;

	smp_cross_call(cpumask, func);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;
@@ -526,31 +538,32 @@ u64 smp_irq_stat_cpu(unsigned int cpu)

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
	smp_cross_call_common(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
	smp_cross_call_common(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
		smp_cross_call_common(cpumask_of(smp_processor_id()),
				IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
	smp_cross_call_common(mask, IPI_TIMER);
}
#endif

@@ -666,12 +679,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);

	per_cpu(pending_ipi, cpu) = false;
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
@@ -682,7 +697,7 @@ void smp_send_stop(void)
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);
		smp_cross_call_common(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
@@ -755,7 +770,16 @@ core_initcall(register_cpufreq_notifier);

static void raise_nmi(cpumask_t *mask)
{
	smp_cross_call(mask, IPI_CPU_BACKTRACE);
	/*
	 * Generate the backtrace directly if we are running in a calling
	 * context that is not preemptible by the backtrace IPI. Note
	 * that nmi_cpu_backtrace() automatically removes the current cpu
	 * from mask.
	 */
	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
		nmi_cpu_backtrace(NULL);

	smp_cross_call_common(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+19 −8
Original line number Diff line number Diff line
@@ -594,6 +594,18 @@ static void __init acpi_parse_and_init_cpus(void)
#else
#define acpi_parse_and_init_cpus(...)	do { } while (0)
#endif
void (*__smp_cross_call)(const struct cpumask *, unsigned int);
DEFINE_PER_CPU(bool, pending_ipi);

void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
{
	unsigned int cpu;

	for_each_cpu(cpu, cpumask)
		per_cpu(pending_ipi, cpu) = true;

	__smp_cross_call(cpumask, func);
}

/*
 * Enumerate the possible CPU set from the device tree and build the
@@ -735,8 +747,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
@@ -786,18 +796,18 @@ u64 smp_irq_stat_cpu(unsigned int cpu)

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
	smp_cross_call_common(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
	smp_cross_call_common(mask, IPI_WAKEUP);
}
#endif

@@ -933,19 +943,20 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	per_cpu(pending_ipi, cpu) = false;
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	BUG_ON(cpu_is_offline(cpu));
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
	smp_cross_call_common(mask, IPI_TIMER);
}
#endif

@@ -961,7 +972,7 @@ void smp_send_stop(void)

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
		smp_cross_call_common(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */