Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5b73151f authored by Nicholas Piggin's avatar Nicholas Piggin Committed by Michael Ellerman
Browse files

powerpc: NMI IPI make NMI IPIs fully synchronous



There is an asynchronous aspect to smp_send_nmi_ipi. The caller waits
for all CPUs to call in to the handler, but it does not wait for
completion of the handler. This is a needless complication, so remove
it and always wait synchronously.

The synchronous wait allows the caller to easily time out and clear
the wait for completion (zero nmi_ipi_busy_count) in the case of badly
behaved handlers. This would have prevented the recent smp_send_stop
NMI IPI bug from causing the system to hang.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 9b81c021
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -56,7 +56,6 @@ struct smp_ops_t {
	int   (*cpu_bootable)(unsigned int nr);
};

extern void smp_flush_nmi_ipi(u64 delay_us);
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
+27 −23
Original line number Diff line number Diff line
@@ -423,6 +423,7 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
	fn(regs);

	nmi_ipi_lock();
	if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
		nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);
@@ -448,29 +449,11 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
	}
}

/*
 * Wait for any in-flight NMI IPI activity to drain (nmi_ipi_busy_count
 * reaching zero), polling under the NMI IPI lock.
 *
 * delay_us > 0 is the maximum time to wait in microseconds before giving
 * up; == 0 waits indefinitely (same convention as smp_send_nmi_ipi()).
 *
 * NOTE(review): nmi_ipi_busy_count, nmi_ipi_lock_start() and
 * nmi_ipi_unlock_end() are defined elsewhere in this file; presumably the
 * handlers decrement the count, which is why the lock is dropped for each
 * 1us delay so they can make progress — confirm against the full file.
 */
void smp_flush_nmi_ipi(u64 delay_us)
{
	unsigned long flags;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		/* Drop the lock while delaying so other CPUs can update the count. */
		nmi_ipi_unlock_end(&flags);
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				return; /* Timed out; lock is not held here. */
		}
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_unlock_end(&flags);
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   enter the handler, == 0 specifies indefinite delay.
 *   complete executing the handler, == 0 specifies indefinite delay.
 */
int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
{
@@ -507,8 +490,12 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy_count is held here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
@@ -516,12 +503,28 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
		}
	}

	while (nmi_ipi_busy_count > 1) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Could not gather all CPUs */
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
	if (nmi_ipi_busy_count > 1) {
		/* Timeout waiting for CPUs to execute fn */
		ret = 0;
		nmi_ipi_busy_count = 1;
	}

	nmi_ipi_busy_count--;
	nmi_ipi_unlock_end(&flags);

@@ -597,6 +600,7 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
	 */
	nmi_ipi_lock();
	if (nmi_ipi_busy_count > 1)
		nmi_ipi_busy_count--;
	nmi_ipi_unlock();

+0 −1
Original line number Diff line number Diff line
@@ -174,7 +174,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)
				continue;
			smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
		}
		smp_flush_nmi_ipi(1000000);
	}

	/* Take the stuck CPUs out of the watch group */