Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3ebc7033 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull powerpc fixes from Michael Ellerman:
 "Apologies for the late pull request, but Ben has been busy finding bugs.

   - Userspace was semi-randomly segfaulting on radix due to us
     incorrectly handling a fault triggered by autonuma, caused by a
     patch we merged earlier in v4.10 to prevent the kernel executing
     userspace.

   - We weren't marking host IPIs properly for KVM in the OPAL ICP
     backend.

   - The ERAT flushing on radix was missing an isync and was incorrectly
     marked as DD1 only.

   - The powernv CPU hotplug code was missing a wakeup type and failing
     to flush the interrupt correctly when using OPAL ICP

  Thanks to Benjamin Herrenschmidt"

* tag 'powerpc-4.10-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/powernv: Properly set "host-ipi" on IPIs
  powerpc/powernv: Fix CPU hotplug to handle waking on HVI
  powerpc/mm/radix: Update ERAT flushes when invalidating TLB
  powerpc/mm: Fix spurrious segfaults on radix with autonuma
parents 3d88460d f83e6862
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -649,9 +649,10 @@
/*
 * SRR1 bit definitions: ISI (instruction storage interrupt) status bits
 * and the power-saving wakeup-reason field reported after waking from a
 * nap/sleep/stop state.
 */
#define   SRR1_ISI_N_OR_G	0x10000000 /* ISI: Access is no-exec or G */
#define   SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
#define   SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 and 9 */
#define   SRR1_WAKESYSERR	0x00300000 /* System error */
#define   SRR1_WAKEEE		0x00200000 /* External interrupt */
#define   SRR1_WAKEHVI		0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
#define   SRR1_WAKEMT		0x00280000 /* mtctrl */
/* NOTE(review): SRR1_WAKEHMI shares 0x00280000 with SRR1_WAKEMT above —
 * matches the upstream header, but confirm which one callers intend. */
#define	  SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
#define   SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */
+1 −0
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }

#ifdef CONFIG_PPC_POWERNV
/* OPAL-backed ICP (interrupt presentation controller) entry points. */
extern int icp_opal_init(void);
extern void icp_opal_flush_interrupt(void);
#else
/* Without powernv support, report the OPAL ICP backend as unavailable. */
static inline int icp_opal_init(void) { return -ENODEV; }
#endif
+5 −16
Original line number Diff line number Diff line
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
	if (unlikely(debugger_fault_handler(regs)))
		goto bail;

	/* On a kernel SLB miss we can only check for a valid exception entry */
	if (!user_mode(regs) && (address >= TASK_SIZE)) {
	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address.
	 */
	if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
		rc = SIGSEGV;
		goto bail;
	}
@@ -390,20 +393,6 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
#endif /* CONFIG_8xx */

	if (is_exec) {
		/*
		 * An execution fault + no execute ?
		 *
		 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
		 * deliberately create NX mappings, and use the fault to do the
		 * cache flush. This is usually handled in hash_page_do_lazy_icache()
		 * but we could end up here if that races with a concurrent PTE
		 * update. In that case we need to fall through here to the VMA
		 * check below.
		 */
		if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
			(regs->msr & SRR1_ISI_N_OR_G))
			goto bad_area;

		/*
		 * Allow execution from readable areas if the MMU does not
		 * provide separate controls over reading and executing.
+1 −5
Original line number Diff line number Diff line
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
		__tlbiel_pid(pid, set, ric);
	}
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
	return;
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
+10 −2
Original line number Diff line number Diff line
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
		wmask = SRR1_WAKEMASK_P8;

	idle_states = pnv_get_supported_cpuidle_states();

	/* We don't want to take decrementer interrupts while we are offline,
	 * so clear LPCR:PECE1. We keep PECE2 enabled.
	 * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
	 * enabled as to let IPIs in.
	 */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);

@@ -206,7 +208,11 @@ static void pnv_smp_cpu_kill_self(void)
		 * contains 0.
		 */
		if (((srr1 & wmask) == SRR1_WAKEEE) ||
		    ((srr1 & wmask) == SRR1_WAKEHVI) ||
		    (local_paca->irq_happened & PACA_IRQ_EE)) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				icp_opal_flush_interrupt();
			else
				icp_native_flush_interrupt();
		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
		if (srr1 && !generic_check_cpu_restart(cpu))
			DBG("CPU%d Unexpected exit while offline !\n", cpu);
	}

	/* Re-enable decrementer interrupts */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
	DBG("CPU%d coming online...\n", cpu);
}
Loading