Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e5a489ab authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull powerpc fixes from Michael Ellerman:
 "The headliner is a fix for FP/VMX register corruption when using
  transactional memory, and a new selftest to go with it.

  Then there's the virt_addr_valid() fix, currently HARDENED_USERCOPY
  is tripping on that causing some machines to crash.

  A few other fairly minor fixes for long tail things, and a couple of
  fixes for code we just merged.

  Thanks to: Breno Leitao, Gautham Shenoy, Michael Neuling, Naveen Rao,
  Nicholas Piggin, Paul Mackerras"

* tag 'powerpc-4.12-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/mm: Fix virt_addr_valid() etc. on 64-bit hash
  powerpc/mm: Fix crash in page table dump with huge pages
  powerpc/kprobes: Fix handling of instruction emulation on probe re-entry
  powerpc/powernv: Set NAPSTATELOST after recovering paca on P9 DD1
  selftests/powerpc: Test TM and VMX register state
  powerpc/tm: Fix FP and VMX register corruption
  powerpc/modules: If mprofile-kernel is enabled add it to vermagic
parents 8b4822de e41e53cd
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -14,6 +14,10 @@
#include <asm-generic/module.h>


#ifdef CC_USING_MPROFILE_KERNEL
#define MODULE_ARCH_VERMAGIC	"mprofile-kernel"
#endif

#ifndef __powerpc64__
/*
 * Thanks to Paul M for explaining this.
+12 −0
Original line number Diff line number Diff line
@@ -132,7 +132,19 @@ extern long long virt_phys_offset;
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * On hash the vmalloc and other regions alias to the kernel region when passed
 * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
 * return true for some vmalloc addresses, which is incorrect. So explicitly
 * check that the address is in the kernel region.
 */
#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
				pfn_valid(virt_to_pfn(kaddr)))
#else
#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
#endif

/*
 * On Book-E parts we need __va to parse the device tree and we can't
+1 −1
Original line number Diff line number Diff line
@@ -416,7 +416,7 @@ power9_dd1_recover_paca:
	 * which needs to be restored from the stack.
	 */
	li	r3, 1
	stb	r0,PACA_NAPSTATELOST(r13)
	stb	r3,PACA_NAPSTATELOST(r13)
	blr

/*
+2 −1
Original line number Diff line number Diff line
@@ -305,16 +305,17 @@ int kprobe_handler(struct pt_regs *regs)
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			if (p->ainsn.boostable >= 0) {
				ret = try_to_emulate(p, regs);

				if (ret > 0) {
					restore_previous_kprobe(kcb);
					preempt_enable_no_resched();
					return 1;
				}
			}
			prepare_singlestep(p, regs);
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
+19 −0
Original line number Diff line number Diff line
@@ -864,6 +864,25 @@ static void tm_reclaim_thread(struct thread_struct *thr,
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state. We use
	 * ckpt_regs.msr here as that's what tm_reclaim will use to
	 * determine if it's going to write the checkpointed state or
	 * not. So either this will write the checkpointed registers,
	 * or reclaim will. Similarly for VMX.
	 */
	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
		memcpy(&thr->ckfp_state, &thr->fp_state,
		       sizeof(struct thread_fp_state));
	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
		memcpy(&thr->ckvr_state, &thr->vr_state,
		       sizeof(struct thread_vr_state));

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, thr->ckpt_regs.msr, cause);
Loading