Commit 6fcc8cea authored by Linus Torvalds
Pull powerpc fixes from Michael Ellerman:
 "Fixes marked for stable:
   - Convert cmp to cmpd in idle enter sequence (Segher Boessenkool)
   - cxl: Fix leaking pid refs in some error paths (Vaibhav Jain)
   - Re-fix race condition between going idle and entering guest (Paul Mackerras)
   - Fix race condition in setting lock bit in idle/wakeup code (Paul Mackerras)
   - radix: Use tlbiel only if we ever ran on the current cpu (Aneesh Kumar K.V)
   - relocation, register save fixes for system reset interrupt (Nicholas Piggin)

  Fixes for code merged this cycle:
   - Fix CONFIG_ALIVEC typo in restore_tm_state() (Valentin Rothberg)
   - KVM: PPC: Book3S HV: Fix build error when SMP=n (Michael Ellerman)"

* tag 'powerpc-4.9-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/64s: relocation, register save fixes for system reset interrupt
  powerpc/mm/radix: Use tlbiel only if we ever ran on the current cpu
  powerpc/process: Fix CONFIG_ALIVEC typo in restore_tm_state()
  powerpc/64: Fix race condition in setting lock bit in idle/wakeup code
  powerpc/64: Re-fix race condition between going idle and entering guest
  cxl: Fix leaking pid refs in some error paths
  powerpc: Convert cmp to cmpd in idle enter sequence
  KVM: PPC: Book3S HV: Fix build error when SMP=n
parents b49c3170 fb479e44
+1 −1
@@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
1:	cmp	cr0,r0,r0;					\
1:	cmpd	cr0,r0,r0;					\
	bne	1b;						\
	IDLE_INST;						\
	b	.
+16 −0
@@ -93,6 +93,10 @@
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;

#define __LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l;

/* Exception register prefixes */
#define EXC_HV	H
#define EXC_STD
@@ -208,6 +212,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif

#ifdef CONFIG_RELOCATABLE
#define BRANCH_TO_COMMON(reg, label)					\
	__LOAD_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr

#else
#define BRANCH_TO_COMMON(reg, label)					\
	b	label

#endif

#define __KVM_HANDLER_PROLOG(area, n)					\
	BEGIN_FTR_SECTION_NESTED(947)					\
	ld	r10,area+EX_CFAR(r13);					\
+12 −0
@@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}

static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return cpumask_equal(mm_cpumask(mm),
			      cpumask_of(smp_processor_id()));
}

#else
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}

static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
#endif

#endif /* __KERNEL__ */
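
For context on the tlb.h hunk above: mm_is_thread_local() reports whether the mm's cpumask contains only the current CPU, i.e. whether the address space has only ever been active on the current hardware thread. As the commit subject "Use tlbiel only if we ever ran on the current cpu" suggests, this is the check that lets a flush path use the CPU-local tlbiel instruction instead of the broadcast tlbie. A minimal C sketch of that decision follows; flush_mm_pid(), flush_pid_local() and flush_pid_global() are illustrative stand-ins, not functions from this patch:

/*
 * Illustrative sketch only, not code from this patch.  Use the cheap
 * CPU-local invalidation when the mm has never been used on any other
 * CPU; otherwise fall back to the broadcast form.
 */
static void flush_mm_pid(struct mm_struct *mm, unsigned long pid)
{
	preempt_disable();		/* keep smp_processor_id() stable */
	if (mm_is_thread_local(mm))
		flush_pid_local(pid);	/* tlbiel-style, this CPU only */
	else
		flush_pid_global(pid);	/* tlbie-style, broadcast to all CPUs */
	preempt_enable();
}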
+29 −21
@@ -95,19 +95,35 @@ __start_interrupts:
/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x4100)

EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
	SET_SCRATCH0(r13)

#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap/sleep/winkle.
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	beq	9f
#define IDLETEST(n)							\
	BEGIN_FTR_SECTION ;						\
	mfspr	r10,SPRN_SRR1 ;						\
	rlwinm.	r10,r10,47-31,30,31 ;					\
	beq-	1f ;							\
	cmpwi	cr3,r10,2 ;						\
	BRANCH_TO_COMMON(r10, system_reset_idle_common) ;		\
1:									\
	END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#else
#define IDLETEST NOTEST
#endif

	cmpwi	cr3,r13,2
	GET_PACA(r13)
EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 IDLETEST, 0x100)

EXC_REAL_END(system_reset, 0x100, 0x200)
EXC_VIRT_NONE(0x4100, 0x4200)

#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
	bl	pnv_restore_hyp_resource

	li	r0,PNV_THREAD_RUNNING
@@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
	blt	cr3,2f
	b	pnv_wakeup_loss
2:	b	pnv_wakeup_noloss
#endif

9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)
EXC_REAL_END(system_reset, 0x100, 0x200)
EXC_VIRT_NONE(0x4100, 0x4200)
EXC_COMMON(system_reset_common, 0x100, system_reset_exception)

#ifdef CONFIG_PPC_PSERIES
@@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)


#define LOAD_SYSCALL_HANDLER(reg)					\
	ld	reg,PACAKBASE(r13);				\
	ori	reg,reg,(ABS_ADDR(system_call_common))@l;
	__LOAD_HANDLER(reg, system_call_common)

/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1 					\
+29 −6
@@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 * r9  - used as a temporary variable
 */

core_idle_lock_held:
@@ -99,6 +100,8 @@ core_idle_lock_held:
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	core_idle_lock_held
	blr

/*
@@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
@@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in the system    */
	/* reset interrupt vector in exceptions-64s.S.        */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
@@ -250,6 +267,12 @@ enter_winkle:
 * r3 - requested stop state
 */
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	/* DO THIS IN REAL MODE!  See comment above. */
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
 * Check if the requested state is a deep idle state.
 */