Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 15114c7e authored by Linus Torvalds
Browse files
Pull powerpc fixes from Benjamin Herrenschmidt:
 "Here are a few powerpc fixes.  Arguably some of this should have come
  to you earlier but I'm only just catching up after my medical leave.

  Mostly these fix regressions; a couple are long-standing bugs."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/pseries: Fix software invalidate TCE
  powerpc: check_and_cede_processor() never cedes
  powerpc/ftrace: Do not trace restore_interrupts()
  powerpc: Fix Section mismatch warnings in prom_init.c
  ppc64: fix missing to check all bits of _TIF_USER_WORK_MASK in preempt
  powerpc: Fix uninitialised error in numa.c
  powerpc: Fix BPF_JIT code to link with multiple TOCs
parents 15b77435 bc6dc752
Loading
Loading
Loading
Loading
+5 −0
Original line number Original line Diff line number Diff line
@@ -103,6 +103,11 @@ static inline void hard_irq_disable(void)
/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
#define hard_irq_disable	hard_irq_disable
#define hard_irq_disable	hard_irq_disable


/*
 * Report whether any interrupt was deferred while soft-disabled.
 * True when irq_happened records a pending event other than the
 * bare "hard interrupts are disabled" bookkeeping bit.
 */
static inline bool lazy_irq_pending(void)
{
	unsigned int pending = get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS;

	return pending != 0;
}

/*
/*
 * This is called by asynchronous interrupts to conditionally
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts when soft-disabled after having
 * re-enable hard interrupts when soft-disabled after having
+40 −57
Original line number Original line Diff line number Diff line
@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
	mtmsrd	r10,1		  /* Update machine state */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PPC_BOOK3E */


#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */
	beq	resume_kernel


	/* Check current_thread_info()->flags */
	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	beq	restore

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.restore_interrupts
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	bl	.restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_notify_resume
	b	.ret_from_except

resume_kernel:
#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first
	 */
	SOFT_DISABLE_INTS(r3,r4)
1:	bl	.preempt_schedule_irq

	/* Re-test flags and eventually loop */
	clrrdi	r9,r1,THREAD_SHIFT
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	do_work
	bne	1b
#endif /* !CONFIG_PREEMPT */
#endif /* CONFIG_PREEMPT */


	.globl	fast_exc_return_irq
	.globl	fast_exc_return_irq
fast_exc_return_irq:
fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PPC_BOOK3E */
1:	b	.ret_from_except /* What else to do here ? */
1:	b	.ret_from_except /* What else to do here ? */
 
 


3:
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first
	 */
	SOFT_DISABLE_INTS(r3,r4)
1:	bl	.preempt_schedule_irq

	/* Re-test flags and eventually loop */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif /* CONFIG_PREEMPT */

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.restore_interrupts
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	bl	.restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_notify_resume
	b	.ret_from_except

unrecov_restore:
unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	bl	.unrecoverable_exception
+1 −1
Original line number Original line Diff line number Diff line
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
 * NOTE: This is called with interrupts hard disabled but not marked
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 * as such in paca->irq_happened, so we need to resync this.
 */
 */
void restore_interrupts(void)
void notrace restore_interrupts(void)
{
{
	if (irqs_disabled()) {
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+2 −2
Original line number Original line Diff line number Diff line
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {


extern char opal_secondary_entry;
extern char opal_secondary_entry;


static void prom_query_opal(void)
static void __init prom_query_opal(void)
{
{
	long rc;
	long rc;


@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
	prom_debug("prom_opal_hold_cpus: end...\n");
	prom_debug("prom_opal_hold_cpus: end...\n");
}
}


static void prom_opal_takeover(void)
static void __init prom_opal_takeover(void)
{
{
	struct opal_secondary_data *data = &RELOC(opal_secondary_data);
	struct opal_secondary_data *data = &RELOC(opal_secondary_data);
	struct opal_takeover_args *args = &data->args;
	struct opal_takeover_args *args = &data->args;
+1 −1
Original line number Original line Diff line number Diff line
@@ -635,7 +635,7 @@ static inline int __init read_usm_ranges(const u32 **usm)
 */
 */
static void __init parse_drconf_memory(struct device_node *memory)
static void __init parse_drconf_memory(struct device_node *memory)
{
{
	const u32 *dm, *usm;
	const u32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	unsigned long lmb_size, base, size, sz;
	int nid;
	int nid;
Loading