
Commit ac78884e authored by Russell King

ARM: lockdep: fix unannotated irqs-on



CPU: Testing write buffer coherency: ok
------------[ cut here ]------------
WARNING: at kernel/lockdep.c:3145 check_flags+0xcc/0x1dc()
Modules linked in:
[<c0035120>] (unwind_backtrace+0x0/0xf8) from [<c0355374>] (dump_stack+0x20/0x24)
[<c0355374>] (dump_stack+0x20/0x24) from [<c0060c04>] (warn_slowpath_common+0x58/0x70)
[<c0060c04>] (warn_slowpath_common+0x58/0x70) from [<c0060c3c>] (warn_slowpath_null+0x20/0x24)
[<c0060c3c>] (warn_slowpath_null+0x20/0x24) from [<c008f224>] (check_flags+0xcc/0x1dc)
[<c008f224>] (check_flags+0xcc/0x1dc) from [<c00945dc>] (lock_acquire+0x50/0x140)
[<c00945dc>] (lock_acquire+0x50/0x140) from [<c0358434>] (_raw_spin_lock+0x50/0x88)
[<c0358434>] (_raw_spin_lock+0x50/0x88) from [<c00fd114>] (set_task_comm+0x2c/0x60)
[<c00fd114>] (set_task_comm+0x2c/0x60) from [<c007e184>] (kthreadd+0x30/0x108)
[<c007e184>] (kthreadd+0x30/0x108) from [<c0030104>] (kernel_thread_exit+0x0/0x8)
---[ end trace 1b75b31a2719ed1c ]---
possible reason: unannotated irqs-on.
irq event stamp: 3
hardirqs last  enabled at (2): [<c0059bb0>] finish_task_switch+0x48/0xb0
hardirqs last disabled at (3): [<c002f0b0>] ret_slow_syscall+0xc/0x1c
softirqs last  enabled at (0): [<c005f3e0>] copy_process+0x394/0xe5c
softirqs last disabled at (0): [<(null)>] (null)

Fix this by ensuring that the lockdep interrupt state is manipulated in
the appropriate places.  We essentially treat userspace as an entirely
separate environment that lockdep is not concerned with (lockdep does
not monitor userspace), so we do not tell lockdep that IRQs will be
enabled in that environment.

Instead, when creating kernel threads (which is a rare event compared
to entering and leaving userspace), we have to update the lockdep state.
Do this by starting threads with IRQs disabled; in the kthread helper,
tell lockdep that IRQs are enabled, and then actually enable them.

This provides lockdep with a consistent view of the current IRQ state
in kernel space.
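
As a rough C-level sketch of that ordering (the real start-up code is the
ARM assembly in kernel_thread_helper below, which restores the CPSR saved
in r7 rather than calling local_irq_enable(); the function name here is
illustrative only):

#include <linux/irqflags.h>	/* trace_hardirqs_on(), local_irq_enable() */

/* Sketch of what the new thread does before running its payload. */
static void kernel_thread_startup_sketch(int (*fn)(void *), void *arg)
{
	trace_hardirqs_on();	/* tell lockdep IRQs are about to be enabled */
	local_irq_enable();	/* then actually unmask them */
	fn(arg);		/* run the thread function */
}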

This also reverts the portions of 0d928b0b
which did not fix the problem.

Tested-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent d9e38040
+6 −10
@@ -162,8 +162,6 @@ ENDPROC(__und_invalid)
 	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
 	@
 	stmia	r5, {r0 - r4}
-
-	asm_trace_hardirqs_off
 	.endm
 
 	.align	5
@@ -204,7 +202,7 @@ __dabt_svc:
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-	disable_irq
+	disable_irq_notrace
 
 	@
 	@ restore SPSR and restart the instruction
@@ -218,6 +216,9 @@ ENDPROC(__dabt_svc)
 __irq_svc:
 	svc_entry
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
@@ -291,7 +292,7 @@ __und_svc:
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-1:	disable_irq
+1:	disable_irq_notrace
 
 	@
 	@ restore SPSR and restart the instruction
@@ -327,7 +328,7 @@ __pabt_svc:
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-	disable_irq
+	disable_irq_notrace
 
 	@
 	@ restore SPSR and restart the instruction
@@ -393,8 +394,6 @@ ENDPROC(__pabt_svc)
 	@ Clear FP to mark the first stack frame
 	@
 	zero_fp
-
-	asm_trace_hardirqs_off
 	.endm
 
 	.macro	kuser_cmpxchg_check
@@ -465,9 +464,6 @@ __irq_usr:
  THUMB(	movne	r0, #0		)
  THUMB(	strne	r0, [r0]	)
 #endif
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_on
-#endif
 
 	mov	why, #0
 	b	ret_to_user
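
On the SVC-mode paths above, the hardware has already masked IRQs by the
time the handler runs; the explicit trace_hardirqs_off only brings lockdep's
view into line, and only when IRQ tracing is configured. A minimal C-level
equivalent (not part of the patch; the wrapper name is made up for
illustration):

#include <linux/irqflags.h>	/* trace_hardirqs_off() */

/* Hypothetical helper mirroring the #ifdef block added to __irq_svc:
 * IRQs are already masked by the exception entry, so only lockdep's
 * bookkeeping needs updating, and only if tracing is built in. */
static inline void svc_irq_entry_annotate(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	trace_hardirqs_off();
#endif
}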
+14 −9
@@ -351,17 +351,21 @@ EXPORT_SYMBOL(dump_fpu);

 /*
  * Shuffle the argument into the correct register before calling the
- * thread function.  r1 is the thread argument, r2 is the pointer to
- * the thread function, and r3 points to the exit function.
+ * thread function.  r4 is the thread argument, r5 is the pointer to
+ * the thread function, and r6 points to the exit function.
  */
 extern void kernel_thread_helper(void);
 asm(	".pushsection .text\n"
 "	.align\n"
 "	.type	kernel_thread_helper, #function\n"
 "kernel_thread_helper:\n"
-"	mov	r0, r1\n"
-"	mov	lr, r3\n"
-"	mov	pc, r2\n"
+#ifdef CONFIG_TRACE_IRQFLAGS
+"	bl	trace_hardirqs_on\n"
+#endif
+"	msr	cpsr_c, r7\n"
+"	mov	r0, r4\n"
+"	mov	lr, r6\n"
+"	mov	pc, r5\n"
 "	.size	kernel_thread_helper, . - kernel_thread_helper\n"
 "	.popsection");

@@ -391,11 +395,12 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)

 	memset(&regs, 0, sizeof(regs));
 
-	regs.ARM_r1 = (unsigned long)arg;
-	regs.ARM_r2 = (unsigned long)fn;
-	regs.ARM_r3 = (unsigned long)kernel_thread_exit;
+	regs.ARM_r4 = (unsigned long)arg;
+	regs.ARM_r5 = (unsigned long)fn;
+	regs.ARM_r6 = (unsigned long)kernel_thread_exit;
+	regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
-	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
+	regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
 
 	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
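
Callers of kernel_thread() need no changes for the register shuffle above;
a hedged usage sketch (my_worker and spawn_example are made-up names,
kernel_thread() and CLONE_KERNEL are the era's real kernel API):

#include <linux/sched.h>	/* kernel_thread(), CLONE_KERNEL */

/* Made-up example thread function. */
static int my_worker(void *arg)
{
	/* By the time this runs, kernel_thread_helper has already called
	 * trace_hardirqs_on() (when CONFIG_TRACE_IRQFLAGS is set) and
	 * restored a CPSR with the I bit clear, so lockdep and the
	 * hardware agree that IRQs are enabled. */
	return 0;
}

static void spawn_example(void)
{
	pid_t pid = kernel_thread(my_worker, NULL, CLONE_KERNEL);

	(void)pid;	/* error handling omitted in this sketch */
}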