Commit e17fdf5c authored by Linus Torvalds

Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86/asm changes from Ingo Molnar

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Include probe_roms.h in probe_roms.c
  x86/32: Print control and debug registers for kernel context
  x86: Tighten dependencies of CPU_SUP_*_32
  x86/numa: Improve internode cache alignment
  x86: Fix the NMI nesting comments
  x86-64: Improve insn scheduling in SAVE_ARGS_IRQ
  x86-64: Fix CFI annotations for NMI nesting code
  bitops: Add missing parentheses to new get_order macro
  bitops: Optimise get_order()
  bitops: Adjust the comment on get_order() to describe the size==0 case
  x86/spinlocks: Eliminate TICKET_MASK
  x86-64: Handle byte-wise tail copying in memcpy() without a loop
  x86-64: Fix memcpy() to support sizes of 4Gb and above
  x86-64: Fix memset() to support sizes of 4Gb and above
  x86-64: Slightly shorten copy_page()
parents 95211279 a240ada2
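The three bitops entries above rework get_order(), whose diff is not expanded on this page. get_order(size) returns the smallest order such that (PAGE_SIZE << order) bytes cover the requested size, and the size == 0 case is left undefined. A small user-space sketch of that calculation, assuming 4 KiB pages (an illustration, not the kernel's optimised constant-folding implementation):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages, as on x86 */

/*
 * Rough model of get_order(): the smallest 'order' such that
 * (1UL << (PAGE_SHIFT + order)) >= size.  The kernel version also
 * folds to a compile-time constant when 'size' is constant; the
 * size == 0 case is undefined, as the adjusted comment notes.
 */
static int order_of(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* 1 byte and one full page fit in order 0; one page plus a byte needs order 1 */
	printf("%d %d %d\n", order_of(1), order_of(4096), order_of(4097));
	return 0;
}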
+2 −3
@@ -303,7 +303,6 @@ config X86_GENERIC
 config X86_INTERNODE_CACHE_SHIFT
 	int
 	default "12" if X86_VSMP
-	default "7" if NUMA
 	default X86_L1_CACHE_SHIFT
 
 config X86_CMPXCHG
@@ -441,7 +440,7 @@ config CPU_SUP_INTEL
 config CPU_SUP_CYRIX_32
 	default y
 	bool "Support Cyrix processors" if PROCESSOR_SELECT
-	depends on !64BIT
+	depends on M386 || M486 || M586 || M586TSC || M586MMX || (EXPERT && !64BIT)
 	---help---
 	  This enables detection, tunings and quirks for Cyrix processors
 
@@ -495,7 +494,7 @@ config CPU_SUP_TRANSMETA_32
 config CPU_SUP_UMC_32
 	default y
 	bool "Support UMC processors" if PROCESSOR_SELECT
-	depends on !64BIT
+	depends on M386 || M486 || (EXPERT && !64BIT)
 	---help---
 	  This enables detection, tunings and quirks for UMC processors
 
+2 −2
@@ -88,14 +88,14 @@ static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return !!(tmp.tail ^ tmp.head);
+	return tmp.tail != tmp.head;
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
+	return (__ticket_t)(tmp.tail - tmp.head) > 1;
 }
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
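Dropping TICKET_MASK works because head and tail are already stored in the ticket type, so the subtraction wraps modulo 2^TICKET_SHIFT by itself and the explicit mask was redundant. A minimal stand-alone C illustration, assuming 8-bit tickets (not kernel code):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t __ticket_t;	/* assumption: 8-bit tickets (small NR_CPUS) */

int main(void)
{
	/* tail has wrapped past head, as happens after many lock/unlock cycles */
	__ticket_t head = 250, tail = 2;

	/* old expression: mask the difference back into the ticket range */
	unsigned int masked = ((unsigned int)(tail - head)) & 0xff;

	/* new expression: casting to the ticket type truncates identically */
	__ticket_t cast = (__ticket_t)(tail - head);

	printf("%u %u\n", masked, (unsigned int)cast);	/* both print 8 */
	return 0;
}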
+0 −1
@@ -16,7 +16,6 @@ typedef u32 __ticketpair_t;
 #endif
 
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
-#define TICKET_MASK	((__ticket_t)((1 << TICKET_SHIFT) - 1))
 
 typedef struct arch_spinlock {
 	union {
+1 −1
@@ -87,7 +87,7 @@ void show_registers(struct pt_regs *regs)
 	int i;
 
 	print_modules();
-	__show_regs(regs, 0);
+	__show_regs(regs, !user_mode_vm(regs));
 
 	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
 		TASK_COMM_LEN, current->comm, task_pid_nr(current),
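The dumpstack change above only affects what gets printed: the second argument of __show_regs() selects whether control and debug registers are included in the dump, and it is now derived from the trap context instead of being hard-coded to 0. A toy stand-alone sketch of the call-site intent, with simplified stand-ins for the kernel structures and helpers:

#include <stdio.h>

struct pt_regs { unsigned long cs; };		/* simplified stand-in */

static int user_mode_vm(struct pt_regs *regs)
{
	return (regs->cs & 3) == 3;		/* rough RPL check; VM86 ignored here */
}

static void __show_regs(struct pt_regs *regs, int all)
{
	printf("general-purpose registers ...\n");
	if (all)
		printf("control and debug registers ...\n");
}

int main(void)
{
	struct pt_regs kernel_regs = { .cs = 0x10 };	/* a kernel code segment */

	/* kernel context: also dump the control/debug registers */
	__show_regs(&kernel_regs, !user_mode_vm(&kernel_regs));
	return 0;
}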
+38 −33
@@ -320,7 +320,7 @@ ENDPROC(native_usergs_sysret64)
 	movq %rsp, %rsi
 
 	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
-	testl $3, CS(%rdi)
+	testl $3, CS-RBP(%rsi)
 	je 1f
 	SWAPGS
 	/*
@@ -330,11 +330,10 @@ ENDPROC(native_usergs_sysret64)
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
 1:	incl PER_CPU_VAR(irq_count)
-	jne 2f
-	mov PER_CPU_VAR(irq_stack_ptr),%rsp
+	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	CFI_DEF_CFA_REGISTER	rsi
 
-2:	/* Store previous stack value */
+	/* Store previous stack value */
 	pushq %rsi
 	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
 			0x77 /* DW_OP_breg7 */, 0, \
@@ -1530,6 +1529,7 @@ ENTRY(nmi)
 
 	/* Use %rdx as out temp variable throughout */
 	pushq_cfi %rdx
+	CFI_REL_OFFSET rdx, 0
 
 	/*
 	 * If %cs was not the kernel segment, then the NMI triggered in user
@@ -1554,6 +1554,7 @@ ENTRY(nmi)
 	 */
 	lea 6*8(%rsp), %rdx
 	test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
+	CFI_REMEMBER_STATE
 
 nested_nmi:
 	/*
@@ -1585,10 +1586,12 @@ nested_nmi:
 
 nested_nmi_out:
 	popq_cfi %rdx
+	CFI_RESTORE rdx
 
 	/* No need to check faults here */
 	INTERRUPT_RETURN
 
+	CFI_RESTORE_STATE
 first_nmi:
 	/*
 	 * Because nested NMIs will use the pushed location that we
@@ -1620,10 +1623,15 @@ first_nmi:
 	 * | pt_regs                 |
 	 * +-------------------------+
 	 *
-	 * The saved RIP is used to fix up the copied RIP that a nested
-	 * NMI may zero out. The original stack frame and the temp storage
+	 * The saved stack frame is used to fix up the copied stack frame
+	 * that a nested NMI may change to make the interrupted NMI iret jump
+	 * to the repeat_nmi. The original stack frame and the temp storage
 	 * is also used by nested NMIs and can not be trusted on exit.
 	 */
+	/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
+	movq (%rsp), %rdx
+	CFI_RESTORE rdx
+
 	/* Set the NMI executing variable on the stack. */
 	pushq_cfi $1
 
@@ -1631,22 +1639,39 @@ first_nmi:
 	.rept 5
 	pushq_cfi 6*8(%rsp)
 	.endr
+	CFI_DEF_CFA_OFFSET SS+8-RIP
+
+	/* Everything up to here is safe from nested NMIs */
+
+	/*
+	 * If there was a nested NMI, the first NMI's iret will return
+	 * here. But NMIs are still enabled and we can take another
+	 * nested NMI. The nested NMI checks the interrupted RIP to see
+	 * if it is between repeat_nmi and end_repeat_nmi, and if so
+	 * it will just return, as we are about to repeat an NMI anyway.
+	 * This makes it safe to copy to the stack frame that a nested
+	 * NMI will update.
+	 */
+repeat_nmi:
+	/*
+	 * Update the stack variable to say we are still in NMI (the update
+	 * is benign for the non-repeat case, where 1 was pushed just above
+	 * to this very stack slot).
+	 */
+	movq $1, 5*8(%rsp)
 
 	/* Make another copy, this one may be modified by nested NMIs */
 	.rept 5
 	pushq_cfi 4*8(%rsp)
 	.endr
-
-	/* Do not pop rdx, nested NMIs will corrupt it */
-	movq 11*8(%rsp), %rdx
+	CFI_DEF_CFA_OFFSET SS+8-RIP
+end_repeat_nmi:
 
 	/*
 	 * Everything below this point can be preempted by a nested
-	 * NMI if the first NMI took an exception. Repeated NMIs
-	 * caused by an exception and nested NMI will start here, and
-	 * can still be preempted by another NMI.
+	 * NMI if the first NMI took an exception and reset our iret stack
+	 * so that we repeat another NMI.
 	 */
-restart_nmi:
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
@@ -1675,26 +1700,6 @@ nmi_restore:
 	CFI_ENDPROC
 END(nmi)
 
-	/*
-	 * If an NMI hit an iret because of an exception or breakpoint,
-	 * it can lose its NMI context, and a nested NMI may come in.
-	 * In that case, the nested NMI will change the preempted NMI's
-	 * stack to jump to here when it does the final iret.
-	 */
-repeat_nmi:
-	INTR_FRAME
-	/* Update the stack variable to say we are still in NMI */
-	movq $1, 5*8(%rsp)
-
-	/* copy the saved stack back to copy stack */
-	.rept 5
-	pushq_cfi 4*8(%rsp)
-	.endr
-
-	jmp restart_nmi
-	CFI_ENDPROC
-end_repeat_nmi:
-
 ENTRY(ignore_sysret)
 	CFI_STARTPROC
 	mov $-ENOSYS,%eax
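The comments added in the NMI hunks above describe the nesting scheme: the first NMI keeps a copy of its return frame plus an "NMI executing" variable on the stack, and a nested NMI (only possible because the first NMI hit an iret through an exception or breakpoint) does not run the handler itself; it just rewrites the copied frame so the first NMI repeats the handler at repeat_nmi. A toy C model of that control flow, with the stack-frame rewriting reduced to a flag (illustration only, not kernel code):

#include <stdio.h>

static int nmi_executing;	/* models the variable pushed at 5*8(%rsp) */
static int repeat_requested;	/* stands in for rewriting the copied iret frame */

static void handle_nmi(void);

static void nmi_entry(void)
{
	if (nmi_executing) {
		/* nested case: only ask the first NMI to run the handler again */
		repeat_requested = 1;
		return;
	}

	nmi_executing = 1;
	do {
		repeat_requested = 0;
		handle_nmi();	/* may be "interrupted" by a nested NMI */
	} while (repeat_requested);
	nmi_executing = 0;
}

static void handle_nmi(void)
{
	static int first = 1;

	printf("NMI handler runs\n");
	if (first) {
		first = 0;
		nmi_entry();	/* simulate a nested NMI arriving mid-handler */
	}
}

int main(void)
{
	nmi_entry();		/* prints "NMI handler runs" twice */
	return 0;
}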