Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 197fe6b0 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm updates from Ingo Molnar:
 "The changes in this cycle were:

   - Speed up the x86 __preempt_schedule() implementation
   - Fix/improve low level asm code debug info annotations"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Unwind-annotate thunk_32.S
  x86: Improve cmpxchg8b_emu.S
  x86: Improve cmpxchg16b_emu.S
  x86/lib/Makefile: Remove the unnecessary "+= thunk_64.o"
  x86: Speed up ___preempt_schedule*() by using THUNK helpers
parents faafcba3 f74954f0
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -39,8 +39,6 @@ obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y			+= pci-iommu_table.o
obj-y			+= resource.o

obj-$(CONFIG_PREEMPT)	+= preempt.o

obj-y				+= process.o
obj-y				+= i387.o xsave.o
obj-y				+= ptrace.o

arch/x86/kernel/preempt.S

deleted 100644 → 0
+0 −25
Original line number Diff line number Diff line

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/asm.h>
#include <asm/calling.h>

/*
 * ___preempt_schedule - assembly thunk around the C function
 * preempt_schedule(). It saves registers via the SAVE_ALL macro
 * (from one of the headers included above, presumably <asm/calling.h>
 * -- confirm per architecture), calls the scheduler entry point,
 * restores the registers and returns, so callers do not need to
 * treat any registers as clobbered by the call.
 */
ENTRY(___preempt_schedule)
	CFI_STARTPROC			/* DWARF unwind annotation (<asm/dwarf2.h>) */
	SAVE_ALL			/* save registers before calling into C */
	call preempt_schedule		/* C preemption entry point */
	RESTORE_ALL			/* restore the saved registers */
	ret
	CFI_ENDPROC

#ifdef CONFIG_CONTEXT_TRACKING

/*
 * ___preempt_schedule_context - thunk around the C function
 * preempt_schedule_context(), built only when CONFIG_CONTEXT_TRACKING
 * is enabled. Saves registers with SAVE_ALL, calls the C function,
 * restores with RESTORE_ALL and returns, so the call clobbers no
 * registers from the caller's point of view.
 */
ENTRY(___preempt_schedule_context)
	CFI_STARTPROC			/* DWARF unwind annotation (<asm/dwarf2.h>) */
	SAVE_ALL			/* save registers before calling into C */
	call preempt_schedule_context	/* context-tracking-aware preemption entry */
	RESTORE_ALL			/* restore the saved registers */
	ret
	CFI_ENDPROC

#endif
+1 −1
Original line number Diff line number Diff line
@@ -38,7 +38,7 @@ endif
else
        obj-y += iomap_copy_64.o
        lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
        lib-y += thunk_64.o clear_page_64.o copy_page_64.o
        lib-y += clear_page_64.o copy_page_64.o
        lib-y += memmove_64.o memset_64.o
        lib-y += copy_user_64.o copy_user_nocache_64.o
	lib-y += cmpxchg16b_emu.o
+13 −19
Original line number Diff line number Diff line
@@ -6,15 +6,8 @@
 *
 */
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>
#include <asm/dwarf2.h>

#ifdef CONFIG_SMP
#define SEG_PREFIX %gs:
#else
#define SEG_PREFIX
#endif
#include <asm/percpu.h>

.text

@@ -39,24 +32,25 @@ CFI_STARTPROC
# *atomic* on a single cpu (as provided by the this_cpu_xx class of
# macros).
#
this_cpu_cmpxchg16b_emu:
	pushf
	pushfq_cfi
	cli

	cmpq SEG_PREFIX(%rsi), %rax
	jne not_same
	cmpq SEG_PREFIX 8(%rsi), %rdx
	jne not_same
	cmpq PER_CPU_VAR((%rsi)), %rax
	jne .Lnot_same
	cmpq PER_CPU_VAR(8(%rsi)), %rdx
	jne .Lnot_same

	movq %rbx, SEG_PREFIX(%rsi)
	movq %rcx, SEG_PREFIX 8(%rsi)
	movq %rbx, PER_CPU_VAR((%rsi))
	movq %rcx, PER_CPU_VAR(8(%rsi))

	popf
	CFI_REMEMBER_STATE
	popfq_cfi
	mov $1, %al
	ret

 not_same:
	popf
	CFI_RESTORE_STATE
.Lnot_same:
	popfq_cfi
	xor %al,%al
	ret

+9 −11
Original line number Diff line number Diff line
@@ -7,11 +7,8 @@
 */

#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>
#include <asm/dwarf2.h>


.text

/*
@@ -30,27 +27,28 @@ CFI_STARTPROC
# set the whole ZF thing (caller will just compare
# eax:edx with the expected value)
#
cmpxchg8b_emu:
	pushfl
	pushfl_cfi
	cli

	cmpl  (%esi), %eax
	jne not_same
	jne .Lnot_same
	cmpl 4(%esi), %edx
	jne half_same
	jne .Lhalf_same

	movl %ebx,  (%esi)
	movl %ecx, 4(%esi)

	popfl
	CFI_REMEMBER_STATE
	popfl_cfi
	ret

 not_same:
	CFI_RESTORE_STATE
.Lnot_same:
	movl  (%esi), %eax
 half_same:
.Lhalf_same:
	movl 4(%esi), %edx

	popfl
	popfl_cfi
	ret

CFI_ENDPROC
Loading