arch/x86/ia32/ia32entry.S  +4 −4

@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
 	CFI_DEF_CFA	rsp,0
 	CFI_REGISTER	rsp,rbp
 	SWAPGS_UNSAFE_STACK
-	movq	%gs:pda_kernelstack, %rsp
-	addq	$(PDA_STACKOFFSET),%rsp
+	movq	PER_CPU_VAR(kernel_stack), %rsp
+	addq	$(KERNEL_STACK_OFFSET),%rsp
 	/*
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs, here we enable it straight after entry:
@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
 ENTRY(ia32_cstar_target)
 	CFI_STARTPROC32	simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
+	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
 	CFI_REGISTER	rip,rcx
 	/*CFI_REGISTER	rflags,r11*/
 	SWAPGS_UNSAFE_STACK
 	movl	%esp,%r8d
 	CFI_REGISTER	rsp,r8
-	movq	%gs:pda_kernelstack,%rsp
+	movq	PER_CPU_VAR(kernel_stack),%rsp
 	/*
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
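The PER_CPU_VAR(kernel_stack) references above resolve against an ordinary per-cpu variable on the C side. A minimal sketch of that pairing, assuming the declaration sits in a header visible to entry code (the identifiers come from this diff; the comment about the offset is an inference from the addq above, not something this hunk states):

	#include <asm/percpu.h>

	/*
	 * C-side variable that PER_CPU_VAR(kernel_stack) binds to: on
	 * x86-64 the assembly reference becomes a %gs-relative load of
	 * the current cpu's copy.
	 */
	DECLARE_PER_CPU(unsigned long, kernel_stack);

	/*
	 * The stored value appears to sit KERNEL_STACK_OFFSET bytes below
	 * the true stack top, which is why the sysenter path reloads %rsp
	 * with one movq plus one addq rather than recomputing the top.
	 */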
arch/x86/include/asm/current.h  +3 −21

 #ifndef _ASM_X86_CURRENT_H
 #define _ASM_X86_CURRENT_H
 
-#ifdef CONFIG_X86_32
 #include <linux/compiler.h>
 #include <asm/percpu.h>
 
+#ifndef __ASSEMBLY__
 struct task_struct;
 
 DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
-	return percpu_read(current_task);
-}
-
-#else /* X86_32 */
-
-#ifndef __ASSEMBLY__
-#include <asm/pda.h>
-
-struct task_struct;
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return read_pda(pcurrent);
+	return percpu_read(current_task);
 }
 
-#else /* __ASSEMBLY__ */
-
-#include <asm/asm-offsets.h>
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+#define current get_current()
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* X86_32 */
-
-#define current get_current()
-
 #endif /* _ASM_X86_CURRENT_H */
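With the 32/64-bit split removed, both configurations reach current through the same percpu_read(current_task). A sketch of a use site; the helper below is hypothetical, written only to show the accessor (task_pid_nr() and printk() are standard kernel API):

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <asm/current.h>

	/* Hypothetical caller: `current` expands to get_current(), now a
	 * single segment-based per-cpu load on 32- and 64-bit alike. */
	static void show_current_pid(void)
	{
		struct task_struct *tsk = current;

		printk(KERN_DEBUG "running in context of pid %d\n",
		       task_pid_nr(tsk));
	}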
arch/x86/include/asm/hardirq_64.h  +19 −5

@@ -3,22 +3,36 @@
 
 #include <linux/threads.h>
 #include <linux/irq.h>
-#include <asm/pda.h>
 #include <asm/apic.h>
 
+typedef struct {
+	unsigned int __softirq_pending;
+	unsigned int __nmi_count;	/* arch dependent */
+	unsigned int apic_timer_irqs;	/* arch dependent */
+	unsigned int irq0_irqs;
+	unsigned int irq_resched_count;
+	unsigned int irq_call_count;
+	unsigned int irq_tlb_count;
+	unsigned int irq_thermal_count;
+	unsigned int irq_spurious_count;
+	unsigned int irq_threshold_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
 
 #define __ARCH_IRQ_STAT 1
 
-#define inc_irq_stat(member)	add_pda(member, 1)
+#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)
 
-#define local_softirq_pending() read_pda(__softirq_pending)
+#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING 1
 
-#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
-#define or_softirq_pending(x)  or_pda(__softirq_pending, (x))
+#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)  percpu_or(irq_stat.__softirq_pending, (x))
 
 extern void ack_bad_irq(unsigned int irq);
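The irq_cpustat_t layout introduced here mirrors what 32-bit already carried, so generic irq code sees one shape on both. A sketch of a post-conversion use site; the handler name is hypothetical, while inc_irq_stat() is the macro defined just above:

	#include <asm/hardirq.h>

	/* Hypothetical IPI handler body: percpu_add() emits a single
	 * %gs-prefixed add on the local cpu's irq_stat slot, so the
	 * counter bump is one instruction with no PDA indirection. */
	static void count_resched_ipi(void)
	{
		inc_irq_stat(irq_resched_count);
	}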
arch/x86/include/asm/mmu_context_64.h  +7 −9

 #ifndef _ASM_X86_MMU_CONTEXT_64_H
 #define _ASM_X86_MMU_CONTEXT_64_H
 
-#include <asm/pda.h>
-
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-	if (read_pda(mmu_state) == TLBSTATE_OK)
-		write_pda(mmu_state, TLBSTATE_LAZY);
+	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
 #endif
 }
 
@@ -19,8 +17,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* stop flush ipis for the previous mm */
 		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
-		write_pda(mmu_state, TLBSTATE_OK);
-		write_pda(active_mm, next);
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		percpu_write(cpu_tlbstate.active_mm, next);
 #endif
 		cpu_set(cpu, next->cpu_vm_mask);
 		load_cr3(next->pgd);
@@ -30,9 +28,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	}
 #ifdef CONFIG_SMP
 	else {
-		write_pda(mmu_state, TLBSTATE_OK);
-		if (read_pda(active_mm) != next)
-			BUG();
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
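The accessors above presuppose a cpu_tlbstate per-cpu object bundling the two retired PDA fields. A sketch of that structure as the series shapes it; the field names are taken from the hunks, while the state values and the header placement (tlbflush.h) are my reading of the series rather than something this diff shows:

	/* Per-cpu TLB bookkeeping replacing pda.mmu_state / pda.active_mm. */
	#define TLBSTATE_OK	1	/* cpu is actively using active_mm */
	#define TLBSTATE_LAZY	2	/* cpu holds only a lazy reference */

	struct tlb_state {
		struct mm_struct *active_mm;
		int state;
	};
	DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);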
arch/x86/include/asm/page_64.h  +2 −2

@@ -13,8 +13,8 @@
 #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
 #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
 
-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+#define IRQ_STACK_ORDER 2
+#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
 
 #define STACKFAULT_STACK 1
 #define DOUBLEFAULT_STACK 2
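The rename is mechanical, but the sizing is worth spelling out: with 4 KB pages, order 2 gives PAGE_SIZE << 2 = 16 KB of interrupt stack per cpu. A sketch of a consumer, with identifiers that are assumptions about how the rest of the series uses these constants (this hunk itself only renames them):

	/* 4096 << IRQ_STACK_ORDER == 16384 bytes == 16 KB per cpu. */
	DECLARE_PER_CPU(char, irq_stack[IRQ_STACK_SIZE]);	/* the stack itself */
	DECLARE_PER_CPU(char *, irq_stack_ptr);			/* its usable top */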