arch/powerpc/include/asm/hardirq.h  (+6 −1)

@@ -21,7 +21,12 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define local_softirq_pending()	__get_cpu_var(irq_stat).__softirq_pending
+#define local_softirq_pending()	__this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x))
 
 static inline void ack_bad_irq(unsigned int irq)
 {
arch/powerpc/include/asm/tlbflush.h  (+2 −2)

@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
 	batch->active = 1;
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
 	if (batch->index)
 		__flush_tlb_pending(batch);
arch/powerpc/include/asm/xics.h  (+4 −4)

@@ -98,7 +98,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
 
 static inline void xics_push_cppr(unsigned int vec)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
 		return;
@@ -111,7 +111,7 @@ static inline void xics_push_cppr(unsigned int vec)
 
 static inline unsigned char xics_pop_cppr(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	if (WARN_ON(os_cppr->index < 1))
 		return LOWEST_PRIORITY;
@@ -121,7 +121,7 @@ static inline unsigned char xics_pop_cppr(void)
 
 static inline void xics_set_base_cppr(unsigned char cppr)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	/* we only really want to set the priority when there's
 	 * just one cppr value on the stack
@@ -133,7 +133,7 @@ static inline void xics_set_base_cppr(unsigned char cppr)
 
 static inline unsigned char xics_cppr_top(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	return os_cppr->stack[os_cppr->index];
 }
arch/powerpc/kernel/dbell.c  (+1 −1)

@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs)
 
 	may_hard_irq_enable();
 
-	__get_cpu_var(irq_stat).doorbell_irqs++;
+	__this_cpu_inc(irq_stat.doorbell_irqs);
 
 	smp_ipi_demux();
 
arch/powerpc/kernel/hw_breakpoint.c  (+3 −3)

@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type)
 int arch_install_hw_breakpoint(struct perf_event *bp)
 {
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
 	*slot = bp;
 
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
  */
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
-	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
 	if (*slot != bp) {
 		WARN_ONCE(1, "Can't find the breakpoint");
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
 	 */
 	rcu_read_lock();
 
-	bp = __get_cpu_var(bp_per_reg);
+	bp = __this_cpu_read(bp_per_reg);
 	if (!bp)
 		goto out;
 	info = counter_arch_bp(bp);
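Every hunk above applies the same substitution: an lvalue produced by __get_cpu_var() becomes either a single this_cpu operation on the field, or a this_cpu_ptr() when several fields of this CPU's instance are touched. The sketch below illustrates the pattern in isolation; the per-cpu variable and both functions are hypothetical and exist only for this example, and the double-underscore forms assume the caller already runs with preemption disabled (as the interrupt paths above do).

#include <linux/percpu.h>

/* Hypothetical per-cpu statistics block, for illustration only. */
struct demo_stat {
	unsigned int	events;
};
static DEFINE_PER_CPU(struct demo_stat, demo_stat);

static void demo_count_event(void)
{
	/* Old form (removed by this series):
	 *	__get_cpu_var(demo_stat).events++;
	 * New form: one read-modify-write on this CPU's copy.
	 */
	__this_cpu_inc(demo_stat.events);
}

static void demo_touch_several_fields(void)
{
	/* When several accesses are needed, take a pointer to this
	 * CPU's instance instead of repeating the lookup.
	 * Old form:	&__get_cpu_var(demo_stat)
	 */
	struct demo_stat *s = this_cpu_ptr(&demo_stat);

	if (s->events)
		s->events = 0;
}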