arch/x86/kernel/paravirt.c (+4 −0)

--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -270,11 +270,13 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 
 void __init paravirt_use_bytelocks(void)
 {
+#ifdef CONFIG_SMP
 	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
 	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
 	pv_lock_ops.spin_lock = __byte_spin_lock;
 	pv_lock_ops.spin_trylock = __byte_spin_trylock;
 	pv_lock_ops.spin_unlock = __byte_spin_unlock;
+#endif
 }
 
 struct pv_info pv_info = {
@@ -461,12 +463,14 @@ struct pv_mmu_ops pv_mmu_ops = {
 };
 
 struct pv_lock_ops pv_lock_ops = {
+#ifdef CONFIG_SMP
 	.spin_is_locked = __ticket_spin_is_locked,
 	.spin_is_contended = __ticket_spin_is_contended,
 	.spin_lock = __ticket_spin_lock,
 	.spin_trylock = __ticket_spin_trylock,
 	.spin_unlock = __ticket_spin_unlock,
+#endif
 };
 
 EXPORT_SYMBOL_GPL(pv_time_ops);
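The hunks above wire up the byte- and ticket-spinlock operations only when CONFIG_SMP is set; on UP kernels the __byte_spin_* / __ticket_spin_* implementations don't exist, so the unguarded assignments and initializers would break the build. Below is a minimal standalone sketch of the same pattern, outside the kernel; the names (lock_ops, ticket_lock, ...) are illustrative, not the kernel's:

#include <stdio.h>

struct lock_ops {
	void (*lock)(void);
	void (*unlock)(void);
};

/* SMP-only implementations: absent entirely from a UP build. */
#ifdef CONFIG_SMP
static void ticket_lock(void)   { puts("ticket lock"); }
static void ticket_unlock(void) { puts("ticket unlock"); }
#endif

/* Mirrors pv_lock_ops: members are only wired up on SMP builds, so a
 * UP build never references the missing SMP-only functions above. */
static struct lock_ops lock_ops = {
#ifdef CONFIG_SMP
	.lock	= ticket_lock,
	.unlock	= ticket_unlock,
#endif
};

int main(void)
{
	printf("SMP lock ops wired up: %s\n", lock_ops.lock ? "yes" : "no");
	if (lock_ops.lock)
		lock_ops.lock();
	return 0;
}

Built with -DCONFIG_SMP the ops table is populated; built without it the initializer list is empty (gcc accepts "= { }", as the kernel relies on) and the pointers stay NULL, but nothing in the translation unit refers to the absent ticket functions, so it links cleanly.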
include/asm-x86/paravirt.h (+4 −0)

--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -1387,6 +1387,8 @@ void _paravirt_nop(void);
 
 void paravirt_use_bytelocks(void);
 
+#ifdef CONFIG_SMP
+
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
@@ -1412,6 +1414,8 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 	return PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
 
+#endif
+
 /* These all sit in the .parainstructions section to tell us what to patch. */
 struct paravirt_patch_site {
 	u8 *instr;		/* original instructions */
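On the header side, the inline __raw_spin_* wrappers are compiled out on UP for the same reason: they dispatch through pv_lock_ops via the PVOP_CALL/VCALL macros, and UP kernels take their spinlock entry points from the generic uniprocessor spinlock code instead. A rough sketch of the shape of such a guarded wrapper, compilable as a standalone header, with a plain indirect call standing in for the patchable PVOP_* dispatch (all names illustrative, not the kernel's):

struct lock_ops {
	int (*is_locked)(void *lock);
	void (*lock)(void *lock);
};

/* Defined once in a .c file, as pv_lock_ops is in paravirt.c. */
extern struct lock_ops lock_ops;

#ifdef CONFIG_SMP
/* Only meaningful on SMP: dispatch through the ops table. The real
 * kernel code uses the PVOP_* macros here so each call site can be
 * patched at boot; a direct indirect call is illustration only. */
static inline int raw_spin_is_locked_sketch(void *lock)
{
	return lock_ops.is_locked(lock);
}

static inline void raw_spin_lock_sketch(void *lock)
{
	lock_ops.lock(lock);
}
#endif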