Loading include/linux/kernel.h +2 −5 Original line number Diff line number Diff line Loading @@ -193,13 +193,10 @@ extern int _cond_resched(void); (__x < 0) ? -__x : __x; \ }) #ifdef CONFIG_PROVE_LOCKING #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) void might_fault(void); #else static inline void might_fault(void) { __might_sleep(__FILE__, __LINE__, 0); } static inline void might_fault(void) { } #endif extern struct atomic_notifier_head panic_notifier_list; Loading mm/memory.c +7 −4 Original line number Diff line number Diff line Loading @@ -4229,7 +4229,7 @@ void print_vma_addr(char *prefix, unsigned long ip) up_read(&mm->mmap_sem); } #ifdef CONFIG_PROVE_LOCKING #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) void might_fault(void) { /* Loading @@ -4241,14 +4241,17 @@ void might_fault(void) if (segment_eq(get_fs(), KERNEL_DS)) return; __might_sleep(__FILE__, __LINE__, 0); /* * it would be nicer only to annotate paths which are not under * pagefault_disable, however that requires a larger audit and * providing helpers like get_user_atomic. */ if (!in_atomic() && current->mm) if (in_atomic()) return; __might_sleep(__FILE__, __LINE__, 0); if (current->mm) might_lock_read(&current->mm->mmap_sem); } EXPORT_SYMBOL(might_fault); Loading Loading
include/linux/kernel.h +2 −5 Original line number Diff line number Diff line Loading @@ -193,13 +193,10 @@ extern int _cond_resched(void); (__x < 0) ? -__x : __x; \ }) #ifdef CONFIG_PROVE_LOCKING #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) void might_fault(void); #else static inline void might_fault(void) { __might_sleep(__FILE__, __LINE__, 0); } static inline void might_fault(void) { } #endif extern struct atomic_notifier_head panic_notifier_list; Loading
mm/memory.c +7 −4 Original line number Diff line number Diff line Loading @@ -4229,7 +4229,7 @@ void print_vma_addr(char *prefix, unsigned long ip) up_read(&mm->mmap_sem); } #ifdef CONFIG_PROVE_LOCKING #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) void might_fault(void) { /* Loading @@ -4241,14 +4241,17 @@ void might_fault(void) if (segment_eq(get_fs(), KERNEL_DS)) return; __might_sleep(__FILE__, __LINE__, 0); /* * it would be nicer only to annotate paths which are not under * pagefault_disable, however that requires a larger audit and * providing helpers like get_user_atomic. */ if (!in_atomic() && current->mm) if (in_atomic()) return; __might_sleep(__FILE__, __LINE__, 0); if (current->mm) might_lock_read(&current->mm->mmap_sem); } EXPORT_SYMBOL(might_fault); Loading