include/linux/mm_types.h  +0 −2

@@ -14,7 +14,6 @@
 #include <linux/page-debug-flags.h>
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
-#include <linux/workqueue.h>
 
 #include <asm/page.h>
 #include <asm/mmu.h>

@@ -459,7 +458,6 @@ struct mm_struct {
 	bool tlb_flush_pending;
 #endif
 	struct uprobes_state uprobes_state;
-	struct work_struct async_put_work;
 };
 
 /* first nid will either be a valid NID or one of these values */
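The two deleted lines above are the support for deferring the final put of an mm to a workqueue: mm_struct carried its own embedded work item, and the work handler recovered the enclosing mm from that member with container_of. Below is a minimal, self-contained userspace sketch of that embedded-work-item pattern; the names (work_item, mm_like, release_fn) are illustrative only and none of this is kernel code.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
	void (*fn)(struct work_item *work);
};

struct mm_like {
	int users;
	struct work_item async_put_work;	/* embedded, as mm_struct had */
};

static void release_fn(struct work_item *work)
{
	/* recover the enclosing object from the embedded member */
	struct mm_like *mm = container_of(work, struct mm_like, async_put_work);

	printf("releasing mm_like, users=%d\n", mm->users);
}

int main(void)
{
	struct mm_like mm = { .users = 0 };

	mm.async_put_work.fn = release_fn;
	/* a real workqueue would invoke this later, from process context */
	mm.async_put_work.fn(&mm.async_put_work);
	return 0;
}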
include/linux/sched.h  +0 −5

@@ -2348,11 +2348,6 @@ static inline bool mmget_still_valid(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern int mmput(struct mm_struct *);
-/* same as above but performs the slow path from the async context. Can
- * be called from the atomic context as well
- */
-extern void mmput_async(struct mm_struct *);
-
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
 /*
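With the mmput_async() declaration gone, mmput() is the only way to drop an mm_users reference, and because it may sleep it has to be called from process context. The following is a hedged sketch of the usual caller pattern around the two declarations that remain (get_task_mm()/mmput()); the function name inspect_task_mm() is illustrative, and the fragment only compiles inside a kernel tree.

#include <linux/errno.h>
#include <linux/sched.h>

static int inspect_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = get_task_mm(task);	/* takes an mm_users reference, or returns NULL */
	if (!mm)
		return -EINVAL;

	/* ... examine mm (counters, VMAs under mmap_sem, ...) ... */

	mmput(mm);		/* may sleep: never call this from atomic context */
	return 0;
}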
kernel/fork.c  +16 −38

@@ -629,10 +629,16 @@ void __mmdrop(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
-static inline void __mmput(struct mm_struct *mm)
+/*
+ * Decrement the use count and release all resources for an mm.
+ */
+int mmput(struct mm_struct *mm)
 {
-	VM_BUG_ON(atomic_read(&mm->mm_users));
+	int mm_freed = 0;
 
+	might_sleep();
+	if (atomic_dec_and_test(&mm->mm_users)) {
+		uprobe_clear_state(mm);
 	exit_aio(mm);
 	ksm_exit(mm);
 	khugepaged_exit(mm); /* must run before exit_mmap */

@@ -646,40 +652,12 @@ static inline void __mmput(struct mm_struct *mm)
 	if (mm->binfmt)
 		module_put(mm->binfmt->module);
+	mm_freed = 1;
 	mmdrop(mm);
-}
-
-/*
- * Decrement the use count and release all resources for an mm.
- */
-int mmput(struct mm_struct *mm)
-{
-	int mm_freed = 0;
-
-	might_sleep();
-
-	if (atomic_dec_and_test(&mm->mm_users)) {
-		uprobe_clear_state(mm);
-		mm_freed = 1;
-		__mmput(mm);
 	}
 	return mm_freed;
 }
 EXPORT_SYMBOL_GPL(mmput);
-
-static void mmput_async_fn(struct work_struct *work)
-{
-	struct mm_struct *mm = container_of(work, struct mm_struct,
-					    async_put_work);
-	__mmput(mm);
-}
-
-void mmput_async(struct mm_struct *mm)
-{
-	if (atomic_dec_and_test(&mm->mm_users)) {
-		INIT_WORK(&mm->async_put_work, mmput_async_fn);
-		schedule_work(&mm->async_put_work);
-	}
-}
-
 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
 {
 	if (new_exe_file)
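One detail the restored mmput() keeps is its int return: unlike mainline's void mmput(), this tree's version reports whether the call dropped the final mm_users reference (mm_freed). Below is a hedged sketch of how a caller might use that return value; drop_mm_and_report() is an illustrative name, not an existing kernel function, and the fragment assumes a kernel build environment.

#include <linux/printk.h>
#include <linux/sched.h>

/* Assumes process context: mmput() calls might_sleep(). */
static void drop_mm_and_report(struct mm_struct *mm)
{
	if (mmput(mm))		/* nonzero: this put freed the mm */
		pr_debug("dropped the last mm_users reference\n");
}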