arch/arm64/Kconfig +1 −0

```diff
@@ -189,6 +189,7 @@ config ARM64
 	select SWIOTLB
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
+	select ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
 	help
 	  ARM 64-bit (AArch64) Linux support.
```
arch/arm64/mm/fault.c +23 −3

```diff
@@ -406,10 +406,9 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static int __do_page_fault(struct vm_area_struct *vma, unsigned long addr,
 			   unsigned int mm_flags, unsigned long vm_flags)
 {
-	struct vm_area_struct *vma = find_vma(mm, addr);
 
 	if (unlikely(!vma))
 		return VM_FAULT_BADMAP;
@@ -456,6 +455,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	vm_fault_t fault, major = 0;
 	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	struct vm_area_struct *vma = NULL;
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
@@ -495,6 +495,14 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
+	/*
+	 * let's try a speculative page fault without grabbing the
+	 * mmap_sem.
+	 */
+	fault = handle_speculative_fault(mm, addr, mm_flags, &vma);
+	if (fault != VM_FAULT_RETRY)
+		goto done;
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -519,7 +527,10 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
+	if (!vma || !can_reuse_spf_vma(vma, addr))
+		vma = find_vma(mm, addr);
+
+	fault = __do_page_fault(vma, addr, mm_flags, vm_flags);
 	major |= fault & VM_FAULT_MAJOR;
 
 	if (fault & VM_FAULT_RETRY) {
@@ -542,11 +553,20 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
 			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			mm_flags |= FAULT_FLAG_TRIED;
+
+			/*
+			 * Do not try to reuse this vma and fetch it
+			 * again since we will release the mmap_sem.
+			 */
+			vma = NULL;
+
 			goto retry;
 		}
 	}
 	up_read(&mm->mmap_sem);
 
+done:
+
 	/*
 	 * Handle the "normal" (no error) case first.
 	 */
```
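Review note: the fault.c hunks amount to a try-speculative-first flow: attempt the fault without mmap_sem, and fall back to the classic locked path only when handle_speculative_fault() returns VM_FAULT_RETRY. A condensed sketch of the resulting control flow is below; handle_speculative_fault() and can_reuse_spf_vma() come from the SPF core patches rather than this diff, and the trylock dance, signal handling, and error paths are omitted, so treat it as an illustration rather than the exact function body.

```c
/* Condensed, illustrative flow of the modified do_page_fault(). */
static vm_fault_t fault_flow(struct mm_struct *mm, unsigned long addr,
			     unsigned int mm_flags, unsigned long vm_flags)
{
	struct vm_area_struct *vma = NULL;
	vm_fault_t fault;

	/* 1. Lockless attempt; on success mmap_sem is never taken. */
	fault = handle_speculative_fault(mm, addr, mm_flags, &vma);
	if (fault != VM_FAULT_RETRY)
		goto done;

retry:
	/* 2. Fallback: the classic path under mmap_sem. */
	down_read(&mm->mmap_sem);

	/* Reuse the vma found speculatively only if it is still valid. */
	if (!vma || !can_reuse_spf_vma(vma, addr))
		vma = find_vma(mm, addr);

	fault = __do_page_fault(vma, addr, mm_flags, vm_flags);

	if ((fault & VM_FAULT_RETRY) && (mm_flags & FAULT_FLAG_ALLOW_RETRY)) {
		/* handle_mm_fault() dropped mmap_sem before returning. */
		mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
		mm_flags |= FAULT_FLAG_TRIED;
		vma = NULL;	/* stale once the lock was dropped; refetch */
		goto retry;
	}
	up_read(&mm->mmap_sem);
done:
	return fault;
}
```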
fs/proc/task_mmu.c +4 −1

```diff
@@ -1277,8 +1277,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 			goto out_mm;
 		}
 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
-			vma->vm_flags &= ~VM_SOFTDIRTY;
+			vm_write_begin(vma);
+			WRITE_ONCE(vma->vm_flags,
+				   vma->vm_flags & ~VM_SOFTDIRTY);
 			vma_set_page_prot(vma);
+			vm_write_end(vma);
 		}
 		downgrade_write(&mm->mmap_sem);
 		break;
```
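The vm_write_begin()/vm_write_end() pair is the write-side half of the SPF protocol for VMA fields that speculative handlers read without mmap_sem. In the series they are, roughly, thin wrappers around a per-VMA seqcount (vm_sequence, added to struct vm_area_struct by the core patches):

```c
/*
 * Roughly how the SPF core patches define the write-side helpers;
 * with CONFIG_SPECULATIVE_PAGE_FAULT disabled they compile to no-ops.
 */
static inline void vm_write_begin(struct vm_area_struct *vma)
{
	write_seqcount_begin(&vma->vm_sequence);
}

static inline void vm_write_end(struct vm_area_struct *vma)
{
	write_seqcount_end(&vma->vm_sequence);
}
```

Bracketing both the WRITE_ONCE() of vm_flags and vma_set_page_prot() in one write section means the two updates land under a single sequence bump: a speculative reader either sees the old state or notices the count moved and retries.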
fs/userfaultfd.c +13 −4

```diff
@@ -675,8 +675,11 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
 	octx = vma->vm_userfaultfd_ctx.ctx;
 	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
+		vm_write_begin(vma);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
+		WRITE_ONCE(vma->vm_flags,
+			   vma->vm_flags & ~(VM_UFFD_WP | VM_UFFD_MISSING));
+		vm_write_end(vma);
 		return 0;
 	}
@@ -919,8 +922,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 		else
 			prev = vma;
 	}
-	vma->vm_flags = new_flags;
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, new_flags);
 	vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+	vm_write_end(vma);
 }
 up_write(&mm->mmap_sem);
 mmput(mm);
@@ -1487,8 +1492,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx.ctx = ctx;
+		vm_write_end(vma);
 
 	skip:
 		prev = vma;
@@ -1650,8 +1657,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vm_write_end(vma);
 
 	skip:
 		prev = vma;
```
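Same pattern at all four userfaultfd sites, with one detail worth calling out: the vm_flags store and the vm_userfaultfd_ctx store sit inside a single write section, so a lockless reader can never observe VM_UFFD_* set while the context pointer is already gone, or vice versa. A hypothetical reader, not code from the series, that relies on this invariant:

```c
/*
 * Hypothetical lockless reader: return the userfaultfd context only
 * if vm_flags and vm_userfaultfd_ctx were sampled in one unbroken
 * vm_sequence window, i.e. as a coherent pair.
 */
static struct userfaultfd_ctx *
uffd_ctx_speculative(struct vm_area_struct *vma)
{
	struct userfaultfd_ctx *ctx;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&vma->vm_sequence);
		ctx = NULL;
		if (READ_ONCE(vma->vm_flags) & (VM_UFFD_WP | VM_UFFD_MISSING))
			ctx = READ_ONCE(vma->vm_userfaultfd_ctx.ctx);
	} while (read_seqcount_retry(&vma->vm_sequence, seq));

	return ctx;
}
```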
include/linux/hugetlb_inline.h +1 −1

```diff
@@ -8,7 +8,7 @@
 
 static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
-	return !!(vma->vm_flags & VM_HUGETLB);
+	return !!(READ_ONCE(vma->vm_flags) & VM_HUGETLB);
 }
 
 #else
```
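The hugetlb_inline.h change is the matching read side: is_vm_hugetlb_page() is reachable from the speculative path, where nothing serializes against a concurrent writer, so the load goes through READ_ONCE() to pair with the WRITE_ONCE() stores above and to stop the compiler from tearing, caching, or refetching vm_flags. The same snapshot idiom applies to any multi-test on the field; a hypothetical example:

```c
/*
 * Hypothetical example (not from this series): take one READ_ONCE()
 * snapshot of vm_flags and test only the snapshot, so both checks
 * observe the same value even if a writer updates vm_flags meanwhile.
 */
static bool vma_is_huge_and_writable(struct vm_area_struct *vma)
{
	unsigned long flags = READ_ONCE(vma->vm_flags);

	return (flags & VM_HUGETLB) && (flags & VM_WRITE);
}
```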