kernel/fork.c (+28 −2)

@@ -424,7 +424,7 @@ EXPORT_SYMBOL(free_task);
 static __latent_entropy int dup_mmap(struct mm_struct *mm,
 					struct mm_struct *oldmm)
 {
-	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+	struct vm_area_struct *mpnt, *tmp, *prev, **pprev, *last = NULL;
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
@@ -543,8 +543,18 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		rb_parent = &tmp->vm_rb;
 
 		mm->map_count++;
-		if (!(tmp->vm_flags & VM_WIPEONFORK))
+		if (!(tmp->vm_flags & VM_WIPEONFORK)) {
+			if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT)) {
+				/*
+				 * Mark this VMA as changing to prevent the
+				 * speculative page fault handler from
+				 * processing it until the TLB is flushed
+				 * below.
+				 */
+				last = mpnt;
+				vm_write_begin(mpnt);
+			}
 			retval = copy_page_range(mm, oldmm, mpnt);
+		}
 
 		if (tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
@@ -557,6 +567,22 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 out:
 	up_write(&mm->mmap_sem);
 	flush_tlb_mm(oldmm);
+	if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT)) {
+		/*
+		 * Since the TLB has been flushed, we can safely unmark the
+		 * copied VMAs and allow the speculative page fault handler
+		 * to process them again.
+		 * Walk back the VMA list from the last marked VMA.
+		 */
+		for (; last; last = last->vm_prev) {
+			if (last->vm_flags & VM_DONTCOPY)
+				continue;
+			if (!(last->vm_flags & VM_WIPEONFORK))
+				vm_write_end(last);
+		}
+	}
 	up_write(&oldmm->mmap_sem);
 	dup_userfaultfd_complete(&uf);
 fail_uprobe_end:
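For context: in the speculative page fault (SPF) series, vm_write_begin()/vm_write_end() are the write-side markers of a per-VMA sequence count (vm_sequence); the count is odd while the VMA is being modified, and the speculative handler samples it before walking the page tables, falling back to the regular mmap_sem path if it observes an odd value or a change on revalidation. The userspace sketch below models that protocol with C11 atomics. It is illustrative only: struct spf_vma and the model_* helpers are invented names, and the real kernel helpers wrap write_seqcount_begin()/write_seqcount_end() with finer-grained memory ordering than the sequentially consistent atomics used here.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the per-VMA state the SPF series adds. */
struct spf_vma {
	atomic_uint vm_sequence;	/* odd while a writer is active */
};

/* Writer side: roughly what vm_write_begin()/vm_write_end() amount to. */
static void model_vm_write_begin(struct spf_vma *vma)
{
	atomic_fetch_add(&vma->vm_sequence, 1);	/* now odd: VMA is changing */
}

static void model_vm_write_end(struct spf_vma *vma)
{
	atomic_fetch_add(&vma->vm_sequence, 1);	/* even again: VMA is stable */
}

/* Reader side: sample the count, speculate, then revalidate. */
static bool model_read_begin(struct spf_vma *vma, unsigned int *seq)
{
	*seq = atomic_load(&vma->vm_sequence);
	return !(*seq & 1);		/* odd => writer active, bail out */
}

static bool model_read_retry(struct spf_vma *vma, unsigned int seq)
{
	return atomic_load(&vma->vm_sequence) != seq;
}

int main(void)
{
	struct spf_vma vma = { .vm_sequence = 0 };
	unsigned int seq;

	if (model_read_begin(&vma, &seq) && !model_read_retry(&vma, seq))
		puts("speculative fault would be allowed");

	model_vm_write_begin(&vma);	/* dup_mmap marks the VMA... */
	if (!model_read_begin(&vma, &seq))
		puts("speculative fault falls back to the locked path");
	model_vm_write_end(&vma);	/* ...and unmarks after flush_tlb_mm() */

	return 0;
}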
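Two details of the unmark loop are worth calling out. First, it walks backwards from `last` via `vm_prev`, so it only revisits VMAs the copy loop has already iterated over; VM_DONTCOPY VMAs were skipped there and never marked (hence the `continue`), and VM_WIPEONFORK VMAs never had vm_write_begin() called on them (hence the second check). Second, vm_write_end() is deferred until after flush_tlb_mm(oldmm): copy_page_range() write-protects shared pages for COW, and unmarking the VMAs only after the parent's stale TLB entries are gone keeps the speculative handler from processing a VMA whose page tables and TLB state could still disagree.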