Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 897ab3e0 authored by Mike Rapoport, committed by Linus Torvalds
Browse files

userfaultfd: non-cooperative: add event for memory unmaps

When a non-cooperative userfaultfd monitor copies pages in the
background, it may encounter regions that were already unmapped.
Addition of UFFD_EVENT_UNMAP allows the uffd monitor to track precisely
changes in the virtual memory layout.

Since there might be different uffd contexts for the affected VMAs, we
first should create a temporary representation for the unmap event for
each uffd context and then notify them one by one to the appropriate
userfault file descriptors.

The event notification occurs after the mmap_sem has been released.

[arnd@arndb.de: fix nommu build]
  Link: http://lkml.kernel.org/r/20170203165141.3665284-1-arnd@arndb.de
[mhocko@suse.com: fix nommu build]
  Link: http://lkml.kernel.org/r/20170202091503.GA22823@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/1485542673-24387-3-git-send-email-rppt@linux.vnet.ibm.com


Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 846b1a0f
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -111,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
			   VM_READ|VM_WRITE|VM_EXEC|
			   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
			   0);
			   0, NULL);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
+1 −1
Original line number Diff line number Diff line
@@ -143,7 +143,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
		unsigned long addr = MEM_USER_INTRPT;
		addr = mmap_region(NULL, addr, INTRPT_SIZE,
				   VM_READ|VM_EXEC|
				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0);
				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0, NULL);
		if (addr > (unsigned long) -PAGE_SIZE)
			retval = (int) addr;
	}
+1 −1
Original line number Diff line number Diff line
@@ -186,7 +186,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
+2 −2
Original line number Diff line number Diff line
@@ -51,7 +51,7 @@ static unsigned long mpx_mmap(unsigned long len)

	down_write(&mm->mmap_sem);
	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate);
		       MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate, NULL);
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);
@@ -893,7 +893,7 @@ static int unmap_entire_bt(struct mm_struct *mm,
	 * avoid recursion, do_munmap() will check whether it comes
	 * from one bounds table through VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm));
	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm), NULL);
}

static int try_unmap_single_bt(struct mm_struct *mm,
+1 −1
Original line number Diff line number Diff line
@@ -512,7 +512,7 @@ static int aio_setup_ring(struct kioctx *ctx)

	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused);
				       MAP_SHARED, 0, &unused, NULL);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
Loading