Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 50873dd8 authored by Dmitry Safonov's avatar Dmitry Safonov Committed by Michael Bestas
Browse files

UPSTREAM: mm/mremap: don't account pages in vma_to_resize()

All this vm_unacct_memory(charged) dance seems to complicate the life
without a good reason.  Furthermore, it seems not always done right on
error paths in mremap_to().  And worse than that: this `charged'
difference is sometimes double-accounted for growing MREMAP_DONTUNMAP
mremap()s in move_vma():

	if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))

Let's not do this.  Account memory in mremap() fast-path for growing
VMAs or in move_vma() for actually moving things.  The same simpler way
as it's done by vm_stat_account(), but with a difference to call
security_vm_enough_memory_mm() before copying/adjusting VMA.

Originally noticed by Chen Wandun:
https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com

Link: https://lkml.kernel.org/r/20210721131320.522061-1-dima@arista.com


Fixes: e346b3813067 ("mm/mremap: add MREMAP_DONTUNMAP to mremap()")
Signed-off-by: default avatarDmitry Safonov <dima@arista.com>
Acked-by: default avatarBrian Geffon <bgeffon@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Wandun <chenwandun@huawei.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yongjun <weiyongjun1@huawei.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit fdbef61491359947753c13581098878e8d038286)
Signed-off-by: default avatarLee Jones <lee.jones@linaro.org>
Change-Id: Id0833f3e84c4a6119e2ec069afff5122cb0c6b35
parent 2af548dd
Loading
Loading
Loading
Loading
+22 −28
Original line number Diff line number Diff line
@@ -520,6 +520,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	long to_account = new_len - old_len;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
@@ -538,6 +539,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (unlikely(flags & MREMAP_DONTUNMAP))
		to_account = new_len;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
@@ -550,8 +554,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
	if (err)
		return err;

	if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
		if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
	if (vm_flags & VM_ACCOUNT) {
		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
			return -ENOMEM;
	}

@@ -559,8 +563,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
			vm_unacct_memory(new_len >> PAGE_SHIFT);
		if (vm_flags & VM_ACCOUNT)
			vm_unacct_memory(to_account >> PAGE_SHIFT);
		return -ENOMEM;
	}

@@ -661,8 +665,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
	unsigned long old_len, unsigned long new_len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
@@ -720,13 +723,6 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

@@ -739,7 +735,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
@@ -782,7 +777,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	vma = vma_to_resize(addr, old_len, new_len, flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
@@ -805,7 +800,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (offset_in_page(ret))
		goto out1;
		goto out;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
@@ -814,12 +809,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
@@ -851,7 +840,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
@@ -933,7 +921,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	vma = vma_to_resize(addr, old_len, new_len, flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
@@ -944,10 +932,18 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;
			long pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma->vm_flags & VM_ACCOUNT) {
				if (security_vm_enough_memory_mm(mm, pages)) {
					ret = -ENOMEM;
					goto out;
				}
			}

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				vm_unacct_memory(pages);
				ret = -ENOMEM;
				goto out;
			}
@@ -986,10 +982,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
	if (offset_in_page(ret))
		locked = 0;
	}
	if (downgraded)
		up_read(&current->mm->mmap_sem);
	else