Commit d05f0cdc authored by Hugh Dickins and committed by Linus Torvalds

mm: fix crashes from mbind() merging vmas

In v2.6.34 commit 9d8cebd4 ("mm: fix mbind vma merge problem")
introduced vma merging to mbind(), but it should have also changed the
convention of passing start vma from queue_pages_range() (formerly
check_range()) to new_vma_page(): vma merging may have already freed
that structure, resulting in BUG at mm/mempolicy.c:1738 and probably
worse crashes.
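
The pre-patch flow in do_mbind(), condensed from the hunks below with error handling trimmed, shows the use-after-free: the start vma returned by queue_pages_range() is handed to new_vma_page() only after mbind_range() may have merged, and thereby freed, that very vma:

	vma = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);	/* start vma */
	if (!IS_ERR(vma))
		err = mbind_range(mm, start, end, new);	/* merging may free vma */
	...
	nr_failed = migrate_pages(&pagelist, new_vma_page,
			NULL, (unsigned long)vma,	/* stale pointer: BUG */
			MIGRATE_SYNC, MR_MEMPOLICY_MBIND);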

Fixes: 9d8cebd4 ("mm: fix mbind vma merge problem")
Reported-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: <stable@vger.kernel.org>	[2.6.34+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b43ae21b
mm/mempolicy.c +20 −26

@@ -656,19 +656,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
  * passed via @private.)
  */
-static struct vm_area_struct *
+static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
-	int err;
-	struct vm_area_struct *first, *vma, *prev;
-
+	int err = 0;
+	struct vm_area_struct *vma, *prev;
 
-	first = find_vma(mm, start);
-	if (!first)
-		return ERR_PTR(-EFAULT);
+	vma = find_vma(mm, start);
+	if (!vma)
+		return -EFAULT;
 	prev = NULL;
-	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
 		unsigned long endvma = vma->vm_end;
 
 		if (endvma > end)
@@ -678,9 +677,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 			if (!vma->vm_next && vma->vm_end < end)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 			if (prev && prev->vm_end < vma->vm_start)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 		}
 
 		if (flags & MPOL_MF_LAZY) {
@@ -694,15 +693,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 			err = queue_pages_pgd_range(vma, start, endvma, nodes,
 						flags, private);
-			if (err) {
-				first = ERR_PTR(err);
+			if (err)
 				break;
-			}
 		}
 next:
 		prev = vma;
 	}
-	return first;
+	return err;
 }
 
 /*
@@ -1156,16 +1153,17 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
 
+	vma = find_vma(current->mm, start);
 	while (vma) {
 		address = page_address_in_vma(page, vma);
 		if (address != -EFAULT)
@@ -1195,7 +1193,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
 	return NULL;
 }
@@ -1205,7 +1203,6 @@ static long do_mbind(unsigned long start, unsigned long len,
 		     unsigned short mode, unsigned short mode_flags,
 		     nodemask_t *nmask, unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
@@ -1271,11 +1268,9 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	vma = queue_pages_range(mm, start, end, nmask,
+	err = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-
-	err = PTR_ERR(vma);	/* maybe ... */
-	if (!IS_ERR(vma))
+	if (!err)
 		err = mbind_range(mm, start, end, new);
 
 	if (!err) {
@@ -1283,9 +1278,8 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 		if (!list_empty(&pagelist)) {
 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-			nr_failed = migrate_pages(&pagelist, new_vma_page,
-					NULL, (unsigned long)vma,
-					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+			nr_failed = migrate_pages(&pagelist, new_page, NULL,
+				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 			if (nr_failed)
 				putback_movable_pages(&pagelist);
 		}
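
Condensed, the post-patch new_page() re-derives the vma from the stable @start address on every call, so no vma pointer is held across the merge. A sketch assembled from the hunks above (the loop's advance step and the allocation tail fall outside the hunks and are paraphrased here):

	static struct page *new_page(struct page *page, unsigned long start, int **x)
	{
		struct vm_area_struct *vma;
		unsigned long uninitialized_var(address);

		vma = find_vma(current->mm, start);	/* fresh lookup, never stale */
		while (vma) {
			address = page_address_in_vma(page, vma);
			if (address != -EFAULT)
				break;
			vma = vma->vm_next;	/* pagelist is in address order */
		}
		/* ... allocate the new page according to vma's policy ... */
	}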