Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1c59827d authored by Hugh Dickins, committed by Linus Torvalds
Browse files

[PATCH] mm: hugetlb truncation fixes



hugetlbfs allows truncation of its files (should it?), but hugetlb.c often
forgets that: crashes and misaccounting ensue.

copy_hugetlb_page_range better grab the src page_table_lock since we don't
want to guess what happens if concurrently truncated.  unmap_hugepage_range
rss accounting must not assume the full range was mapped.  follow_hugetlb_page
must guard with page_table_lock and be prepared to exit early.

Restyle copy_hugetlb_page_range with a for loop like the others there.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e03d13e9
Loading
Loading
Loading
Loading
+21 −14
Original line number Original line Diff line number Diff line
@@ -274,21 +274,22 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
{
{
	pte_t *src_pte, *dst_pte, entry;
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long addr;
	unsigned long end = vma->vm_end;


	while (addr < end) {
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		dst_pte = huge_pte_alloc(dst, addr);
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
		if (!dst_pte)
			goto nomem;
			goto nomem;
		spin_lock(&src->page_table_lock);
		src_pte = huge_pte_offset(src, addr);
		src_pte = huge_pte_offset(src, addr);
		BUG_ON(!src_pte || pte_none(*src_pte)); /* prefaulted */
		if (src_pte && !pte_none(*src_pte)) {
			entry = *src_pte;
			entry = *src_pte;
			ptepage = pte_page(entry);
			ptepage = pte_page(entry);
			get_page(ptepage);
			get_page(ptepage);
			add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
			add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
			set_huge_pte_at(dst, addr, dst_pte, entry);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		addr += HPAGE_SIZE;
		}
		spin_unlock(&src->page_table_lock);
	}
	}
	return 0;
	return 0;


@@ -323,8 +324,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,


		page = pte_page(pte);
		page = pte_page(pte);
		put_page(page);
		put_page(page);
		add_mm_counter(mm, rss,  - (HPAGE_SIZE / PAGE_SIZE));
	}
	}
	add_mm_counter(mm, rss,  -((end - start) >> PAGE_SHIFT));
	flush_tlb_range(vma, start, end);
	flush_tlb_range(vma, start, end);
}
}


@@ -403,6 +404,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
	BUG_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(!is_vm_hugetlb_page(vma));


	vpfn = vaddr/PAGE_SIZE;
	vpfn = vaddr/PAGE_SIZE;
	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
	while (vaddr < vma->vm_end && remainder) {


		if (pages) {
		if (pages) {
@@ -415,8 +417,13 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			 * indexing below to work. */
			 * indexing below to work. */
			pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
			pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);


			/* hugetlb should be locked, and hence, prefaulted */
			/* the hugetlb file might have been truncated */
			WARN_ON(!pte || pte_none(*pte));
			if (!pte || pte_none(*pte)) {
				remainder = 0;
				if (!i)
					i = -EFAULT;
				break;
			}


			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];


@@ -434,7 +441,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		--remainder;
		--remainder;
		++i;
		++i;
	}
	}

	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*length = remainder;
	*position = vaddr;
	*position = vaddr;