
Commit 47ad8475 authored by Andrea Arcangeli, committed by Linus Torvalds

thp: clear_copy_huge_page



Move the copy/clear_huge_page functions to common code to share between
hugetlb.c and huge_memory.c.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3f04f62f
include/linux/mm.h +9 −0
@@ -1589,5 +1589,14 @@ static inline int is_hwpoison_address(unsigned long addr)
 
 extern void dump_page(struct page *page);
 
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+extern void clear_huge_page(struct page *page,
+			    unsigned long addr,
+			    unsigned int pages_per_huge_page);
+extern void copy_user_huge_page(struct page *dst, struct page *src,
+				unsigned long addr, struct vm_area_struct *vma,
+				unsigned int pages_per_huge_page);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
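
For illustration only, not part of this commit: with these declarations in common code, the transparent-hugepage side can call clear_huge_page() instead of keeping its own per-subpage clearing loop. A minimal sketch of such a caller, where the helper name thp_clear_new_page is made up for this example and HPAGE_PMD_NR is the number of base pages in a PMD-sized huge page:

	/*
	 * Hypothetical caller (illustration only): zero a freshly allocated
	 * PMD-sized page. clear_huge_page() clears each of the HPAGE_PMD_NR
	 * subpages and calls cond_resched() between them, so large clears
	 * stay preemptible.
	 */
	static void thp_clear_new_page(struct page *page, unsigned long haddr)
	{
		clear_huge_page(page, haddr, HPAGE_PMD_NR);
		__SetPageUptodate(page);
	}
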
mm/hugetlb.c +3 −67
@@ -394,71 +394,6 @@ static int vma_has_reserves(struct vm_area_struct *vma)
 	return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-	struct page *p = page;
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
-		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
-	}
-}
-static void clear_huge_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-
-	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, addr, sz);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++) {
-		cond_resched();
-		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
-	}
-}
-
-static void copy_user_gigantic_page(struct page *dst, struct page *src,
-			   unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < pages_per_huge_page(h); ) {
-		cond_resched();
-		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-static void copy_user_huge_page(struct page *dst, struct page *src,
-			   unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-
-	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_user_gigantic_page(dst, src, addr, vma);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		cond_resched();
-		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
-	}
-}
-
 static void copy_gigantic_page(struct page *dst, struct page *src)
 {
 	int i;
@@ -2454,7 +2389,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	}
 
-	copy_user_huge_page(new_page, old_page, address, vma);
+	copy_user_huge_page(new_page, old_page, address, vma,
+			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 
 	/*
@@ -2558,7 +2494,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = -PTR_ERR(page);
 			goto out;
 		}
-		clear_huge_page(page, address, huge_page_size(h));
+		clear_huge_page(page, address, pages_per_huge_page(h));
 		__SetPageUptodate(page);
 
 		if (vma->vm_flags & VM_MAYSHARE) {
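
The call sites above now pass the subpage count explicitly: hugetlb_cow() gains a pages_per_huge_page(h) argument, and hugetlb_no_page() passes a page count where it used to pass a size in bytes. A minimal sketch of the equivalence, assuming the usual hstate helpers; the function name hpage_nr_subpages is made up for this illustration and is not part of the patch:

	/*
	 * Hypothetical helper (illustration only): huge_page_size(h) is
	 * PAGE_SIZE << h->order, so dividing it by PAGE_SIZE yields the same
	 * value that pages_per_huge_page(h) now passes directly.
	 */
	static unsigned int hpage_nr_subpages(struct hstate *h)
	{
		return huge_page_size(h) >> PAGE_SHIFT;
	}
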
mm/memory.c +71 −0
@@ -3645,3 +3645,74 @@ void might_fault(void)
 }
 EXPORT_SYMBOL(might_fault);
 #endif
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+static void clear_gigantic_page(struct page *page,
+				unsigned long addr,
+				unsigned int pages_per_huge_page)
+{
+	int i;
+	struct page *p = page;
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page;
+	     i++, p = mem_map_next(p, page, i)) {
+		cond_resched();
+		clear_user_highpage(p, addr + i * PAGE_SIZE);
+	}
+}
+void clear_huge_page(struct page *page,
+		     unsigned long addr, unsigned int pages_per_huge_page)
+{
+	int i;
+
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+		clear_gigantic_page(page, addr, pages_per_huge_page);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page; i++) {
+		cond_resched();
+		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
+	}
+}
+
+static void copy_user_gigantic_page(struct page *dst, struct page *src,
+				    unsigned long addr,
+				    struct vm_area_struct *vma,
+				    unsigned int pages_per_huge_page)
+{
+	int i;
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+
+	for (i = 0; i < pages_per_huge_page; ) {
+		cond_resched();
+		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+
+void copy_user_huge_page(struct page *dst, struct page *src,
+			 unsigned long addr, struct vm_area_struct *vma,
+			 unsigned int pages_per_huge_page)
+{
+	int i;
+
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+		copy_user_gigantic_page(dst, src, addr, vma,
+					pages_per_huge_page);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page; i++) {
+		cond_resched();
+		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
+	}
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
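
With both helpers living in mm/memory.c, hugetlb.c and huge_memory.c can share the same copy path as well. A sketch of a hypothetical huge-page copy-on-write step; the function name huge_cow_copy is made up for this illustration, and the real callers are the hugetlb_cow() hunk above and, per the commit message, the THP code in huge_memory.c:

	/*
	 * Hypothetical COW step (illustration only): duplicate a huge page
	 * into a new page before remapping it writable. copy_user_huge_page()
	 * copies the subpages one at a time with cond_resched() in between,
	 * and for gigantic pages (more than MAX_ORDER_NR_PAGES subpages)
	 * walks struct pages via mem_map_next(), since they need not be
	 * contiguous in mem_map.
	 */
	static void huge_cow_copy(struct page *new_page, struct page *old_page,
				  unsigned long addr, struct vm_area_struct *vma,
				  unsigned int nr_subpages)
	{
		copy_user_huge_page(new_page, old_page, addr, vma, nr_subpages);
		__SetPageUptodate(new_page);
	}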