
Commit 86e5216f authored by Adam Litke, committed by Linus Torvalds

[PATCH] Hugetlb: Reorganize hugetlb_fault to prepare for COW

This patch splits the "no_page()" type activity into its own function,
hugetlb_no_page().  hugetlb_fault() becomes the entry point for hugetlb faults
and delegates to the appropriate handler depending on the type of fault.
Right now we still have only hugetlb_no_page() but a later patch introduces a
COW fault.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "Seth, Rohit" <rohit.seth@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 85ef47f7
mm/hugetlb.c: +25 −9
@@ -376,20 +376,15 @@ static struct page *find_or_alloc_huge_page(struct address_space *mapping,
 	return page;
 }
 
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access)
+int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pte_t *ptep)
 {
 	int ret = VM_FAULT_SIGBUS;
 	unsigned long idx;
 	unsigned long size;
-	pte_t *pte;
 	struct page *page;
 	struct address_space *mapping;
 
-	pte = huge_pte_alloc(mm, address);
-	if (!pte)
-		goto out;
-
 	mapping = vma->vm_file->f_mapping;
 	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
 		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
@@ -408,11 +403,11 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto backout;
 
 	ret = VM_FAULT_MINOR;
-	if (!pte_none(*pte))
+	if (!pte_none(*ptep))
 		goto backout;
 
 	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
-	set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page));
+	set_huge_pte_at(mm, address, ptep, make_huge_pte(vma, page));
 	spin_unlock(&mm->page_table_lock);
 	unlock_page(page);
 out:
@@ -426,6 +421,27 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	goto out;
 }
 
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	pte_t *ptep;
+	pte_t entry;
+
+	ptep = huge_pte_alloc(mm, address);
+	if (!ptep)
+		return VM_FAULT_OOM;
+
+	entry = *ptep;
+	if (pte_none(entry))
+		return hugetlb_no_page(mm, vma, address, ptep);
+
+	/*
+	 * We could get here if another thread instantiated the pte
+	 * before the test above.
+	 */
+	return VM_FAULT_MINOR;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
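
The point of the split is that hugetlb_fault() is now a pure dispatcher: handling a new fault type means adding one branch in one place rather than restructuring the fault path. As a rough sketch of where the series is headed once copy-on-write handling lands, the dispatcher might grow roughly as below. Note that the hugetlb_cow() name, its signature, and the write_access/pte_write() test are assumptions for illustration; they are not part of this patch.

/*
 * Hypothetical follow-up shape of the dispatcher once COW handling
 * exists. hugetlb_cow() is assumed here, not introduced by this patch.
 */
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	entry = *ptep;
	if (pte_none(entry))
		return hugetlb_no_page(mm, vma, address, ptep);

	/* A write fault on a present, read-only huge pte goes to COW. */
	if (write_access && !pte_write(entry))
		return hugetlb_cow(mm, vma, address, ptep, entry);

	return VM_FAULT_MINOR;
}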