
Commit 05ea8860 authored by Dan Williams, committed by Linus Torvalds

mm, hugetlbfs: introduce ->pagesize() to vm_operations_struct

When device-dax is operating in huge-page mode we want it to behave like
hugetlbfs and report the MMU page mapping size that is being enforced by
the vma.

Similar to commit 31383c68 ("mm, hugetlbfs: introduce ->split() to
vm_operations_struct"), it would be messy to teach vma_mmu_pagesize()
about device-dax page mapping sizes in the same (hstate) way that
hugetlbfs communicates this attribute.  Instead, these patches introduce
a new ->pagesize() vm operation.
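
For context only, here is a rough sketch of how a driver that enforces a
fixed huge-page granularity in its fault handler could satisfy the new hook.
This is not part of the patch; "struct my_dax_dev", its "align" field, and
the my_dax_* names are hypothetical stand-ins for state such a driver would
already track.

/* Illustrative sketch, not part of this patch.  A driver that maps
 * memory at a fixed huge-page granularity reports that size through
 * the new ->pagesize() hook; the type and names here are hypothetical.
 */
#include <linux/fs.h>
#include <linux/mm.h>

struct my_dax_dev {
	unsigned long align;	/* granularity enforced by the fault handler */
};

static unsigned long my_dax_pagesize(struct vm_area_struct *vma)
{
	struct my_dax_dev *dev = vma->vm_file->private_data;

	return dev->align;	/* e.g. PMD_SIZE when mapping huge pages */
}

static const struct vm_operations_struct my_dax_vm_ops = {
	.pagesize	= my_dax_pagesize,
};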

Link: http://lkml.kernel.org/r/151996254734.27922.15813097401404359642.stgit@dwillia2-desk3.amr.corp.intel.com


Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 09135cc5
include/linux/mm.h  +1 −0
@@ -390,6 +390,7 @@ struct vm_operations_struct {
 	int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
 	void (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
+	unsigned long (*pagesize)(struct vm_area_struct * area);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
mm/hugetlb.c  +11 −8
@@ -637,14 +637,9 @@ EXPORT_SYMBOL_GPL(linear_hugepage_index);
  */
 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 {
-	struct hstate *hstate;
-
-	if (!is_vm_hugetlb_page(vma))
-		return PAGE_SIZE;
-
-	hstate = hstate_vma(vma);
-
-	return 1UL << huge_page_shift(hstate);
+	if (vma->vm_ops && vma->vm_ops->pagesize)
+		return vma->vm_ops->pagesize(vma);
+	return PAGE_SIZE;
 }
 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
 
@@ -3151,6 +3146,13 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 	return 0;
 }
 
+static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
+{
+	struct hstate *hstate = hstate_vma(vma);
+
+	return 1UL << huge_page_shift(hstate);
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3168,6 +3170,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
 	.open = hugetlb_vm_op_open,
 	.close = hugetlb_vm_op_close,
 	.split = hugetlb_vm_op_split,
+	.pagesize = hugetlb_vm_op_pagesize,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
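
As a usage note (a sketch, not part of this patch): with the PAGE_SIZE
fallback in vma_kernel_pagesize(), generic code can ask any vma for its
mapping granularity without special-casing hugetlbfs.  The helper below and
its name are hypothetical.

#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/printk.h>

/* Hypothetical helper: print a vma's mapping granularity.  The value
 * comes from the vma's own ->pagesize() when one is provided, and
 * falls back to PAGE_SIZE otherwise.
 */
static void report_vma_granularity(struct vm_area_struct *vma)
{
	unsigned long sz = vma_kernel_pagesize(vma);

	pr_info("vma %#lx-%#lx mapped at %lu-byte granularity\n",
		vma->vm_start, vma->vm_end, sz);
}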