
Commit b96375f7 authored by Matthew Wilcox, committed by Linus Torvalds

mm: add a pmd_fault handler



Allow non-anonymous VMAs to provide huge pages in response to a page fault.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4897c765
include/linux/mm.h  +2 −0
@@ -249,6 +249,8 @@ struct vm_operations_struct {
 	void (*close)(struct vm_area_struct * area);
 	int (*mremap)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
+						pmd_t *, unsigned int flags);
 	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 	/* notification that a previously read-only page is about to become
mm/memory.c  +24 −6
@@ -3232,6 +3232,27 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return 0;
 }
 
+static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, pmd_t orig_pmd,
+			unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3334,10 +3355,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pmd)
 		return VM_FAULT_OOM;
 	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
-		int ret = VM_FAULT_FALLBACK;
-		if (!vma->vm_ops)
-			ret = do_huge_pmd_anonymous_page(mm, vma, address,
-					pmd, flags);
+		int ret = create_huge_pmd(mm, vma, address, pmd, flags);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
@@ -3361,8 +3379,8 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 							     orig_pmd, pmd);
 
 			if (dirty && !pmd_write(orig_pmd)) {
-				ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
-							  orig_pmd);
+				ret = wp_huge_pmd(mm, vma, address, pmd,
+							orig_pmd, flags);
 				if (!(ret & VM_FAULT_FALLBACK))
 					return ret;
 			} else {
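
For context (not part of this commit's diff): the new hook lets a non-anonymous mapping, such as one backed by a device or filesystem, try to satisfy a fault with a PMD-sized huge page before the core falls back to ordinary PTEs. Below is a minimal, hypothetical sketch of a provider; my_vm_ops, my_fault and my_pmd_fault are made-up names used only for illustration, and the stub merely exercises the fallback contract visible in create_huge_pmd()/wp_huge_pmd() above.

/*
 * Hypothetical sketch only. A real handler would try to build a
 * PMD-sized mapping for the faulting address (write faults can be
 * detected via flags & FAULT_FLAG_WRITE), install it in *pmd, and
 * typically return VM_FAULT_NOPAGE. Returning VM_FAULT_FALLBACK tells
 * __handle_mm_fault() to retry the fault with normal PTEs through the
 * regular ->fault path.
 */
static int my_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags)
{
	return VM_FAULT_FALLBACK;	/* no huge page available: fall back */
}

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;		/* placeholder base-page handler */
}

static const struct vm_operations_struct my_vm_ops = {
	.fault		= my_fault,	/* base-page (PTE) faults and fallbacks */
	.pmd_fault	= my_pmd_fault,	/* huge (PMD-sized) faults */
};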