Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 667240e0 authored by Jan Kara, committed by Linus Torvalds
Browse files

mm: use passed vm_fault structure in __do_fault()

Instead of creating another vm_fault structure, use the one passed to
__do_fault() for passing arguments into fault handler.

Link: http://lkml.kernel.org/r/1479460644-25076-5-git-send-email-jack@suse.cz


Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0721ec8b
Loading
Loading
Loading
Loading
+10 −15
Original line number Diff line number Diff line
@@ -2848,37 +2848,31 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
 		      struct page **page, void **entry)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct vm_fault vmf2;
 	int ret;
 
-	vmf2.address = vmf->address;
-	vmf2.pgoff = vmf->pgoff;
-	vmf2.flags = vmf->flags;
-	vmf2.page = NULL;
-	vmf2.gfp_mask = __get_fault_gfp_mask(vma);
-	vmf2.cow_page = cow_page;
+	vmf->cow_page = cow_page;
 
-	ret = vma->vm_ops->fault(vma, &vmf2);
+	ret = vma->vm_ops->fault(vma, vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 	if (ret & VM_FAULT_DAX_LOCKED) {
-		*entry = vmf2.entry;
+		*entry = vmf->entry;
 		return ret;
 	}
 
-	if (unlikely(PageHWPoison(vmf2.page))) {
+	if (unlikely(PageHWPoison(vmf->page))) {
 		if (ret & VM_FAULT_LOCKED)
-			unlock_page(vmf2.page);
-		put_page(vmf2.page);
+			unlock_page(vmf->page);
+		put_page(vmf->page);
 		return VM_FAULT_HWPOISON;
 	}
 
 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
-		lock_page(vmf2.page);
+		lock_page(vmf->page);
 	else
-		VM_BUG_ON_PAGE(!PageLocked(vmf2.page), vmf2.page);
+		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
-	*page = vmf2.page;
+	*page = vmf->page;
 	return ret;
 }

@@ -3614,6 +3608,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 		.address = address & PAGE_MASK,
 		.flags = flags,
 		.pgoff = linear_page_index(vma, address),
+		.gfp_mask = __get_fault_gfp_mask(vma),
 	};
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;