
Commit 0721ec8b authored by Jan Kara, committed by Linus Torvalds

mm: use pgoff in struct vm_fault instead of passing it separately

struct vm_fault already has a pgoff entry.  Use it instead of passing
pgoff as a separate argument and then assigning it later.
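
In practice the pattern looks like this (a condensed before/after sketch
distilled from the hunks below, not the literal kernel code):

	/* Before: the caller computed pgoff and threaded it through each helper */
	pgoff_t pgoff = linear_page_index(vma, vmf->address);
	ret = do_read_fault(vmf, pgoff);

	/* After: pgoff is filled in once when the vm_fault is initialized */
	struct vm_fault vmf = {
		.vma = vma,
		.address = address & PAGE_MASK,
		.flags = flags,
		.pgoff = linear_page_index(vma, address),
	};
	ret = do_read_fault(&vmf);	/* the helper reads vmf->pgoff itself */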

Link: http://lkml.kernel.org/r/1479460644-25076-4-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1a29d85e
mm/khugepaged.c +1 −0
@@ -882,6 +882,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
+		.pgoff = linear_page_index(vma, address),
	};

	/* we only decide to swapin, if there is enough young ptes */
mm/memory.c +18 −17
@@ -2275,7 +2275,7 @@ static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte)
	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
		struct vm_fault vmf2 = {
			.page = NULL,
-			.pgoff = linear_page_index(vma, vmf->address),
+			.pgoff = vmf->pgoff,
			.address = vmf->address,
			.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
		};
@@ -2844,15 +2844,15 @@ static int do_anonymous_page(struct vm_fault *vmf)
 * released depending on flags and vma->vm_ops->fault() return value.
 * See filemap_fault() and __lock_page_retry().
 */
-static int __do_fault(struct vm_fault *vmf, pgoff_t pgoff,
-		struct page *cow_page, struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
+		      struct page **page, void **entry)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vm_fault vmf2;
	int ret;

	vmf2.address = vmf->address;
-	vmf2.pgoff = pgoff;
+	vmf2.pgoff = vmf->pgoff;
	vmf2.flags = vmf->flags;
	vmf2.page = NULL;
	vmf2.gfp_mask = __get_fault_gfp_mask(vma);
@@ -3156,9 +3156,10 @@ late_initcall(fault_around_debugfs);
 * fault_around_pages() value (and therefore to page order).  This way it's
 * easier to guarantee that we don't cross page table boundaries.
 */
-static int do_fault_around(struct vm_fault *vmf, pgoff_t start_pgoff)
+static int do_fault_around(struct vm_fault *vmf)
{
	unsigned long address = vmf->address, nr_pages, mask;
+	pgoff_t start_pgoff = vmf->pgoff;
	pgoff_t end_pgoff;
	int off, ret = 0;

@@ -3210,7 +3211,7 @@ static int do_fault_around(struct vm_fault *vmf, pgoff_t start_pgoff)
	return ret;
}

-static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff)
+static int do_read_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *fault_page;
@@ -3222,12 +3223,12 @@ static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff)
	 * something).
	 */
	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
-		ret = do_fault_around(vmf, pgoff);
+		ret = do_fault_around(vmf);
		if (ret)
			return ret;
	}

-	ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf, NULL, &fault_page, NULL);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

@@ -3240,7 +3241,7 @@ static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff)
	return ret;
}

-static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff)
+static int do_cow_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *fault_page, *new_page;
@@ -3261,7 +3262,7 @@ static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff)
		return VM_FAULT_OOM;
	}

-	ret = __do_fault(vmf, pgoff, new_page, &fault_page, &fault_entry);
+	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		goto uncharge_out;

@@ -3276,7 +3277,7 @@ static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff)
		unlock_page(fault_page);
		put_page(fault_page);
	} else {
-		dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
+		dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
	}
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		goto uncharge_out;
@@ -3287,7 +3288,7 @@ static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff)
	return ret;
}

-static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff)
+static int do_shared_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *fault_page;
@@ -3295,7 +3296,7 @@ static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff)
	int dirtied = 0;
	int ret, tmp;

-	ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf, NULL, &fault_page, NULL);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

@@ -3356,16 +3357,15 @@ static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff)
static int do_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
-	pgoff_t pgoff = linear_page_index(vma, vmf->address);

	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
	if (!vma->vm_ops->fault)
		return VM_FAULT_SIGBUS;
	if (!(vmf->flags & FAULT_FLAG_WRITE))
-		return do_read_fault(vmf, pgoff);
+		return do_read_fault(vmf);
	if (!(vma->vm_flags & VM_SHARED))
-		return do_cow_fault(vmf, pgoff);
-	return do_shared_fault(vmf, pgoff);
+		return do_cow_fault(vmf);
+	return do_shared_fault(vmf);
}

static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
@@ -3613,6 +3613,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
		.vma = vma,
		.address = address & PAGE_MASK,
		.flags = flags,
+		.pgoff = linear_page_index(vma, address),
	};
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;