
Commit 57da8b0b authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "mm: introduce INIT_VMA()"

parents c79f4b79 ead04c98
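This merge pulls in preparatory work for speculative page fault handling. It touches three files: include/linux/mm.h gains a new INIT_VMA() helper that centralizes initialization of vma->anon_vma_chain, plus a new VM_FAULT_PTNOTSAME fault code; kernel/fork.c switches vm_area_dup() to the new helper; and mm/memory.c wraps every PTE-lock site in the fault path behind two new helpers, pte_spinlock() and pte_map_lock(), whose boolean return value gives callers a VM_FAULT_RETRY exit for when the VMA changes under a speculative fault.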
include/linux/mm.h  +7 −1
@@ -453,6 +453,11 @@ struct vm_operations_struct {
					  unsigned long addr);
};

+static inline void INIT_VMA(struct vm_area_struct *vma)
+{
+	INIT_LIST_HEAD(&vma->anon_vma_chain);
+}
+
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};
@@ -460,7 +465,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	INIT_VMA(vma);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
@@ -1269,6 +1274,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
#define VM_FAULT_NEEDDSYNC  0x2000	/* ->fault did not modify page tables
					 * and needs fsync() to complete (for
					 * synchronous page faults in DAX) */
+#define VM_FAULT_PTNOTSAME 0x4000	/* Page table entries have changed */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
kernel/fork.c  +1 −1
@@ -325,7 +325,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)

	if (new) {
		*new = *orig;
-		INIT_LIST_HEAD(&new->anon_vma_chain);
+		INIT_VMA(new);
	}
	return new;
}
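Why INIT_VMA() is needed right after the structure copy: an empty list_head is self-referential, so copying it by value leaves new->anon_vma_chain pointing into orig's list rather than at itself. A minimal userspace sketch of the hazard, using simplified stand-ins for the kernel types (not kernel code):

#include <stdio.h>

/* Simplified stand-ins for the kernel's list_head and vm_area_struct. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

struct vm_area_struct { struct list_head anon_vma_chain; };

int main(void)
{
	struct vm_area_struct orig, new;

	INIT_LIST_HEAD(&orig.anon_vma_chain);
	new = orig;	/* the struct copy, like "*new = *orig" above */

	/* The copied head still points into orig's (empty) list... */
	printf("aliases orig: %d\n", new.anon_vma_chain.next == &orig.anon_vma_chain);

	/* ...so it must be re-initialized, which INIT_VMA() now centralizes. */
	INIT_LIST_HEAD(&new.anon_vma_chain);
	printf("self-referential: %d\n", new.anon_vma_chain.next == &new.anon_vma_chain);
	return 0;
}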
mm/memory.c  +98 −44
@@ -2305,6 +2305,20 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL_GPL(apply_to_page_range);

+static inline bool pte_spinlock(struct vm_fault *vmf)
+{
+	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	spin_lock(vmf->ptl);
+	return true;
+}
+
+static inline bool pte_map_lock(struct vm_fault *vmf)
+{
+	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
+				       vmf->address, &vmf->ptl);
+	return true;
+}
+
/*
 * handle_pte_fault chooses page fault handler according to an entry which was
 * read non-atomically.  Before making any commitment, on those architectures
@@ -2312,21 +2326,29 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
 * parts, do_swap_page must check under lock before unmapping the pte and
 * proceeding (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page can safely check later on).
+ *
+ * pte_unmap_same() returns:
+ *	0			if the PTE are the same
+ *	VM_FAULT_PTNOTSAME	if the PTE are different
+ *	VM_FAULT_RETRY		if the VMA has changed in our back during
+ *				a speculative page fault handling.
 */
-static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
-				pte_t *page_table, pte_t orig_pte)
+static inline int pte_unmap_same(struct vm_fault *vmf)
{
-	int same = 1;
+	int ret = 0;
+
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
-		spinlock_t *ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
-		same = pte_same(*page_table, orig_pte);
-		spin_unlock(ptl);
+		if (pte_spinlock(vmf)) {
+			if (!pte_same(*vmf->pte, vmf->orig_pte))
+				ret = VM_FAULT_PTNOTSAME;
+			spin_unlock(vmf->ptl);
+		} else
+			ret = VM_FAULT_RETRY;
	}
#endif
-	pte_unmap(page_table);
-	return same;
+	pte_unmap(vmf->pte);
+	return ret;
}

static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
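As introduced here, pte_spinlock() and pte_map_lock() can never fail: they are straight wrappers around pte_lockptr()/spin_lock() and pte_offset_map_lock() that always return true. The point of the boolean contract is that every caller now has a VM_FAULT_RETRY exit, so a later patch in the speculative page fault series can substitute a fallible variant without touching the callers again. A hypothetical sketch of what such a variant could look like, assuming a per-VMA sequence count (vma->vm_sequence and vmf->sequence are assumptions, not part of this commit):

/*
 * Hypothetical speculative variant (NOT in this commit): refuses the
 * lock when the VMA was modified during the speculative walk.
 * Assumes a seqcount_t vma->vm_sequence sampled into vmf->sequence
 * when the walk started -- both introduced by later SPF patches.
 */
static bool pte_map_lock_speculative(struct vm_fault *vmf)
{
	if (read_seqcount_retry(&vmf->vma->vm_sequence, vmf->sequence))
		return false;	/* VMA changed: caller returns VM_FAULT_RETRY */

	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
				       vmf->address, &vmf->ptl);

	/* Re-check after taking the PTL in case we raced with a writer. */
	if (read_seqcount_retry(&vmf->vma->vm_sequence, vmf->sequence)) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return false;
	}
	return true;
}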
@@ -2494,25 +2516,26 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
	const unsigned long mmun_start = vmf->address & PAGE_MASK;
	const unsigned long mmun_end = mmun_start + PAGE_SIZE;
	struct mem_cgroup *memcg;
+	int ret = VM_FAULT_OOM;

	if (unlikely(anon_vma_prepare(vma)))
-		goto oom;
+		goto out;

	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
		new_page = alloc_zeroed_user_highpage_movable(vma,
							      vmf->address);
		if (!new_page)
-			goto oom;
+			goto out;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
				vmf->address);
		if (!new_page)
-			goto oom;
+			goto out;
		cow_user_page(new_page, old_page, vmf->address, vma);
	}

	if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
-		goto oom_free_new;
+		goto out_free_new;

	__SetPageUptodate(new_page);

@@ -2521,7 +2544,10 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
	/*
	 * Re-check the pte - we dropped the lock
	 */
-	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
+		goto out_uncharge;
+	}
	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
		if (old_page) {
			if (!PageAnon(old_page)) {
@@ -2608,12 +2634,14 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
		put_page(old_page);
	}
	return page_copied ? VM_FAULT_WRITE : 0;
-oom_free_new:
+out_uncharge:
+	mem_cgroup_cancel_charge(new_page, memcg, false);
+out_free_new:
	put_page(new_page);
-oom:
+out:
	if (old_page)
		put_page(old_page);
-	return VM_FAULT_OOM;
+	return ret;
}
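The relabelled exit paths above follow the kernel's layered-goto unwind idiom: each label releases exactly what was acquired before the failed step, in reverse order, and the new out_uncharge layer slots between the memcg charge and the page allocation so that a pte_map_lock() failure can return VM_FAULT_RETRY through the same unwind. A generic userspace illustration of the idiom (hypothetical acquire/release steps, not the kernel functions):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical acquire steps standing in for page allocation,
 * memcg charging and PTE locking in wp_page_copy() above. */
static void *acquire_a(void) { return malloc(1); }
static bool acquire_b(void) { return true; }
static bool acquire_c(void) { return false; }	/* fails, like pte_map_lock() */

static int do_work(void)
{
	int ret = -1;			/* default error, like ret = VM_FAULT_OOM */
	void *a = acquire_a();

	if (!a)
		goto out;
	if (!acquire_b())
		goto out_free_a;
	if (!acquire_c()) {
		ret = -2;		/* distinct code, like VM_FAULT_RETRY */
		goto out_undo_b;
	}
	return 0;			/* success path */

out_undo_b:				/* undo acquire_b(), like out_uncharge */
out_free_a:
	free(a);			/* like put_page(new_page) at out_free_new */
out:
	return ret;
}

int main(void)
{
	printf("do_work() = %d\n", do_work());
	return 0;
}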

/**
@@ -2634,8 +2662,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
{
	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
-	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
-				       &vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
	/*
	 * We might have raced with another page fault while we released the
	 * pte_offset_map_lock.
@@ -2753,8 +2781,11 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
			get_page(vmf->page);
			pte_unmap_unlock(vmf->pte, vmf->ptl);
			lock_page(vmf->page);
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
+			if (!pte_map_lock(vmf)) {
+				unlock_page(vmf->page);
+				put_page(vmf->page);
+				return VM_FAULT_RETRY;
+			}
			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
				unlock_page(vmf->page);
				pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2906,10 +2937,19 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
	pte_t pte;
	int locked;
	int exclusive = 0;
-	vm_fault_t ret = 0;
+	vm_fault_t ret;

-	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
-		goto out;
+	ret = pte_unmap_same(vmf);
+	if (ret) {
+		/*
+		 * If pte != orig_pte, this means another thread did the
+		 * swap operation in our back.
+		 * So nothing else to do.
+		 */
+		if (ret == VM_FAULT_PTNOTSAME)
+			ret = 0;
+		goto out;
+	}

	entry = pte_to_swp_entry(vmf->orig_pte);
	if (unlikely(non_swap_entry(entry))) {
@@ -2961,11 +3001,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)

		if (!page) {
			/*
-			 * Back out if somebody else faulted in this pte
-			 * while we released the pte lock.
+			 * Back out if the VMA has changed in our back during
+			 * a speculative page fault or if somebody else
+			 * faulted in this pte while we released the pte lock.
			 */
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
+			if (!pte_map_lock(vmf)) {
+				delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+				ret = VM_FAULT_RETRY;
+				goto out;
+			}

			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
				ret = VM_FAULT_OOM;
			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -3018,10 +3063,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
	}

	/*
-	 * Back out if somebody else already faulted in this pte.
+	 * Back out if the VMA has changed in our back during a speculative
+	 * page fault or if somebody else already faulted in this pte.
	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
+		goto out_cancel_cgroup;
+	}
	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
		goto out_nomap;

@@ -3099,8 +3147,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
out:
	return ret;
out_nomap:
-	mem_cgroup_cancel_charge(page, memcg, false);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
+out_cancel_cgroup:
+	mem_cgroup_cancel_charge(page, memcg, false);
out_page:
	unlock_page(page);
out_release:
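Note the subtle reordering at the swap-fault exit labels: pte_unmap_unlock() now runs before the memcg charge is cancelled, so the new out_cancel_cgroup entry point, reached when pte_map_lock() fails and the page-table lock was never taken, can undo the charge without ever touching the lock.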
@@ -3151,8 +3200,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
			!mm_forbids_zeropage(vma->vm_mm)) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
						vma->vm_page_prot));
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				vmf->address, &vmf->ptl);
+		if (!pte_map_lock(vmf))
+			return VM_FAULT_RETRY;
		if (!pte_none(*vmf->pte))
			goto unlock;
		ret = check_stable_address_space(vma->vm_mm);
@@ -3188,14 +3237,16 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
-	if (!pte_none(*vmf->pte))
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
		goto release;
+	}
+	if (!pte_none(*vmf->pte))
+		goto unlock_and_release;

	ret = check_stable_address_space(vma->vm_mm);
	if (ret)
-		goto release;
+		goto unlock_and_release;

	/* Deliver the page fault to userland, check inside PT lock */
	if (userfaultfd_missing(vma)) {
@@ -3217,10 +3268,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
unlock:
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return ret;
+unlock_and_release:
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
release:
	mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
-	goto unlock;
+	return ret;
oom_free_page:
	put_page(page);
oom:
@@ -3336,8 +3389,9 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
	 * pte_none() under vmf->ptl protection when we return to
	 * alloc_set_pte().
	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
+
	return 0;
}

@@ -3848,8 +3902,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
	 * validation through pte_unmap_same(). It's of NUMA type but
	 * the pfn may be screwed if the read is non atomic.
	 */
-	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
-	spin_lock(vmf->ptl);
+	if (!pte_spinlock(vmf))
+		return VM_FAULT_RETRY;
	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		goto out;
@@ -4042,8 +4096,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
		return do_numa_page(vmf);

-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-	spin_lock(vmf->ptl);
+	if (!pte_spinlock(vmf))
+		return VM_FAULT_RETRY;
	entry = vmf->orig_pte;
	if (unlikely(!pte_same(*vmf->pte, entry)))
		goto unlock;
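Taken together, every PTE-lock site in the fault path now funnels through one caller shape, which is what makes the two helpers a useful seam for a speculative implementation to hook into. The recurring pattern, as used in the hunks above:

	if (!pte_spinlock(vmf))			/* or pte_map_lock(vmf) */
		return VM_FAULT_RETRY;		/* or set ret and goto an unwind label */
	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
		goto unlock;			/* PTE changed: back out under the lock */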