Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Unverified Commit 1fe3771a authored by Minchan Kim, committed by basamaryan
Browse files

Revert "mm: protect VMA modifications using VMA sequence count"



This reverts commit 41530993.

Bug: 128240262
Change-Id: If31a4c81badd891e6ca5740dbd022b5edbe47254
Signed-off-by: Minchan Kim <minchan@google.com>
parent 10e72457
Loading
Loading
Loading
Loading
+1 −4
Original line number Diff line number Diff line
@@ -1250,11 +1250,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
					goto out_mm;
				}
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vm_write_begin(vma);
					WRITE_ONCE(vma->vm_flags,
						vma->vm_flags & ~VM_SOFTDIRTY);
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
					vm_write_end(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
+4 −13
Original line number Diff line number Diff line
@@ -668,11 +668,8 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vm_write_begin(vma);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		WRITE_ONCE(vma->vm_flags,
			   vma->vm_flags & ~__VM_UFFD_FLAGS);
		vm_write_end(vma);
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
		return 0;
	}

@@ -911,10 +908,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
			else
				prev = vma;
		}
		vm_write_begin(vma);
		WRITE_ONCE(vma->vm_flags, new_flags);
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vm_write_end(vma);
	}
	up_write(&mm->mmap_sem);
	mmput(mm);
@@ -1500,10 +1495,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vm_write_begin(vma);
		WRITE_ONCE(vma->vm_flags, new_flags);
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx.ctx = ctx;
		vm_write_end(vma);

		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
			hugetlb_unshare_all_pmds(vma);
@@ -1675,10 +1668,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vm_write_begin(vma);
		WRITE_ONCE(vma->vm_flags, new_flags);
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vm_write_end(vma);

	skip:
		prev = vma;
+0 −3
Original line number Diff line number Diff line
@@ -1041,7 +1041,6 @@ static void collapse_huge_page(struct mm_struct *mm,
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	vm_write_begin(vma);
	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
@@ -1078,7 +1077,6 @@ static void collapse_huge_page(struct mm_struct *mm,
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		vm_write_end(vma);
		result = SCAN_FAIL;
		goto out;
	}
@@ -1113,7 +1111,6 @@ static void collapse_huge_page(struct mm_struct *mm,
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);
	vm_write_end(vma);

	*hpage = NULL;

+1 −5
Original line number Diff line number Diff line
@@ -184,9 +184,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vm_write_begin(vma);
	WRITE_ONCE(vma->vm_flags, new_flags);
	vm_write_end(vma);
	vma->vm_flags = new_flags;
out:
	return error;
}
@@ -452,11 +450,9 @@ static void madvise_free_page_range(struct mmu_gather *tlb,
		.private = tlb,
	};

	vm_write_begin(vma);
	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
	vm_write_end(vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
+17 −34
Original line number Diff line number Diff line
@@ -379,11 +379,8 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		vm_write_begin(vma);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
		vm_write_end(vma);
	}
	up_write(&mm->mmap_sem);
}

@@ -603,11 +600,9 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
{
	int nr_updated;

	vm_write_begin(vma);
	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
	vm_write_end(vma);

	return nr_updated;
}
@@ -713,7 +708,6 @@ static int vma_replace_policy(struct vm_area_struct *vma,
	if (IS_ERR(new))
		return PTR_ERR(new);

	vm_write_begin(vma);
	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
@@ -721,17 +715,11 @@ static int vma_replace_policy(struct vm_area_struct *vma,
	}

	old = vma->vm_policy;
	/*
	 * The speculative page fault handler accesses this field without
	 * hodling the mmap_sem.
	 */
	WRITE_ONCE(vma->vm_policy,  new);
	vm_write_end(vma);
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	vm_write_end(vma);
	mpol_put(new);
	return err;
}
@@ -1620,20 +1608,14 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol;

	if (!vma)
		return NULL;
	struct mempolicy *pol = NULL;

	if (vma->vm_ops && vma->vm_ops->get_policy)
		return vma->vm_ops->get_policy(vma, addr);
	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
		} else if (vma->vm_policy) {
			pol = vma->vm_policy;

	/*
	 * This could be called without holding the mmap_sem in the
	 * speculative page fault handler's path.
	 */
	pol = READ_ONCE(vma->vm_policy);
	if (pol) {
			/*
			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
@@ -1643,6 +1625,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
			if (mpol_needs_cond_ref(pol))
				mpol_get(pol);
		}
	}

	return pol;
}
Loading