
Unverified commit 10e72457 authored by Minchan Kim, committed by basamaryan

Revert "mm: protect mremap() against SPF hanlder"



This reverts commit f831a428.

Bug: 128240262
Change-Id: Ida9fb7e41a7905755470e20f5c72867bb3dad03f
Signed-off-by: Minchan Kim <minchan@google.com>
parent d4e1acfd
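For context (not part of this change): the code touched below sits behind the mremap(2) syscall, where a resize that cannot happen in place is relocated through copy_vma() and move_vma(). A minimal userspace sketch that can trigger such a relocation; buffer sizes and the printed output are illustrative only, and whether the kernel actually moves the mapping depends on the address-space layout, hence MREMAP_MAYMOVE rather than an assumption that it will:

/* Illustrative only: grow an anonymous mapping with mremap(2); if it cannot
 * grow in place, MREMAP_MAYMOVE lets the kernel relocate it, which goes
 * through the move_vma()/copy_vma() paths changed in this commit. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 1024 * 1024;

	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	memset(p, 0xab, old_len);

	/* May return a different address if the mapping had to move. */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return EXIT_FAILURE;
	}
	printf("old %p new %p, contents preserved: %d\n",
	       (void *)p, (void *)q, q[0] == (char)0xab);

	munmap(q, new_len);
	return EXIT_SUCCESS;
}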
include/linux/mm.h  +5 −18
@@ -2268,29 +2268,16 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand, bool keep_locked);
+	struct vm_area_struct *expand);
 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
 {
-	return __vma_adjust(vma, start, end, pgoff, insert, NULL, false);
+	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
 }
-
-extern struct vm_area_struct *__vma_merge(struct mm_struct *mm,
-	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
-	unsigned long vm_flags, struct anon_vma *anon, struct file *file,
-	pgoff_t pgoff, struct mempolicy *mpol, struct vm_userfaultfd_ctx uff,
-	const char __user *user, bool keep_locked);
-
-static inline struct vm_area_struct *vma_merge(struct mm_struct *mm,
+extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
-	unsigned long vm_flags, struct anon_vma *anon, struct file *file,
-	pgoff_t off, struct mempolicy *pol, struct vm_userfaultfd_ctx uff,
-	const char __user *user)
-{
-	return __vma_merge(mm, prev, addr, end, vm_flags, anon, file, off,
-			   pol, uff, user, false);
-}
-
+	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
+	struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
 	unsigned long addr, int new_below);
mm/mmap.c  +12 −41
@@ -675,7 +675,7 @@ static inline void __vma_unlink_prev(struct mm_struct *mm,
  */
 int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand, bool keep_locked)
+	struct vm_area_struct *expand)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
@@ -791,14 +791,10 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 
 			importer->anon_vma = exporter->anon_vma;
 			error = anon_vma_clone(importer, exporter);
-			if (error) {
-				if (next && next != vma)
-					vm_raw_write_end(next);
-				vm_raw_write_end(vma);
+			if (error)
 				return error;
-			}
 		}
 	}
 again:
 	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
 
@@ -991,7 +987,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 
 	if (next && next != vma)
 		vm_raw_write_end(next);
-	if (!keep_locked)
-		vm_raw_write_end(vma);
+	vm_raw_write_end(vma);
 
 	validate_mm(mm);
@@ -1133,13 +1128,13 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  * parameter) may establish ptes with the wrong permissions of NNNN
  * instead of the right permissions of XXXX.
  */
-struct vm_area_struct *__vma_merge(struct mm_struct *mm,
+struct vm_area_struct *vma_merge(struct mm_struct *mm,
 			struct vm_area_struct *prev, unsigned long addr,
 			unsigned long end, unsigned long vm_flags,
 			struct anon_vma *anon_vma, struct file *file,
 			pgoff_t pgoff, struct mempolicy *policy,
 			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-			const char __user *anon_name, bool keep_locked)
+			const char __user *anon_name)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
@@ -1189,11 +1184,10 @@ struct vm_area_struct *__vma_merge(struct mm_struct *mm,
 							/* cases 1, 6 */
 			err = __vma_adjust(prev, prev->vm_start,
 					 next->vm_end, prev->vm_pgoff, NULL,
-					 prev, keep_locked);
+					 prev);
 		} else					/* cases 2, 5, 7 */
 			err = __vma_adjust(prev, prev->vm_start,
-					   end, prev->vm_pgoff, NULL, prev,
-					   keep_locked);
+					 end, prev->vm_pgoff, NULL, prev);
 		if (err)
 			return NULL;
 		khugepaged_enter_vma_merge(prev, vm_flags);
@@ -1211,12 +1205,10 @@ struct vm_area_struct *__vma_merge(struct mm_struct *mm,
 					     anon_name)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = __vma_adjust(prev, prev->vm_start,
-					 addr, prev->vm_pgoff, NULL, next,
-					 keep_locked);
+					 addr, prev->vm_pgoff, NULL, next);
 		else {					/* cases 3, 8 */
 			err = __vma_adjust(area, addr, next->vm_end,
-					 next->vm_pgoff - pglen, NULL, next,
-					 keep_locked);
+					 next->vm_pgoff - pglen, NULL, next);
 			/*
 			 * In case 3 area is already equal to next and
 			 * this is a noop, but in case 8 "area" has
@@ -3211,21 +3203,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 
 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
 		return NULL;	/* should never get here */
-
-	/* There is 3 cases to manage here in
-	 *     AAAA            AAAA              AAAA              AAAA
-	 * PPPP....      PPPP......NNNN      PPPP....NNNN      PP........NN
-	 * PPPPPPPP(A)   PPPP..NNNNNNNN(B)   PPPPPPPPPPPP(1)       NULL
-	 *                                   PPPPPPPPNNNN(2)
-	 *                                   PPPPNNNNNNNN(3)
-	 *
-	 * new_vma == prev in case A,1,2
-	 * new_vma == next in case B,3
-	 */
-	new_vma = __vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
-			      vma->anon_vma, vma->vm_file, pgoff,
-			      vma_policy(vma), vma->vm_userfaultfd_ctx,
-				vma_get_anon_name(vma), true);
+	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
+			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+			    vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (new_vma) {
 		/*
 		 * Source vma may have been merged into new_vma
@@ -3265,15 +3245,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			get_file(new_vma->vm_file);
 		if (new_vma->vm_ops && new_vma->vm_ops->open)
 			new_vma->vm_ops->open(new_vma);
-		/*
-		 * As the VMA is linked right now, it may be hit by the
-		 * speculative page fault handler. But we don't want it to
-		 * to start mapping page in this area until the caller has
-		 * potentially move the pte from the moved VMA. To prevent
-		 * that we protect it right now, and let the caller unprotect
-		 * it once the move is done.
-		 */
-		vm_raw_write_begin(new_vma);
 		vma_link(mm, new_vma, prev, rb_link, rb_parent);
 		*need_rmap_locks = false;
 	}
mm/mremap.c  +0 −13
@@ -528,14 +528,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		return -ENOMEM;
 	}
 
-	/* new_vma is returned protected by copy_vma, to prevent speculative
-	 * page fault to be done in the destination area before we move the pte.
-	 * Now, we must also protect the source VMA since we don't want pages
-	 * to be mapped in our back while we are copying the PTEs.
-	 */
-	if (vma != new_vma)
-		vm_raw_write_begin(vma);
-
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
 				     need_rmap_locks);
 	if (moved_len < old_len) {
@@ -552,8 +544,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		 */
 		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
 				 true);
-		if (vma != new_vma)
-			vm_raw_write_end(vma);
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
@@ -562,10 +552,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		mremap_userfaultfd_prep(new_vma, uf);
 		arch_remap(mm, old_addr, old_addr + old_len,
 			   new_addr, new_addr + new_len);
-		if (vma != new_vma)
-			vm_raw_write_end(vma);
 	}
-	vm_raw_write_end(new_vma);
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
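Note on the calls dropped above: in the speculative page fault (SPF) series, vm_raw_write_begin()/vm_raw_write_end() write-lock a per-VMA sequence count so that a concurrent speculative fault notices the VMA is being changed and falls back to the regular path. A rough userspace analogue of that pairing, not kernel code; all names here (vma_stub, raw_write_begin(), read_snapshot()) are hypothetical stand-ins for the kernel primitives:

/* Rough analogue of the removed vm_raw_write_begin()/vm_raw_write_end()
 * pairing: the writer makes a sequence count odd around an update, and a
 * lockless reader bails out or retries when it sees the count odd or
 * changed under it. */
#include <stdatomic.h>
#include <stdio.h>

struct vma_stub {
	atomic_uint seq;		/* stands in for the per-VMA sequence count */
	unsigned long start, end;	/* fields a speculative reader would sample */
};

static void raw_write_begin(struct vma_stub *v)
{
	atomic_fetch_add(&v->seq, 1);	/* odd: update in progress */
}

static void raw_write_end(struct vma_stub *v)
{
	atomic_fetch_add(&v->seq, 1);	/* even again: update published */
}

/* Returns 1 and fills *start/*end only if no update raced with the read. */
static int read_snapshot(struct vma_stub *v, unsigned long *start,
			 unsigned long *end)
{
	unsigned int begin = atomic_load(&v->seq);

	if (begin & 1)
		return 0;		/* writer active: caller must fall back */
	*start = v->start;
	*end = v->end;
	return atomic_load(&v->seq) == begin;
}

int main(void)
{
	struct vma_stub v = { .start = 0x1000, .end = 0x2000 };
	unsigned long s, e;

	raw_write_begin(&v);	/* copy_vma() kept new_vma "write-locked"... */
	v.start = 0x3000;	/* ...while move_vma() moved the page tables */
	v.end = 0x4000;
	printf("read during update ok=%d\n", read_snapshot(&v, &s, &e));
	raw_write_end(&v);

	if (read_snapshot(&v, &s, &e))
		printf("read after update start=%#lx end=%#lx\n", s, e);
	return 0;
}

The hunks above drop exactly this pairing from copy_vma() and move_vma(), restoring the pre-SPF mremap() path.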