
Commit 8597343a authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "mm: protect SPF handler against anon_vma changes"

parents 57da8b0b 7875c0d5
+4 −1
@@ -1201,8 +1201,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 					goto out_mm;
 				}
 				for (vma = mm->mmap; vma; vma = vma->vm_next) {
-					vma->vm_flags &= ~VM_SOFTDIRTY;
+					vm_write_begin(vma);
+					WRITE_ONCE(vma->vm_flags,
+						vma->vm_flags & ~VM_SOFTDIRTY);
 					vma_set_page_prot(vma);
+					vm_write_end(vma);
 				}
 				downgrade_write(&mm->mmap_sem);
 				break;
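The hunk above is representative of the write-side convention this merge applies throughout: an update to a VMA field that a speculative (mmap_sem-free) page-fault handler may read is bracketed by vm_write_begin()/vm_write_end(), and vm_flags is stored with WRITE_ONCE() so a lockless reader pairing it with READ_ONCE() observes either the old or the new value rather than a torn intermediate of the read-modify-write. A minimal sketch of that pattern follows; vma_clear_vm_flags() is a hypothetical helper, not something added by this commit.

/*
 * Illustrative sketch only.  With CONFIG_SPECULATIVE_PAGE_FAULT enabled,
 * vm_write_begin()/vm_write_end() bump vma->vm_sequence around the update;
 * without it they compile to no-ops and only the WRITE_ONCE() remains.
 */
#include <linux/mm.h>

static void vma_clear_vm_flags(struct vm_area_struct *vma, unsigned long flags)
{
	vm_write_begin(vma);
	WRITE_ONCE(vma->vm_flags, vma->vm_flags & ~flags);
	vm_write_end(vma);
}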
+13 −4
@@ -664,8 +664,11 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
 
 	octx = vma->vm_userfaultfd_ctx.ctx;
 	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
+		vm_write_begin(vma);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
+		WRITE_ONCE(vma->vm_flags,
+			   vma->vm_flags & ~(VM_UFFD_WP | VM_UFFD_MISSING));
+		vm_write_end(vma);
 		return 0;
 	}
 
@@ -904,8 +907,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 			vma = prev;
 		else
 			prev = vma;
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vm_write_end(vma);
 	}
 	up_write(&mm->mmap_sem);
 	mmput(mm);
@@ -1468,8 +1473,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx.ctx = ctx;
+		vm_write_end(vma);
 
 	skip:
 		prev = vma;
@@ -1629,8 +1636,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vm_write_end(vma);
 
 	skip:
 		prev = vma;
+62 −5
@@ -456,6 +456,9 @@ struct vm_operations_struct {
 static inline void INIT_VMA(struct vm_area_struct *vma)
 {
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	seqcount_init(&vma->vm_sequence);
+#endif
 }
 
 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
@@ -1401,6 +1404,47 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address,
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write);
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+static inline void vm_write_begin(struct vm_area_struct *vma)
+{
+	write_seqcount_begin(&vma->vm_sequence);
+}
+static inline void vm_write_begin_nested(struct vm_area_struct *vma,
+					 int subclass)
+{
+	write_seqcount_begin_nested(&vma->vm_sequence, subclass);
+}
+static inline void vm_write_end(struct vm_area_struct *vma)
+{
+	write_seqcount_end(&vma->vm_sequence);
+}
+static inline void vm_raw_write_begin(struct vm_area_struct *vma)
+{
+	raw_write_seqcount_begin(&vma->vm_sequence);
+}
+static inline void vm_raw_write_end(struct vm_area_struct *vma)
+{
+	raw_write_seqcount_end(&vma->vm_sequence);
+}
+#else
+static inline void vm_write_begin(struct vm_area_struct *vma)
+{
+}
+static inline void vm_write_begin_nested(struct vm_area_struct *vma,
+					 int subclass)
+{
+}
+static inline void vm_write_end(struct vm_area_struct *vma)
+{
+}
+static inline void vm_raw_write_begin(struct vm_area_struct *vma)
+{
+}
+static inline void vm_raw_write_end(struct vm_area_struct *vma)
+{
+}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -2233,16 +2277,29 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand);
+	struct vm_area_struct *expand, bool keep_locked);
 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
 {
-	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
+	return __vma_adjust(vma, start, end, pgoff, insert, NULL, false);
 }
-extern struct vm_area_struct *vma_merge(struct mm_struct *,
+
+extern struct vm_area_struct *__vma_merge(struct mm_struct *mm,
+	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
+	unsigned long vm_flags, struct anon_vma *anon, struct file *file,
+	pgoff_t pgoff, struct mempolicy *mpol, struct vm_userfaultfd_ctx uff,
+	const char __user *user, bool keep_locked);
+
+static inline struct vm_area_struct *vma_merge(struct mm_struct *mm,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
-	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
+	unsigned long vm_flags, struct anon_vma *anon, struct file *file,
+	pgoff_t off, struct mempolicy *pol, struct vm_userfaultfd_ctx uff,
+	const char __user *user)
+{
+	return __vma_merge(mm, prev, addr, end, vm_flags, anon, file, off,
+			   pol, uff, user, false);
+}
+
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
 	unsigned long addr, int new_below);
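The vm_sequence seqcount and the vm_write_begin()/vm_write_end() helpers defined in this header diff are what let a reader that holds no mmap_sem detect the writers above. A minimal reader-side sketch using the generic seqcount API follows; vma_flags_snapshot() is hypothetical and assumes CONFIG_SPECULATIVE_PAGE_FAULT=y, since the actual speculative fault path is not part of this diff.

/*
 * Hypothetical reader-side sketch, not taken from this commit: snapshot
 * vm_flags without mmap_sem and report whether a concurrent
 * vm_write_begin()/vm_write_end() section raced with the read.
 */
#include <linux/mm.h>
#include <linux/seqlock.h>

static bool vma_flags_snapshot(struct vm_area_struct *vma, unsigned long *flags)
{
	unsigned int seq;

	seq = read_seqcount_begin(&vma->vm_sequence);	/* spins while a writer is active */
	*flags = READ_ONCE(vma->vm_flags);		/* pairs with the writers' WRITE_ONCE() */
	return !read_seqcount_retry(&vma->vm_sequence, seq);	/* true if the snapshot is stable */
}

A caller of such a helper would typically retry, or fall back to the mmap_sem-protected slow path, whenever it returns false.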
+3 −0
@@ -330,6 +330,9 @@ struct vm_area_struct {
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	seqcount_t vm_sequence;
+#endif
 } __randomize_layout;
 
 struct core_thread {
+3 −0
@@ -1012,6 +1012,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	if (mm_find_pmd(mm, address) != pmd)
 		goto out;
 
+	vm_write_begin(vma);
 	anon_vma_lock_write(vma->anon_vma);
 
 	pte = pte_offset_map(pmd, address);
@@ -1047,6 +1048,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
 		spin_unlock(pmd_ptl);
 		anon_vma_unlock_write(vma->anon_vma);
+		vm_write_end(vma);
 		result = SCAN_FAIL;
 		goto out;
 	}
@@ -1081,6 +1083,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
 	spin_unlock(pmd_ptl);
+	vm_write_end(vma);
 
 	*hpage = NULL;
 