Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4e48a9f1 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: introduce __vm_normal_page()"

parents b1b0a4ec feb3e8b6
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -311,7 +311,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	INIT_VMA(vma);

	err = insert_vm_struct(mm, vma);
	if (err)
+4 −1
Original line number Diff line number Diff line
@@ -1221,8 +1221,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
					goto out_mm;
				}
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vm_write_begin(vma);
					WRITE_ONCE(vma->vm_flags,
						vma->vm_flags & ~VM_SOFTDIRTY);
					vma_set_page_prot(vma);
					vm_write_end(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
+13 −4
Original line number Diff line number Diff line
@@ -656,8 +656,11 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vm_write_begin(vma);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
		WRITE_ONCE(vma->vm_flags,
			   vma->vm_flags & ~(VM_UFFD_WP | VM_UFFD_MISSING));
		vm_write_end(vma);
		return 0;
	}

@@ -883,8 +886,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
			vma = prev;
		else
			prev = vma;
		vma->vm_flags = new_flags;
		vm_write_begin(vma);
		WRITE_ONCE(vma->vm_flags, new_flags);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vm_write_end(vma);
	}
	up_write(&mm->mmap_sem);
	mmput(mm);
@@ -1443,8 +1448,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vm_write_begin(vma);
		WRITE_ONCE(vma->vm_flags, new_flags);
		vma->vm_userfaultfd_ctx.ctx = ctx;
		vm_write_end(vma);

	skip:
		prev = vma;
@@ -1602,8 +1609,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vm_write_begin(vma);
		WRITE_ONCE(vma->vm_flags, new_flags);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vm_write_end(vma);

	skip:
		prev = vma;
+2 −2
Original line number Diff line number Diff line
@@ -127,14 +127,14 @@ static inline void __ClearPageMovable(struct page *page)
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
				  struct vm_fault *vmf, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
					 struct vm_fault *vmf, int node)
{
	return -EAGAIN; /* can't migrate now */
}
+91 −10
Original line number Diff line number Diff line
@@ -350,6 +350,12 @@ struct vm_fault {
					 * page table to avoid allocation from
					 * atomic context.
					 */
	/*
	 * These entries are required when handling speculative page fault.
	 * This way the page handling is done using consistent field values.
	 */
	unsigned long vma_flags;
	pgprot_t vma_page_prot;
};

/* page entry size for vm->huge_fault() */
@@ -668,9 +674,9 @@ void free_compound_page(struct page *page);
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
/*
 * Post-patch form: takes the vma flags by value instead of the vma
 * pointer, so callers on the speculative-fault path can pass a snapshot
 * of vm_flags rather than dereferencing the vma.
 */
static inline pte_t maybe_mkwrite(pte_t pte, unsigned long vma_flags)
{
	if (likely(vma_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
@@ -1185,6 +1191,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
#define VM_FAULT_DONE_COW   0x1000	/* ->fault has fully handled COW */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
#define VM_FAULT_PTNOTSAME 0x4000	/* Page table entries have changed */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
@@ -1239,9 +1246,29 @@ struct zap_details {
	pgoff_t last_index;			/* Highest page->index to unmap */
};

/*
 * Initialize the freshly allocated parts of a vm_area_struct: the
 * anon_vma chain list head and, when speculative page fault handling is
 * configured, the vm_sequence seqcount protecting VMA updates.
 */
static inline void INIT_VMA(struct vm_area_struct *vma)
{
	INIT_LIST_HEAD(&vma->anon_vma_chain);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	seqcount_init(&vma->vm_sequence);
#endif
}

/*
 * __vm_normal_page() takes the vma flags explicitly so that callers on
 * the speculative page fault path can pass a previously captured
 * snapshot instead of re-reading the live vma->vm_flags.
 */
struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte, bool with_public_device,
			      unsigned long vma_flags);
/* Non-speculative wrapper: use the current vma->vm_flags. */
static inline struct page *_vm_normal_page(struct vm_area_struct *vma,
					    unsigned long addr, pte_t pte,
					    bool with_public_device)
{
	return __vm_normal_page(vma, addr, pte, with_public_device,
				vma->vm_flags);
}
/* Common case: no device-public pages. */
static inline struct page *vm_normal_page(struct vm_area_struct *vma,
					  unsigned long addr, pte_t pte)
{
	return _vm_normal_page(vma, addr, pte, false);
}

struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);
@@ -1321,6 +1348,47 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
/*
 * Writer-side helpers for vma->vm_sequence.  A writer brackets every
 * modification of VMA fields with vm_write_begin()/vm_write_end() so
 * that speculative (lockless) readers of the VMA can detect a
 * concurrent update via the seqcount and retry.
 */
static inline void vm_write_begin(struct vm_area_struct *vma)
{
	write_seqcount_begin(&vma->vm_sequence);
}
/* Nested variant: passes a lockdep subclass for nested VMA locking. */
static inline void vm_write_begin_nested(struct vm_area_struct *vma,
					 int subclass)
{
	write_seqcount_begin_nested(&vma->vm_sequence, subclass);
}
static inline void vm_write_end(struct vm_area_struct *vma)
{
	write_seqcount_end(&vma->vm_sequence);
}
/*
 * Raw variants: bump the seqcount without lockdep bookkeeping — for
 * contexts where the ordinary annotations cannot be used.
 */
static inline void vm_raw_write_begin(struct vm_area_struct *vma)
{
	raw_write_seqcount_begin(&vma->vm_sequence);
}
static inline void vm_raw_write_end(struct vm_area_struct *vma)
{
	raw_write_seqcount_end(&vma->vm_sequence);
}
#else
/* No speculative faults configured: all of these compile to nothing. */
static inline void vm_write_begin(struct vm_area_struct *vma)
{
}
static inline void vm_write_begin_nested(struct vm_area_struct *vma,
					 int subclass)
{
}
static inline void vm_write_end(struct vm_area_struct *vma)
{
}
static inline void vm_raw_write_begin(struct vm_area_struct *vma)
{
}
static inline void vm_raw_write_end(struct vm_area_struct *vma)
{
}
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -2088,16 +2156,29 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
	struct vm_area_struct *expand, bool keep_locked);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
	return __vma_adjust(vma, start, end, pgoff, insert, NULL, false);
}
extern struct vm_area_struct *__vma_merge(struct mm_struct *mm,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *anon, struct file *file,
	pgoff_t pgoff, struct mempolicy *mpol, struct vm_userfaultfd_ctx uff,
	const char __user *user, bool keep_locked);

/*
 * Historical interface: merge adjacent VMAs without keeping the merged
 * VMA write-locked (keep_locked == false preserves the old behaviour).
 */
static inline struct vm_area_struct *vma_merge(struct mm_struct *mm,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *anon, struct file *file,
	pgoff_t off, struct mempolicy *pol, struct vm_userfaultfd_ctx uff,
	const char __user *user)
{
	return __vma_merge(mm, prev, addr, end, vm_flags, anon, file, off,
			   pol, uff, user, false);
}

extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
Loading