
Commit f05af998 authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "mm: introduce __vm_normal_page()"

parents 8597343a d3061f2e
+2 −2
@@ -126,14 +126,14 @@ static inline void __ClearPageMovable(struct page *page)
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
-				  struct vm_area_struct *vma, int node);
+				  struct vm_fault *vmf, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
-					 struct vm_area_struct *vma, int node)
+					 struct vm_fault *vmf, int node)
{
	return -EAGAIN; /* can't migrate now */
}
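
The hunk above switches migrate_misplaced_page() from taking the vma to taking the struct vm_fault, and updates the !CONFIG_NUMA_BALANCING stub to the same signature. Below is a minimal standalone sketch of why a compiled-out stub has to mirror the real prototype; CONFIG_FEATURE, struct fault and migrate_thing() are invented names for illustration, not kernel APIs.

/*
 * Standalone sketch (not kernel code): a feature-gated function and its
 * compiled-out stub must keep identical signatures, otherwise callers
 * break in configurations where the feature is off.
 */
#include <stdio.h>

struct fault {
	int node;
};

#ifdef CONFIG_FEATURE
int migrate_thing(struct fault *f, int node);
#else
static inline int migrate_thing(struct fault *f, int node)
{
	(void)f;
	(void)node;
	return -1;	/* can't migrate now */
}
#endif

int main(void)
{
	struct fault f = { .node = 0 };

	printf("%d\n", migrate_thing(&f, 1));
	return 0;
}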
+23 −5
@@ -377,6 +377,12 @@ struct vm_fault {
					 * page table to avoid allocation from
					 * atomic context.
					 */
+	/*
+	 * These entries are required when handling speculative page fault.
+	 * This way the page handling is done using consistent field values.
+	 */
+	unsigned long vma_flags;
+	pgprot_t vma_page_prot;
};

/* page entry size for vm->huge_fault() */
@@ -730,9 +736,9 @@ void free_compound_page(struct page *page);
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
-static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+static inline pte_t maybe_mkwrite(pte_t pte, unsigned long vma_flags)
{
-	if (likely(vma->vm_flags & VM_WRITE))
+	if (likely(vma_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
@@ -1330,9 +1336,21 @@ struct zap_details {
	pgoff_t last_index;			/* Highest page->index to unmap */
};

-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-			     pte_t pte, bool with_public_device);
-#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+			      pte_t pte, bool with_public_device,
+			      unsigned long vma_flags);
+static inline struct page *_vm_normal_page(struct vm_area_struct *vma,
+					    unsigned long addr, pte_t pte,
+					    bool with_public_device)
+{
+	return __vm_normal_page(vma, addr, pte, with_public_device,
+				vma->vm_flags);
+}
+static inline struct page *vm_normal_page(struct vm_area_struct *vma,
+					  unsigned long addr, pte_t pte)
+{
+	return _vm_normal_page(vma, addr, pte, false);
+}

struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);
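
A pattern worth noting in this header: the existing entry points are not removed but kept as static inline wrappers that forward vma->vm_flags to the new __vm_normal_page(), so current callers are untouched while a speculative caller can pass its own snapshot of the flags. Below is a self-contained sketch of the same refactoring under made-up names (struct thing, thing_is_writable); it illustrates the wrapper pattern, it is not the kernel API.

/*
 * Standalone sketch: the old helper survives as a wrapper around a new
 * double-underscore variant that takes the flags explicitly.
 */
#include <stdio.h>

#define THING_WRITABLE 0x1UL

struct thing {
	unsigned long flags;
};

/* New extended helper: flags are an explicit argument, so callers may
 * pass a snapshot taken earlier instead of the live t->flags. */
static int __thing_is_writable(struct thing *t, unsigned long flags)
{
	(void)t;	/* the object is still available to the callee */
	return (flags & THING_WRITABLE) != 0;
}

/* Old API preserved unchanged for existing callers. */
static inline int thing_is_writable(struct thing *t)
{
	return __thing_is_writable(t, t->flags);
}

int main(void)
{
	struct thing t = { .flags = THING_WRITABLE };
	unsigned long snapshot = t.flags;	/* captured before t changes */

	t.flags = 0;				/* later, concurrent-style update */

	/* The wrapper sees the live value; the __ variant sees the snapshot. */
	printf("live=%d snapshot=%d\n",
	       thing_is_writable(&t),
	       __thing_is_writable(&t, snapshot));
	return 0;
}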
+8 −2
@@ -349,8 +349,14 @@ extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

-extern void lru_cache_add_active_or_unevictable(struct page *page,
-						struct vm_area_struct *vma);
+extern void __lru_cache_add_active_or_unevictable(struct page *page,
+						unsigned long vma_flags);
+
+static inline void lru_cache_add_active_or_unevictable(struct page *page,
+						struct vm_area_struct *vma)
+{
+	return __lru_cache_add_active_or_unevictable(page, vma->vm_flags);
+}

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
+3 −3
@@ -1192,8 +1192,8 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t entry;
-		entry = mk_pte(pages[i], vma->vm_page_prot);
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		entry = mk_pte(pages[i], vmf->vma_page_prot);
+		entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
@@ -2169,7 +2169,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
				entry = pte_swp_mksoft_dirty(entry);
		} else {
			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
-			entry = maybe_mkwrite(entry, vma);
+			entry = maybe_mkwrite(entry, vma->vm_flags);
			if (!write)
				entry = pte_wrprotect(entry);
			if (!young)
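
In these call sites the protection bits and flags now come from the vm_fault (vmf->vma_page_prot, vmf->vma_flags) rather than being re-read from the vma, so every entry built while handling one fault is derived from the same values. A small standalone sketch of that idea follows, with invented names (fault_desc, maybe_make_writable), not the kernel's.

/*
 * Standalone sketch: entries are built from values carried in the fault
 * descriptor, so a loop over many entries cannot mix old and new values
 * if the underlying area changes midway.
 */
#include <stdio.h>

#define F_WRITE 0x1UL

typedef unsigned long entry_t;

struct fault_desc {
	unsigned long flags;	/* snapshot of the area's flags */
	entry_t prot;		/* snapshot of the area's protection bits */
};

/* Analogue of maybe_mkwrite(): decides on the snapshot, not the object. */
static entry_t maybe_make_writable(entry_t e, unsigned long flags)
{
	if (flags & F_WRITE)
		e |= 0x2;	/* pretend bit 1 is the write-enable bit */
	return e;
}

int main(void)
{
	struct fault_desc fd = { .flags = F_WRITE, .prot = 0x4 };
	int i;

	/* Every entry in the loop comes from the same snapshot. */
	for (i = 0; i < 4; i++) {
		entry_t e = maybe_make_writable(fd.prot, fd.flags);
		printf("entry %d = %#lx\n", i, e);
	}
	return 0;
}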
+2 −0
@@ -3763,6 +3763,8 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
				.vma = vma,
				.address = haddr,
				.flags = flags,
+				.vma_flags = vma->vm_flags,
+				.vma_page_prot = vma->vm_page_prot,
				/*
				 * Hard to debug if it ends up being
				 * used by a callee that assumes
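
This last hunk is where the snapshot is taken: when the vm_fault is filled in, vma->vm_flags and vma->vm_page_prot are copied into the new vma_flags and vma_page_prot fields, and the rest of the fault handling reads the copies. A standalone sketch of that initialization step, using invented names (struct area, struct fault_desc) rather than kernel types:

/*
 * Standalone sketch: the fault descriptor is filled with copies of the
 * area's fields at fault time, so later changes to the area do not leak
 * into this fault's handling.
 */
#include <stdio.h>

struct area {
	unsigned long flags;
	unsigned long page_prot;
};

struct fault_desc {
	struct area *area;
	unsigned long area_flags;	/* copy of area->flags */
	unsigned long area_page_prot;	/* copy of area->page_prot */
};

int main(void)
{
	struct area a = { .flags = 0x1, .page_prot = 0x4 };

	/* Designated initializer mirrors the .vma_flags / .vma_page_prot
	 * assignments in the hunk above. */
	struct fault_desc fd = {
		.area = &a,
		.area_flags = a.flags,
		.area_page_prot = a.page_prot,
	};

	a.flags = 0;	/* the area changes after the fault started */

	printf("descriptor still sees flags=%#lx prot=%#lx\n",
	       fd.area_flags, fd.area_page_prot);
	return 0;
}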