Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 62cd1d97 authored by Laurent Dufour's avatar Laurent Dufour Committed by Vinayak Menon
Browse files

mm: introduce __lru_cache_add_active_or_unevictable



The speculative page fault handler, which runs without holding the
mmap_sem, calls lru_cache_add_active_or_unevictable(), but the vma's
vm_flags value is not guaranteed to remain constant in that context.
Introduce __lru_cache_add_active_or_unevictable(), which takes the vma
flags value as a parameter instead of the vma pointer.

Change-Id: I68decbe0f80847403127c45c97565e47512532e9
Acked-by: default avatarDavid Rientjes <rientjes@google.com>
Signed-off-by: default avatarLaurent Dufour <ldufour@linux.vnet.ibm.com>
Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:20
[vinmenon@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: default avatarVinayak Menon <vinmenon@codeaurora.org>
parent 04febfe9
Loading
Loading
Loading
Loading
+8 −2
Original line number Diff line number Diff line
@@ -313,8 +313,14 @@ extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);
extern void __lru_cache_add_active_or_unevictable(struct page *page,
						unsigned long vma_flags);

/*
 * Convenience wrapper for callers that hold a stable vma: forwards the
 * vma's current vm_flags to __lru_cache_add_active_or_unevictable().
 * Callers that cannot rely on vm_flags staying constant (e.g. the
 * speculative page fault path) must call the __ variant directly with a
 * snapshotted flags value.
 */
static inline void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma)
{
	/*
	 * No "return" keyword here: returning a void expression from a
	 * void function is an ISO C constraint violation (C99 6.8.6.4)
	 * and triggers a checkpatch warning.
	 */
	__lru_cache_add_active_or_unevictable(page, vma->vm_flags);
}

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
+4 −4
Original line number Diff line number Diff line
@@ -2229,7 +2229,7 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
		ptep_clear_flush_notify(vma, fe->address, fe->pte);
		page_add_new_anon_rmap(new_page, vma, fe->address, false);
		mem_cgroup_commit_charge(new_page, memcg, false, false);
		lru_cache_add_active_or_unevictable(new_page, vma);
		__lru_cache_add_active_or_unevictable(new_page, fe->vma_flags);
		/*
		 * We call the notify macro here because, when using secondary
		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -2701,7 +2701,7 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
	} else { /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, fe->address, false);
		mem_cgroup_commit_charge(page, memcg, false, false);
		lru_cache_add_active_or_unevictable(page, vma);
		__lru_cache_add_active_or_unevictable(page, fe->vma_flags);
	}

	swap_free(entry);
@@ -2840,7 +2840,7 @@ static int do_anonymous_page(struct fault_env *fe)
	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, fe->address, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, vma);
	__lru_cache_add_active_or_unevictable(page, fe->vma_flags);
setpte:
	set_pte_at(vma->vm_mm, fe->address, fe->pte, entry);

@@ -3076,7 +3076,7 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
		page_add_new_anon_rmap(page, vma, fe->address, false);
		mem_cgroup_commit_charge(page, memcg, false, false);
		lru_cache_add_active_or_unevictable(page, vma);
		__lru_cache_add_active_or_unevictable(page, fe->vma_flags);
	} else {
		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
		page_add_file_rmap(page, false);
+3 −3
Original line number Diff line number Diff line
@@ -468,12 +468,12 @@ void add_page_to_unevictable_list(struct page *page)
 * directly back onto it's zone's unevictable list, it does NOT use a
 * per cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
void __lru_cache_add_active_or_unevictable(struct page *page,
					   unsigned long vma_flags)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
	if (likely((vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;