
Commit 808c47e1 authored by Laurent Dufour, committed by Gerrit - the friendly Code Review server

mm: introduce __lru_cache_add_active_or_unevictable



The speculative page fault handler, which runs without holding the
mmap_sem, calls lru_cache_add_active_or_unevictable(), but the vma's
vm_flags value is not guaranteed to remain constant.
Introduce __lru_cache_add_active_or_unevictable(), which takes the vma
flags value as a parameter instead of the vma pointer.

Change-Id: I68decbe0f80847403127c45c97565e47512532e9
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:20
[vinmenon@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
[charante@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
parent f86d0448
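
As context for the diff below, a minimal sketch (not part of this commit) of the calling pattern the new helper enables. It assumes the SPF series' vmf->vma_flags field, i.e. a copy of the vma flags captured when the speculative fault began, so the hot path never re-reads flags from a vma that may be changing concurrently:

/*
 * Illustration only -- a hypothetical speculative fault path.  With
 * mmap_sem not held, vma->vm_flags may change under us, so the LRU
 * insertion uses the flags value snapshotted earlier into the fault
 * structure instead of dereferencing the vma again.
 */
static void finish_speculative_anon_fault(struct vm_fault *vmf,
					  struct page *page)
{
	page_add_new_anon_rmap(page, vmf->vma, vmf->address, false);
	__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
}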
include/linux/swap.h +8 −2
@@ -349,8 +349,14 @@ extern void deactivate_file_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
 
-extern void lru_cache_add_active_or_unevictable(struct page *page,
-						struct vm_area_struct *vma);
+extern void __lru_cache_add_active_or_unevictable(struct page *page,
+						unsigned long vma_flags);
+
+static inline void lru_cache_add_active_or_unevictable(struct page *page,
+						struct vm_area_struct *vma)
+{
+	return __lru_cache_add_active_or_unevictable(page, vma->vm_flags);
+}
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
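
Existing callers that do hold mmap_sem are unaffected: the old name survives as a static inline wrapper that simply forwards vma->vm_flags. A hypothetical conventional call site (illustration only, not from this commit) keeps its current shape:

/*
 * Hypothetical caller with mmap_sem held: the vma pointer is stable,
 * so the inline wrapper may safely read vma->vm_flags on its behalf.
 */
static void add_anon_page_locked(struct vm_area_struct *vma,
				 struct page *page, unsigned long addr)
{
	page_add_new_anon_rmap(page, vma, addr, false);
	lru_cache_add_active_or_unevictable(page, vma);
}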
mm/memory.c +4 −4
@@ -2576,7 +2576,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(new_page, memcg, false, false);
-		lru_cache_add_active_or_unevictable(new_page, vma);
+		__lru_cache_add_active_or_unevictable(new_page, vmf->vma_flags);
 		/*
 		 * We call the notify macro here because, when using secondary
 		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3114,7 +3114,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (unlikely(page != swapcache && swapcache)) {
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
-		lru_cache_add_active_or_unevictable(page, vma);
+		__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
 	} else {
 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
 		mem_cgroup_commit_charge(page, memcg, true, false);
@@ -3265,7 +3265,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, vmf->address, false);
 	mem_cgroup_commit_charge(page, memcg, false, false);
-	lru_cache_add_active_or_unevictable(page, vma);
+	__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
 setpte:
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -3547,7 +3547,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
-		lru_cache_add_active_or_unevictable(page, vma);
+		__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
 	} else {
 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page, false);
mm/swap.c +3 −3
@@ -456,12 +456,12 @@ void lru_cache_add(struct page *page)
  * directly back onto it's zone's unevictable list, it does NOT use a
  * per cpu pagevec.
  */
-void lru_cache_add_active_or_unevictable(struct page *page,
-					 struct vm_area_struct *vma)
+void __lru_cache_add_active_or_unevictable(struct page *page,
+					   unsigned long vma_flags)
 {
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
+	if (likely((vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 		SetPageActive(page);
 	else if (!TestSetPageMlocked(page)) {
 		/*
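
The test itself is unchanged apart from reading the passed-in flags: a newly faulted page goes onto the active path unless its vma is mlocked (VM_LOCKED set) and not a special mapping (VM_SPECIAL clear), in which case it takes the mlocked/unevictable path. Restated as a standalone helper, purely as a sketch and not commit code:

/* Sketch of the unchanged predicate, now applied to a flags snapshot. */
static bool lru_should_activate(unsigned long vma_flags)
{
	/* Activate unless VM_LOCKED is set without VM_SPECIAL. */
	return (vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED;
}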