Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5a83f94a authored by Minchan Kim's avatar Minchan Kim Committed by Gerrit - the friendly Code Review server
Browse files

mm: Enhance per process reclaim to consider shared pages



Some pages may be shared by several processes (e.g., libc).
In that case, it is wasteful to reclaim them from the beginning.

This patch makes the VM keep such pages in memory until the last
task tries to reclaim them, so shared pages are reclaimed only
once every task mapping them has been swapped out.

This feature does not handle non-linear mappings on ramfs, because
doing so is very time-consuming, does not guarantee any reclaim,
and is not a common case.

Change-Id: I7e5f34f2e947f5db6d405867fe2ad34863ca40f7
Signed-off-by: default avatarSangseok Lee <sangseok.lee@lge.com>
Signed-off-by: default avatarMinchan Kim <minchan@kernel.org>
Patch-mainline: linux-mm @ 9 May 2013 16:21:27
[vinmenon@codeaurora.org: merge conflict fixes + fix for ksm]
Signed-off-by: default avatarVinayak Menon <vinmenon@codeaurora.org>
parent 9eb8c46c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1670,7 +1670,7 @@ static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
			break;
	}
	pte_unmap_unlock(pte - 1, ptl);
	reclaim_pages_from_list(&page_list);
	reclaim_pages_from_list(&page_list, vma);
	if (addr != end)
		goto cont;

+6 −3
Original line number Diff line number Diff line
@@ -14,7 +14,8 @@

extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
extern unsigned long reclaim_pages_from_list(struct list_head *page_list);
extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
					     struct vm_area_struct *vma);

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
@@ -207,7 +208,8 @@ static inline void page_dup_rmap(struct page *page, bool compound)
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);

bool try_to_unmap(struct page *, enum ttu_flags flags);
bool try_to_unmap(struct page *page, enum ttu_flags flags,
				struct vm_area_struct *vma);

/* Avoid racy checks */
#define PVMW_SYNC		(1 << 0)
@@ -273,6 +275,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 */
struct rmap_walk_control {
	void *arg;
	struct vm_area_struct *target_vma;
	/*
	 * Return false if page table scanning in rmap_walk should be stopped.
	 * Otherwise, return true.
@@ -301,7 +304,7 @@ static inline int page_referenced(struct page *page, int is_locked,
	return 0;
}

#define try_to_unmap(page, refs) false
#define try_to_unmap(page, refs, vma) false

static inline int page_mkclean(struct page *page)
{
+1 −0
Original line number Diff line number Diff line
@@ -2593,6 +2593,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
	stable_node = page_stable_node(page);
	if (!stable_node)
		return;

again:
	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
+1 −1
Original line number Diff line number Diff line
@@ -1029,7 +1029,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	unmap_success = try_to_unmap(hpage, ttu);
	unmap_success = try_to_unmap(hpage, ttu, NULL);
	if (!unmap_success)
		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));
+3 −1
Original line number Diff line number Diff line
@@ -1490,7 +1490,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
			if (WARN_ON(PageLRU(page)))
				isolate_lru_page(page);
			if (page_mapped(page))
				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
				try_to_unmap(page,
					TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS,
					NULL);
			continue;
		}

Loading