Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 22eccdd7 authored by Dan Carpenter, committed by Linus Torvalds
Browse files

ksm: check for ERR_PTR from follow_page()



The follow_page() function can potentially return -EFAULT so I added
checks for this.

Also I silenced an uninitialized variable warning on my version of gcc
(version 4.3.2).

Signed-off-by: Dan Carpenter <error27@gmail.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 453dc659
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (!page)
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
@@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (!page)
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
@@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (!tree_page)
		if (IS_ERR_OR_NULL(tree_page))
			return NULL;

		/*
@@ -1294,7 +1294,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
			if (ksm_test_exit(mm))
				break;
			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
			if (*page && PageAnon(*page)) {
			if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
				flush_anon_page(vma, *page, ksm_scan.address);
				flush_dcache_page(*page);
				rmap_item = get_next_rmap_item(slot,
@@ -1308,7 +1308,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
				up_read(&mm->mmap_sem);
				return rmap_item;
			}
			if (*page)
			if (!IS_ERR_OR_NULL(*page))
				put_page(*page);
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
@@ -1367,7 +1367,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
static void ksm_do_scan(unsigned int scan_npages)
{
	struct rmap_item *rmap_item;
	struct page *page;
	struct page *uninitialized_var(page);

	while (scan_npages--) {
		cond_resched();