Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d516904b authored by Xiao Guangrong, committed by Linus Torvalds
Browse files

thp: merge page pre-alloc in khugepaged_loop into khugepaged_do_scan



There are two pre-alloc operations in these two functions; the difference is:
- it is allowed to sleep if the page allocation fails in khugepaged_loop
- it exits immediately if the page allocation fails in khugepaged_do_scan

Actually, in khugepaged_do_scan, we can allow the pre-alloc to sleep on
the first failure; then the operation in khugepaged_loop can be removed.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9817626e
Loading
Loading
Loading
Loading
+45 −52
Original line number Diff line number Diff line
@@ -2222,10 +2222,40 @@ static int khugepaged_wait_event(void)
		kthread_should_stop();
}

static void khugepaged_do_scan(struct page **hpage)
/*
 * Sleep between hugepage allocation attempts.  The wait condition is
 * hard-coded to false, so the task sleeps for the full
 * khugepaged_alloc_sleep_millisecs timeout; the freezable variant is
 * used so the sleep does not block the freezer (system suspend).
 */
static void khugepaged_alloc_sleep(void)
{
	wait_event_freezable_timeout(khugepaged_wait, false,
			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}

#ifndef CONFIG_NUMA
/*
 * Pre-allocate a huge page for collapse, retrying while khugepaged
 * stays enabled.  On the first allocation failure (*wait still true)
 * the caller's wait budget is consumed: *wait is cleared and we sleep
 * once via khugepaged_alloc_sleep() before retrying.  A failure with
 * *wait already false returns NULL immediately.  Allocation outcomes
 * are accounted via the THP_COLLAPSE_ALLOC{,_FAILED} vm events.
 */
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *page;

	for (;;) {
		page = alloc_hugepage(khugepaged_defrag());
		if (page) {
			count_vm_event(THP_COLLAPSE_ALLOC);
			break;
		}

		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		if (!*wait)
			return NULL;

		*wait = false;
		khugepaged_alloc_sleep();

		if (!khugepaged_enabled())
			break;
	}

	return page;
}
#endif

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

@@ -2233,17 +2263,18 @@ static void khugepaged_do_scan(struct page **hpage)
		cond_resched();

#ifndef CONFIG_NUMA
		if (!*hpage) {
			*hpage = alloc_hugepage(khugepaged_defrag());
			if (unlikely(!*hpage)) {
				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		if (!hpage)
			hpage = khugepaged_alloc_hugepage(&wait);

		if (unlikely(!hpage))
			break;
			}
			count_vm_event(THP_COLLAPSE_ALLOC);
		}
#else
		if (IS_ERR(*hpage))
		if (IS_ERR(hpage)) {
			if (!wait)
				break;
			wait = false;
			khugepaged_alloc_sleep();
		}
#endif

		if (unlikely(kthread_should_stop() || freezing(current)))
@@ -2255,36 +2286,15 @@ static void khugepaged_do_scan(struct page **hpage)
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    hpage);
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}
}

/*
 * Sleep between hugepage allocation attempts.  The wait condition is
 * hard-coded to false, so the task sleeps for the full
 * khugepaged_alloc_sleep_millisecs timeout; the freezable variant is
 * used so the sleep does not block the freezer (system suspend).
 */
static void khugepaged_alloc_sleep(void)
{
	wait_event_freezable_timeout(khugepaged_wait, false,
			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}

#ifndef CONFIG_NUMA
static struct page *khugepaged_alloc_hugepage(void)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) &&
		 likely(khugepaged_enabled()));
	return hpage;
	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
#endif

static void khugepaged_wait_work(void)
{
@@ -2306,25 +2316,8 @@ static void khugepaged_wait_work(void)

static void khugepaged_loop(void)
{
	struct page *hpage = NULL;

	while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA
		hpage = khugepaged_alloc_hugepage();
		if (unlikely(!hpage))
			break;
#else
		if (IS_ERR(hpage)) {
			khugepaged_alloc_sleep();
			hpage = NULL;
		}
#endif

		khugepaged_do_scan(&hpage);

		if (!IS_ERR_OR_NULL(hpage))
			put_page(hpage);

		khugepaged_do_scan();
		khugepaged_wait_work();
	}
}