
Commit 579c571e authored by Song Liu, committed by Linus Torvalds

khugepaged: rename collapse_shmem() and khugepaged_scan_shmem()

The next patch will add khugepaged support for non-shmem files.  This patch
renames these two functions to reflect the new functionality:

    collapse_shmem()        =>  collapse_file()
    khugepaged_scan_shmem() =>  khugepaged_scan_file()

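The substance of the rename is a calling-convention change, visible in the
diff below: instead of taking the struct address_space * directly, the renamed
functions take the struct file * and derive the mapping via file->f_mapping,
so the same code path can later serve any file-backed mapping, not just shmem.
A minimal userspace sketch of that pattern follows; the structs are toy
stand-ins, not the kernel's real definitions, and the printf bodies are
placeholders for the actual collapse logic:

#include <stdio.h>

/* Toy stand-ins for the kernel types, reduced to what the sketch needs. */
struct address_space { const char *name; };
struct file { struct address_space *f_mapping; };

/* Before: the caller must pass the mapping, tying the API to one source. */
static void collapse_shmem(struct address_space *mapping, long start)
{
	printf("collapse pages of %s at pgoff %ld\n", mapping->name, start);
}

/* After: take the file and derive the mapping inside, as the patch does. */
static void collapse_file(struct file *file, long start)
{
	struct address_space *mapping = file->f_mapping;
	printf("collapse pages of %s at pgoff %ld\n", mapping->name, start);
}

int main(void)
{
	struct address_space shmem_mapping = { .name = "shmem" };
	struct file f = { .f_mapping = &shmem_mapping };

	collapse_shmem(f.f_mapping, 0);	/* old calling convention */
	collapse_file(&f, 0);		/* new calling convention */
	return 0;
}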
Link: http://lkml.kernel.org/r/20190801184244.3169074-6-songliubraving@fb.com

Signed-off-by: Song Liu <songliubraving@fb.com>
Acked-by: Rik van Riel <riel@surriel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 60fbf0ab
mm/khugepaged.c +11 −12
@@ -1287,7 +1287,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 }
 
 /**
- * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
+ * collapse_file - collapse small tmpfs/shmem pages into huge one.
  *
  * Basic scheme is simple, details are more complex:
  *  - allocate and lock a new huge page;
@@ -1304,10 +1304,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *    + restore gaps in the page cache;
  *    + unlock and free huge page;
  */
-static void collapse_shmem(struct mm_struct *mm,
-		struct address_space *mapping, pgoff_t start,
+static void collapse_file(struct mm_struct *mm,
+		struct file *file, pgoff_t start,
 		struct page **hpage, int node)
 {
+	struct address_space *mapping = file->f_mapping;
 	gfp_t gfp;
 	struct page *new_page;
 	struct mem_cgroup *memcg;
@@ -1563,11 +1564,11 @@ static void collapse_shmem(struct mm_struct *mm,
 	/* TODO: tracepoints */
 }
 
-static void khugepaged_scan_shmem(struct mm_struct *mm,
-		struct address_space *mapping,
-		pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+		struct file *file, pgoff_t start, struct page **hpage)
 {
 	struct page *page = NULL;
+	struct address_space *mapping = file->f_mapping;
 	XA_STATE(xas, &mapping->i_pages, start);
 	int present, swap;
 	int node = NUMA_NO_NODE;
@@ -1631,16 +1632,15 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
 			result = SCAN_EXCEED_NONE_PTE;
 		} else {
 			node = khugepaged_find_target_node();
-			collapse_shmem(mm, mapping, start, hpage, node);
+			collapse_file(mm, file, start, hpage, node);
 		}
 	}
 
 	/* TODO: tracepoints */
 }
 #else
-static void khugepaged_scan_shmem(struct mm_struct *mm,
-		struct address_space *mapping,
-		pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+		struct file *file, pgoff_t start, struct page **hpage)
 {
 	BUILD_BUG();
 }
@@ -1722,8 +1722,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 				file = get_file(vma->vm_file);
 				up_read(&mm->mmap_sem);
 				ret = 1;
-				khugepaged_scan_shmem(mm, file->f_mapping,
-						pgoff, hpage);
+				khugepaged_scan_file(mm, file, pgoff, hpage);
 				fput(file);
 			} else {
 				ret = khugepaged_scan_pmd(mm, vma,