Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 800d8c63 authored by Kirill A. Shutemov's avatar Kirill A. Shutemov Committed by Linus Torvalds
Browse files

shmem: add huge pages support

Here's basic implementation of huge pages support for shmem/tmpfs.

It's all pretty straightforward:

  - shmem_getpage() allocates a huge page if it can and tries to insert it
    into the radix tree with shmem_add_to_page_cache();

  - shmem_add_to_page_cache() puts the page onto radix-tree if there's
    space for it;

  - shmem_undo_range() removes huge pages, if they are fully within the range.
    Partial truncate of a huge page zeroes out that part of the THP.

    This has a visible effect on fallocate(FALLOC_FL_PUNCH_HOLE)
    behaviour. As we don't really create a hole in this case,
    lseek(SEEK_HOLE) may give inconsistent results depending on what
    pages happened to be allocated.

  - no need to change shmem_fault: core-mm will map a compound page as
    huge if the VMA is suitable;

Link: http://lkml.kernel.org/r/1466021202-61880-30-git-send-email-kirill.shutemov@linux.intel.com


Signed-off-by: default avatarKirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent c01d5b30
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -156,6 +156,8 @@ void put_huge_zero_page(void);

#define transparent_hugepage_enabled(__vma) 0

/* No-op stub: with transparent hugepages compiled out (see
 * transparent_hugepage_enabled defined to 0 above), there is nothing
 * to prepare on a freshly allocated page. */
static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
+3 −0
Original line number Diff line number Diff line
@@ -71,6 +71,9 @@ static inline struct page *shmem_read_mapping_page(
					mapping_gfp_mask(mapping));
}

extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);

#ifdef CONFIG_TMPFS

extern int shmem_add_seals(struct file *file, unsigned int seals);
+6 −1
Original line number Diff line number Diff line
@@ -219,8 +219,13 @@ void __delete_from_page_cache(struct page *page, void *shadow)
	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page))
	if (PageSwapBacked(page)) {
		__mod_zone_page_state(page_zone(page), NR_SHMEM, -nr);
		if (PageTransHuge(page))
			__dec_zone_page_state(page, NR_SHMEM_THPS);
	} else {
		VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
	}

	/*
	 * At this point page must be either written or cleaned by truncate.
+2 −0
Original line number Diff line number Diff line
@@ -3316,6 +3316,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
		if (head[i].index >= end) {
			__ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		}
	}
+1 −1
Original line number Diff line number Diff line
@@ -1142,7 +1142,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
				    details->check_mapping != page_rmapping(page))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
Loading