Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fc437044, authored by Matthew Wilcox and committed by Linus Torvalds
Browse files

mm: export various functions for the benefit of DAX



To use the huge zero page in DAX, we need these functions exported.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b96375f7
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -155,6 +155,16 @@ static inline bool is_huge_zero_page(struct page *page)
	return ACCESS_ONCE(huge_zero_page) == page;
}

/*
 * Return true if @pmd maps the shared huge zero page
 * (i.e. its backing page is the global huge_zero_page).
 */
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);

	return is_huge_zero_page(page);
}

/*
 * Huge zero page helpers, made non-static by this commit so DAX can
 * use them (see commit message above).  Definitions live in
 * mm/huge_memory.c; NOTE(review): exact reference-counting and retry
 * semantics are not visible in this hunk — confirm against the .c file.
 */
struct page *get_huge_zero_page(void);
bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr,
		pmd_t *pmd, struct page *zero_page);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
+1 −6
Original line number Diff line number Diff line
@@ -173,12 +173,7 @@ static int start_stop_khugepaged(void)
static atomic_t huge_zero_refcount;
/* The single shared zero huge page; compared against in is_huge_zero_page(). */
struct page *huge_zero_page __read_mostly;

/* True if @pmd's backing page is the global huge_zero_page. */
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static struct page *get_huge_zero_page(void)
struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry: