Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 092b50ba, authored by Naoya Horiguchi and committed by Linus Torvalds
Browse files

pagemap: introduce data structure for pagemap entry



Currently a local variable holding a pagemap entry in pagemap_pte_range() is
named pfn and typed as u64, but this is not correct (a pfn should be unsigned
long).

This patch introduces a dedicated type for pagemap entries and converts the
code to use it.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 807f0ccf
Loading
Loading
Loading
Loading
+38 −31
Original line number Diff line number Diff line
@@ -594,9 +594,13 @@ const struct file_operations proc_clear_refs_operations = {
	.llseek		= noop_llseek,
};

/*
 * Dedicated type for a single pagemap entry, introduced by this patch so
 * entries are no longer passed around as bare u64 values.
 */
typedef struct {
	u64 pme;	/* raw 64-bit entry value */
} pagemap_entry_t;

/*
 * Per-read state for a pagemap read.
 * NOTE(review): this is an unmarked before/after diff view; the u64 line
 * below is the pre-patch field, replaced by the pagemap_entry_t line.
 */
struct pagemapread {
	int pos, len;	/* next free slot in buffer, and its capacity */
	u64 *buffer;
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
@@ -619,10 +623,15 @@ struct pagemapread {
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

/* Pre-patch prototype (removed by this diff): took the entry as a bare u64. */
static int add_to_pagemap(unsigned long addr, u64 pfn,
/* Build a pagemap_entry_t from a raw 64-bit value. */
static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

/*
 * Store one entry into pm->buffer; returns PM_END_OF_BUFFER once the buffer
 * is full, 0 otherwise.  (The closing brace lies past this hunk.)
 */
static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = pfn;	/* pre-patch statement (removed) */
	pm->buffer[pm->pos++] = *pme;	/* post-patch replacement */
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
@@ -634,8 +643,10 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
@@ -648,36 +659,33 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

/*
 * Translate one pte into a pagemap entry.  Post-patch, the result is written
 * through *pme: swap ptes get PM_SWAP, present ptes get PM_PRESENT, and any
 * other pte leaves *pme untouched.
 * NOTE(review): unmarked diff view -- pre-patch (u64-returning) lines appear
 * interleaved with their replacements below.
 */
static u64 pte_to_pagemap_entry(pte_t pte)
static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
{
	u64 pme = 0;	/* pre-patch local (removed) */
	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
		*pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
		*pme = make_pme(PM_PFRAME(pte_pfn(pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Fill *pme for one subpage of a transparent huge page; offset is the subpage
 * index within the pmd.
 * NOTE(review): unmarked diff view -- pre-patch (u64-returning) lines appear
 * interleaved with their replacements below.
 */
static u64 thp_pmd_to_pagemap_entry(pmd_t pmd, int offset)
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
					pmd_t pmd, int offset)
{
	u64 pme = 0;	/* pre-patch local (removed) */
	/*
	 * Currently pmd for thp is always present because thp can not be
	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
	 * This if-check is just to prepare for future implementation.
	 */
	if (pmd_present(pmd))
		pme = PM_PFRAME(pmd_pfn(pmd) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
}
#else
/* !THP stub: the post-patch void version has an empty body. */
static inline u64 thp_pmd_to_pagemap_entry(pmd_t pmd, int offset)
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
						pmd_t pmd, int offset)
{
	return 0;	/* pre-patch body (removed); not part of the void stub */
}
#endif

@@ -688,7 +696,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	u64 pfn = PM_NOT_PRESENT;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	if (pmd_trans_unstable(pmd))
		return 0;
@@ -702,8 +710,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			pfn = thp_pmd_to_pagemap_entry(*pmd, offset);
			err = add_to_pagemap(addr, pfn, pm);
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
@@ -723,11 +731,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			pte_to_pagemap_entry(&pme, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}
@@ -738,13 +746,12 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Fill *pme for one subpage of a hugetlb mapping; offset is the subpage index
 * within the huge pte.  Only present ptes are encoded; others leave *pme
 * untouched.
 * NOTE(review): unmarked diff view -- pre-patch (u64-returning) lines appear
 * interleaved with their replacements below.
 */
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	u64 pme = 0;	/* pre-patch local (removed) */
	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
}

/* This function walks within one hugetlb entry in the single call */
@@ -754,12 +761,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}