Commit 517e2263 authored by Hugh Dickins, committed by Paul Mackerras

[POWERPC] Don't use SLAB/SLUB for PTE pages

The SLUB allocator relies on the struct page fields first_page and slab,
which are overwritten by ptl when SPLIT_PTLOCK is in effect: so the SLUB
allocator cannot then be used for the lowest level of pagetable pages.
This was obstructing SLUB on PowerPC, which uses kmem_caches for its
pagetables.  So convert its pte level to use normal gfp pages (whereas
pmd, pud and 64k-page pgd want partial pages, so continue to use
kmem_caches for pmd, pud and pgd).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent f1fa74f4
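
To illustrate the conflict the message describes, here is a minimal sketch
(not the kernel's exact struct page layout, which varies by version and
config) of how SLUB's bookkeeping fields and the split page-table lock
alias the same storage:

/* Illustrative stand-in only -- NOT the real 2.6.22 definitions. */
typedef struct { volatile unsigned int raw; } spinlock_t;	/* stand-in */
struct kmem_cache;						/* opaque here */

struct page {
	unsigned long flags;
	union {
		struct {				/* while SLUB owns the page */
			struct page *first_page;	/* head of compound/slab page */
			struct kmem_cache *slab;	/* cache the page belongs to */
		};
		spinlock_t ptl;				/* while the page is a PTE table
							 * under SPLIT_PTLOCK */
	};
};

Taking ptl on a page that SLUB still manages would corrupt first_page and
slab, which is why PTE pages must come straight from the page allocator
once SLUB is in use.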
arch/powerpc/Kconfig  +0 −13
@@ -120,19 +120,6 @@ config GENERIC_BUG
 config SYS_SUPPORTS_APM_EMULATION
 	bool
 
-#
-# Powerpc uses the slab allocator to manage its ptes and the
-# page structs of ptes are used for splitting the page table
-# lock for configurations supporting more than SPLIT_PTLOCK_CPUS.
-#
-# In that special configuration the page structs of slabs are modified.
-# This setting disables the selection of SLUB as a slab allocator.
-#
-config ARCH_USES_SLAB_PAGE_STRUCT
-	bool
-	default y
-	depends on SPLIT_PTLOCK_CPUS <= NR_CPUS
-
 config DEFAULT_UIMAGE
 	bool
 	help
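
Dropping ARCH_USES_SLAB_PAGE_STRUCT matters because the allocator choice
in init/Kconfig of this era was gated on that symbol; roughly (an
approximate reconstruction, shown only for context):

config SLUB
	depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT
	bool "SLUB (Unqueued Allocator)"

With the powerpc definition gone, nothing sets the symbol and SLUB
becomes selectable on the architecture.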
arch/powerpc/mm/init_64.c  +6 −11
@@ -146,21 +146,16 @@ static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 	memset(addr, 0, kmem_cache_size(cache));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
-static const unsigned int pgtable_cache_size[3] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
-};
-static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pte_pmd_cache", "pmd_cache", "pgd_cache",
-};
-#else
 static const unsigned int pgtable_cache_size[2] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE
+	PGD_TABLE_SIZE, PMD_TABLE_SIZE
 };
 static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pgd_pte_cache", "pud_pmd_cache",
-};
+#ifdef CONFIG_PPC_64K_PAGES
+	"pgd_cache", "pmd_cache",
+#else
+	"pgd_cache", "pud_pmd_cache",
 #endif /* CONFIG_PPC_64K_PAGES */
+};
 
 #ifdef CONFIG_HUGETLB_PAGE
 /* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
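
For context, these two arrays feed pgtable_cache_init() later in the same
file; a simplified sketch (paraphrased, not a verbatim quote of the
2.6.22 code) of how each entry becomes a kmem_cache:

/* Simplified sketch of the consumer of the arrays above. */
void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		/* One zeroing cache per remaining pagetable level; after
		 * this commit, pte pages no longer appear here. */
		pgtable_cache[i] = kmem_cache_create(name, size, size,
						     SLAB_HWCACHE_ALIGN,
						     zero_ctor, NULL);
		if (!pgtable_cache[i])
			panic("Couldn't allocate pgtable cache for %s", name);
	}
}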
include/asm-powerpc/pgalloc-64.h  +13 −18
@@ -14,18 +14,11 @@
 
 extern struct kmem_cache *pgtable_cache[];
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define PTE_CACHE_NUM	0
-#define PMD_CACHE_NUM	1
-#define PGD_CACHE_NUM	2
-#define HUGEPTE_CACHE_NUM 3
-#else
-#define PTE_CACHE_NUM	0
-#define PMD_CACHE_NUM	1
-#define PUD_CACHE_NUM	1
-#define PGD_CACHE_NUM	0
-#define HUGEPTE_CACHE_NUM 2
-#endif
+#define PGD_CACHE_NUM		0
+#define PUD_CACHE_NUM		1
+#define PMD_CACHE_NUM		1
+#define HUGEPTE_CACHE_NUM	2
+#define PTE_NONCACHE_NUM	3  /* from GFP rather than kmem_cache */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
@@ -91,8 +84,7 @@ static inline void pmd_free(pmd_t *pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
-				GFP_KERNEL|__GFP_REPEAT);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
@@ -103,12 +95,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-	pte_free_kernel(page_address(ptepage));
+	__free_page(ptepage);
 }
 
 #define PGF_CACHENUM_MASK	0x3
@@ -130,6 +122,9 @@ static inline void pgtable_free(pgtable_free_t pgf)
 	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
 	int cachenum = pgf.val & PGF_CACHENUM_MASK;
 
-	kmem_cache_free(pgtable_cache[cachenum], p);
+	if (cachenum == PTE_NONCACHE_NUM)
+		free_page((unsigned long)p);
+	else
+		kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
@@ -137,7 +132,7 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #define __pte_free_tlb(tlb, ptepage)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
 #define __pmd_free_tlb(tlb, pmd) 	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
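
The PGF_CACHENUM_MASK machinery above works because pagetables are
aligned well beyond 4 bytes, leaving the low two bits of the pointer free
to carry a cache number; PTE_NONCACHE_NUM claims the last encodable
value, 3, for pages that now come from the page allocator instead of a
kmem_cache.  A sketch of the encoding helper, approximately as it
appeared in this header:

typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;

static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
						unsigned long mask)
{
	BUG_ON(cachenum > PGF_CACHENUM_MASK);

	/* Pack the cache number (or PTE_NONCACHE_NUM) into the low bits
	 * of the table-aligned pointer; pgtable_free() unpacks it and,
	 * after this commit, routes PTE_NONCACHE_NUM to free_page(). */
	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
}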