Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a7595fe7 authored by Paul Mundt's avatar Paul Mundt
Browse files

Merge branch 'sh/pgtable' of git://github.com/mfleming/linux-2.6

parents 921a2208 2a5eacca
Loading
Loading
Loading
Loading
+6 −4
Original line number Diff line number Diff line
@@ -6,10 +6,13 @@

#define QUICK_PT 1	/* Other page table pages that are zero on free */

extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

#ifdef CONFIG_PGTABLE_LEVELS_3
#include <asm/pgalloc_pmd.h>
#else
#include <asm/pgalloc_nopmd.h>
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
#endif

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@@ -67,7 +70,6 @@ do { \

/*
 * Trim the page table quicklists back to their watermarks.
 *
 * Delegates PGD-level trimming to the per-configuration
 * __check_pgt_cache(), then trims the shared QUICK_PT list.
 * NOTE(review): the 25/16 arguments look like high/low watermarks for
 * quicklist_trim() — confirm against the quicklist API.
 */
static inline void check_pgt_cache(void)
{
	__check_pgt_cache();
	quicklist_trim(QUICK_PT, NULL, 25, 16);
}

+0 −30
Original line number Diff line number Diff line
#ifndef __ASM_SH_PGALLOC_NOPMD_H
#define __ASM_SH_PGALLOC_NOPMD_H

#define QUICK_PGD 0	/* We preserve special mappings over free */

/*
 * Quicklist constructor for newly allocated PGDs.
 *
 * Copies the kernel portion of swapper_pg_dir (entries from
 * USER_PTRS_PER_PGD up to PTRS_PER_PGD) into the fresh pgd so the new
 * address space shares the kernel mappings; user entries are left
 * untouched.
 */
static inline void pgd_ctor(void *x)
{
	pgd_t *pgd = x;

	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

/*
 * Allocate a pgd for @mm from the QUICK_PGD quicklist; pgd_ctor()
 * seeds the kernel mappings on first allocation of each page.
 * Returns NULL on allocation failure (quicklist_alloc semantics).
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
}

/*
 * Return @pgd to the QUICK_PGD quicklist. No destructor is passed,
 * so the kernel mappings set up by pgd_ctor() are preserved across
 * free/realloc cycles (see the QUICK_PGD comment above).
 */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	quicklist_free(QUICK_PGD, NULL, pgd);
}

/*
 * PGD-level trim hook called from check_pgt_cache(): shrink the
 * QUICK_PGD quicklist back toward its watermarks.
 */
static inline void __check_pgt_cache(void)
{
	quicklist_trim(QUICK_PGD, NULL, 25, 16);
}

#endif /* __ASM_SH_PGALLOC_NOPMD_H */

arch/sh/include/asm/pgalloc_pmd.h

deleted 100644 → 0
+0 −41
Original line number Diff line number Diff line
#ifndef __ASM_SH_PGALLOC_PMD_H
#define __ASM_SH_PGALLOC_PMD_H

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	int i;

	pgd = kzalloc(sizeof(*pgd) * PTRS_PER_PGD, GFP_KERNEL | __GFP_REPEAT);

	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
		pgd[i] = swapper_pg_dir[i];

	return pgd;
}

/* Free a pgd allocated by pgd_alloc() (plain kfree; no quicklist). */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kfree(pgd);
}

/*
 * PGD-level trim hook: nothing to do here since this configuration's
 * pgds come from kzalloc(), not a quicklist.
 */
static inline void __check_pgt_cache(void)
{
}

/* Install the pmd page's address into the given pud entry. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}

/*
 * Allocate a pmd page from the QUICK_PT quicklist (zeroed on free per
 * the QUICK_PT comment). No constructor needed. @address is unused.
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
}

/* Return a pmd page to the QUICK_PT quicklist. */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	quicklist_free(QUICK_PT, NULL, pmd);
}

#endif /* __ASM_SH_PGALLOC_PMD_H */
+2 −2
Original line number Diff line number Diff line
@@ -141,9 +141,9 @@ typedef pte_t *pte_addr_t;
#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))

/*
 * No page table caches to initialise
 * Initialise the page table caches
 */
#define pgtable_cache_init()	do { } while (0)
extern void pgtable_cache_init(void);

struct vm_area_struct;

+0 −11
Original line number Diff line number Diff line
@@ -43,11 +43,6 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/* Point a pmd entry at a pte page by storing the page's address. */
static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long) ptep;
}

/*
 * PGD defines. Top level.
 */
@@ -202,12 +197,6 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd_kernel(pmd_t * pmd);
#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)

/*
 * PTE level access routines.
 *
Loading