Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3610cce8 authored by Martin Schwidefsky
Browse files

[S390] Cleanup page table definitions.



- De-confuse the defines for the address-space-control-elements
  and the segment/region table entries.
- Create out of line functions for page table allocation / freeing.
- Simplify get_shadow_xxx functions.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent e4aa402e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2,6 +2,6 @@
# Makefile for the linux s390-specific parts of the memory manager.
#

obj-y	 := init.o fault.o extmem.o mmap.o vmem.o
obj-y	 := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
obj-$(CONFIG_CMM) += cmm.o
+12 −16
Original line number Diff line number Diff line
@@ -103,32 +103,28 @@ static void __init setup_ro_region(void)
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	int i;
	unsigned long pgdir_k;
	static const int ssm_mask = 0x04000000L;
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	pg_dir = swapper_pg_dir;
	
	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
	for (i = 0; i < PTRS_PER_PGD; i++)
		pgd_clear_kernel(pg_dir + i);
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
	for (i = 0; i < PTRS_PER_PGD; i++)
		pmd_clear_kernel((pmd_t *)(pg_dir + i));
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();
	setup_ro_region();

	S390_lowcore.kernel_asce = pgdir_k;

        /* enable virtual mapping in kernel mode */
	__ctl_load(pgdir_k, 1, 1);
	__ctl_load(pgdir_k, 7, 7);
	__ctl_load(pgdir_k, 13, 13);
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

arch/s390/mm/pgtable.c

0 → 100644
+94 −0
Original line number Diff line number Diff line
/*
 *  arch/s390/mm/pgtable.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#else
#define ALLOC_ORDER	2
#endif

/*
 * Allocate the pages backing a crst (region/segment) table and return the
 * physical address of the primary table.  When @noexec is set a second
 * "shadow" table is allocated as well; its physical address is stashed in
 * page->index of the primary page (0 means "no shadow") — presumably this
 * is what get_shadow_table() reads back later.  Returns NULL on allocation
 * failure; @mm is currently unused.
 */
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *primary;
	struct page *shadow;

	primary = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (primary == NULL)
		return NULL;
	/* No shadow table yet. */
	primary->index = 0;
	if (noexec) {
		shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (shadow == NULL) {
			/* Undo the primary allocation on failure. */
			__free_pages(primary, ALLOC_ORDER);
			return NULL;
		}
		/* Link the shadow to the primary via page->index. */
		primary->index = page_to_phys(shadow);
	}
	return (unsigned long *) page_to_phys(primary);
}

void crst_table_free(unsigned long *table)
{
	unsigned long *shadow = get_shadow_table(table);

	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);
	free_pages((unsigned long) table, ALLOC_ORDER);
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(int noexec)
{
	struct page *page = alloc_page(GFP_KERNEL);
	unsigned long *table;

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_page(GFP_KERNEL);
		if (!shadow) {
			__free_page(page);
			return NULL;
		}
		table = (unsigned long *) page_to_phys(shadow);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		page->index = (addr_t) table;
	}
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return table;
}

void page_table_free(unsigned long *table)
{
	unsigned long *shadow = get_shadow_pte(table);

	if (shadow)
		free_page((unsigned long) shadow);
	free_page((unsigned long) table);

}
+7 −12
Original line number Diff line number Diff line
@@ -75,29 +75,24 @@ static void __init_refok *vmem_alloc_pages(unsigned int order)

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd;
	int i;
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_clear_kernel(pmd + i);
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
#endif
	return pmd;
}

static inline pte_t *vmem_pte_alloc(void)
{
	pte_t *pte;
	pte_t empty_pte;
	int i;
	pte_t *pte = vmem_alloc_pages(0);

	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
	if (!pte)
		return NULL;
	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = empty_pte;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return pte;
}

+24 −26
Original line number Diff line number Diff line
@@ -21,45 +21,43 @@

#ifndef __s390x__
#define LCTL_OPCODE "lctl"
#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
#else
#define LCTL_OPCODE "lctlg"
#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
#endif

static inline void enter_lazy_tlb(struct mm_struct *mm,
                                  struct task_struct *tsk)
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
}
	pgd_t *pgd = mm->pgd;
	unsigned long asce_bits;

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);

	if (prev != next) {
		S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
					 PGTABLE_BITS;
		if (shadow_pgd) {
			/* Load primary/secondary space page table origin. */
			S390_lowcore.user_exec_asce =
				(__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
			asm volatile(LCTL_OPCODE" 1,1,%0\n"
				     LCTL_OPCODE" 7,7,%1"
				     : : "m" (S390_lowcore.user_exec_asce),
					 "m" (S390_lowcore.user_asce) );
		} else if (switch_amode) {
	/* Calculate asce bits from the first pgd table entry. */
	asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	asce_bits |= _ASCE_TYPE_REGION3;
#endif
	S390_lowcore.user_asce = asce_bits | __pa(pgd);
	if (switch_amode) {
		/* Load primary space page table origin. */
			asm volatile(LCTL_OPCODE" 1,1,%0"
				     : : "m" (S390_lowcore.user_asce) );
		pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
		S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
		asm volatile(LCTL_OPCODE" 1,1,%0\n"
			     : : "m" (S390_lowcore.user_exec_asce) );
	} else
		/* Load home space page table origin. */
		asm volatile(LCTL_OPCODE" 13,13,%0"
			     : : "m" (S390_lowcore.user_asce) );
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (unlikely(prev == next))
		return;
	cpu_set(smp_processor_id(), next->cpu_vm_mask);
	update_mm(next, tsk);
}

#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev,
Loading