Commit 26ff6c11 authored by Paul Mundt

sh: page table alloc cleanups and page fault optimizations.



Cleanup of page table allocators, using generic folded PMD and PUD
helpers. TLB flushing operations are moved to a more sensible spot.
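
With the generic folded helpers, the fault path can walk pgd -> pud -> pmd -> pte unconditionally; on two-level sh configurations the pud (and pmd) steps collapse into the level above at compile time, so the extra code costs nothing. A minimal sketch of such a walk for a kernel address (lookup_kernel_pte() is a hypothetical name, not part of this patch):

/* Walk a kernel virtual address down to its pte using the generic
 * folded helpers; each *_offset() step is a no-op cast on folded
 * configurations.
 */
static pte_t *lookup_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pte_offset_kernel(pmd, address);
}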

The page fault handler is also optimized slightly: we no longer waste
cycles disabling IRQs around the ITLB flush, since we're already under
CLI protection from the initial exception handler.
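
Concretely, __do_page_fault() used to wrap its ITLB flush in a save/restore pair of its own; since it is only entered from the exception handler with interrupts already masked, the flush can be issued directly. A before/after sketch of just that step, following the fault.c hunk below:

	/* Before: redundant save/restore, interrupts are already off. */
	unsigned long flags;
	local_irq_save(flags);
	__flush_tlb_page(get_asid(), address & PAGE_MASK);
	local_irq_restore(flags);

	/* After: rely on the CLI protection of the exception entry. */
	__flush_tlb_page(get_asid(), address & PAGE_MASK);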

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 9359e757
+1 −1
@@ -21,7 +21,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
-
+#include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
 
+1 −1
@@ -12,7 +12,7 @@ obj-$(CONFIG_DMA_PAGE_OPS) += pg-dma.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
 mmu-y			:= fault-nommu.o tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o
+mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o
 
 obj-y			+= $(mmu-y)
 
+2 −0
@@ -9,6 +9,8 @@
  */
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/addrspace.h>
 #include <asm/io.h>
 
 void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
+40 −162
-/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
+/*
+ * Page fault handler for SH with an MMU.
  *
- *  linux/arch/sh/mm/fault.c
  *  Copyright (C) 1999  Niibe Yutaka
  *  Copyright (C) 2003  Paul Mundt
  *
  *  Based on linux/arch/i386/mm/fault.c:
  *   Copyright (C) 1995  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -187,14 +174,25 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 		goto no_context;
 }
 
+#ifdef CONFIG_SH_STORE_QUEUES
+/*
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
 /*
- * Called with interrupt disabled.
+ * Called with interrupts disabled.
  */
 asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 			       unsigned long address)
 {
-	unsigned long addrmax = P4SEG;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
@@ -207,31 +205,36 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 		kgdb_bus_err_hook();
 #endif
 
-#ifdef CONFIG_SH_STORE_QUEUES
-	addrmax = P4SEG_STORE_QUE + 0x04000000;
-#endif
-
-	if (address >= P3SEG && address < addrmax) {
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX)
 		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else if (address >= TASK_SIZE)
-		return 1;
-	else if (!(mm = current->mm))
-		return 1;
-	else
-		pgd = pgd_offset(mm, address);
+	else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
 
-	pmd = pmd_offset(pgd, address);
+		pgd = pgd_offset(current->mm, address);
+	}
+
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
 		return 1;
 
 	if (mm)
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	else
 		pte = pte_offset_kernel(pmd, address);
 
 	entry = *pte;
-	if (pte_none(entry) || pte_not_present(entry)
-	    || (writeaccess && !pte_write(entry)))
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
 		goto unlock;
 
 	if (writeaccess)
@@ -243,13 +246,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	 * ITLB is not affected by "ldtlb" instruction.
 	 * So, we need to flush the entry by ourselves.
 	 */
-
-	{
-		unsigned long flags;
-		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), address & PAGE_MASK);
-		local_irq_restore(flags);
-	}
+	__flush_tlb_page(get_asid(), address & PAGE_MASK);
 #endif
 
 	set_pte(pte, entry);
@@ -260,122 +257,3 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 		pte_unmap_unlock(pte, ptl);
 	return ret;
 }
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		unsigned long asid;
-		unsigned long saved_asid = MMU_NO_ASID;
-
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
-		page &= PAGE_MASK;
-
-		local_irq_save(flags);
-		if (vma->vm_mm != current->mm) {
-			saved_asid = get_asid();
-			set_asid(asid);
-		}
-		__flush_tlb_page(asid, page);
-		if (saved_asid != MMU_NO_ASID)
-			set_asid(saved_asid);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		int size;
-
-		local_irq_save(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
-			if (mm == current->mm)
-				activate_context(mm);
-		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
-			unsigned long saved_asid = MMU_NO_ASID;
-
-			start &= PAGE_MASK;
-			end += (PAGE_SIZE - 1);
-			end &= PAGE_MASK;
-			if (mm != current->mm) {
-				saved_asid = get_asid();
-				set_asid(asid);
-			}
-			while (start < end) {
-				__flush_tlb_page(asid, start);
-				start += PAGE_SIZE;
-			}
-			if (saved_asid != MMU_NO_ASID)
-				set_asid(saved_asid);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long flags;
-	int size;
-
-	local_irq_save(flags);
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
-	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
-
-		start &= PAGE_MASK;
-		end += (PAGE_SIZE - 1);
-		end &= PAGE_MASK;
-		set_asid(asid);
-		while (start < end) {
-			__flush_tlb_page(asid, start);
-			start += PAGE_SIZE;
-		}
-		set_asid(saved_asid);
-	}
-	local_irq_restore(flags);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	/* Invalidate all TLB of this process. */
-	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
-		if (mm == current->mm)
-			activate_context(mm);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_all(void)
-{
-	unsigned long flags, status;
-
-	/*
-	 * Flush all the TLB.
-	 *
-	 * Write to the MMU control register's bit:
-	 *	TF-bit for SH-3, TI-bit for SH-4.
-	 *      It's same position, bit #2.
-	 */
-	local_irq_save(flags);
-	status = ctrl_inl(MMUCR);
-	status |= 0x04;
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-	local_irq_restore(flags);
-}
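
The flush_tlb_*() bodies removed above are moved rather than deleted: the Makefile change earlier in this commit builds them into the new tlb-flush.o for MMU kernels. The pattern they share is flushing on behalf of another mm by temporarily borrowing its ASID with interrupts off; roughly (flush_page_of_mm() is a hypothetical name, condensed from the removed flush_tlb_page()):

/* Flush one page of an arbitrary mm by temporarily adopting its
 * ASID, so __flush_tlb_page() matches the right address-space tag.
 */
static void flush_page_of_mm(struct mm_struct *mm, unsigned long page)
{
	unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
	unsigned long saved_asid = MMU_NO_ASID;
	unsigned long flags;

	local_irq_save(flags);
	if (mm != current->mm) {
		saved_asid = get_asid();	/* remember our ASID */
		set_asid(asid);			/* adopt the target's */
	}
	__flush_tlb_page(asid, page & PAGE_MASK);
	if (saved_asid != MMU_NO_ASID)
		set_asid(saved_asid);		/* and switch back */
	local_irq_restore(flags);
}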
+12 −1
@@ -80,6 +80,7 @@ void show_mem(void)
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
@@ -89,7 +90,17 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 		return;
 	}
 
-	pmd = pmd_offset(pgd, addr);
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+		if (pmd != pmd_offset(pud, 0)) {
+			pud_ERROR(*pud);
+			return;
+		}
+	}
+
+	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd)) {
 		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));