Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6a7f6ec9 authored by Linus Torvalds
Browse files
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu: (25 commits)
  m68knommu: fix broken setting of irq_chip and handler
  m68knommu: switch to using -mcpu= flags for ColdFire targets
  m68knommu: arch/m68knommu/Kconfig whitespace cleanup
  m68knommu: create optimal separate instruction and data cache for ColdFire
  m68knommu: support ColdFire caches that do copyback and write-through
  m68knommu: support version 2 ColdFire split cache
  m68knommu: make cache push code ColdFire generic
  m68knommu: clean up ColdFire cache control code
  m68knommu: move inclusion of ColdFire v4 cache registers
  m68knommu: merge bit definitions for version 3 ColdFire cache controller
  m68knommu: create bit definitions for the version 2 ColdFire cache controller
  m68knommu: remove empty __iounmap(), it is not used
  m68knommu: remove kernel_map() code, it is not used
  m68knommu: remove do_page_fault(), it is not used
  m68knommu: use user stack pointer hardware on some ColdFire cores
  m68knommu: remove command line printing DEBUG
  m68knommu: remove fasthandler interrupt code
  m68knommu: move UART addressing to part specific includes
  m68knommu: fix clock rate value reported for ColdFire 54xx parts
  m68knommu: move ColdFire CPU names into their headers
  ...
parents c6fa63c6 4c65595e
Loading
Loading
Loading
Loading
+41 −54
Original line number Original line Diff line number Diff line
@@ -2,21 +2,22 @@
#define _M68KNOMMU_CACHEFLUSH_H
#define _M68KNOMMU_CACHEFLUSH_H


/*
/*
 * (C) Copyright 2000-2004, Greg Ungerer <gerg@snapgear.com>
 * (C) Copyright 2000-2010, Greg Ungerer <gerg@snapgear.com>
 */
 */
#include <linux/mm.h>
#include <linux/mm.h>
#include <asm/mcfsim.h>


#define flush_cache_all()			__flush_cache_all()
#define flush_cache_all()			__flush_cache_all()
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	__flush_cache_all()
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_dcache_range(start,len)		__flush_cache_all()
#define flush_dcache_range(start, len)		__flush_dcache_all()
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_range(start,len)		__flush_cache_all()
#define flush_icache_range(start, len)		__flush_icache_all()
#define flush_icache_page(vma,pg)		do { } while (0)
#define flush_icache_page(vma,pg)		do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
@@ -27,66 +28,52 @@
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
	memcpy(dst, src, len)


void mcf_cache_push(void);

static inline void __flush_cache_all(void)
static inline void __flush_cache_all(void)
{
{
#if defined(CONFIG_M5407) || defined(CONFIG_M548x)
#ifdef CACHE_PUSH
	/*
	mcf_cache_push();
	 *	Use cpushl to push and invalidate all cache lines.
#endif
	 *	Gas doesn't seem to know how to generate the ColdFire
#ifdef CACHE_INVALIDATE
	 *	cpushl instruction... Oh well, bit stuff it for now.
	 */
	__asm__ __volatile__ (
		"nop\n\t"
		"clrl	%%d0\n\t"
		"1:\n\t"
		"movel	%%d0,%%a0\n\t"
		"2:\n\t"
		".word	0xf468\n\t"
		"addl	#0x10,%%a0\n\t"
		"cmpl	#0x00000800,%%a0\n\t"
		"blt	2b\n\t"
		"addql	#1,%%d0\n\t"
		"cmpil	#4,%%d0\n\t"
		"bne	1b\n\t"
		"movel	#0xb6088500,%%d0\n\t"
		"movec	%%d0,%%CACR\n\t"
		: : : "d0", "a0" );
#endif /* CONFIG_M5407 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x)
	__asm__ __volatile__ (
	__asm__ __volatile__ (
		"movel	#0x81400100, %%d0\n\t"
		"movel	%0, %%d0\n\t"
		"movec	%%d0, %%CACR\n\t"
		"movec	%%d0, %%CACR\n\t"
		"nop\n\t"
		"nop\n\t"
		: : : "d0" );
		: : "i" (CACHE_INVALIDATE) : "d0" );
#endif /* CONFIG_M523x || CONFIG_M527x */
#endif
#if defined(CONFIG_M528x)
}
	__asm__ __volatile__ (

		"movel	#0x81000200, %%d0\n\t"
/*
		"movec	%%d0, %%CACR\n\t"
 * Some ColdFire parts implement separate instruction and data caches,
		"nop\n\t"
 * on those we should just flush the appropriate cache. If we don't need
		: : : "d0" );
 * to do any specific flushing then this will be optimized away.
#endif /* CONFIG_M528x */
 */
#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || defined(CONFIG_M5272)
static inline void __flush_icache_all(void)
	__asm__ __volatile__ (
{
		"movel	#0x81000100, %%d0\n\t"
#ifdef CACHE_INVALIDATEI
		"movec	%%d0, %%CACR\n\t"
		"nop\n\t"
		: : : "d0" );
#endif /* CONFIG_M5206 || CONFIG_M5206e || CONFIG_M5272 */
#ifdef CONFIG_M5249
	__asm__ __volatile__ (
	__asm__ __volatile__ (
		"movel	#0xa1000200, %%d0\n\t"
		"movel	%0, %%d0\n\t"
		"movec	%%d0, %%CACR\n\t"
		"movec	%%d0, %%CACR\n\t"
		"nop\n\t"
		"nop\n\t"
		: : : "d0" );
		: : "i" (CACHE_INVALIDATEI) : "d0" );
#endif /* CONFIG_M5249 */
#endif
#ifdef CONFIG_M532x
}

static inline void __flush_dcache_all(void)
{
#ifdef CACHE_PUSH
	mcf_cache_push();
#endif
#ifdef CACHE_INVALIDATED
	__asm__ __volatile__ (
	__asm__ __volatile__ (
		"movel	#0x81000200, %%d0\n\t"
		"movel	%0, %%d0\n\t"
		"movec	%%d0, %%CACR\n\t"
		"movec	%%d0, %%CACR\n\t"
		"nop\n\t"
		"nop\n\t"
		: : : "d0" );
		: : "i" (CACHE_INVALIDATED) : "d0" );
#endif /* CONFIG_M532x */
#else
	/* Flush the write buffer */
	__asm__ __volatile__ ( "nop" );
#endif
}
}

#endif /* _M68KNOMMU_CACHEFLUSH_H */
#endif /* _M68KNOMMU_CACHEFLUSH_H */
+1 −1
Original line number Original line Diff line number Diff line
@@ -32,7 +32,7 @@
 */
 */
#define	MCF_MBAR	0x10000000
#define	MCF_MBAR	0x10000000
#define	MCF_MBAR2	0x80000000
#define	MCF_MBAR2	0x80000000
#if defined(CONFIG_M548x)
#if defined(CONFIG_M54xx)
#define	MCF_IPSBAR	MCF_MBAR
#define	MCF_IPSBAR	MCF_MBAR
#elif defined(CONFIG_M520x)
#elif defined(CONFIG_M520x)
#define	MCF_IPSBAR	0xFC000000
#define	MCF_IPSBAR	0xFC000000
+34 −25
Original line number Original line Diff line number Diff line
@@ -42,12 +42,16 @@
 */
 */


#ifdef CONFIG_COLDFIRE
#ifdef CONFIG_COLDFIRE
#ifdef CONFIG_COLDFIRE_SW_A7
/*
/*
 * This is made a little more tricky on the ColdFire. There is no
 * This is made a little more tricky on older ColdFires. There is no
 * separate kernel and user stack pointers. Need to artificially
 * separate supervisor and user stack pointers. Need to artificially
 * construct a usp in software... When doing this we need to disable
 * construct a usp in software... When doing this we need to disable
 * interrupts, otherwise bad things could happen.
 * interrupts, otherwise bad things will happen.
 */
 */
.globl sw_usp
.globl sw_ksp

.macro SAVE_ALL
.macro SAVE_ALL
	move	#0x2700,%sr		/* disable intrs */
	move	#0x2700,%sr		/* disable intrs */
	btst	#5,%sp@(2)		/* from user? */
	btst	#5,%sp@(2)		/* from user? */
@@ -74,9 +78,7 @@
	7:
	7:
.endm
.endm


.macro RESTORE_ALL
.macro RESTORE_USER
	btst	#5,%sp@(PT_SR)		/* going user? */
	bnes	8f			/* no, skip */
	move	#0x2700,%sr		/* disable intrs */
	move	#0x2700,%sr		/* disable intrs */
	movel	sw_usp,%a0		/* get usp */
	movel	sw_usp,%a0		/* get usp */
	movel	%sp@(PT_OFF_PC),%a0@-	/* copy exception program counter */
	movel	%sp@(PT_OFF_PC),%a0@-	/* copy exception program counter */
@@ -91,19 +93,22 @@
	subql	#8,sw_usp		/* set exception */
	subql	#8,sw_usp		/* set exception */
	movel	sw_usp,%sp		/* restore usp */
	movel	sw_usp,%sp		/* restore usp */
	rte
	rte
	8:
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stkadj */
	rte
.endm
.endm


.macro RDUSP
	movel	sw_usp,%a2
.endm

.macro WRUSP
	movel	%a0,sw_usp
.endm

#else /* !CONFIG_COLDFIRE_SW_A7 */
/*
/*
 * Quick exception save, use current stack only.
 * Modern ColdFire parts have separate supervisor and user stack
 * pointers. Simple load and restore macros for this case.
 */
 */
.macro SAVE_LOCAL
.macro SAVE_ALL
	move	#0x2700,%sr		/* disable intrs */
	move	#0x2700,%sr		/* disable intrs */
	clrl	%sp@-			/* stkadj */
	clrl	%sp@-			/* stkadj */
	movel	%d0,%sp@-		/* orig d0 */
	movel	%d0,%sp@-		/* orig d0 */
@@ -112,7 +117,7 @@
	moveml	%d1-%d5/%a0-%a2,%sp@
	moveml	%d1-%d5/%a0-%a2,%sp@
.endm
.endm


.macro RESTORE_LOCAL
.macro RESTORE_USER
	moveml	%sp@,%d1-%d5/%a0-%a2
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	movel	%sp@+,%d0
@@ -121,6 +126,18 @@
	rte
	rte
.endm
.endm


.macro RDUSP
	/*move	%usp,%a2*/
	.word	0x4e6a
.endm

.macro WRUSP
	/*move	%a0,%usp*/
	.word	0x4e60
.endm

#endif /* !CONFIG_COLDFIRE_SW_A7 */

.macro SAVE_SWITCH_STACK
.macro SAVE_SWITCH_STACK
	lea	%sp@(-24),%sp		/* 6 regs */
	lea	%sp@(-24),%sp		/* 6 regs */
	moveml	%a3-%a6/%d6-%d7,%sp@
	moveml	%a3-%a6/%d6-%d7,%sp@
@@ -131,14 +148,6 @@
	lea	%sp@(24),%sp		/* 6 regs */
	lea	%sp@(24),%sp		/* 6 regs */
.endm
.endm


/*
 * Software copy of the user and kernel stack pointers... Ugh...
 * Need these to get around ColdFire not having separate kernel
 * and user stack pointers.
 */
.globl sw_usp
.globl sw_ksp

#else /* !CONFIG_COLDFIRE */
#else /* !CONFIG_COLDFIRE */


/*
/*
@@ -167,6 +176,6 @@
	moveml	%sp@+,%a3-%a6/%d6-%d7
	moveml	%sp@+,%a3-%a6/%d6-%d7
.endm
.endm


#endif /* !CONFIG_COLDFIRE */
#endif /* !COLDFIRE_SW_A7 */
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLY__ */
#endif /* __M68KNOMMU_ENTRY_H */
#endif /* __M68KNOMMU_ENTRY_H */
+1 −1
Original line number Original line Diff line number Diff line
@@ -37,7 +37,7 @@
#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
    defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
    defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M532x) || defined(CONFIG_M548x)
    defined(CONFIG_M532x) || defined(CONFIG_M54xx)


/* These parts have GPIO organized by 8 bit ports */
/* These parts have GPIO organized by 8 bit ports */


+0 −1
Original line number Original line Diff line number Diff line
@@ -145,7 +145,6 @@ static inline void io_insl(unsigned int addr, void *buf, int len)
#define IOMAP_WRITETHROUGH		3
#define IOMAP_WRITETHROUGH		3


extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
extern void __iounmap(void *addr, unsigned long size);


static inline void *ioremap(unsigned long physaddr, unsigned long size)
static inline void *ioremap(unsigned long physaddr, unsigned long size)
{
{
Loading