Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c4706628 authored by Paul Mundt
Browse files

sh: Fixup SHMLBA definition for SH7705.



We need this set to something sensible anywhere where we have
an aliasing dcache.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent d7cdc9e8
Loading
Loading
Loading
Loading

include/asm-sh/cpu-sh2/shmparam.h

deleted file mode 100644 → 0
+0 −16
Original line number Original line Diff line number Diff line
/*
 * include/asm-sh/cpu-sh2/shmparam.h
 *
 * Copyright (C) 2003  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH2_SHMPARAM_H
#define __ASM_CPU_SH2_SHMPARAM_H

/*
 * SHMLBA is the minimum alignment enforced on shmat() attach addresses.
 * NOTE(review): plain PAGE_SIZE here presumably means SH-2 has no dcache
 * aliasing to work around — confirm against the CPU cache configuration.
 */
#define	SHMLBA PAGE_SIZE		 /* attach addr a multiple of this */

#endif /* __ASM_CPU_SH2_SHMPARAM_H */
+21 −27
Original line number Original line Diff line number Diff line
@@ -35,47 +35,41 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-                              unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-/* SH3 has unified cache so no special action needed here */
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-#define flush_page_to_ram(page)			do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-
-#define p3_cache_init()				do { } while (0)
-
 #define PG_mapped	PG_arch_1
 
-/* We provide our own get_unmapped_area to avoid cache alias issue */
-#define HAVE_ARCH_UNMAPPED_AREA
-
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                              unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
+void flush_dcache_page(struct page *pg);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #else
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
+#endif
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
+/* SH3 has unified cache so no special action needed here */
+#define flush_cache_sigtramp(vaddr)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
-#endif
+#define p3_cache_init()				do { } while (0)
+
+/*
+ * We provide our own get_unmapped_area to avoid cache aliasing issues
+ * on SH7705 with a 32KB cache, and to page align addresses in the
+ * non-aliasing case.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */

include/asm-sh/cpu-sh3/shmparam.h

deleted file mode 100644 → 0
+0 −16
Original line number Original line Diff line number Diff line
/*
 * include/asm-sh/cpu-sh3/shmparam.h
 *
 * Copyright (C) 1999 Niibe Yutaka
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH3_SHMPARAM_H
#define __ASM_CPU_SH3_SHMPARAM_H

/*
 * SHMLBA is the minimum alignment enforced on shmat() attach addresses.
 * NOTE(review): PAGE_SIZE alignment assumes no dcache aliasing; per the
 * parent commit, SH7705 (an SH-3 part) with a 32KB cache DOES alias, which
 * is why this per-CPU definition was replaced by a common one.
 */
#define	SHMLBA PAGE_SIZE		 /* attach addr a multiple of this */

#endif /* __ASM_CPU_SH3_SHMPARAM_H */

include/asm-sh/cpu-sh4/shmparam.h

deleted file mode 100644 → 0
+0 −19
Original line number Original line Diff line number Diff line
/*
 * include/asm-sh/cpu-sh4/shmparam.h
 *
 * Copyright (C) 1999 Niibe Yutaka
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH4_SHMPARAM_H
#define __ASM_CPU_SH4_SHMPARAM_H

/*
 * SH-4 has D-cache alias issue
 *
 * SHMLBA is the minimum alignment enforced on shmat() attach addresses;
 * aligning shared attaches to 4 pages keeps mappings of the same physical
 * page in the same cache alias set so they see coherent data.
 */
#define	SHMLBA (PAGE_SIZE*4)		 /* attach addr a multiple of this */

#endif /* __ASM_CPU_SH4_SHMPARAM_H */
+17 −3
Original line number Original line Diff line number Diff line
+/*
+ * include/asm-sh/shmparam.h
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
 #ifndef __ASM_SH_SHMPARAM_H
 #define __ASM_SH_SHMPARAM_H
+#ifdef __KERNEL__
 
-#include <asm/cpu/shmparam.h>
+/*
+ * SH-4 and SH-3 7705 have an aliasing dcache. Bump this up to a sensible value
+ * for everyone, and work out the specifics from the probed cache descriptor.
+ */
+#define	SHMLBA	0x4000		 /* attach addr a multiple of this */
+
+#define __ARCH_FORCE_SHMLBA
 
+#endif /* __KERNEL__ */
 #endif /* __ASM_SH_SHMPARAM_H */