Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cbaa118e authored by Stuart Menefy, committed by Paul Mundt
Browse files

sh: Preparation for uncached jumps through PMB.



Presently most of the 29-bit physical parts do P1/P2 segmentation
with a 1:1 cached/uncached mapping, jumping between the two to
control the caching behaviour. This provides the basic infrastructure
to maintain this behaviour on 32-bit physical parts that don't map
P1/P2 at all, using a shiny new linker section and corresponding
fixmap entry.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 325df7f2
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -64,11 +64,11 @@ static void __init speculative_execution_init(void)
 * Generic first-level cache init
 */
#ifdef CONFIG_SUPERH32
static void __init cache_init(void)
static void __uses_jump_to_uncached cache_init(void)
{
	unsigned long ccr, flags;

	jump_to_P2();
	jump_to_uncached();
	ccr = ctrl_inl(CCR);

	/*
@@ -145,7 +145,7 @@ static void __init cache_init(void)
#endif

	ctrl_outl(flags, CCR);
	back_to_P1();
	back_to_cached();
}
#else
#define cache_init()	do { } while (0)
+3 −3
Original line number Diff line number Diff line
@@ -16,11 +16,11 @@
#include <asm/cache.h>
#include <asm/io.h>

int __init detect_cpu_and_cache_system(void)
int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
{
	unsigned long addr0, addr1, data0, data1, data2, data3;

	jump_to_P2();
	jump_to_uncached();
	/*
	 * Check if the entry shadows or not.
	 * When shadowed, it's 128-entry system.
@@ -48,7 +48,7 @@ int __init detect_cpu_and_cache_system(void)
	ctrl_outl(data0&~SH_CACHE_VALID, addr0);
	ctrl_outl(data2&~SH_CACHE_VALID, addr1);

	back_to_P1();
	back_to_cached();

	boot_cpu_data.dcache.ways		= 4;
	boot_cpu_data.dcache.entry_shift	= 4;
+9 −0
Original line number Diff line number Diff line
@@ -43,6 +43,15 @@ SECTIONS
	NOTES
	RO_DATA(PAGE_SIZE)

	/*
	 * Code which must be executed uncached and the associated data
	 */
	. = ALIGN(PAGE_SIZE);
	__uncached_start = .;
	.uncached.text : { *(.uncached.text) }
	.uncached.data : { *(.uncached.data) }
	__uncached_end = .;

	. = ALIGN(THREAD_SIZE);
	.data : {			/* Data */
		*(.data.init_task)
+5 −4
Original line number Diff line number Diff line
@@ -22,7 +22,8 @@ enum cache_type {
	CACHE_TYPE_UNIFIED,
};

static int cache_seq_show(struct seq_file *file, void *iter)
static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
						  void *iter)
{
	unsigned int cache_type = (unsigned int)file->private;
	struct cache_info *cache;
@@ -34,11 +35,11 @@ static int cache_seq_show(struct seq_file *file, void *iter)
	 * Go uncached immediately so we don't skew the results any
	 * more than we already are..
	 */
	jump_to_P2();
	jump_to_uncached();

	ccr = ctrl_inl(CCR);
	if ((ccr & CCR_CACHE_ENABLE) == 0) {
		back_to_P1();
		back_to_cached();

		seq_printf(file, "disabled\n");
		return 0;
@@ -104,7 +105,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
		addrstart += cache->way_incr;
	}

	back_to_P1();
	back_to_cached();

	return 0;
}
+7 −7
Original line number Diff line number Diff line
@@ -190,7 +190,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 * .. which happens to be the same behavior as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
void __uses_jump_to_uncached flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
@@ -205,13 +205,13 @@ void flush_cache_sigtramp(unsigned long addr)
			(v & boot_cpu_data.icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();
	jump_to_uncached();

	for (i = 0; i < boot_cpu_data.icache.ways;
	     i++, index += boot_cpu_data.icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	back_to_cached();
	wmb();
	local_irq_restore(flags);
}
@@ -256,12 +256,12 @@ void flush_dcache_page(struct page *page)
}

/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
static inline void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();
	jump_to_uncached();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
@@ -269,11 +269,11 @@ static inline void flush_icache_all(void)
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_P1();
	back_to_cached();
	local_irq_restore(flags);
}

Loading