
Commit 14bd8c08 authored by Ralf Baechle

MIPS: Loongson: Get rid of Loongson 2 #ifdefery all over arch/mips.



It was ugly.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 7b784c63
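
Note: the pattern used throughout the patch is to replace build-time CONFIG_CPU_LOONGSON2 #ifdef blocks with a run-time switch on the CPU type, so both the generic and the Loongson-2 paths are compiled into one image and the right one is picked when the code runs. A minimal, self-contained sketch of the idea (plain userspace C; fake_boot_cpu_type() is a stand-in, not the kernel's boot_cpu_type()/current_cpu_type() helpers):

#include <stdio.h>

enum cpu_type { CPU_R4K, CPU_LOONGSON2 };

/* Stand-in for the kernel's boot_cpu_type(); here it just returns a fixed value. */
static enum cpu_type fake_boot_cpu_type(void)
{
	return CPU_LOONGSON2;
}

/* Both cache op encodings exist side by side; the caller picks one at run
 * time instead of the build selecting one with #ifdef CONFIG_CPU_LOONGSON2. */
#define Hit_Invalidate_I		0x10
#define Hit_Invalidate_I_Loongson23	0x00

static void flush_icache_line(unsigned long addr)
{
	switch (fake_boot_cpu_type()) {
	case CPU_LOONGSON2:
		printf("cache op 0x%02x on address %#lx\n",
		       Hit_Invalidate_I_Loongson23, addr);
		break;
	default:
		printf("cache op 0x%02x on address %#lx\n",
		       Hit_Invalidate_I, addr);
		break;
	}
}

int main(void)
{
	flush_icache_line(0x80001000UL);
	return 0;
}

In the kernel the run-time helpers come from <asm/cpu-type.h>, which the r4kcache.h hunk below starts including.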

arch/mips/include/asm/cacheops.h  +5 −4
@@ -20,11 +20,7 @@
 #define Index_Load_Tag_D		0x05
 #define Index_Store_Tag_I		0x08
 #define Index_Store_Tag_D		0x09
-#if defined(CONFIG_CPU_LOONGSON2)
-#define Hit_Invalidate_I		0x00
-#else
 #define Hit_Invalidate_I		0x10
-#endif
 #define Hit_Invalidate_D		0x11
 #define Hit_Writeback_Inv_D		0x15
 
@@ -84,4 +80,9 @@
 #define Index_Store_Data_D		0x1d
 #define Index_Store_Data_S		0x1f
 
+/*
+ * Loongson2-specific cacheops
+ */
+#define Hit_Invalidate_I_Loongson23	0x00
+
 #endif	/* __ASM_CACHEOPS_H */

arch/mips/include/asm/r4kcache.h  +30 −11
@@ -15,6 +15,7 @@
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 #include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
 
 /*
@@ -162,7 +163,15 @@ static inline void flush_scache_line_indexed(unsigned long addr)
 static inline void flush_icache_line(unsigned long addr)
 {
 	__iflush_prologue
+	switch (boot_cpu_type()) {
+	case CPU_LOONGSON2:
+		cache_op(Hit_Invalidate_I_Loongson23, addr);
+		break;
+
+	default:
 		cache_op(Hit_Invalidate_I, addr);
+		break;
+	}
 	__iflush_epilogue
 }
 
@@ -208,7 +217,15 @@ static inline void flush_scache_line(unsigned long addr)
  */
 static inline void protected_flush_icache_line(unsigned long addr)
 {
+	switch (boot_cpu_type()) {
+	case CPU_LOONGSON2:
+		protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
+		break;
+
+	default:
 		protected_cache_op(Hit_Invalidate_I, addr);
+		break;
+	}
 }
 
 /*
@@ -412,8 +429,8 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
-static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
+static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
 						    unsigned long end)	\
 {									\
 	unsigned long lsize = cpu_##desc##_line_size();			\
@@ -432,13 +449,15 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
 	__##pfx##flush_epilogue						\
 }
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
+	protected_, loongson23_)
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
 
 #endif /* _ASM_R4KCACHE_H */
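
The extra parameter added to __BUILD_BLAST_CACHE_RANGE() above is pasted into the generated function name, which is how the new protected_loongson23_blast_icache_range() used in c-r4k.c below comes into existence next to the plain protected_blast_icache_range(). A reduced sketch of the same token-pasting trick (illustrative macro and values, not the kernel's):

#include <stdio.h>

/* Same shape as the kernel macro: the 'prot' and 'extra' arguments are
 * pasted into the name of the generated function via the ## operator. */
#define BUILD_RANGE_OP(pfx, hitop, prot, extra)				\
static void prot##extra##blast_##pfx##cache_range(unsigned long start,	\
						  unsigned long end)	\
{									\
	printf(#prot #extra "blast_" #pfx "cache_range: op %#x, %#lx-%#lx\n", \
	       (unsigned int)(hitop), start, end);			\
}

BUILD_RANGE_OP(i, 0x10, protected_, )		 /* protected_blast_icache_range */
BUILD_RANGE_OP(i, 0x00, protected_, loongson23_) /* protected_loongson23_blast_icache_range */

int main(void)
{
	protected_blast_icache_range(0x1000, 0x2000);
	protected_loongson23_blast_icache_range(0x1000, 0x2000);
	return 0;
}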

arch/mips/mm/c-r4k.c  +31 −21
@@ -346,14 +346,8 @@ static void r4k_blast_scache_setup(void)
 
 static inline void local_r4k___flush_cache_all(void * args)
 {
-#if defined(CONFIG_CPU_LOONGSON2)
-	r4k_blast_scache();
-	return;
-#endif
-	r4k_blast_dcache();
-	r4k_blast_icache();
-
 	switch (current_cpu_type()) {
+	case CPU_LOONGSON2:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
 	case CPU_R4400SC:
@@ -361,7 +355,18 @@ static inline void local_r4k___flush_cache_all(void * args)
 	case CPU_R10000:
 	case CPU_R12000:
 	case CPU_R14000:
+		/*
+		 * These caches are inclusive caches, that is, if something
+		 * is not cached in the S-cache, we know it also won't be
+		 * in one of the primary caches.
+		 */
 		r4k_blast_scache();
+		break;
+
+	default:
+		r4k_blast_dcache();
+		r4k_blast_icache();
+		break;
 	}
 }
 
@@ -572,8 +577,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
 
 	if (end - start > icache_size)
 		r4k_blast_icache();
-	else
+	else {
+		switch (boot_cpu_type()) {
+		case CPU_LOONGSON2:
 			protected_blast_icache_range(start, end);
+			break;
+
+		default:
+			protected_loongson23_blast_icache_range(start, end);
+			break;
+		}
+	}
 }
 
 static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -1109,15 +1123,14 @@ static void probe_pcache(void)
 	case CPU_ALCHEMY:
 		c->icache.flags |= MIPS_CACHE_IC_F_DC;
 		break;
-	}
 
-#ifdef	CONFIG_CPU_LOONGSON2
+	case CPU_LOONGSON2:
 		/*
 		 * LOONGSON2 has 4 way icache, but when using indexed cache op,
 		 * one op will act on all 4 ways
 		 */
 		c->icache.ways = 1;
-#endif
+	}
 
 	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
 	       icache_size >> 10,
@@ -1193,7 +1206,6 @@
 	return 1;
 }
 
-#if defined(CONFIG_CPU_LOONGSON2)
 static void __init loongson2_sc_init(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -1209,7 +1221,6 @@ static void __init loongson2_sc_init(void)
 
 	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
 }
-#endif
 
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
@@ -1259,11 +1270,10 @@ static void setup_scache(void)
 #endif
 		return;
 
-#if defined(CONFIG_CPU_LOONGSON2)
 	case CPU_LOONGSON2:
 		loongson2_sc_init();
 		return;
-#endif
 
 	case CPU_XLP:
 		/* don't need to worry about L2, fully coherent */
 		return;
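
current_cpu_type() and boot_cpu_type() come from <asm/cpu-type.h>; when a kernel is configured for a single CPU type they are expected to reduce to a compile-time constant, so the compiler can drop the dead switch arms and the run-time dispatch should cost about as much as the old #ifdef version did. A small sketch of that effect, with a hypothetical helper standing in for the kernel's (the config macro and names below are made up for illustration):

#include <stdio.h>

enum cpu_type { CPU_R4K, CPU_LOONGSON2 };

/* Hypothetical config switch: pretend the build supports Loongson-2 only. */
#define SYS_HAS_ONLY_LOONGSON2 1

/* Stand-in for current_cpu_type(): with a single supported CPU type it
 * degenerates to a constant, so the switch below can be folded at compile
 * time and the unused arm discarded. */
static inline enum cpu_type fake_current_cpu_type(void)
{
#if SYS_HAS_ONLY_LOONGSON2
	return CPU_LOONGSON2;
#else
	extern enum cpu_type read_prid(void);	/* a real run-time probe */
	return read_prid();
#endif
}

static void flush_cache_all(void)
{
	switch (fake_current_cpu_type()) {
	case CPU_LOONGSON2:
		/* Inclusive S-cache: blasting it covers the primary caches too. */
		puts("r4k_blast_scache()");
		break;
	default:
		puts("r4k_blast_dcache(); r4k_blast_icache()");
		break;
	}
}

int main(void)
{
	flush_cache_all();
	return 0;
}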

arch/mips/mm/tlb-r4k.c  +21 −16
@@ -52,21 +52,26 @@ extern void build_tlb_refill_handler(void);
 
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-#if defined(CONFIG_CPU_LOONGSON2)
 /*
  * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
  * unfortrunately, itlb is not totally transparent to software.
  */
-#define FLUSH_ITLB write_c0_diag(4);
-
-#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }
-
-#else
-
-#define FLUSH_ITLB
-#define FLUSH_ITLB_VM(vma)
+static inline void flush_itlb(void)
+{
+	switch (current_cpu_type()) {
+	case CPU_LOONGSON2:
+		write_c0_diag(4);
+		break;
+	default:
+		break;
+	}
+}
 
-#endif
+static inline void flush_itlb_vm(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_EXEC)
+		flush_itlb();
+}
 
 void local_flush_tlb_all(void)
 {
@@ -93,7 +98,7 @@ void local_flush_tlb_all(void)
 	}
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
-	FLUSH_ITLB;
+	flush_itlb();
 	EXIT_CRITICAL(flags);
 }
 EXPORT_SYMBOL(local_flush_tlb_all);
@@ -155,7 +160,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		} else {
 			drop_mmu_context(mm, cpu);
 		}
-		FLUSH_ITLB;
+		flush_itlb();
 		EXIT_CRITICAL(flags);
 	}
 }
@@ -197,7 +202,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	} else {
 		local_flush_tlb_all();
 	}
-	FLUSH_ITLB;
+	flush_itlb();
 	EXIT_CRITICAL(flags);
 }
 
@@ -230,7 +235,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 	finish:
 		write_c0_entryhi(oldpid);
-		FLUSH_ITLB_VM(vma);
+		flush_itlb_vm(vma);
 		EXIT_CRITICAL(flags);
 	}
 }
@@ -262,7 +267,7 @@ void local_flush_tlb_one(unsigned long page)
 		tlbw_use_hazard();
 	}
 	write_c0_entryhi(oldpid);
-	FLUSH_ITLB;
+	flush_itlb();
 	EXIT_CRITICAL(flags);
 }
 
@@ -335,7 +340,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 			tlb_write_indexed();
 	}
 	tlbw_use_hazard();
-	FLUSH_ITLB_VM(vma);
+	flush_itlb_vm(vma);
 	EXIT_CRITICAL(flags);
 }
 
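The old FLUSH_ITLB carried its own trailing semicolon and the no-op variant expanded to nothing, which is exactly the kind of statement-like macro that breaks inside an unbraced if/else; the new flush_itlb()/flush_itlb_vm() are ordinary type-checked inline functions and behave like any other call. A small sketch of the pitfall the conversion avoids (illustrative code, write_c0_diag() replaced by printf):

#include <stdio.h>

/* Statement-like macro with a trailing semicolon, in the style of the old
 * FLUSH_ITLB definition above (shown only for shape, never expanded here). */
#define FLUSH_ITLB	printf("flush itlb\n");

/* The equivalent static inline function, in the style of the new flush_itlb(). */
static inline void flush_itlb(void)
{
	printf("flush itlb\n");
}

int main(int argc, char **argv)
{
	/* Fine with the function: */
	if (argc > 1)
		flush_itlb();
	else
		printf("no flush\n");

	/* The same shape written with the macro would not compile: it expands to
	 *
	 *	if (argc > 1)
	 *		printf("flush itlb\n");;	<- the extra ';' ends the if
	 *	else					<- 'else' without a matching 'if'
	 *		printf("no flush\n");
	 *
	 * which is one reason to prefer inline functions over such macros.
	 */
	(void)argv;
	return 0;
}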

arch/mips/mm/tlbex.c  +87 −82
@@ -1311,27 +1311,30 @@ static void build_r4000_tlb_refill_handler(void)
 	 * need three, with the second nop'ed and the third being
 	 * unused.
 	 */
+	switch (boot_cpu_type()) {
+	default:
+		if (sizeof(long) == 4) {
+	case CPU_LOONGSON2:
 		/* Loongson2 ebase is different than r4k, we have more space */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
 			if ((p - tlb_handler) > 64)
 				panic("TLB refill handler space exceeded");
-#else
+			/*
+			 * Now fold the handler in the TLB refill handler space.
+			 */
+			f = final_handler;
+			/* Simplest case, just copy the handler. */
+			uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+			final_len = p - tlb_handler;
+			break;
+		} else {
 			if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
 			    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
 				&& uasm_insn_has_bdelay(relocs,
 							tlb_handler + MIPS64_REFILL_INSNS - 3)))
 				panic("TLB refill handler space exceeded");
-#endif
-
 			/*
 			 * Now fold the handler in the TLB refill handler space.
 			 */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
-	f = final_handler;
-	/* Simplest case, just copy the handler. */
-	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
-	final_len = p - tlb_handler;
-#else /* CONFIG_64BIT */
 			f = final_handler + MIPS64_REFILL_INSNS;
 			if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
 				/* Just copy the handler. */
@@ -1399,7 +1402,9 @@ static void build_r4000_tlb_refill_handler(void)
 				final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
 					    (p - split);
 			}
-#endif /* CONFIG_64BIT */
+		}
+		break;
+	}
 
 	uasm_resolve_relocs(relocs, labels);
 	pr_debug("Wrote TLB refill handler (%u instructions).\n",
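
The tlbex.c hunk relies on a rarely used corner of C: a case label may appear anywhere inside the body of its switch, including inside a nested if, so case CPU_LOONGSON2 jumps straight into the 32-bit sizeof(long) == 4 branch and Loongson-2 shares that path without duplicating it (the same family of trick as Duff's device). A stand-alone demonstration of the label placement (illustrative types and values only):

#include <stdio.h>

enum cpu { CPU_OTHER, CPU_LOONGSON2 };

static void pick_handler(enum cpu c, int wordsize)
{
	switch (c) {
	default:
		if (wordsize == 4) {
	case CPU_LOONGSON2:		/* jumps straight into the 32-bit branch */
			puts("use the small 32-bit style refill handler");
			break;
		} else {
			puts("use the folded 64-bit refill handler");
			break;
		}
	}
}

int main(void)
{
	pick_handler(CPU_OTHER, 8);	/* default arm, 64-bit path */
	pick_handler(CPU_OTHER, 4);	/* default arm, 32-bit path */
	pick_handler(CPU_LOONGSON2, 8);	/* case label: 32-bit path regardless */
	return 0;
}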
	pr_debug("Wrote TLB refill handler (%u instructions).\n",