Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 48c4ac97 authored by David Daney, committed by Ralf Baechle
Browse files

Revert "MIPS: Allow ASID size to be determined at boot time."



This reverts commit d532f3d2.

The original commit has several problems:

1) Doesn't work with 64-bit kernels.

2) Calls TLBMISS_HANDLER_SETUP() before the code is generated.

3) Calls TLBMISS_HANDLER_SETUP() twice in per_cpu_trap_init() when
   only one call is needed.

[ralf@linux-mips.org: Also revert the bits of the ASID patch which were
hidden in the KVM merge.]

Signed-off-by: David Daney <david.daney@cavium.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: "Steven J. Hill" <Steven.Hill@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Patchwork: https://patchwork.linux-mips.org/patch/5242/


Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 8ea6cd7a
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -336,7 +336,7 @@ enum emulation_result {
#define VPN2_MASK           0xffffe000
#define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
#define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)         (ASID_MASK((x).tlb_hi))
#define TLB_ASID(x)         ((x).tlb_hi & ASID_MASK)
#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))

struct kvm_mips_tlb {
+36 −59
Original line number Diff line number Diff line
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define ASID_INC(asid)						\
({								\
	unsigned long __asid = asid;				\
	__asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"		\
	".section\t__asid_inc,\"a\"\n\t"			\
	".word\t1b\n\t"						\
	".previous"						\
	:"=r" (__asid)						\
	:"0" (__asid));						\
	__asid;							\
})
#define ASID_MASK(asid)						\
({								\
	unsigned long __asid = asid;				\
	__asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"	\
	".section\t__asid_mask,\"a\"\n\t"			\
	".word\t1b\n\t"						\
	".previous"						\
	:"=r" (__asid)						\
	:"r" (__asid));						\
	__asid;							\
})
#define ASID_VERSION_MASK					\
({								\
	unsigned long __asid;					\
	__asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"	\
	".section\t__asid_version_mask,\"a\"\n\t"		\
	".word\t1b\n\t"						\
	".previous"						\
	:"=r" (__asid));					\
	__asid;							\
})
#define ASID_FIRST_VERSION					\
({								\
	unsigned long __asid = asid;				\
	__asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"		\
	".section\t__asid_first_version,\"a\"\n\t"		\
	".word\t1b\n\t"						\
	".previous"						\
	:"=r" (__asid));					\
	__asid;							\
})

#define ASID_FIRST_VERSION_R3000	0x1000
#define ASID_FIRST_VERSION_R4000	0x100
#define ASID_FIRST_VERSION_R8000	0x1000
#define ASID_FIRST_VERSION_RM9000	0x1000
#define ASID_INC	0x40
#define ASID_MASK	0xfc0

#elif defined(CONFIG_CPU_R8000)

#define ASID_INC	0x10
#define ASID_MASK	0xff0

#elif defined(CONFIG_MIPS_MT_SMTC)

#define ASID_INC	0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK	(smtc_asid_mask)
#define HW_ASID_MASK	0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */

#define ASID_INC	0x1
#define ASID_MASK	0xff

#ifdef CONFIG_MIPS_MT_SMTC
#define SMTC_HW_ASID_MASK		0xff
extern unsigned int smtc_asid_mask;
#endif

#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)	ASID_MASK(cpu_context((cpu), (mm)))
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 *  All unused by hardware upper bits will be considered
 *  as a software asid extension.
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)

#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
@@ -137,7 +114,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
	extern void kvm_local_flush_tlb_all(void);
	unsigned long asid = asid_cache(cpu);

	if (!ASID_MASK((asid = ASID_INC(asid)))) {
	if (! ((asid += ASID_INC) & ASID_MASK) ) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
#ifdef CONFIG_VIRTUALIZATION
@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	 * free up the ASID value for use and flush any old
	 * instances of it from the TLB.
	 */
	oldasid = ASID_MASK(read_c0_entryhi());
	oldasid = (read_c0_entryhi() & ASID_MASK);
	if(smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	 * having ASID_MASK smaller than the hardware maximum,
	 * make sure no "soft" bits become "hard"...
	 */
	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
@@ -264,14 +241,14 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
#ifdef CONFIG_MIPS_MT_SMTC
	/* See comments for similar code above */
	mtflags = dvpe();
	oldasid = ASID_MASK(read_c0_entryhi());
	oldasid = read_c0_entryhi() & ASID_MASK;
	if(smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if(smtc_live_asid[mytlb][oldasid] == 0)
			 smtc_flush_tlb_asid(oldasid);
	}
	/* See comments for similar code above */
	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
#ifdef CONFIG_MIPS_MT_SMTC
		/* See comments for similar code above */
		prevvpe = dvpe();
		oldasid = ASID_MASK(read_c0_entryhi());
		oldasid = (read_c0_entryhi() & ASID_MASK);
		if (smtc_live_asid[mytlb][oldasid]) {
			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
			if(smtc_live_asid[mytlb][oldasid] == 0)
				smtc_flush_tlb_asid(oldasid);
		}
		/* See comments for similar code above */
		write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
				| cpu_asid(cpu, mm));
		ehb(); /* Make sure it propagates to TCStatus */
		evpe(prevvpe);
+1 −1
Original line number Diff line number Diff line
@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK patched at run-time!! */
	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL k0, _PAGE_SHIFT + 1
	PTR_SLL k0, _PAGE_SHIFT + 1
+5 −5
Original line number Diff line number Diff line
@@ -111,7 +111,7 @@ static int vpe0limit;
static int ipibuffers;
static int nostlb;
static int asidmask;
unsigned int smtc_asid_mask = 0xff;
unsigned long smtc_asid_mask = 0xff;

static int __init vpe0tcs(char *str)
{
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
	asid = asid_cache(cpu);

	do {
		if (!ASID_MASK(ASID_INC(asid))) {
		if (!((asid += ASID_INC) & ASID_MASK) ) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][ASID_MASK(asid)]);
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if (ASID_MASK(ehi) == asid) {
		if ((ehi & ASID_MASK) == asid) {
		    /*
		     * Invalidate only entries with specified ASID,
		     * makiing sure all entries differ.
+2 −4
Original line number Diff line number Diff line
@@ -1656,7 +1656,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
	unsigned int hwrena = cpu_hwrena_impl_bits;
	unsigned long asid = 0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);
@@ -1740,9 +1739,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	asid = ASID_FIRST_VERSION;
	cpu_data[cpu].asid_cache = asid;
	TLBMISS_HANDLER_SETUP();
	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
Loading