
Commit 8263a67e authored by Paul Mundt

sh: Support for extended ASIDs on PTEAEX-capable SH-X3 cores.



This adds support for extended ASIDs (up to 16 bits) on newer SH-X3 cores
that implement the PTEAEX register and its associated functionality.
Presently this is only the 65nm SH7786 (the 90nm parts support only the
legacy 8-bit ASIDs).
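
The interesting constants all follow from the ASID mask, as the +10 −5 hunk
below shows: the version mask and first-version value are now derived from it
rather than hard-coded. A small host-side sketch (plain C, not kernel code;
sh is 32-bit, hence the fixed-width type) that evaluates the same expressions
for both widths:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 8-bit legacy ASIDs vs. 16-bit PTEAEX ASIDs */
	uint32_t asid_masks[] = { 0x000000ff, 0x0000ffff };

	for (int i = 0; i < 2; i++) {
		uint32_t asid_mask     = asid_masks[i];
		uint32_t version_mask  = ~(uint32_t)0 & ~asid_mask;	/* ~0UL & ~ASID_MASK */
		uint32_t first_version = asid_mask + 1;			/* ASID_MASK + 1 */

		printf("asid mask %#010x -> version mask %#010x, first version %#010x\n",
		       (unsigned int)asid_mask, (unsigned int)version_mask,
		       (unsigned int)first_version);
	}

	return 0;
}

With the 8-bit mask this reproduces the old hard-coded 0xffffff00/0x00000100
values; with the 16-bit mask it yields 0xffff0000/0x00010000.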

The main change is in how the PTE is written out when loading the entry
into the TLB, as well as in how the TLB entry is selectively flushed.
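
As an illustration (not a copy of the patch), the TLB-load path on a
PTEAEX-capable part looks roughly like this; MMU_PTEH, MMU_PTEL, MMU_VPN_MASK
and _PAGE_FLAGS_HARDWARE_MASK are the existing sh MMU/PTE definitions,
MMU_PTEAEX is the new register, and the function name is made up for the
sketch:

/*
 * Sketch: loading a TLB entry with extended ASIDs. The VPN still goes
 * to PTEH, but the ASID is no longer packed into PTEH's low bits; it is
 * written to the separate PTEAEX register instead.
 */
static inline void sketch_load_tlb_entry(unsigned long address, pte_t pte)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Virtual page number only -- no ASID bits in PTEH. */
	__raw_writel(address & MMU_VPN_MASK, MMU_PTEH);

	/* The (up to 16-bit) ASID lives in its own register. */
	__raw_writel(get_asid(), MMU_PTEAEX);

	/* Hardware-relevant PTE bits, then ldtlb loads the UTLB entry. */
	__raw_writel(pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK, MMU_PTEL);
	__asm__ __volatile__ ("ldtlb" : : : "memory");

	local_irq_restore(flags);
}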

While SH-X2 extended mode splits out the memory-mapped U and I-TLB data
arrays to gain the extra bits, extended ASID mode splits out the address
arrays. We don't use the memory-mapped data array access, but the address
array accesses are necessary for selective TLB flushes, so these are newly
implemented here and replace the generic SH-4 implementation.
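
A sketch of what the selective flush becomes with the split arrays
(MMU_UTLB_ADDRESS_ARRAY, MMU_ITLB_ADDRESS_ARRAY and MMU_PAGE_ASSOC_BIT are
the existing sh definitions; the *_ADDRESS_ARRAY2 names stand for the newly
split-out second address arrays and, like the function name, are assumptions
for the illustration):

/*
 * Sketch: flush a single page for a single ASID on a PTEAEX part. The
 * VPN goes to the primary UTLB/ITLB address array and the ASID to the
 * split-out second array; the associative bit makes the MMU invalidate
 * only the entry matching both, rather than a whole way/set.
 */
static void sketch_flush_tlb_one(unsigned long asid, unsigned long page)
{
	jump_to_uncached();	/* address arrays are poked through uncached space */

	__raw_writel(page, MMU_UTLB_ADDRESS_ARRAY  | MMU_PAGE_ASSOC_BIT);
	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
	__raw_writel(page, MMU_ITLB_ADDRESS_ARRAY  | MMU_PAGE_ASSOC_BIT);
	__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);

	back_to_uncached();
}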

With this, TLB flushes in switch_mm() are almost non-existent on newer
parts.
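
That follows from the ASID allocator rather than from this patch directly: a
full flush_tlb_all() is only forced when the per-CPU ASID counter wraps and a
new version has to start, and a 16-bit ASID space wraps 256 times less often
than an 8-bit one. A rough paraphrase of that check (illustrative only; the
function name is made up, while asid_cache(), cpu_context() and
flush_tlb_all() are the existing interfaces):

/* Sketch: handing a fresh ASID to an mm on this CPU. The expensive
 * flush happens only when ++asid overflows MMU_CONTEXT_ASID_MASK --
 * after 256 new contexts with 8-bit ASIDs, after 65536 with PTEAEX. */
static void sketch_assign_new_asid(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
		/* Exhausted the ASIDs of this version: flush everything
		 * and start a new version cycle. */
		flush_tlb_all();
		if (!asid)
			asid = MMU_CONTEXT_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}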

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent da788006
+1 −0
@@ -365,6 +365,7 @@ config CPU_SUBTYPE_SH7786
	bool "Support SH7786 processor"
	select CPU_SH4A
	select CPU_SHX3
+	select CPU_HAS_PTEAEX
	select ARCH_SPARSEMEM_ENABLE
	select SYS_SUPPORTS_NUMA

+3 −0
@@ -104,6 +104,9 @@ config CPU_HAS_SR_RB
config CPU_HAS_PTEA
	bool

+config CPU_HAS_PTEAEX
+	bool
+
config CPU_HAS_DSP
	bool

+1 −0
@@ -21,5 +21,6 @@
#define CPU_HAS_LLSC		0x0040	/* movli.l/movco.l */
#define CPU_HAS_L2_CACHE	0x0080	/* Secondary cache / URAM */
#define CPU_HAS_OP32		0x0100	/* 32-bit instruction support */
+#define CPU_HAS_PTEAEX		0x0200	/* PTE ASID Extension support */

#endif /* __ASM_SH_CPU_FEATURES_H */
+10 −5
@@ -19,13 +19,18 @@
 *    (a) TLB cache version (or round, cycle whatever expression you like)
 *    (b) ASID (Address Space IDentifier)
 */
+#ifdef CONFIG_CPU_HAS_PTEAEX
+#define MMU_CONTEXT_ASID_MASK		0x0000ffff
+#else
#define MMU_CONTEXT_ASID_MASK		0x000000ff
-#define MMU_CONTEXT_VERSION_MASK	0xffffff00
-#define MMU_CONTEXT_FIRST_VERSION	0x00000100
-#define NO_CONTEXT			0UL
+#endif

-/* ASID is 8-bit value, so it can't be 0x100 */
-#define MMU_NO_ASID			0x100
+#define MMU_CONTEXT_VERSION_MASK	(~0UL & ~MMU_CONTEXT_ASID_MASK)
+#define MMU_CONTEXT_FIRST_VERSION	(MMU_CONTEXT_ASID_MASK + 1)
+
+/* Impossible ASID value, to differentiate from NO_CONTEXT. */
+#define MMU_NO_ASID			MMU_CONTEXT_FIRST_VERSION
+#define NO_CONTEXT			0UL

#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

+12 −0
@@ -10,6 +10,17 @@ static inline void destroy_context(struct mm_struct *mm)
	/* Do nothing */
}

+#ifdef CONFIG_CPU_HAS_PTEAEX
+static inline void set_asid(unsigned long asid)
+{
+	__raw_writel(asid, MMU_PTEAEX);
+}
+
+static inline unsigned long get_asid(void)
+{
+	return __raw_readl(MMU_PTEAEX) & MMU_CONTEXT_ASID_MASK;
+}
+#else
static inline void set_asid(unsigned long asid)
{
	unsigned long __dummy;
@@ -33,6 +44,7 @@ static inline unsigned long get_asid(void)
	asid &= MMU_CONTEXT_ASID_MASK;
	return asid;
}
+#endif /* CONFIG_CPU_HAS_PTEAEX */

/* MMU_TTB is used for optimizing the fault handling. */
static inline void set_TTB(pgd_t *pgd)