
Commit 8799ee9f authored by Russell King, committed by Russell King

[ARM] Set bit 4 on section mappings correctly depending on CPU



On some CPUs, bit 4 of section mappings means "update the
cache when written to".  On others, this bit is required to
be one, and on others it's required to be zero.  Finally, on
ARMv6 and above, setting it turns on "no execute" and prevents
speculative prefetches.

With all these combinations, no one value fits all CPUs, so we
have to pick a value depending on the CPU type, and the area
we're mapping.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 326764a8
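
To illustrate the idea in the commit message, here is a minimal, stand-alone C sketch of picking section-mapping flags per CPU type and per mapping kind. This is not kernel code: the enum names, pick_section_flags() and the map-kind split are illustrative only, and the flag values are simplified stand-ins for the kernel's definitions.

/*
 * Sketch of the policy described above: start with bit 4 set,
 * clear it where the CPU forbids it (XScale), and clear it on
 * ARMv6+ kernel memory where it has become the XN bit.
 */
#include <stdio.h>

#define PMD_TYPE_SECT		(2 << 0)
#define PMD_BIT4		(1 << 4)	/* XN on ARMv6+, CPU-specific before */
#define PMD_SECT_AP_WRITE	(1 << 10)

enum cpu_arch { ARCH_XSCALE, ARCH_V5_OR_EARLIER, ARCH_V6_OR_LATER };
enum map_kind { MAP_KERNEL_MEMORY, MAP_DEVICE_IO };

static unsigned int pick_section_flags(enum cpu_arch arch, enum map_kind kind)
{
	unsigned int flags = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE;

	if (arch == ARCH_XSCALE)
		flags &= ~PMD_BIT4;	/* XScale requires bit 4 clear */

	if (arch == ARCH_V6_OR_LATER && kind == MAP_KERNEL_MEMORY)
		flags &= ~PMD_BIT4;	/* bit 4 is XN: kernel text must stay executable */

	return flags;
}

int main(void)
{
	printf("xscale mem: %#x\n", pick_section_flags(ARCH_XSCALE, MAP_KERNEL_MEMORY));
	printf("v5 io:      %#x\n", pick_section_flags(ARCH_V5_OR_EARLIER, MAP_DEVICE_IO));
	printf("v6 mem:     %#x\n", pick_section_flags(ARCH_V6_OR_LATER, MAP_KERNEL_MEMORY));
	printf("v6 io:      %#x\n", pick_section_flags(ARCH_V6_OR_LATER, MAP_DEVICE_IO));
	return 0;
}

The kernel itself achieves this by carrying two flag words per proc_info_list entry (__cpu_mm_mmu_flags and __cpu_io_mmu_flags) and by adjusting mem_types[] in build_mem_type_table(), as the diffs below show.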
+2 −1
@@ -105,6 +105,7 @@ int main(void)
  BLANK();
  DEFINE(PROC_INFO_SZ,		sizeof(struct proc_info_list));
  DEFINE(PROCINFO_INITFUNC,	offsetof(struct proc_info_list, __cpu_flush));
-  DEFINE(PROCINFO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mmu_flags));
+  DEFINE(PROCINFO_MM_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
+  DEFINE(PROCINFO_IO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_io_mmu_flags));
  return 0; 
}
+2 −3
@@ -221,7 +221,7 @@ __create_page_tables:
	teq	r0, r6
	bne	1b

-	ldr	r7, [r10, #PROCINFO_MMUFLAGS]	@ mmuflags
+	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping for first MB of kernel to
@@ -272,8 +272,7 @@ __create_page_tables:
#endif

#ifdef CONFIG_DEBUG_LL
-	bic	r7, r7, #0x0c			@ turn off cacheable
-						@ and bufferable bits
+	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
+22 −15
@@ -303,16 +303,16 @@ static struct mem_types mem_types[] __initdata = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
-		.prot_sect = PMD_TYPE_SECT,
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
@@ -328,25 +328,25 @@ static struct mem_types mem_types[] __initdata = {
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
-		.prot_sect = PMD_TYPE_SECT,
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	},
	[MT_NONSHARED_DEVICE] = {
		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV |
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	}
@@ -376,14 +376,21 @@ void __init build_mem_type_table(void)
		ecc_mask = 0;
	}

-	if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) {
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+	/*
+	 * Xscale must not have PMD bit 4 set for section mappings.
+	 */
+	if (cpu_is_xscale())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_BIT4;
+
+	/*
+	 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
+	 * page tables.
+	 */
+	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
-			if (mem_types[i].prot_sect)
-				mem_types[i].prot_sect |= PMD_BIT4;
-		}
-	}

	cp = &cache_policies[cachepolicy];
	kern_pgprot = user_pgprot = cp->pte;
@@ -407,8 +414,8 @@ void __init build_mem_type_table(void)
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
-		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
-		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
+		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;

		/*
		 * Mark cache clean areas and XIP ROM read only
+3 −0
@@ -524,6 +524,9 @@ cpu_arm1020_name:
__arm1020_proc_info:
	.long	0x4104a200			@ ARM 1020T (Architecture v5T)
	.long	0xff0ffff0
+	.long   PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
+4 −0
@@ -488,6 +488,10 @@ cpu_arm1020e_name:
__arm1020e_proc_info:
	.long	0x4105a200			@ ARM 1020TE (Architecture v5TE)
	.long	0xff0ffff0
+	.long   PMD_TYPE_SECT | \
+		PMD_BIT4 | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \