
Commit e58e87ad authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Update _PAGE_KERNEL_RO



PS3 had used a PPP bit hack to implement a read-only mapping in the
kernel area. Since we are bolting the ioremap area, it used the pte
flags _PAGE_PRESENT | _PAGE_USER to get a PPP value of 0x3, thereby
resulting in a read-only mapping. This means the area can be accessed
by user space, but the kernel will never return such an address to
user space.

But we can do better by implementing a read-only kernel mapping using
PPP bits 0b110.

This also allows us to implement read-only kernel mappings for radix in
later patches.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 96270b1f
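To make the protection values above concrete, here is a small standalone sketch of how the pte flags select a PPP value under the scheme this patch implements. It is illustrative only: the SK_* flag bits and the helper are hypothetical stand-ins for the real _PAGE_* and HPTE_R_PP0 definitions in the powerpc headers, and the user-side dirty-bit handling of htab_convert_pte_flags() is simplified away. Only the resulting PPP encodings come from the commit message and the updated comment below; the user read-only value, 0b011, is the one the old PS3 hack relied on.

#include <stdio.h>

/* Hypothetical stand-ins for the real powerpc pte flag bits. */
#define SK_PAGE_READ		0x1UL
#define SK_PAGE_WRITE		0x2UL
#define SK_PAGE_PRIVILEGED	0x4UL

/* Stand-in for HPTE_R_PP0, the high protection bit of PPP. */
#define SK_HPTE_R_PP0		0x4UL

/* Map simplified pte flags to a 3-bit PPP value (PP0 in bit 2, pp in bits 1:0). */
static unsigned long sk_pte_to_ppp(unsigned long pteflags)
{
	unsigned long ppp = 0;

	if (pteflags & SK_PAGE_PRIVILEGED) {
		/* kernel: read/write -> 0b000, read-only -> 0b110 (new with this patch) */
		if (!(pteflags & SK_PAGE_WRITE))
			ppp |= SK_HPTE_R_PP0 | 0x2;
	} else {
		/* user: read/write -> 0b010, read-only -> 0b011 (dirty handling omitted) */
		ppp |= (pteflags & SK_PAGE_WRITE) ? 0x2 : 0x3;
	}
	return ppp;
}

int main(void)
{
	printf("kernel RW -> PPP %lu (expect 0, 0b000)\n",
	       sk_pte_to_ppp(SK_PAGE_PRIVILEGED | SK_PAGE_READ | SK_PAGE_WRITE));
	printf("kernel RO -> PPP %lu (expect 6, 0b110)\n",
	       sk_pte_to_ppp(SK_PAGE_PRIVILEGED | SK_PAGE_READ));
	printf("user RW   -> PPP %lu (expect 2, 0b010)\n",
	       sk_pte_to_ppp(SK_PAGE_READ | SK_PAGE_WRITE));
	printf("user RO   -> PPP %lu (expect 3, 0b011)\n",
	       sk_pte_to_ppp(SK_PAGE_READ));
	return 0;
}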
+2 −2
@@ -119,10 +119,10 @@
 #endif /* CONFIG_PPC_MM_SLICES */
 
 /*
- * No separate kernel read-only, user access blocked by key
+ * user access blocked by key
  */
 #define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
-#define _PAGE_KERNEL_RO		 _PAGE_KERNEL_RW
+#define _PAGE_KERNEL_RO		 (_PAGE_PRIVILEGED | _PAGE_READ)
 #define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | \
 				 _PAGE_RW | _PAGE_EXEC)

+11 −6
@@ -167,14 +167,19 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 	if ((pteflags & _PAGE_EXEC) == 0)
 		rflags |= HPTE_R_N;
 	/*
-	 * PP bits:
+	 * PPP bits:
 	 * Linux uses slb key 0 for kernel and 1 for user.
-	 * kernel areas are mapped with PP=00
-	 * and there is no kernel RO (_PAGE_KERNEL_RO).
-	 * User area is mapped with PP=0x2 for read/write
-	 * or PP=0x3 for read-only (including writeable but clean pages).
+	 * kernel RW areas are mapped with PPP=0b000
+	 * User area is mapped with PPP=0b010 for read/write
+	 * or PPP=0b011 for read-only (including writeable but clean pages).
 	 */
-	if (!(pteflags & _PAGE_PRIVILEGED)) {
+	if (pteflags & _PAGE_PRIVILEGED) {
+		/*
+		 * Kernel read only mapped with ppp bits 0b110
+		 */
+		if (!(pteflags & _PAGE_WRITE))
+			rflags |= (HPTE_R_PP0 | 0x2);
+	} else {
 		if (pteflags & _PAGE_RWX)
 			rflags |= 0x2;
 		if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
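A worked example of the new branch, as a sketch: assuming HPTE_R_PP0 supplies the high protection bit while the two low pp bits sit directly in rflags (which is what the existing 0x2/0x3 user values suggest), a _PAGE_KERNEL_RO pte carries _PAGE_PRIVILEGED | _PAGE_READ, so _PAGE_PRIVILEGED is set and _PAGE_WRITE is clear, and the kernel branch adds HPTE_R_PP0 | 0x2, i.e. PPP = 0b110 (kernel read-only). A _PAGE_KERNEL_RW pte has _PAGE_WRITE set, leaves the pp bits at zero and keeps PPP = 0b000 (kernel read/write), matching the updated comment above.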
+1 −1
@@ -205,7 +205,7 @@ static void spu_unmap(struct spu *spu)
 static int __init setup_areas(struct spu *spu)
 {
 	struct table {char* name; unsigned long addr; unsigned long size;};
-	static const unsigned long shadow_flags = _PAGE_NO_CACHE | 3;
+	unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO));
 
 	spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
 					   sizeof(struct spe_shadow),
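On the PS3 side, the hand-rolled _PAGE_NO_CACHE | 3 (the literal 3 being the user read-only PP value the commit message calls a hack) is replaced by deriving the flags from the real kernel read-only pgprot. A rough sketch of what the new expression composes, assuming pgprot_noncached_wc() merely adds non-cached, write-combining attribute bits on top of the protection it is given:

	pgprot_noncached_wc(PAGE_KERNEL_RO)
		= no-cache/write-combining attribute bits
		| kernel read-only protection (_PAGE_PRIVILEGED | _PAGE_READ, PPP = 0b110)

so the SPE shadow register area stays uncached but is now mapped kernel read-only rather than user read-only.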