
Commit 76db5bd2 authored by Vitaly Bordug, committed by Kumar Gala

[POWERPC] 8xx: fix swap



This makes the swap routines operate correctly on ppc_8xx-based machines.
The code has been revalidated on an mpc885ads (8 MB SDRAM) with a recent
kernel. Based on a patch from Yuri Tikhonov <yur@emcraft.com> that did the
same for the arch/ppc port.

The size of recent kernels makes the swap feature very important on
low-memory platforms, which are effectively inoperable without it.

Signed-off-by: Yuri Tikhonov <yur@emcraft.com>
Signed-off-by: Vitaly Bordug <vitb@kernel.crashing.org>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
parent d7f46190
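
The essence of the change, rendered as a C sketch for illustration (the real fix is the 8xx TLB-miss assembly in the diff below; pte_update_accessed() is a hypothetical helper, not part of this patch, while _PAGE_PRESENT and _PAGE_ACCESSED are the kernel's PTE flag macros): with CONFIG_SWAP enabled, _PAGE_ACCESSED is only ORed into a PTE whose _PAGE_PRESENT bit is set, so a swap entry stored in a non-present PTE is left untouched.

/*
 * Sketch of the new TLB-miss behaviour, assuming the PTE flag macros
 * from <asm/pgtable.h>; pte_update_accessed() is illustrative only.
 */
static inline unsigned long pte_update_accessed(unsigned long pte)
{
#ifdef CONFIG_SWAP
	/* do not set the _PAGE_ACCESSED bit of a non-present page */
	if (pte & _PAGE_PRESENT)
		pte |= _PAGE_ACCESSED;
#else
	pte |= _PAGE_ACCESSED;
#endif
	return pte;
}
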
+29 −1
@@ -332,8 +332,18 @@ InstructionTLBMiss:
	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
	lwz	r10, 0(r11)	/* Get the pte */

+#ifdef CONFIG_SWAP
+	/* do not set the _PAGE_ACCESSED bit of a non-present page */
+	andi.	r11, r10, _PAGE_PRESENT
+	beq	4f
+	ori	r10, r10, _PAGE_ACCESSED
+	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
+	stw	r10, 0(r11)
+4:
+#else
	ori	r10, r10, _PAGE_ACCESSED
	stw	r10, 0(r11)
+#endif

	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 21, 22 and 28 must be clear.
@@ -398,8 +408,17 @@ DataStoreTLBMiss:
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11

-	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
+#ifdef CONFIG_SWAP
+	/* do not set the _PAGE_ACCESSED bit of a non-present page */
+	andi.	r11, r10, _PAGE_PRESENT
+	beq	4f
	ori	r10, r10, _PAGE_ACCESSED
+4:
+	/* and update pte in table */
+#else
+	ori	r10, r10, _PAGE_ACCESSED
+#endif
+	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
	stw	r10, 0(r11)

	/* The Linux PTE won't go exactly into the MMU TLB.
@@ -507,7 +526,16 @@ DataTLBError:

	/* Update 'changed', among others.
	*/
+#ifdef CONFIG_SWAP
+	ori	r10, r10, _PAGE_DIRTY|_PAGE_HWWRITE
+	/* do not set the _PAGE_ACCESSED bit of a non-present page */
+	andi.	r11, r10, _PAGE_PRESENT
+	beq	4f
+	ori	r10, r10, _PAGE_ACCESSED
+4:
+#else
	ori	r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+#endif
	mfspr	r11, SPRN_MD_TWC		/* Get pte address again */
	stw	r10, 0(r11)		/* and update pte in table */

+0 −8
@@ -339,14 +339,6 @@ extern int icache_44x_need_flush;
#define _PMD_PAGE_MASK	0x000c
#define _PMD_PAGE_8M	0x000c

-/*
- * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE
- * for an address even if _PAGE_PRESENT is not set, as a performance
- * optimization.  This is a bug if you ever want to use swap unless
- * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific
- * definitions for __swp_entry etc. below, which would be gross.
- *  -- paulus
- */
#define _PTE_NONE_MASK _PAGE_ACCESSED

#else /* CONFIG_6xx */
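
For background on the comment removed above: when a page is swapped out, the encoded swap entry (type and offset) is kept in that page's now non-present PTE, with only the low flag bits left clear. If the TLB-miss handler then ORs _PAGE_ACCESSED into such a PTE, it rewrites bits that belong to the encoded entry. The small standalone C program below demonstrates the failure mode; the flag value and shift are made up for illustration and are not the kernel's actual ppc32 encoding.

#include <stdio.h>

/* Illustrative values only -- not the kernel's real ppc32 PTE layout. */
#define PAGE_ACCESSED  0x020UL
#define SWP_SHIFT      3	/* swap entry stored above the low flag bits */

#define swp_to_pte(swp)  ((swp) << SWP_SHIFT)
#define pte_to_swp(pte)  ((pte) >> SWP_SHIFT)

int main(void)
{
	unsigned long swp = 0x1230;		/* encoded swap type + offset */
	unsigned long pte = swp_to_pte(swp);	/* non-present PTE holding it */

	/* old 8xx behaviour: mark the page accessed even though it is not present */
	pte |= PAGE_ACCESSED;

	/* prints "stored 0x1230, read back 0x1234": the entry is corrupted
	 * because PAGE_ACCESSED lands inside the encoded offset */
	printf("stored %#lx, read back %#lx\n", swp, pte_to_swp(pte));
	return 0;
}

This also illustrates the removed remark that the old behaviour would only have been harmless if _PAGE_ACCESSED sat in the low bits that the swap-entry encoding skips.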