
Commit e701d269 authored by Benjamin Herrenschmidt, committed by Josh Boyer

[POWERPC] 4xx: Fix 4xx flush_tlb_page()



On 4xx CPUs, the current implementation of flush_tlb_page() uses
a low-level _tlbie() assembly function that only works for the
current PID. Thus, invalidations caused by, for example, a COW
fault triggered by get_user_pages() from a different context will
not work properly, causing, among other things, gdb breakpoints
to fail.

This patch adds a "pid" argument to _tlbie() on 4xx processors,
and uses it to flush entries in the right context. FSL BookE
also gets the argument, but it seems they don't need it (their
tlbivax form ignores the PID when invalidating, according to the
document I have).
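The new _tlbie(va, pid) calling convention is visible in the fault.c hunks below. As a minimal illustrative sketch, assuming a flush_tlb_page() wrapper in the style of the 2.6-era tlbflush.h headers (that wrapper is not among the hunks shown on this page), a caller would now look like:

/* Illustrative sketch only: flush the page in the context that owns
 * the mapping, not in whichever context happens to be running.  The
 * NULL-vma fallback to PID 0 is an assumption for kernel addresses.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	unsigned int pid = vma ? vma->vm_mm->context.id : 0;

	_tlbie(vmaddr, pid);	/* new second argument: the MMU PID/TID */
}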

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
parent 57d75561
+16 −7
@@ -288,7 +288,16 @@ _GLOBAL(_tlbia)
  */
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
 	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -297,23 +306,23 @@ _GLOBAL(_tlbie)
 	tlbwe	r3, r3, TLB_TAG
 	isync
 10:
+
 #elif defined(CONFIG_44x)
-	mfspr	r4,SPRN_MMUCR
-	mfspr	r5,SPRN_PID			/* Get PID */
-	rlwimi	r4,r5,0,24,31			/* Set TID */
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31			/* Set TID */
 
 	/* We have to run the search with interrupts disabled, even critical
 	 * and debug interrupts (in fact the only critical exceptions we have
 	 * are debug and machine check).  Otherwise  an interrupt which causes
 	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r5
+	mfmsr	r4
 	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
 	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r5,r6
+	andc	r6,r4,r6
 	mtmsr	r6
-	mtspr	SPRN_MMUCR,r4
+	mtspr	SPRN_MMUCR,r5
 	tlbsx.	r3, 0, r3
-	mtmsr	r5
+	mtmsr	r4
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,
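For readers less fluent in PowerPC assembly, the new 40x sequence above amounts to the following C-like sketch. It is explanatory only: the tlbsx./tlbwe search-and-invalidate step has no C equivalent and stays a comment, and mfmsr/mtmsr/mfspr/mtspr stand for the usual kernel register accessors.

/* Sketch of the new 40x path: tlbsx. searches the TLB using the PID in
 * SPRN_PID, so the target PID is installed temporarily, with external
 * interrupts masked so nothing else can run under the borrowed PID.
 */
static void tlbie_40x_sketch(unsigned long va, unsigned int pid)
{
	unsigned long msr = mfmsr();
	unsigned int saved_pid = mfspr(SPRN_PID);

	mtmsr(msr & ~MSR_EE);		/* wrteei 0: mask external interrupts */
	mtspr(SPRN_PID, pid);		/* search in the target context */
	/* tlbsx./tlbwe: find the TLB entry mapping 'va', clear its tag */
	mtspr(SPRN_PID, saved_pid);	/* back to the caller's PID */
	mtmsr(msr);			/* wrtee: restore MSR[EE] */
}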
+1 −1
@@ -309,7 +309,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 					set_bit(PG_arch_1, &page->flags);
 				}
 				pte_update(ptep, 0, _PAGE_HWEXEC);
-				_tlbie(address);
+				_tlbie(address, mm->context.id);
 				pte_unmap_unlock(ptep, ptl);
 				up_read(&mm->mmap_sem);
 				return 0;
+2 −2
@@ -61,12 +61,12 @@ extern unsigned long total_lowmem;
 #define mmu_mapin_ram()		(0UL)
 
 #elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 
 #elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 extern void adjust_total_lowmem(void);
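The flush_HPTE() change is mechanical: the first parameter, previously an ignored placeholder X, becomes the pid that _tlbie() now requires. A hypothetical call site, for illustration only (demote_pte and its exact arguments are not taken from this commit):

/* Hypothetical helper: clear the HW-exec bit on a PTE belonging to
 * 'mm', then flush the stale translation in that mm's context rather
 * than in whichever context is current.
 */
static void demote_pte(struct mm_struct *mm, pte_t *ptep, pmd_t *pmd,
		       unsigned long address)
{
	pte_update(ptep, _PAGE_HWEXEC, 0);
	flush_HPTE(mm->context.id, address, pmd_val(*pmd));
}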
+15 −7
@@ -224,7 +224,16 @@ _GLOBAL(_tlbia)
  */
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
 	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -234,22 +243,21 @@ _GLOBAL(_tlbie)
 	isync
 10:
 #elif defined(CONFIG_44x)
-	mfspr	r4,SPRN_MMUCR
-	mfspr	r5,SPRN_PID			/* Get PID */
-	rlwimi	r4,r5,0,24,31			/* Set TID */
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31			/* Set TID */
 
 	/* We have to run the search with interrupts disabled, even critical
 	 * and debug interrupts (in fact the only critical exceptions we have
 	 * are debug and machine check).  Otherwise  an interrupt which causes
 	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r5
+	mfmsr	r4
 	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
 	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r5,r6
+	andc	r6,r4,r6
 	mtmsr	r6
-	mtspr	SPRN_MMUCR,r4
+	mtspr	SPRN_MMUCR,r5
 	tlbsx.	r3, 0, r3
-	mtmsr	r5
+	mtmsr	r4
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,
+1 −1
@@ -227,7 +227,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 					set_bit(PG_arch_1, &page->flags);
 				}
 				pte_update(ptep, 0, _PAGE_HWEXEC);
-				_tlbie(address);
+				_tlbie(address, mm->context.id);
 				pte_unmap_unlock(ptep, ptl);
 				up_read(&mm->mmap_sem);
 				return 0;