
Commit caca285e authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/radix: Use STD_MMU_64 to properly isolate hash related code



We also use MMU_FTR_RADIX to branch out from code paths specific to
hash.

No functionality change.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent a8ed87c9
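
The C side of the change follows one recurring pattern: hash-specific code is compiled in only under CONFIG_PPC_STD_MMU_64 and, where a kernel built with both MMUs may boot on radix, is skipped at run time via radix_enabled(). A minimal sketch of that pattern, not taken from this commit (setup_hash_only_state() is a hypothetical helper):

#ifdef CONFIG_PPC_STD_MMU_64
/* Sketch only: compile-time guard plus run-time radix check. */
static void example_hash_only_setup(void)
{
	if (radix_enabled())		/* radix boot: nothing hash-specific to do */
		return;
	setup_hash_only_state();	/* hypothetical hash-MMU-only helper */
}
#endif

The assembly hunks below use the run-time equivalent: MMU feature sections (BEGIN_MMU_FTR_SECTION ... END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)), which are patched at boot so that radix CPUs branch around the hash-only instructions.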
+5 −2
@@ -529,7 +529,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	std	r6,PACACURRENT(r13)	/* Set new 'current' */
 
 	ld	r8,KSP(r4)	/* new stack pointer */
-#ifdef CONFIG_PPC_BOOK3S
+#ifdef CONFIG_PPC_STD_MMU_64
+BEGIN_MMU_FTR_SECTION
+	b	2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
 BEGIN_FTR_SECTION
 	clrrdi	r6,r8,28	/* get its ESID */
 	clrrdi	r9,r1,28	/* get current sp ESID */
@@ -575,7 +578,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	slbmte	r7,r0
 	isync
 2:
-#endif /* !CONFIG_PPC_BOOK3S */
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
 	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
+23 −5
@@ -939,7 +939,13 @@ data_access_common:
 	ld	r3,PACA_EXGEN+EX_DAR(r13)
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	li	r5,0x300
+	std	r3,_DAR(r1)
+	std	r4,_DSISR(r1)
+BEGIN_MMU_FTR_SECTION
 	b	do_hash_page		/* Try to handle as hpte fault */
+MMU_FTR_SECTION_ELSE
+	b	handle_page_fault
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
 
 	.align  7
 	.globl  h_data_storage_common
@@ -964,7 +970,13 @@ instruction_access_common:
 	ld	r3,_NIP(r1)
 	andis.	r4,r12,0x5820
 	li	r5,0x400
+	std	r3,_DAR(r1)
+	std	r4,_DSISR(r1)
+BEGIN_MMU_FTR_SECTION
 	b	do_hash_page		/* Try to handle as hpte fault */
+MMU_FTR_SECTION_ELSE
+	b	handle_page_fault
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
 
 	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
 
@@ -1375,8 +1387,11 @@ slb_miss_realmode:
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
 
+#ifdef CONFIG_PPC_STD_MMU_64
+BEGIN_MMU_FTR_SECTION
 	bl	slb_allocate_realmode
-
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
+#endif
 	/* All done -- return from exception. */
 
 	ld	r10,PACA_EXSLB+EX_LR(r13)
@@ -1384,7 +1399,9 @@ slb_miss_realmode:
 	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
 
 	mtlr	r10
-
+BEGIN_MMU_FTR_SECTION
+	b	2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
 	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
 	beq-	2f
 
@@ -1435,9 +1452,7 @@ power4_fixup_nap:
  */
 	.align	7
 do_hash_page:
-	std	r3,_DAR(r1)
-	std	r4,_DSISR(r1)
-
+#ifdef CONFIG_PPC_STD_MMU_64
 	andis.	r0,r4,0xa410		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
 	andis.  r0,r4,DSISR_DABRMATCH@h
@@ -1465,6 +1480,7 @@ do_hash_page:
 
 	/* Error */
 	blt-	13f
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
@@ -1491,6 +1507,7 @@ handle_dabr_fault:
 12:	b       ret_from_except_lite
 
 
+#ifdef CONFIG_PPC_STD_MMU_64
 /* We have a page fault that hash_page could handle but HV refused
  * the PTE insertion
  */
@@ -1500,6 +1517,7 @@ handle_dabr_fault:
 	ld	r4,_DAR(r1)
 	bl	low_hash_fault
 	b	ret_from_except
+#endif
 
 /*
  * We come here as a result of a DSI at a point where we don't want
+4 −2
@@ -76,6 +76,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 	 * end of the blocked region (begin >= high).  Use the
 	 * boolean identity !(a || b)  === (!a && !b).
 	 */
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (htab_address) {
 		low = __pa(htab_address);
 		high = low + htab_size_bytes;
@@ -88,6 +89,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 				return -ETXTBSY;
 		}
 	}
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 	/* We also should not overwrite the tce tables */
 	for_each_node_by_type(node, "pci") {
@@ -381,7 +383,7 @@ void default_machine_kexec(struct kimage *image)
 	/* NOTREACHED */
 }
 
-#ifndef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_STD_MMU_64
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
 static unsigned long htab_size;
@@ -428,4 +430,4 @@ static int __init export_htab_values(void)
 	return 0;
 }
 late_initcall(export_htab_values);
-#endif /* !CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_STD_MMU_64 */
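
The first kexec hunk above sits under a comment that relies on the identity !(a || b) == (!a && !b): a kexec segment is acceptable only if it lies entirely below the blocked region (end <= low) or entirely above it (begin >= high), so the rejection test collapses to a single overlap check. A standalone sketch of that check, with a hypothetical function name (begin/end and low/high follow the comment's naming):

/* Overlap iff the segment is neither entirely below (end <= low)
 * nor entirely above (begin >= high) the blocked region, i.e.
 * !(end <= low || begin >= high)  ==  (end > low && begin < high).
 */
static int overlaps_blocked_region(unsigned long begin, unsigned long end,
				   unsigned long low, unsigned long high)
{
	return end > low && begin < high;
}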
+10 −0
@@ -80,6 +80,7 @@ void __flush_tlb_power9(unsigned int action)
 
 
 /* flush SLBs and reload */
+#ifdef CONFIG_PPC_MMU_STD_64
 static void flush_and_reload_slb(void)
 {
 	struct slb_shadow *slb;
@@ -113,6 +114,7 @@ static void flush_and_reload_slb(void)
 		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
 	}
 }
+#endif
 
 static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
 {
@@ -123,6 +125,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
 	 * reset the error bits whenever we handle them so that at the end
 	 * we can check whether we handled all of them or not.
 	 * */
+#ifdef CONFIG_PPC_MMU_STD_64
 	if (dsisr & slb_error_bits) {
 		flush_and_reload_slb();
 		/* reset error bits */
@@ -134,6 +137,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
 		/* reset error bits */
 		dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
 	}
+#endif
 	/* Any other errors we don't understand? */
 	if (dsisr & 0xffffffffUL)
 		handled = 0;
@@ -153,6 +157,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
 	switch (P7_SRR1_MC_IFETCH(srr1)) {
 	case 0:
 		break;
+#ifdef CONFIG_PPC_MMU_STD_64
 	case P7_SRR1_MC_IFETCH_SLB_PARITY:
 	case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
 		/* flush and reload SLBs for SLB errors. */
@@ -165,6 +170,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
 			handled = 1;
 		}
 		break;
+#endif
 	default:
 		break;
 	}
@@ -178,10 +184,12 @@ static long mce_handle_ierror_p7(uint64_t srr1)
 
 	handled = mce_handle_common_ierror(srr1);
 
+#ifdef CONFIG_PPC_MMU_STD_64
 	if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
 		flush_and_reload_slb();
 		handled = 1;
 	}
+#endif
 	return handled;
 }
 
@@ -324,10 +332,12 @@ static long mce_handle_ierror_p8(uint64_t srr1)
 
 	handled = mce_handle_common_ierror(srr1);
 
+#ifdef CONFIG_PPC_MMU_STD_64
 	if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
 		flush_and_reload_slb();
 		handled = 1;
 	}
+#endif
 	return handled;
 }
 
+9 −6
@@ -1079,7 +1079,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	}
 #endif /* CONFIG_PPC64 */
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_STD_MMU_64
 	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	if (batch->active) {
 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
@@ -1087,7 +1087,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 			__flush_tlb_pending(batch);
 		batch->active = 0;
 	}
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	switch_booke_debug_regs(&new->thread.debug);
@@ -1133,7 +1133,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 	last = _switch(old_thread, new_thread);
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
 		batch = this_cpu_ptr(&ppc64_tlb_batch);
@@ -1142,8 +1142,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 	if (current_thread_info()->task->thread.regs)
 		restore_math(current_thread_info()->task->thread.regs);
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 	return last;
 }
@@ -1378,6 +1377,9 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
 	unsigned long sp_vsid;
 	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
 
+	if (radix_enabled())
+		return;
+
 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
 		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
 			<< SLB_VSID_SHIFT_1T;
@@ -1926,7 +1928,8 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	 * the heap, we can put it above 1TB so it is backed by a 1TB
 	 * segment. Otherwise the heap will be in the bottom 1TB
 	 * which always uses 256MB segments and this may result in a
-	 * performance penalty.
+	 * performance penalty. We don't need to worry about radix. For
+	 * radix, mmu_highuser_ssize remains unchanged from 256MB.
 	 */
 	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);