
Commit 4edf00a4 authored by Paul Burton, committed by Ralf Baechle

MIPS: Retrieve ASID masks using function accepting struct cpuinfo_mips

In preparation for supporting variable ASID masks, retrieve ASID masks
using functions in asm/cpu-info.h which accept struct cpuinfo_mips. This
will allow those functions to determine the ASID mask based upon the CPU
in a later patch. This also allows for the r3k & r8k cases to be handled
in Kconfig, which is arguably cleaner than the previous #ifdefs.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13210/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent f1b711c6
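
As a quick sanity check of the commit's claim, the (CONFIG_MIPS_ASID_SHIFT, CONFIG_MIPS_ASID_BITS) pairs introduced below reproduce the constants previously hardcoded under #ifdef. The following is a standalone userspace sketch, not part of the patch; its helpers mirror the cpu_asid_inc()/cpu_asid_mask() functions added in asm/cpu-info.h, and the expected values come from the #ifdef block removed from mmu_context.h:

	/* Standalone sketch: check Kconfig-derived ASID values against the
	 * old hardcoded ones. */
	#include <assert.h>

	static unsigned long asid_inc(int shift)
	{
		return 1UL << shift;
	}

	static unsigned long asid_mask(int shift, int bits)
	{
		return ((1UL << bits) - 1) << shift;
	}

	int main(void)
	{
		/* R3000/TX39XX: shift 6, bits 6 -> old ASID_INC 0x40, ASID_MASK 0xfc0 */
		assert(asid_inc(6) == 0x40 && asid_mask(6, 6) == 0xfc0);
		/* R8000: shift 4, bits 8 -> old ASID_INC 0x10, ASID_MASK 0xff0 */
		assert(asid_inc(4) == 0x10 && asid_mask(4, 8) == 0xff0);
		/* Everything else: shift 0, bits 8 -> old ASID_INC 0x1, ASID_MASK 0xff */
		assert(asid_inc(0) == 0x1 && asid_mask(0, 8) == 0xff);
		return 0;
	}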
arch/mips/Kconfig (+11 −0)
@@ -2449,6 +2449,17 @@ config CPU_R4000_WORKAROUNDS
 config CPU_R4400_WORKAROUNDS
 	bool
 
+config MIPS_ASID_SHIFT
+	int
+	default 6 if CPU_R3000 || CPU_TX39XX
+	default 4 if CPU_R8000
+	default 0
+
+config MIPS_ASID_BITS
+	int
+	default 6 if CPU_R3000 || CPU_TX39XX
+	default 8
+
 #
 # - Highmem only makes sense for the 32-bit kernel.
 # - The current highmem code will only work properly on physically indexed
arch/mips/include/asm/cpu-info.h (+10 −0)
@@ -132,4 +132,14 @@ struct proc_cpuinfo_notifier_args {
 # define cpu_vpe_id(cpuinfo)	({ (void)cpuinfo; 0; })
 #endif
 
+static inline unsigned long cpu_asid_inc(void)
+{
+	return 1 << CONFIG_MIPS_ASID_SHIFT;
+}
+
+static inline unsigned long cpu_asid_mask(struct cpuinfo_mips *cpuinfo)
+{
+	return ((1 << CONFIG_MIPS_ASID_BITS) - 1) << CONFIG_MIPS_ASID_SHIFT;
+}
+
 #endif /* __ASM_CPU_INFO_H */
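
Note that cpu_asid_mask() ignores its cpuinfo argument for now; the argument exists so that the later patch mentioned in the commit message can derive the mask from the actual CPU. Call sites pass the cpuinfo_mips they care about, e.g. (mirroring the call sites updated in the hunks below):

	unsigned long mask = cpu_asid_mask(&cpu_data[cpu]);	/* a specific CPU */
	unsigned long cur = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);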
arch/mips/include/asm/mmu_context.h (+18 −23)
@@ -65,37 +65,32 @@ extern unsigned long pgd_current[];
 	back_to_back_c0_hazard();					\
 	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
-#define ASID_INC	0x40
-#define ASID_MASK	0xfc0
-
-#elif defined(CONFIG_CPU_R8000)
-
-#define ASID_INC	0x10
-#define ASID_MASK	0xff0
-
-#else /* FIXME: not correct for R6000 */
+/*
+ *  All unused by hardware upper bits will be considered
+ *  as a software asid extension.
+ */
+static unsigned long asid_version_mask(unsigned int cpu)
+{
+	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 
-#define ASID_INC	0x1
-#define ASID_MASK	0xff
+	return ~(asid_mask | (asid_mask - 1));
+}
 
-#endif
+static unsigned long asid_first_version(unsigned int cpu)
+{
+	return ~asid_version_mask(cpu) + 1;
+}
 
 #define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
+#define cpu_asid(cpu, mm) \
+	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- *  All unused by hardware upper bits will be considered
- *  as a software asid extension.
- */
-#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
 
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
@@ -104,7 +99,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);
 
-	if (! ((asid += ASID_INC) & ASID_MASK) ) {
+	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 #ifdef CONFIG_KVM
@@ -113,7 +108,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 		local_flush_tlb_all();	/* start new asid cycle */
 #endif
 		if (!asid)		/* fix version if needed */
-			asid = ASID_FIRST_VERSION;
+			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -145,7 +140,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,

 	htw_stop();
 	/* Check if our ASID is of an older version and thus invalid */
-	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
+	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
 		get_new_mmu_context(next, cpu);
 	write_c0_entryhi(cpu_asid(cpu, next));
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
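
For a concrete feel of the version arithmetic, here is an illustrative sketch (taking the default configuration of shift 0 and 8 ASID bits as an example):

	unsigned long asid_mask = 0xff;				/* cpu_asid_mask() result */
	unsigned long version_mask = ~(asid_mask | (asid_mask - 1));	/* ~0xff: every bit above the hardware ASID field */
	unsigned long first = ~version_mask + 1;		/* 0x100: one past the ASID field, the first version number */

For R3000-class CPUs (mask 0xfc0) the same formulas give a version mask of ~0xfff and a first version of 0x1000, matching the behaviour of the removed ASID_VERSION_MASK/ASID_FIRST_VERSION macros.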
arch/mips/kernel/traps.c (+1 −1)
@@ -2136,7 +2136,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	}
 
 	if (!cpu_data[cpu].asid_cache)
-		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+		cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
arch/mips/kvm/tlb.c (+19 −11)
@@ -49,12 +49,18 @@ EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);

 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_kernel_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_user_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
@@ -78,7 +84,8 @@ void kvm_mips_dump_host_tlbs(void)
 	old_pagemask = read_c0_pagemask();
 
 	kvm_info("HOST TLBs:\n");
-	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
+		 cpu_asid_mask(&current_cpu_data));
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -564,15 +571,15 @@ void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
 	unsigned long asid = asid_cache(cpu);
 
-	asid += ASID_INC;
-	if (!(asid & ASID_MASK)) {
+	asid += cpu_asid_inc();
+	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 
 		kvm_local_flush_tlb_all();      /* start new asid cycle */
 
 		if (!asid)      /* fix version if needed */
-			asid = ASID_FIRST_VERSION;
+			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -627,6 +634,7 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 	unsigned long flags;
 	int newasid = 0;

@@ -637,7 +645,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	local_irq_save(flags);
 
 	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-							ASID_VERSION_MASK) {
+						asid_version_mask(cpu)) {
 		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 		vcpu->arch.guest_kernel_asid[cpu] =
 		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
@@ -672,7 +680,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		if (current->flags & PF_VCPU) {
 			write_c0_entryhi(vcpu->arch.
-					 preempt_entryhi & ASID_MASK);
+					 preempt_entryhi & asid_mask);
 			ehb();
 		}
 	} else {
@@ -687,11 +695,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			if (KVM_GUEST_KERNEL_MODE(vcpu))
 				write_c0_entryhi(vcpu->arch.
 						 guest_kernel_asid[cpu] &
-						 ASID_MASK);
+						 asid_mask);
 			else
 				write_c0_entryhi(vcpu->arch.
 						 guest_user_asid[cpu] &
-						 ASID_MASK);
+						 asid_mask);
 			ehb();
 		}
 	}
@@ -721,7 +729,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_mips_callbacks->vcpu_get_regs(vcpu);
 
 	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     ASID_VERSION_MASK)) {
+	     asid_version_mask(cpu))) {
 		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
 			  cpu_context(cpu, current->mm));
 		drop_mmu_context(current->mm, cpu);