
Commit 3c1e7165 authored by Marc Zyngier, committed by Christoffer Dall

arm/arm64: KVM: Use set/way op trapping to track the state of the caches



Trying to emulate the behaviour of set/way cache ops is fairly
pointless, as there are too many ways we can end up missing stuff.
Also, there are some system caches out there that simply ignore
set/way operations.

So instead of trying to implement them, let's convert these ops to
VA ops, and use them as a way to re-enable the trapping of VM ops.
That way, we can detect the point when the MMU/caches are turned off,
and do a full VM flush (which is what the guest was trying to do
anyway).

This allows a 32bit zImage to boot on the APM thingy, and will
probably help bootloaders in general.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent f3747379
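
The two helpers this patch introduces are declared in kvm_mmu.h below,
but their bodies land in arch/arm/kvm/mmu.c, which this page has not
loaded. A sketch of their logic, reconstructed from the commit message
rather than quoted from the patch (stage2_flush_vm presumably loses its
header declaration because its remaining callers are in mmu.c itself):

	/* Sketch only: reconstructed from the commit message. */
	void kvm_set_way_flush(struct kvm_vcpu *vcpu)
	{
		unsigned long hcr = vcpu_get_hcr(vcpu);

		/*
		 * On the first set/way op (HCR_TVM not yet set), flush
		 * the whole VM by VA and start trapping VM register
		 * writes, so we can catch the MMU/caches being turned
		 * off later.
		 */
		if (!(hcr & HCR_TVM)) {
			stage2_flush_vm(vcpu->kvm);
			vcpu_set_hcr(vcpu, hcr | HCR_TVM);
		}
	}

	void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
	{
		bool now_enabled = vcpu_has_cache_enabled(vcpu);

		/* Flush the whole VM on any cache on/off transition. */
		if (now_enabled != was_enabled)
			stage2_flush_vm(vcpu->kvm);

		/* Caches back on: stop trapping VM ops until the next set/way op. */
		if (now_enabled)
			vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
	}

Net effect: the first set/way op costs one full flush and turns on
HCR_TVM; the SCTLR write that toggles the caches costs another flush
and, once the caches are on, turns trapping back off.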
arch/arm/include/asm/kvm_emulate.h  +10 −0
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+	vcpu->arch.hcr = hcr;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;
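
The arm64 half of this patch is not among the hunks shown here (the
page left it unloaded). Judging by the commit title and the arm side
above, it presumably adds the same accessor pair over arm64's hcr_el2
field, along these lines (illustrative reconstruction, not the
verbatim hunk):

	/* Assumed arm64 counterpart; field name per arm64's struct kvm_vcpu_arch. */
	static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.hcr_el2;
	}

	static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
	{
		vcpu->arch.hcr_el2 = hcr;
	}

The accessors let the shared cache-tracking logic flip HCR_TVM without
knowing which architecture's HCR field it is touching.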
arch/arm/include/asm/kvm_host.h  +0 −3
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
-	/* dcache set/way operation pending */
-	int last_pcpu;
-	cpumask_t require_dcache_flush;
 
 	/* Don't run the guest on this vcpu */
 	bool pause;
arch/arm/include/asm/kvm_mmu.h  +2 −1
@@ -190,7 +190,8 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 
 #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif	/* !__ASSEMBLY__ */
 
arch/arm/kvm/arm.c  +0 −10
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
-	/*
-	 * Check whether this vcpu requires the cache to be flushed on
-	 * this physical CPU. This is a consequence of doing dcache
-	 * operations by set/way on this vcpu. We do it here to be in
-	 * a non-preemptible section.
-	 */
-	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
-		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
 	kvm_arm_set_running_vcpu(vcpu);
 }
 
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
-		vcpu->arch.last_pcpu = smp_processor_id();
 		kvm_guest_exit();
 		trace_kvm_exit(*vcpu_pc(vcpu));
 		/*
arch/arm/kvm/coproc.c  +14 −56
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt1);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-		break;
-
-	case 10:		/* DCCSW */
-		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set.  If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
  *
  * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct coproc_params *p,
-			  const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+		   const struct coproc_params *p,
+		   const struct coproc_reg *r)
 {
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
 	BUG_ON(!p->is_write);
 
 	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
 		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-	return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-		  const struct coproc_params *p,
-		  const struct coproc_reg *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
 
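To see why emulating these ops one at a time is a losing game: a
32-bit guest flushing its caches before turning the MMU off walks
every set and way of every cache level, and each MCR traps to the
hypervisor. Worse, set/way ops only act on the caches the CPU core
knows about, so outer/system caches never see them. An illustrative
guest-side loop follows; the helper is hypothetical and not from this
patch (real kernels use v7_flush_dcache_all, which discovers the
geometry from CLIDR/CCSIDR):

	/*
	 * Illustrative only: clean+invalidate one cache level by set/way.
	 * Every MCR below traps to KVM; a whole cache is thousands of traps.
	 * DCCISW encoding: level in bits [3:1], set bits start at the line
	 * offset (set_shift = log2(line_bytes)), way bits sit at the top of
	 * the word (way_shift = 32 - log2(nr_ways)).
	 */
	static void guest_flush_level_by_set_way(unsigned int level,
						 unsigned int nr_sets,
						 unsigned int nr_ways,
						 unsigned int set_shift,
						 unsigned int way_shift)
	{
		unsigned int set, way;
		unsigned long val;

		for (way = 0; way < nr_ways; way++)
			for (set = 0; set < nr_sets; set++) {
				val = ((unsigned long)way << way_shift) |
				      ((unsigned long)set << set_shift) |
				      (level << 1);
				/* DCCISW: clean+invalidate dcache line by set/way */
				asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
			}
	}

With this patch, only the first such MCR does real work (one full
VA-based flush, plus setting HCR_TVM); the rest are cheap traps, and
the flush that actually matters happens when the guest toggles its
caches via SCTLR.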