
Commit 02512b2b authored by Paolo Bonzini

Merge tag 'kvm-arm-fixes-3.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

Second round of fixes for KVM/ARM for 3.19.

Fixes memory corruption issues on APM platforms and swapping issues on
DMA-coherent systems.
parents 26bc420b 0d3e4d4f
arch/arm/include/asm/kvm_emulate.h  +10 −0
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
	vcpu->arch.hcr = HCR_GUEST_MASK;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr = hcr;
}

static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
	return 1;
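
The two accessors added above give common code a single way to read and update vcpu->arch.hcr instead of poking the field directly. As a rough illustration (the helper below is hypothetical and not part of this commit; only vcpu_get_hcr(), vcpu_set_hcr() and HCR_TVM come from the kernel), a caller could toggle trapping of VM register writes like this:

#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>	/* HCR_TVM */
#include <asm/kvm_emulate.h>	/* vcpu_get_hcr(), vcpu_set_hcr() */

/* Hypothetical helper: enable or disable trapping of VM register writes. */
static void example_set_tvm(struct kvm_vcpu *vcpu, bool trap)
{
	unsigned long hcr = vcpu_get_hcr(vcpu);

	if (trap)
		hcr |= HCR_TVM;
	else
		hcr &= ~HCR_TVM;

	vcpu_set_hcr(vcpu, hcr);
}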
arch/arm/include/asm/kvm_host.h  +0 −3
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */
	/* dcache set/way operation pending */
	int last_pcpu;
	cpumask_t require_dcache_flush;

	/* Don't run the guest on this vcpu */
	bool pause;
arch/arm/include/asm/kvm_mmu.h  +67 −10
@@ -44,6 +44,7 @@

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>

@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}

static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc((void *)hva, size);
	
	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */
	if (icache_is_pipt()) {
		__cpuc_coherent_user_range(hva, hva + size);
	} else if (!icache_is_vivt_asid_tagged()) {

	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;

	VM_BUG_ON(size & ~PAGE_MASK);

	if (!need_flush && !icache_is_pipt())
		goto vipt_cache;

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		if (need_flush)
			kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		if (icache_is_pipt())
			__cpuc_coherent_user_range((unsigned long)va,
						   (unsigned long)va + PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

vipt_cache:
	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void stage2_flush_vm(struct kvm *kvm);
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

#endif	/* !__ASSEMBLY__ */
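
The rewritten __coherent_cache_guest_page() no longer takes a user-space address: it walks the range one page at a time through kmap_atomic_pfn(), so the flush works even when the user mapping of the page has gone away (the swapping issue named in the commit message). Below is a minimal sketch of how a fault handler might use it; the function itself is illustrative only, while gfn_to_pfn(), is_error_pfn() and kvm_release_pfn_clean() are standard KVM helpers and __coherent_cache_guest_page() comes from the hunk above:

#include <linux/kvm_host.h>	/* gfn_to_pfn(), kvm_release_pfn_clean() */
#include <asm/kvm_mmu.h>	/* __coherent_cache_guest_page() */

/* Illustrative only: make one guest page coherent before mapping it. */
static void example_prepare_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
				       bool ipa_uncached)
{
	pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);

	if (is_error_pfn(pfn))
		return;

	/* Clean/invalidate through a kernel mapping of the pfn itself. */
	__coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, ipa_uncached);

	kvm_release_pfn_clean(pfn);
}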

arch/arm/kvm/arm.c  +0 −10
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	vcpu->cpu = cpu;
	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

	/*
	 * Check whether this vcpu requires the cache to be flushed on
	 * this physical CPU. This is a consequence of doing dcache
	 * operations by set/way on this vcpu. We do it here to be in
	 * a non-preemptible section.
	 */
	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */

	kvm_arm_set_running_vcpu(vcpu);
}

@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->arch.last_pcpu = smp_processor_id();
		kvm_guest_exit();
		trace_kvm_exit(*vcpu_pc(vcpu));
		/*
arch/arm/kvm/coproc.c  +14 −56
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
	return true;
}

/* See note at ARM ARM B1.14.4 */
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt1);

	switch (p->CRm) {
	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:		/* DCCISW */
		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
		break;

	case 10:		/* DCCSW */
		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
		break;
	}

done:
	put_cpu();

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.
 * is set.  If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
bool access_vm_reg(struct kvm_vcpu *vcpu,
		   const struct coproc_params *p,
		   const struct coproc_reg *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
	if (p->is_64bit)
		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);

	return true;
}

/*
 * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
 * guest enables the MMU, we stop trapping the VM sys_regs and leave
 * it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_sctlr(struct kvm_vcpu *vcpu,
		  const struct coproc_params *p,
		  const struct coproc_reg *r)
{
	access_vm_reg(vcpu, p, r);

	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
		vcpu->arch.hcr &= ~HCR_TVM;
		stage2_flush_vm(vcpu->kvm);
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
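
access_dcsw() and access_vm_reg() now only record what the guest did and defer the cache maintenance to kvm_set_way_flush() and kvm_toggle_cache(), which are declared in the kvm_mmu.h hunk above but defined outside the hunks shown here. The sketch below is an approximation of their intended behaviour, inferred from the declarations and the surrounding comments, not a copy of the commit's code:

/* Approximate semantics only; the real definitions are not in the hunks shown. */
static void sketch_kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = vcpu_get_hcr(vcpu);

	/*
	 * On the first set/way operation, clean the whole stage-2 address
	 * space once and start trapping VM register writes (HCR_TVM) so
	 * that later cache/MMU toggles by the guest are noticed.
	 */
	if (!(hcr & HCR_TVM)) {
		stage2_flush_vm(vcpu->kvm);
		vcpu_set_hcr(vcpu, hcr | HCR_TVM);
	}
}

static void sketch_kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/* Any change in the guest's cache state needs a full stage-2 clean. */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* With the caches on, VM register writes no longer need trapping. */
	if (now_enabled)
		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
}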
