Commit db281766 authored by Linus Torvalds
Pull ARM KVM fixes from Paolo Bonzini:

 - a series of fixes to deal with the aliasing between the sp and xzr
   register

 - a fix for the cache flush fix that went in -rc3

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  ARM/arm64: KVM: correct PTE uncachedness check
  arm64: KVM: Get rid of old vcpu_reg()
  arm64: KVM: Correctly handle zero register in system register accesses
  arm64: KVM: Remove const from struct sys_reg_params
  arm64: KVM: Correctly handle zero register during MMIO
parents 8cdef969 09922076
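
The sp/xzr aliasing in the first bullet comes from the arm64 saved-register layout: struct user_pt_regs holds x0..x30 in regs[31], with sp as the very next field, so the old pointer-returning vcpu_reg() handed out the address of sp whenever a trapped instruction encoded register 31, which in the syndrome encodings means xzr/wzr. The userspace toy below is a model, not kernel code (the fake_* names are invented); it reproduces that layout and contrasts the old accessor with the fixed semantics that appear in the arm64 hunk further down:

#include <stdio.h>

struct fake_pt_regs {
	unsigned long regs[31];	/* x0..x30 */
	unsigned long sp;	/* laid out immediately after regs[30] */
};

/* Old-style accessor: returns a pointer, so index 31 is formally out of
 * bounds and in practice lands on sp, which is exactly the bug. */
static unsigned long *fake_vcpu_reg(struct fake_pt_regs *r, unsigned int n)
{
	return &r->regs[n];
}

/* Fixed-style accessors, mirroring the arm64 diff below. */
static unsigned long fake_vcpu_get_reg(struct fake_pt_regs *r, unsigned int n)
{
	return (n == 31) ? 0 : r->regs[n];
}

static void fake_vcpu_set_reg(struct fake_pt_regs *r, unsigned int n,
			      unsigned long val)
{
	if (n != 31)
		r->regs[n] = val;
}

int main(void)
{
	struct fake_pt_regs r = { .sp = 0xdeadbeef };

	/* The old accessor leaks sp when the guest used xzr ... */
	printf("old get(31) leaks sp:  %#lx\n", *fake_vcpu_reg(&r, 31));
	/* ... and an MMIO load targeting xzr would clobber sp. */
	*fake_vcpu_reg(&r, 31) = 0;
	printf("sp after old set(31):  %#lx\n", r.sp);

	r.sp = 0xdeadbeef;
	fake_vcpu_set_reg(&r, 31, 0);
	printf("sp after new set(31):  %#lx, new get(31) = %#lx\n",
	       r.sp, fake_vcpu_get_reg(&r, 31));
	return 0;
}

Reading reg 31 must yield zero and writes to it must be discarded; the fixed accessors do both, while the pointer version observed or overwrote whatever field the ABI places after the register array.
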
+12 −0
@@ -28,6 +28,18 @@
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
+static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
+					 u8 reg_num)
+{
+	return *vcpu_reg(vcpu, reg_num);
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	*vcpu_reg(vcpu, reg_num) = val;
+}
+
 bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
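
On the 32-bit ARM side these new accessors are plain wrappers: ARM has no architectural zero register to filter out, so their only job is to let code shared between the two architectures, such as the MMIO and PSCI handlers below, use a single get/set API. A minimal sketch of that pattern (the demo_* names and DEMO_ARM64 switch are invented):

/* One accessor API, two per-architecture definitions: shared code
 * never dereferences a register slot directly. Illustrative only. */
#ifdef DEMO_ARM64
static inline unsigned long demo_get_reg(const unsigned long *regs,
					 unsigned int n)
{
	return (n == 31) ? 0 : regs[n];	/* reg 31 reads as zero */
}
#else
static inline unsigned long demo_get_reg(const unsigned long *regs,
					 unsigned int n)
{
	return regs[n];	/* every encodable number is a real register */
}
#endif
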
+3 −2
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
-		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
 
 	return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	rt = vcpu->arch.mmio_decode.rt;
 
 	if (is_write) {
-		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+					       len);
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
 		mmio_write_buf(data_buf, len, data);
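
In both hunks above, rt was decoded from the fault syndrome: for data aborts with a valid instruction syndrome, ESR_EL2.ISS carries the transfer register in bits [20:16] (SRT), and the value 31 there denotes xzr/wzr, never sp, which is why the accessors must special-case it. A standalone sketch of that decode (the ISS_* masks follow the ARMv8 data-abort ISS layout; the program itself is illustrative):

#include <stdio.h>

#define ISS_ISV		(1UL << 24)	/* instruction syndrome valid */
#define ISS_SRT_SHIFT	16		/* transfer register, bits [20:16] */
#define ISS_SRT_MASK	(0x1fUL << ISS_SRT_SHIFT)
#define ISS_WNR		(1UL << 6)	/* write, not read */

int main(void)
{
	/* Syndrome as it would look for "str wzr, [x1]": valid, write,
	 * Rt == 31. Value constructed by hand for the demo. */
	unsigned long iss = ISS_ISV | (31UL << ISS_SRT_SHIFT) | ISS_WNR;
	unsigned int rt = (iss & ISS_SRT_MASK) >> ISS_SRT_SHIFT;

	printf("rt = %u -> %s\n", rt,
	       rt == 31 ? "xzr/wzr" : "general register");
	return 0;
}
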
+2 −2
@@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
+			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
+		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
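
In these two stage-2 hunks, addr iterates over guest IPAs, but kvm_is_device_pfn() has to classify the host page backing the mapping. __phys_to_pfn(addr) therefore produced a frame number from the wrong address space; the fix takes the host PFN out of the PTE itself with pte_pfn(). A toy model of the distinction (the PTE layout and the device predicate are invented for the demo):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT	12

/* Toy PTE: bits [47:12] hold the host page frame number. */
static uint64_t toy_pte_pfn(uint64_t pte)
{
	return (pte >> TOY_PAGE_SHIFT) & 0xfffffffffULL;
}

static uint64_t toy_phys_to_pfn(uint64_t addr)
{
	return addr >> TOY_PAGE_SHIFT;
}

/* Pretend everything at or above this host PFN is MMIO. */
static bool toy_is_device_pfn(uint64_t pfn)
{
	return pfn >= 0x80000;
}

int main(void)
{
	uint64_t ipa = 0x10000000;		/* guest address being unmapped */
	uint64_t pte = 0x80123000 | 0x3;	/* maps that IPA to a device page */

	/* Old check classified the guest address: wrongly "not a device". */
	printf("old: device = %d\n", toy_is_device_pfn(toy_phys_to_pfn(ipa)));
	/* New check classifies the host PFN from the PTE: correctly a
	 * device, so the dcache flush is skipped. */
	printf("new: device = %d\n", toy_is_device_pfn(toy_pte_pfn(pte)));
	return 0;
}
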
+10 −10
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	unsigned long context_id;
 	phys_addr_t target_pc;
 
-	cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 			return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = *vcpu_reg(source_vcpu, 2);
-	context_id = *vcpu_reg(source_vcpu, 3);
+	target_pc = vcpu_get_reg(source_vcpu, 2);
+	context_id = vcpu_get_reg(source_vcpu, 3);
 
 	kvm_reset_vcpu(vcpu);
 
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general puspose registers are undefined upon CPU_ON.
 	 */
-	*vcpu_reg(vcpu, 0) = context_id;
+	vcpu_set_reg(vcpu, 0, context_id);
 	vcpu->arch.power_off = false;
 	smp_mb();		/* Make sure the above is visible */
 
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tmp;
 
-	target_affinity = *vcpu_reg(vcpu, 1);
-	lowest_affinity_level = *vcpu_reg(vcpu, 2);
+	target_affinity = vcpu_get_reg(vcpu, 1);
+	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
 
 	/* Determine target affinity mask */
 	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
 	int ret = 1;
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return ret;
 }
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return 1;
 }
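
The register numbers in the PSCI hunks follow the SMC calling convention: a guest places the function ID in r0/x0 and up to three arguments in r1-r3/x1-x3 before trapping with hvc, and the result comes back in r0/x0. Hence kvm_psci_vcpu_on() reads registers 1-3 of the calling vcpu, and both dispatchers finish with a write to register 0. A guest-side sketch of CPU_ON (aarch64-only inline asm; the function ID is PSCI 0.2's 64-bit CPU_ON, and the helper name is invented):

/* Build for aarch64; meant to run at guest EL1 under a PSCI 0.2 host. */
static inline long demo_psci_cpu_on(unsigned long target_mpidr,
				    unsigned long entry_point,
				    unsigned long context_id)
{
	register unsigned long x0 asm("x0") = 0xc4000003UL; /* CPU_ON, SMC64 */
	register unsigned long x1 asm("x1") = target_mpidr;
	register unsigned long x2 asm("x2") = entry_point;
	register unsigned long x3 asm("x3") = context_id;

	asm volatile("hvc #0"
		     : "+r" (x0)
		     : "r" (x1), "r" (x2), "r" (x3)
		     : "memory");
	return x0;	/* PSCI_RET_* value, written back via vcpu_set_reg() */
}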

+13 −5
@@ -100,13 +100,21 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 }
 
 /*
- * vcpu_reg should always be passed a register number coming from a
- * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
- * with banked registers.
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
  */
-static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+					 u8 reg_num)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	if (reg_num != 31)
+		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
+}
+
 /* Get vcpu SPSR for current mode */