
Commit ede58222 authored by Paolo Bonzini


Merge tag 'kvm-arm64/for-3.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into kvm-next

A handful of fixes for KVM/arm64:

- A couple of basic fixes for running BE guests on an LE host
- A performance improvement for overcommitted VMs (same as the equivalent
  patch for ARM)

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Conflicts:
	arch/arm/include/asm/kvm_emulate.h
	arch/arm64/include/asm/kvm_emulate.h
parents 6da8ae55 ce94fe93
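Most of the changes below implement the first item: when a big-endian guest runs on a little-endian host, MMIO data has to be byte-swapped between the guest's registers and the buffer shared with the device model. The following is a userspace sketch of what the new vcpu_data_guest_to_host() helper does (32-bit flavour), with <endian.h> conversions standing in for the kernel's beNN_to_cpu() and a plain flag standing in for the vcpu; it is an illustration, not the patched code.

/* Userspace sketch of the guest-to-host MMIO byte-order fixup added below.
 * Assumes a little-endian host; be16toh()/be32toh() stand in for the kernel's
 * be16_to_cpu()/be32_to_cpu(). Not the kernel implementation. */
#include <endian.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned long data_guest_to_host(bool guest_is_be, unsigned long data,
					unsigned int len)
{
	if (!guest_is_be)
		return data;			/* LE guest: leave untouched */

	switch (len) {
	case 1:
		return data & 0xff;		/* a single byte has no order */
	case 2:
		return be16toh(data & 0xffff);	/* swap a halfword */
	default:
		return be32toh(data & 0xffffffff);	/* swap a word */
	}
}

int main(void)
{
	/* A BE guest stores 0x12345678 to an emulated device register; the
	 * host must hand the device model the byte-swapped image. */
	unsigned long reg = 0x12345678;

	printf("host-order value: 0x%lx\n", data_guest_to_host(true, reg, 4));
	return 0;
}

On a little-endian machine this prints 0x78563412, the byte pattern the big-endian guest expects to appear on the bus.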
arch/arm/include/asm/kvm_emulate.h  +46 −0
@@ -162,4 +162,50 @@ static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
	return vcpu->arch.cp15[c0_MPIDR];
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		default:
			return be32_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		default:
			return cpu_to_be32(data);
		}
	}

	return data;		/* Leave LE untouched */
}

#endif /* __ARM_KVM_EMULATE_H__ */
arch/arm/kvm/mmio.c  +75 −11
@@ -23,6 +23,68 @@

#include "trace.h"

static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
{
	void *datap = NULL;
	union {
		u8	byte;
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		tmp.byte	= data;
		datap		= &tmp.byte;
		break;
	case 2:
		tmp.hword	= data;
		datap		= &tmp.hword;
		break;
	case 4:
		tmp.word	= data;
		datap		= &tmp.word;
		break;
	case 8:
		tmp.dword	= data;
		datap		= &tmp.dword;
		break;
	}

	memcpy(buf, datap, len);
}

static unsigned long mmio_read_buf(char *buf, unsigned int len)
{
	unsigned long data = 0;
	union {
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		data = buf[0];
		break;
	case 2:
		memcpy(&tmp.hword, buf, len);
		data = tmp.hword;
		break;
	case 4:
		memcpy(&tmp.word, buf, len);
		data = tmp.word;
		break;
	case 8:
		memcpy(&tmp.dword, buf, len);
		data = tmp.dword;
		break;
	}

	return data;
}

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 * @vcpu: The VCPU pointer
@@ -33,28 +95,27 @@
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
-	unsigned long *dest;
+	unsigned long data;
	unsigned int len;
	int mask;

	if (!run->mmio.is_write) {
-		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
-		*dest = 0;
-
		len = run->mmio.len;
		if (len > sizeof(unsigned long))
			return -EINVAL;

-		memcpy(dest, run->mmio.data, len);
-
-		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-				*((u64 *)run->mmio.data));
+		data = mmio_read_buf(run->mmio.data, len);

		if (vcpu->arch.mmio_decode.sign_extend &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
-			*dest = (*dest ^ mask) - mask;
+			data = (data ^ mask) - mask;
		}

+		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+			       data);
+		data = vcpu_data_host_to_guest(vcpu, data, len);
+		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
	}

	return 0;
@@ -105,6 +166,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	struct kvm_exit_mmio mmio;
+	unsigned long data;
	unsigned long rt;
	int ret;

@@ -125,13 +187,15 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
	}

	rt = vcpu->arch.mmio_decode.rt;
+	data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
+
	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
					 KVM_TRACE_MMIO_READ_UNSATISFIED,
			mmio.len, fault_ipa,
-			(mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
+			(mmio.is_write) ? data : 0);

	if (mmio.is_write)
-		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
+		mmio_write_buf(mmio.data, mmio.len, data);

	if (vgic_handle_mmio(vcpu, run, &mmio))
		return 1;
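One detail worth calling out in kvm_handle_mmio_return() above: a load narrower than a register is sign-extended with the (data ^ mask) - mask idiom instead of a cast. Below is a standalone check of that idiom; the sign_extend() helper name is illustrative, not kernel code.

/* Demonstrates the (data ^ mask) - mask sign-extension idiom used in
 * kvm_handle_mmio_return(). Hypothetical helper, for illustration only. */
#include <assert.h>
#include <stdio.h>

static unsigned long sign_extend(unsigned long data, unsigned int len)
{
	unsigned long mask;

	if (len >= sizeof(unsigned long))
		return data;

	mask = 1UL << ((len * 8) - 1);	/* sign bit of the narrow value */
	return (data ^ mask) - mask;	/* copies of that bit fill the upper bits */
}

int main(void)
{
	/* A 1-byte load of 0x80 (-128) must become ...ffffff80 in the register. */
	assert(sign_extend(0x80, 1) == (unsigned long)-128);
	/* A 2-byte load of 0x1234 stays positive. */
	assert(sign_extend(0x1234, 2) == 0x1234);

	printf("sign-extension idiom behaves as expected\n");
	return 0;
}

XOR-ing with the sign bit and then subtracting it propagates that bit through all the upper bits when it was set, and is a no-op when it was clear.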
arch/arm/kvm/psci.c  +4 −0
@@ -71,6 +71,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (kvm_vcpu_is_be(source_vcpu))
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	vcpu->arch.pause = false;
	smp_mb();		/* Make sure the above is visible */
arch/arm64/include/asm/kvm_arm.h  +6 −2
@@ -63,6 +63,7 @@
 * TAC:		Trap ACTLR
 * TSC:		Trap SMC
 * TSW:		Trap cache operations by set/way
+ * TWE:		Trap WFE
 * TWI:		Trap WFI
 * TIDCP:	Trap L2CTLR/L2ECTLR
 * BSU_IS:	Upgrade barriers to the inner shareable domain
@@ -72,8 +73,9 @@
 * FMO:		Override CPSR.F and enable signaling with VF
 * SWIO:	Turn set/way invalidates into set/way clean+invalidate
 */
-#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
-			 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
+			 HCR_BSU_IS | HCR_FB | HCR_TAC | \
+			 HCR_AMO | HCR_IMO | HCR_FMO | \
			 HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)

@@ -242,4 +244,6 @@

#define ESR_EL2_EC_xABT_xFSR_EXTABT	0x10

+#define ESR_EL2_EC_WFI_ISS_WFE	(1 << 0)

#endif /* __ARM64_KVM_ARM_H__ */
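The new HCR_TWE bit makes guest WFE instructions trap to EL2, and ESR_EL2_EC_WFI_ISS_WFE lets the exit handler distinguish a trapped WFE from a trapped WFI. The handler itself is not part of this excerpt (it lives in the arm64 handle_exit code); roughly, and assuming the generic kvm_vcpu_on_spin()/kvm_vcpu_block() helpers, it is expected to look like the sketch below, which is what delivers the overcommit improvement mentioned in the description.

/* Sketch of a WFI/WFE exit handler (not part of this diff, will not build on
 * its own). A trapped WFE usually means the vcpu is spinning on a lock held
 * by a preempted vcpu, so yield the physical CPU; a trapped WFI means the
 * guest is idle, so block until an interrupt is pending. */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
		kvm_vcpu_on_spin(vcpu);		/* overcommit: let another vcpu run */
	else
		kvm_vcpu_block(vcpu);		/* idle: wait for an interrupt */

	return 1;
}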
arch/arm64/include/asm/kvm_emulate.h  +56 −0
@@ -182,4 +182,60 @@ static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
	return vcpu_sys_reg(vcpu, MPIDR_EL1);
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

#endif /* __ARM64_KVM_EMULATE_H__ */
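For a 64-bit guest, the arm64 helpers above record and query the guest's data endianness through SCTLR_EL1 bit 25, which is the EE field (endianness of data accesses at EL1); the 32-bit compat case uses the CPSR E bit instead. A small named-constant sketch of the check follows; the macro and helper names are illustrative, the patch itself tests the raw bit.

/* Illustrative only: the patch tests (1 << 25) directly. SCTLR_EL1.EE selects
 * big-endian data accesses at EL1, so it records whether a 64-bit guest is BE. */
#include <stdbool.h>

#define SCTLR_EL1_EE	(1UL << 25)

static inline bool guest_el1_is_be(unsigned long sctlr_el1)
{
	return !!(sctlr_el1 & SCTLR_EL1_EE);
}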