
Commit 082d3900 authored by Radim Krčmář
KVM/ARM Changes for v4.14

Two minor cleanups and improvements, a fix for decoding external abort
types from guests, and support for migrating the active priority of
interrupts when running a GICv2 guest on a GICv3 host.
parents 6e0ff1b4 9b87e7a8
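
The GICv2 active-priority migration mentioned above is exposed to userspace through the existing vgic device attribute interface (the documentation change below describes the combined group-0/group-1 view). The following is a minimal VMM-side sketch of saving the per-vCPU GICC_APRn state, assuming the attribute layout documented in Documentation/virtual/kvm/devices/arm-vgic.txt (vcpu index in bits [39:32], MMIO offset in bits [31:0]), the architectural GICC_APRn offsets 0xd0-0xdc, and arm/arm64 host uapi headers; save_gicc_aprs and its error handling are illustrative, not part of this series.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Architectural GICv2 CPU interface active priority register base offset. */
#define GICC_APR0	0xd0

/* Hypothetical helper: read GICC_APR0..3 of one vCPU from a vgic-v2 device fd. */
static int save_gicc_aprs(int vgic_fd, uint64_t vcpu_idx, uint32_t apr[4])
{
	for (int n = 0; n < 4; n++) {
		struct kvm_device_attr attr = {
			.group	= KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
			.attr	= (vcpu_idx << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
				  (GICC_APR0 + 4 * n),
			.addr	= (uint64_t)(unsigned long)&apr[n],
		};

		/* Restore on the target side is the same call with KVM_SET_DEVICE_ATTR. */
		if (ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr))
			return -1;
	}
	return 0;
}
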
+5 −0
@@ -83,6 +83,11 @@ Groups:
 
   Bits for undefined preemption levels are RAZ/WI.
 
+  Note that this differs from a CPU's view of the APRs on hardware in which
+  a GIC without the security extensions expose group 0 and group 1 active
+  priorities in separate register groups, whereas we show a combined view
+  similar to GICv2's GICH_APR.
+
   For historical reasons and to provide ABI compatibility with userspace we
   export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
   field in the lower 5 bits of a word, meaning that userspace must always
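
As a side note on the GICH_VMCR.VMPriMask format referenced at the end of the hunk above, here is a small illustration of the conversion, assuming the 5-bit field carries bits [7:3] of the architectural 8-bit GICC_PMR priority; the helper names are hypothetical and not part of this series.

#include <stdint.h>

/* 8-bit GICC_PMR priority -> 5-bit VMPriMask-style value used by the uaccess ABI. */
static inline uint32_t gicc_pmr_to_uaccess(uint32_t pmr)
{
	return (pmr >> 3) & 0x1f;	/* e.g. 0x80 -> 0x10 */
}

/* 5-bit uaccess value -> architectural 8-bit GICC_PMR priority. */
static inline uint32_t uaccess_to_gicc_pmr(uint32_t val)
{
	return (val & 0x1f) << 3;	/* e.g. 0x10 -> 0x80 */
}
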
+0 −1
@@ -227,7 +227,6 @@
 
 #define HSR_DABT_S1PTW		(_AC(1, UL) << 7)
 #define HSR_DABT_CM		(_AC(1, UL) << 8)
-#define HSR_DABT_EA		(_AC(1, UL) << 9)
 
 #define kvm_arm_exception_type	\
 	{0, "RESET" }, 		\
+19 −5
@@ -149,11 +149,6 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
 	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
-{
-	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
-}
-
 static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
@@ -206,6 +201,25 @@ static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
 }
 
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+	case FSC_SEA:
+	case FSC_SEA_TTW0:
+	case FSC_SEA_TTW1:
+	case FSC_SEA_TTW2:
+	case FSC_SEA_TTW3:
+	case FSC_SECC:
+	case FSC_SECC_TTW0:
+	case FSC_SECC_TTW1:
+	case FSC_SECC_TTW2:
+	case FSC_SECC_TTW3:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+19 −5
@@ -188,11 +188,6 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
-{
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
-}
-
 static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
@@ -240,6 +235,25 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
+static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+{
+	switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+	case FSC_SEA:
+	case FSC_SEA_TTW0:
+	case FSC_SEA_TTW1:
+	case FSC_SEA_TTW2:
+	case FSC_SEA_TTW3:
+	case FSC_SECC:
+	case FSC_SECC_TTW0:
+	case FSC_SECC_TTW1:
+	case FSC_SECC_TTW2:
+	case FSC_SECC_TTW3:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
+3 −20
@@ -208,29 +208,12 @@ static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
 static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			    const struct sys_reg_desc *r, u8 apr)
 {
-	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
 	u8 idx = r->Op2 & 3;
 
-	/*
-	 * num_pri_bits are initialized with HW supported values.
-	 * We can rely safely on num_pri_bits even if VM has not
-	 * restored ICC_CTLR_EL1 before restoring APnR registers.
-	 */
-	switch (vgic_v3_cpu->num_pri_bits) {
-	case 7:
-		vgic_v3_access_apr_reg(vcpu, p, apr, idx);
-		break;
-	case 6:
-		if (idx > 1)
-			goto err;
-		vgic_v3_access_apr_reg(vcpu, p, apr, idx);
-		break;
-	default:
-		if (idx > 0)
-			goto err;
-		vgic_v3_access_apr_reg(vcpu, p, apr, idx);
-	}
+	if (idx > vgic_v3_max_apr_idx(vcpu))
+		goto err;
 
+	vgic_v3_access_apr_reg(vcpu, p, apr, idx);
 	return true;
 err:
 	if (!p->is_write)
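
The vgic_v3_max_apr_idx() helper used above is introduced elsewhere in this series and is not shown in this diff. Consistent with the switch statement it replaces (7 priority bits expose APR0..3, 6 bits expose APR0..1, otherwise only APR0), it presumably maps the vCPU's num_pri_bits to the highest valid APRn index, roughly along these lines; this is a sketch, not necessarily the exact kernel definition.

/* Sketch: highest valid APRn index for this vCPU, derived from num_pri_bits. */
static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;

	switch (vgic_v3_cpu->num_pri_bits) {
	case 7:
		return 3;	/* APR0..APR3 */
	case 6:
		return 1;	/* APR0..APR1 */
	default:
		return 0;	/* APR0 only */
	}
}
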