Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e3feebf8 authored by Alex Bennée, committed by Christoffer Dall
Browse files

kvm: arm64: handle single-step of hyp emulated mmio instructions



There is a fast-path of MMIO emulation inside hyp mode. The handling
of single-step is broadly the same as kvm_arm_handle_step_debug()
except we just set up ESR/HSR so handle_exit() does the correct thing
as we exit.

For the case of an emulated illegal access causing an SError we will
exit via the ARM_EXCEPTION_EL1_SERROR path in handle_exit(). We behave
as we would during a real SError and clear the DBG_SPSR_SS bit for the
emulated instruction.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent e70dce73
Loading
Loading
Loading
Loading
+30 −7
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>

static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
@@ -269,7 +270,11 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
	return true;
}

static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
/* Skip an instruction which has been emulated. Returns true if
 * execution can continue or false if we need to exit hyp mode because
 * single-step was in effect.
 */
static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

@@ -282,6 +287,14 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		vcpu->arch.fault.esr_el2 =
			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
		return false;
	} else {
		return true;
	}
}

int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -342,13 +355,21 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				__skip_instr(vcpu);
				if (__skip_instr(vcpu))
					goto again;
				else
					exit_code = ARM_EXCEPTION_TRAP;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				/* Promote an illegal access to an
				 * SError. If we would be returning
				 * due to single-step clear the SS
				 * bit so handle_exit knows what to
				 * do after dealing with the error.
				 */
				if (!__skip_instr(vcpu))
					*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

@@ -363,8 +384,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1) {
			__skip_instr(vcpu);
			if (__skip_instr(vcpu))
				goto again;
			else
				exit_code = ARM_EXCEPTION_TRAP;
		}

		/* 0 falls through to be handled out of EL2 */