Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 89ef2b21 authored by Marc Zyngier
Browse files

ARM: KVM: Add guest entry code



Add the very minimal piece of code that is now required to jump
into the guest (and return from it). This code is only concerned
with saving/restoring the USR registers (r0-r12+lr for the guest,
r4-r12+lr for the host), as everything else is dealt with in C
(VFP is another matter though).

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 33280b4c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -8,3 +8,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+70 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2016 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_arm.h>

	.arch_extension     virt

	.text
	.pushsection	.hyp.text, "ax"

#define USR_REGS_OFFSET		(CPU_CTXT_GP_REGS + GP_REGS_USR)

/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
ENTRY(__guest_enter)
	@ Save host registers
	add	r1, r1, #(USR_REGS_OFFSET + S_R4)
	stm	r1!, {r4-r12}
	str	lr, [r1, #4]	@ Skip SP_usr (already saved)

	@ Restore guest registers
	add	r0, r0,  #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
	ldr	lr, [r0, #S_LR]
	ldm	r0, {r0-r12}

	clrex
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	/*
	 * return convention:
	 * guest r0, r1, r2 saved on the stack
	 * r0: vcpu pointer
	 * r1: exception code
	 */

	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
	stm	r2!, {r3-r12}
	str	lr, [r2, #4]
	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
	pop	{r3, r4, r5}		@ r0, r1, r2
	stm	r2, {r3-r5}

	ldr	r0, [r0, #VCPU_HOST_CTXT]
	add	r0, r0, #(USR_REGS_OFFSET + S_R4)
	ldm	r0!, {r4-r12}
	ldr	lr, [r0, #4]

	mov	r0, r1
	bx	lr
ENDPROC(__guest_exit)

	.popsection
+2 −0
Original line number Diff line number Diff line
@@ -110,4 +110,6 @@ static inline bool __vfp_enabled(void)
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);

int asmlinkage __guest_enter(struct kvm_vcpu *vcpu,
			     struct kvm_cpu_context *host);
#endif /* __ARM_KVM_HYP_H__ */