
Commit abe7a458 authored by Radim Krčmář
KVM/ARM updates for v4.17

- VHE optimizations
- EL2 address space randomization
- Variant 3a mitigation for Cortex-A57 and A72
- The usual vgic fixes
- Various minor tidying-up
parents d32ef547 dc6ed61d
Documentation/arm64/memory.txt +6 −3
@@ -86,9 +86,12 @@ Translation table lookup with 64KB pages:
 +-------------------------------------------------> [63] TTBR0/1


-When using KVM without the Virtualization Host Extensions, the hypervisor
-maps kernel pages in EL2 at a fixed offset from the kernel VA. See the
-kern_hyp_va macro for more details.
+When using KVM without the Virtualization Host Extensions, the
+hypervisor maps kernel pages in EL2 at a fixed (and potentially
+random) offset from the linear mapping. See the kern_hyp_va macro and
+kvm_update_va_mask function for more details. MMIO devices such as
+GICv2 gets mapped next to the HYP idmap page, as do vectors when
+ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.

When using KVM with the Virtualization Host Extensions, no additional
mappings are created, since the host kernel runs directly in EL2.
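
The scheme described above can be modelled in a few lines of C. This is
only a sketch of the idea, not the kernel's implementation: the real
kern_hyp_va is an assembly macro whose mask (and optional random tag)
is patched in at boot by kvm_update_va_mask, and every constant below
is invented for illustration.

	/*
	 * Toy model: keep the low address bits of the kernel VA and
	 * replace the upper bits with a tag chosen once at init
	 * (zero when randomization is disabled).
	 */
	#include <stdint.h>

	#define HYP_VA_BITS	40
	#define HYP_VA_MASK	((UINT64_C(1) << HYP_VA_BITS) - 1)

	static uint64_t hyp_va_tag;	/* set once, possibly random */

	static inline uint64_t kern_hyp_va_model(uint64_t kern_va)
	{
		return (kern_va & HYP_VA_MASK) | hyp_va_tag;
	}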
arch/arm/include/asm/kvm_asm.h +4 −1
@@ -70,7 +70,10 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);

-extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+/* no VHE on 32-bit :( */
+static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
+
+extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);

extern void __init_stage2_translation(void);
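
The BUG() stub above exists so that architecture-independent code can
dispatch on VHE support without #ifdefs. Roughly, and simplified from
the shared virt/kvm/arm run loop of this era (error handling elided,
so treat it as a sketch rather than the exact call site):

	int ret;

	if (has_vhe())
		ret = kvm_vcpu_run_vhe(vcpu);	/* unreachable on 32-bit */
	else
		ret = kvm_call_hyp(__kvm_vcpu_run_nvhe, vcpu);

Because has_vhe() is compile-time false on 32-bit ARM, the compiler
drops the first branch entirely; the stub is only there so the call
site type-checks.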

arch/arm/include/asm/kvm_emulate.h +13 −8
@@ -41,7 +41,17 @@ static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
	return vcpu_reg(vcpu, reg_num);
}

-unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
+
+static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
+{
+	return *__vcpu_spsr(vcpu);
+}
+
+static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
+{
+	*__vcpu_spsr(vcpu) = v;
+}

static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
					 u8 reg_num)
@@ -92,14 +102,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
	vcpu->arch.hcr = HCR_GUEST_MASK;
}

-static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.hcr;
-}
-
-static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
{
-	vcpu->arch.hcr = hcr;
+	return (unsigned long *)&vcpu->arch.hcr;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
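
Two accessor patterns are worth noting in this hunk. Replacing the
vcpu_get_hcr/vcpu_set_hcr pair with a pointer-returning vcpu_hcr()
allows read-modify-write updates in a single expression; for example
(an illustrative use, with HCR_VI being the virtual IRQ bit defined in
the 32-bit KVM headers):

	*vcpu_hcr(vcpu) |= HCR_VI;	/* raise the virtual IRQ line */
	*vcpu_hcr(vcpu) &= ~HCR_VI;	/* lower it again */

Likewise, hiding __vcpu_spsr() behind the read/write wrappers gives
the arm64 side room to redirect SPSR accesses to the live CPU register
while the guest state is loaded (part of this series' VHE work); on
32-bit the wrappers simply dereference the pointer.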
arch/arm/include/asm/kvm_host.h +3 −3
@@ -155,9 +155,6 @@ struct kvm_vcpu_arch {
	/* HYP trapping configuration */
	u32 hcr;

-	/* Interrupt related fields */
-	u32 irq_lines;		/* IRQ and FIQ levels */
-
	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

@@ -315,4 +312,7 @@ static inline bool kvm_arm_harden_branch_predictor(void)
	return false;
}

+static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
+
#endif /* __ARM_KVM_HOST_H__ */
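
The irq_lines removal pairs with the vcpu_hcr() change above: the IRQ
and FIQ levels are encoded directly in the HCR_VI/HCR_VF bits of
vcpu->arch.hcr, so a separate shadow field is redundant. The two empty
stubs serve the same purpose as kvm_vcpu_run_vhe(): on arm64 with VHE
they defer system-register save/restore to vcpu_load/vcpu_put, and the
common code can call them unconditionally. A hedged sketch of that
call pattern (surrounding bodies elided, not the verbatim code):

	void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	{
		/* ... existing load work ... */
		kvm_vcpu_load_sysregs(vcpu);	/* no-op on 32-bit */
	}

	void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
	{
		kvm_vcpu_put_sysregs(vcpu);	/* no-op on 32-bit */
		/* ... existing put work ... */
	}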
arch/arm/include/asm/kvm_hyp.h +4 −0
@@ -110,6 +110,10 @@ void __sysreg_restore_state(struct kvm_cpu_context *ctxt);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
+void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
+void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);

asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
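
The four new hooks split the previously monolithic GICv3 save/restore
into pieces with different lifetimes, which is the heart of the VHE
optimizations in this series: trap configuration and the active
priority registers (APRs) only change across vcpu_load/vcpu_put, so
they can leave the per-exit hot path, while __vgic_v3_save_state() and
__vgic_v3_restore_state() keep handling per-world-switch state such as
the list registers. A sketch of the intended call pattern (the two
function names below are illustrative; only the declarations above
come from the patch):

	static void vgic_v3_load_sketch(struct kvm_vcpu *vcpu)
	{
		__vgic_v3_restore_aprs(vcpu);
		__vgic_v3_activate_traps(vcpu);
	}

	static void vgic_v3_put_sketch(struct kvm_vcpu *vcpu)
	{
		__vgic_v3_save_aprs(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}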