Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9d0eb462 authored by Linus Torvalds
Browse files
Pull KVM fixes from Paolo Bonzini:
 "Bug fixes (ARM, s390, x86)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: async_pf: avoid async pf injection when in guest mode
  KVM: cpuid: Fix read/write out-of-bounds vulnerability in cpuid emulation
  arm: KVM: Allow unaligned accesses at HYP
  arm64: KVM: Allow unaligned accesses at EL2
  arm64: KVM: Preserve RES1 bits in SCTLR_EL2
  KVM: arm/arm64: Handle possible NULL stage2 pud when ageing pages
  KVM: nVMX: Fix exception injection
  kvm: async_pf: fix rcu_irq_enter() with irqs enabled
  KVM: arm/arm64: vgic-v3: Fix nr_pre_bits bitfield extraction
  KVM: s390: fix ais handling vs cpu model
  KVM: arm/arm64: Fix issues with GICv2 on GICv3 migration
parents 5faab9e0 9bc1f09f
Loading
Loading
Loading
Loading
+2 −3
Original line number Original line Diff line number Diff line
@@ -104,7 +104,6 @@ __do_hyp_init:
	@  - Write permission implies XN: disabled
	@  - Write permission implies XN: disabled
	@  - Instruction cache: enabled
	@  - Instruction cache: enabled
	@  - Data/Unified cache: enabled
	@  - Data/Unified cache: enabled
	@  - Memory alignment checks: enabled
	@  - MMU: enabled (this code must be run from an identity mapping)
	@  - MMU: enabled (this code must be run from an identity mapping)
	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
	ldr	r2, =HSCTLR_MASK
	ldr	r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
	and	r1, r1, r2
	and	r1, r1, r2
 ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
 ARM(	ldr	r2, =(HSCTLR_M)					)
 THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
 THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
	orr	r1, r1, r2
	orr	r1, r1, r2
	orr	r0, r0, r1
	orr	r0, r0, r1
	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
+4 −0
Original line number Original line Diff line number Diff line
@@ -286,6 +286,10 @@
#define SCTLR_ELx_A	(1 << 1)
#define SCTLR_ELx_A	(1 << 1)
#define SCTLR_ELx_M	1
#define SCTLR_ELx_M	1


#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
			 (1 << 28) | (1 << 29))

#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
			 SCTLR_ELx_SA | SCTLR_ELx_I)
			 SCTLR_ELx_SA | SCTLR_ELx_I)


+7 −4
Original line number Original line Diff line number Diff line
@@ -106,10 +106,13 @@ __do_hyp_init:
	tlbi	alle2
	tlbi	alle2
	dsb	sy
	dsb	sy


	mrs	x4, sctlr_el2
	/*
	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
	 * Preserve all the RES1 bits while setting the default flags,
	ldr	x5, =SCTLR_ELx_FLAGS
	 * as well as the EE bit on BE. Drop the A flag since the compiler
	orr	x4, x4, x5
	 * is allowed to generate unaligned accesses.
	 */
	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
	msr	sctlr_el2, x4
	msr	sctlr_el2, x4
	isb
	isb


+5 −5
Original line number Original line Diff line number Diff line
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
		 */
		 */
		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
	} else {
		val = 0;
		val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
		 */
		 */
		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;


		p->regval = val;
		p->regval = val;
	}
	}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		p->regval = 0;
		p->regval = 0;


	vgic_get_vmcr(vcpu, &vmcr);
	vgic_get_vmcr(vcpu, &vmcr);
	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
	if (!vmcr.cbpr) {
		if (p->is_write) {
		if (p->is_write) {
			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
				     ICC_BPR1_EL1_SHIFT;
				     ICC_BPR1_EL1_SHIFT;
+0 −1
Original line number Original line Diff line number Diff line
@@ -541,7 +541,6 @@ struct kvm_s390_float_interrupt {
	struct mutex ais_lock;
	struct mutex ais_lock;
	u8 simm;
	u8 simm;
	u8 nimm;
	u8 nimm;
	int ais_enabled;
};
};


struct kvm_hw_wp_info_arch {
struct kvm_hw_wp_info_arch {
Loading