
Commit 4cf193b4 authored by Linus Torvalds
Pull KVM fixes from Paolo Bonzini:
 "Bug fixes for all architectures.  Nothing really stands out"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (21 commits)
  KVM: nVMX: remove incorrect vpid check in nested invvpid emulation
  arm64: kvm: report original PAR_EL1 upon panic
  arm64: kvm: avoid %p in __kvm_hyp_panic
  KVM: arm/arm64: vgic: Trust the LR state for HW IRQs
  KVM: arm/arm64: arch_timer: Preserve physical dist. active state on LR.active
  KVM: arm/arm64: Fix preemptible timer active state crazyness
  arm64: KVM: Add workaround for Cortex-A57 erratum 834220
  arm64: KVM: Fix AArch32 to AArch64 register mapping
  ARM/arm64: KVM: test properly for a PTE's uncachedness
  KVM: s390: fix wrong lookup of VCPUs by array index
  KVM: s390: avoid memory overwrites on emergency signal injection
  KVM: Provide function for VCPU lookup by id
  KVM: s390: fix pfmf intercept handler
  KVM: s390: enable SIMD only when no VCPUs were created
  KVM: x86: request interrupt window when IRQ chip is split
  KVM: x86: set KVM_REQ_EVENT on local interrupt request from user space
  KVM: x86: split kvm_vcpu_ready_for_interrupt_injection out of dm_request_for_irq_injection
  KVM: x86: fix interrupt window handling in split IRQ chip case
  MIPS: KVM: Uninit VCPU in vcpu_create error path
  MIPS: KVM: Fix CACHE immediate offset sign extension
  ...
parents 6ffeba96 b2467e74
+1 −6
@@ -563,18 +563,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		if (vcpu->arch.power_off || vcpu->arch.pause)
 			vcpu_sleep(vcpu);
 
-		/*
-		 * Disarming the background timer must be done in a
-		 * preemptible context, as this call may sleep.
-		 */
-		kvm_timer_flush_hwstate(vcpu);
-
 		/*
 		 * Preparing the interrupts to be injected also
 		 * involves poking the GIC, which must be done in a
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
 		local_irq_disable();
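With the hunk above, kvm_arch_vcpu_ioctl_run() now flushes the timer and the VGIC state inside the same preempt-disabled region; the old comment requiring a preemptible context for the timer flush goes away together with the early call. A condensed view of the resulting entry sequence, built only from the context and added lines above (the rest of the run loop is omitted):

		preempt_disable();
		kvm_timer_flush_hwstate(vcpu);	/* now runs with preemption off */
		kvm_vgic_flush_hwstate(vcpu);	/* pokes the GIC: non-preemptible */
		local_irq_disable();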
+7 −8
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
 	__kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:	pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) &&
-		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:	pointer to pmd entry
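The mmu.c change replaces the two open-coded PAGE_S2_DEVICE attribute tests with a single pfn_valid()-based helper and moves that helper above its first user. A minimal restatement, taken from the hunks above (only the stage2_flush_ptes()-style use is shown):

/* A PFN with no struct page behind it is treated as a device mapping,
 * so no D-cache maintenance is performed for it. */
static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}

	/* flush path: skip cache maintenance for device PFNs */
	if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
		kvm_flush_dcache_pte(*pte);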
+21 −0
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+	depends on KVM
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 834220 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for load crossing a
+	  page boundary when there is a permission or device memory
+	  alignment fault at Stage 1 and a translation fault at Stage 2.
+
+	  The workaround is to verify that the Stage 1 translation
+	  doesn't generate a fault before handling the Stage 2 fault.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 	bool "Cortex-A53: 845719: a load might read incorrect data"
 	depends on COMPAT
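As the help text notes, this option only builds the alternative code sequence; whether it is applied is decided at boot, when the errata framework matches the running CPU. The matching cpu_errata.c entry is not among the hunks loaded here, but with the MIDR-range style used by arch/arm64/kernel/cpu_errata.c in this kernel generation it would look roughly like the sketch below; the exact field layout is an assumption, only the ARM64_WORKAROUND_834220 capability name and the Cortex-A57 r0p0-r1p2 range come from the diffs in this commit:

#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
#endif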
+2 −1
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN				4
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
 
-#define ARM64_NCAPS				7
+#define ARM64_NCAPS				8
 
 #ifndef __ASSEMBLY__
 
+5 −3
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	if (vcpu_mode_is_32bit(vcpu))
-		return vcpu_reg32(vcpu, reg_num);
-
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
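The comment added above states the contract this fix relies on: ESR_EL2 already reports the AArch64 view of a 32-bit guest's registers, so vcpu_reg() no longer demultiplexes banked registers itself. A hedged sketch of a conforming caller on the MMIO completion path; kvm_vcpu_dabt_get_rd() is the syndrome accessor assumed here and mmio_read_data stands in for the value read back from the device, neither appears in the hunk:

	/* Rt comes straight from the data-abort syndrome in ESR_EL2,
	 * so it indexes the AArch64 register file directly. */
	int rt = kvm_vcpu_dabt_get_rd(vcpu);
	*vcpu_reg(vcpu, rt) = mmio_read_data;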