Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b0d8aa08 authored by Linus Torvalds
Browse files

Merge branch 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: ppc: fix invalidation of large guest pages
  KVM: s390: Fix possible host kernel bug on lctl(g) handling
  KVM: s390: Fix instruction naming for lctlg
  KVM: s390: Fix program check on interrupt delivery handling
  KVM: s390: Change guestaddr type in gaccess
  KVM: s390: Fix guest kconfig
  KVM: s390: Advertise KVM_CAP_USER_MEMORY
  KVM: ia64: Fix irq disabling leak in error handling code
  KVM: VMX: Fix undefined behaviour of EPT after reload kvm-intel.ko
  KVM: VMX: Fix bypass_guest_pf enabling when disable EPT in module parameter
  KVM: task switch: translate guest segment limit to virt-extension byte granular field
  KVM: Avoid instruction emulation when event delivery is pending
  KVM: task switch: use seg regs provided by subarch instead of reading from GDT
  KVM: task switch: segment base is linear address
  KVM: SVM: allow enabling/disabling NPT by reloading only the architecture module
parents 6948385c cc04454f
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -125,9 +125,9 @@ void kvm_arch_hardware_enable(void *garbage)
				PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;
	local_irq_restore(saved_psr);

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -160,9 +160,9 @@ void kvm_arch_hardware_disable(void *garbage)

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;
	local_irq_restore(saved_psr);

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
@@ -1253,6 +1253,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	local_irq_restore(psr);
	return r;
}

+3 −2
Original line number Diff line number Diff line
@@ -177,7 +177,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
	                                            vcpu->arch.msr & MSR_PR);
}

void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                           gva_t eend, u32 asid)
{
	unsigned int pid = asid & 0xff;
	int i;
@@ -191,7 +192,7 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
		if (!get_tlb_v(stlbe))
			continue;

		if (eaddr < get_tlb_eaddr(stlbe))
		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
+1 −1
Original line number Diff line number Diff line
@@ -137,7 +137,7 @@ static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
	if (tlbe->word0 & PPC44x_TLB_VALID) {
		eaddr = get_tlb_eaddr(tlbe);
		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
		kvmppc_mmu_invalidate(vcpu, eaddr, asid);
		kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
	}

	switch (ws) {
+33 −29
Original line number Diff line number Diff line
@@ -18,11 +18,11 @@
#include <asm/uaccess.h>

static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       u64 guestaddr)
					       unsigned long guestaddr)
{
	u64 prefix  = vcpu->arch.sie_block->prefix;
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
@@ -37,7 +37,7 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
	return (void __user *) guestaddr;
}

static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -47,10 +47,10 @@ static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u64 __user *) uptr);
	return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -63,7 +63,7 @@ static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -76,7 +76,7 @@ static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -87,7 +87,7 @@ static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
	return get_user(*result, (u8 __user *) uptr);
}

static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -100,7 +100,7 @@ static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -113,7 +113,7 @@ static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -126,7 +126,7 @@ static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -138,7 +138,8 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
}


static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       const void *from, unsigned long n)
{
	int rc;
@@ -153,12 +154,12 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
	return 0;
}

static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				const void *from, unsigned long n)
{
	u64 prefix  = vcpu->arch.sie_block->prefix;
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;
@@ -189,7 +190,8 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
}

static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 u64 guestsrc, unsigned long n)
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
@@ -204,11 +206,11 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
}

static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  u64 guestsrc, unsigned long n)
				  unsigned long guestsrc, unsigned long n)
{
	u64 prefix  = vcpu->arch.sie_block->prefix;
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;
@@ -238,11 +240,12 @@ static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 const void *from, unsigned long n)
{
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if (guestdest + n > memsize)
		return -EFAULT;
@@ -256,10 +259,11 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   u64 guestsrc, unsigned long n)
					   unsigned long guestsrc,
					   unsigned long n)
{
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if (guestsrc + n > memsize)
		return -EFAULT;
+10 −4
Original line number Diff line number Diff line
@@ -20,7 +20,7 @@
#include "kvm-s390.h"
#include "gaccess.h"

static int handle_lctg(struct kvm_vcpu *vcpu)
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
@@ -30,7 +30,7 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctg++;
	vcpu->stat.instruction_lctlg++;
	if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
		return -ENOTSUPP;

@@ -38,9 +38,12 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
	if (base2)
		useraddr += vcpu->arch.guest_gprs[base2];

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
		   disp2);

	do {
@@ -74,6 +77,9 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
	if (base2)
		useraddr += vcpu->arch.guest_gprs[base2];

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
		   disp2);

@@ -99,7 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
	[0xae] = kvm_s390_handle_sigp,
	[0xb2] = kvm_s390_handle_priv,
	[0xb7] = handle_lctl,
	[0xeb] = handle_lctg,
	[0xeb] = handle_lctlg,
};

static int handle_noop(struct kvm_vcpu *vcpu)
Loading