
Commit b07d41b7 authored by Linus Torvalds

Merge branch 'kvm-updates/2.6.33' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/2.6.33' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: get rid of kvm_create_vm() unused label warning on s390
  KVM: powerpc: Fix mtsrin in book3s_64 mmu
  KVM: ia64: fix build breakage due to host spinlock change
  KVM: x86: Extend KVM_SET_VCPU_EVENTS with selective updates
  KVM: LAPIC: make sure IRR bitmap is scanned after vm load
  KVM: Fix possible circular locking in kvm_vm_ioctl_assign_device()
  KVM: MMU: remove prefault from invlpg handler
parents 8fa4d870 b4329db0
+9 −1
@@ -685,7 +685,7 @@ struct kvm_vcpu_events {
		__u8 pad;
	} nmi;
	__u32 sipi_vector;
-	__u32 flags;   /* must be zero */
+	__u32 flags;
};

4.30 KVM_SET_VCPU_EVENTS
@@ -701,6 +701,14 @@ vcpu.

See KVM_GET_VCPU_EVENTS for the data structure.

Fields that may be modified asynchronously by running VCPUs can be excluded
from the update. These fields are nmi.pending and sipi_vector. Keep the
corresponding bits in the flags field cleared to suppress overwriting the
current in-kernel state. The bits are:

KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
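
For illustration only (not part of the patch), a minimal userspace sketch of a
selective update, assuming vcpu_fd is an already-open KVM vcpu file descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustration only: vcpu_fd is assumed to be an open KVM vcpu fd. */
int set_events_preserving_async_fields(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	/* Read the current in-kernel state first. */
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	/*
	 * Leave KVM_VCPUEVENT_VALID_NMI_PENDING and
	 * KVM_VCPUEVENT_VALID_SIPI_VECTOR cleared so the in-kernel
	 * nmi.pending and sipi_vector values are not overwritten.
	 */
	events.flags = 0;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}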


5. The kvm_run structure

+6 −3
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
#define _vmm_raw_spin_lock(x)	 do {}while(0)
#define _vmm_raw_spin_unlock(x) do {}while(0)
#else
typedef struct {
	volatile unsigned int lock;
} vmm_spinlock_t;
#define _vmm_raw_spin_lock(x)						\
	do {								\
		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)

#define _vmm_raw_spin_unlock(x)				\
	do { barrier();				\
-		((spinlock_t *)x)->raw_lock.lock = 0; } \
+		((vmm_spinlock_t *)x)->lock = 0; } \
while (0)
#endif

-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
enum {
	I_TLB = 1,
	D_TLB = 2
+2 −2
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
	return ;
}

-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
{
	_vmm_raw_spin_lock(lock);
}

-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
{
	_vmm_raw_spin_unlock(lock);
}
+1 −1
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
	u64 i, dirty_pages = 1;
	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;

	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
+21 −1
@@ -390,6 +390,26 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000|| 0b0		VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
@@ -400,7 +420,7 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
-	rs |= ((value >> 27) & 0xf) << 9;
+	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}
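
For illustration only (not part of the patch), a stand-alone sketch that mirrors
the rb/rs assembly above with a hypothetical srnum/value pair; the valid-bit and
index setup elided in this excerpt is not reproduced. Per the comment table, the
corrected line takes the three Ks/Kp/N bits from (RS)33:35, which correspond to
bits 30:28 of the low word, rather than four bits starting one position lower:

#include <stdio.h>

/* Hypothetical values, mirroring only the shifts shown in the hunk above. */
int main(void)
{
	unsigned int srnum = 0x3;                 /* hypothetical SR number */
	unsigned long long value = 0x76543210ULL; /* hypothetical SR value */
	unsigned long long rb = 0, rs = 0;

	/* ESID = srnum (valid bit and index setup elided here) */
	rb |= (unsigned long long)(srnum & 0xf) << 28;

	/* VSID = low 28 bits of the SR value */
	rs |= (value & 0xfffffff) << 12;
	/* flags (Ks Kp N) = (RS)33:35, i.e. bits 30:28 of the low word */
	rs |= ((value >> 28) & 0x7) << 9;

	printf("rb = 0x%llx, rs = 0x%llx\n", rb, rs);
	return 0;
}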