
Commit a662b813 authored by Tony Luck, committed by Marcelo Tosatti

KVM: ia64: fix build breakage due to host spinlock change



Len Brown pointed out that allmodconfig is broken for
ia64 because of:

arch/ia64/kvm/vmm.c: In function 'vmm_spin_unlock':
arch/ia64/kvm/vmm.c:70: error: 'spinlock_t' has no member named 'raw_lock'

KVM has its own spinlock routines. It should not depend on the base kernel
spinlock_t type (which changed when ia64 switched to ticket locks).  Define
its own vmm_spinlock_t type.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent dab4b911
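
To make the intent concrete, here is a minimal sketch (illustrative only, not the kernel sources; it uses GCC builtins rather than the hand-written ia64 code in _vmm_raw_spin_lock) of the semantics the VMM expects from its private one-word lock: take it by atomically swapping a non-zero value into the word, release it with a compiler barrier followed by a plain store of 0. Nothing here depends on the layout of the host's spinlock_t, which is exactly the dependency the patch removes.

	/* Sketch only -- not the kernel sources.  The typedef mirrors the one
	 * added by this patch; the helpers use GCC builtins just to show the
	 * idea behind _vmm_raw_spin_lock/_vmm_raw_spin_unlock. */
	typedef struct {
		volatile unsigned int lock;	/* 0 = free, non-zero = held */
	} vmm_spinlock_t;

	static inline void sketch_vmm_spin_lock(vmm_spinlock_t *lk)
	{
		/* Atomically swap 1 into the word; keep trying while it was held. */
		while (__sync_lock_test_and_set(&lk->lock, 1))
			while (lk->lock)
				;			/* spin until the word looks free */
	}

	static inline void sketch_vmm_spin_unlock(vmm_spinlock_t *lk)
	{
		__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
		lk->lock = 0;				/* plain store releases the lock */
	}
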
+6 −3
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
#define _vmm_raw_spin_lock(x)	 do {}while(0)
#define _vmm_raw_spin_unlock(x) do {}while(0)
#else
+typedef struct {
+	volatile unsigned int lock;
+} vmm_spinlock_t;
#define _vmm_raw_spin_lock(x)						\
	do {								\
		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)

#define _vmm_raw_spin_unlock(x)				\
	do { barrier();				\
-		((spinlock_t *)x)->raw_lock.lock = 0; } \
+		((vmm_spinlock_t *)x)->lock = 0; } \
while (0)
#endif

-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
enum {
	I_TLB = 1,
	D_TLB = 2
+2 −2
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
	return ;
}

-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
{
	_vmm_raw_spin_lock(lock);
}

-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
{
	_vmm_raw_spin_unlock(lock);
}
+1 −1
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
	u64 i, dirty_pages = 1;
	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;

	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
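
For illustration only (this is not part of the patch, and the loop body below is a guess at what such a caller does, not the actual function): once lock is a vmm_spinlock_t *, the rest of mark_pages_dirty() can simply bracket the dirty-bitmap update with the VMM's own lock routines, with no reference to the host spinlock_t layout.

	/* Hypothetical continuation of mark_pages_dirty(), sketching how the
	 * VMM lock is used; the real function body may differ. */
	vmm_spin_lock(lock);
	for (i = 0; i < dirty_pages; i++)
		set_bit(base_gfn + i, dirty_bitmap);
	vmm_spin_unlock(lock);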