Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 30d8d8d6 authored by Junaid Shahid, committed by Greg Kroah-Hartman
Browse files

kvm: Convert kvm_lock to a mutex



commit 0d9ce162cf46c99628cc5da9510b959c7976735b upstream.

It doesn't seem as if there is any particular need for kvm_lock to be a
spinlock, so convert the lock to a mutex so that sleepable functions (in
particular cond_resched()) can be called while holding it.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent a991063c
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
@@ -15,8 +15,6 @@ The acquisition orders for mutexes are as follows:

On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.

For spinlocks, kvm_lock is taken outside kvm->mmu_lock.

Everything else is a leaf: no other lock is taken inside the critical
sections.

@@ -169,7 +167,7 @@ which time it will be set using the Dirty tracking mechanism described above.
------------

Name:		kvm_lock
Type:		spinlock_t
Type:		mutex
Arch:		any
Protects:	- vm_list

+2 −2
Original line number Diff line number Diff line
@@ -2110,13 +2110,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	mutex_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);
	mutex_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

+2 −2
Original line number Diff line number Diff line
@@ -5819,7 +5819,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
	int nr_to_scan = sc->nr_to_scan;
	unsigned long freed = 0;

	spin_lock(&kvm_lock);
	mutex_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
@@ -5869,7 +5869,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
		break;
	}

	spin_unlock(&kvm_lock);
	mutex_unlock(&kvm_lock);
	return freed;
}

+7 −7
Original line number Diff line number Diff line
@@ -6529,7 +6529,7 @@ static void kvm_hyperv_tsc_notifier(void)
	struct kvm_vcpu *vcpu;
	int cpu;

	spin_lock(&kvm_lock);
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_make_mclock_inprogress_request(kvm);

@@ -6555,7 +6555,7 @@ static void kvm_hyperv_tsc_notifier(void)

		spin_unlock(&ka->pvclock_gtod_sync_lock);
	}
	spin_unlock(&kvm_lock);
	mutex_unlock(&kvm_lock);
}
#endif

@@ -6613,17 +6613,17 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va

	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);

	spin_lock(&kvm_lock);
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != freq->cpu)
				continue;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			if (vcpu->cpu != smp_processor_id())
			if (vcpu->cpu != raw_smp_processor_id())
				send_ipi = 1;
		}
	}
	spin_unlock(&kvm_lock);
	mutex_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
@@ -6749,12 +6749,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
	atomic_set(&kvm_guest_has_master_clock, 0);
	spin_unlock(&kvm_lock);
	mutex_unlock(&kvm_lock);
}

static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
+1 −1
Original line number Diff line number Diff line
@@ -141,7 +141,7 @@ static inline bool is_error_page(struct page *page)

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
Loading