
Commit b34cb590 authored by Takuya Yoshikawa, committed by Gleb Natapov

KVM: Make kvm_mmu_change_mmu_pages() take mmu_lock by itself



There is no reason to make callers take mmu_lock: kvm_mmu_change_mmu_pages()
and kvm_mmu_slot_remove_write_access() do not need to be protected together
by mmu_lock in kvm_arch_commit_memory_region(), since the former calls
kvm_mmu_commit_zap_page() and flushes TLBs by itself.
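
(For illustration: a minimal userspace sketch of the locking pattern this
change adopts, with the lock taken inside the callee rather than by its
callers. The names below — struct cache, cache_set_limit() — are
hypothetical stand-ins for the KVM MMU page accounting, not KVM code.)

#include <pthread.h>
#include <stdio.h>

struct cache {
	pthread_mutex_t lock;	/* plays the role of kvm->mmu_lock */
	unsigned int used;
	unsigned int max;
};

/* Takes the lock by itself, as kvm_mmu_change_mmu_pages() now does. */
static void cache_set_limit(struct cache *c, unsigned int goal)
{
	pthread_mutex_lock(&c->lock);
	while (c->used > goal)
		c->used--;	/* stands in for zapping shadow pages */
	c->max = goal;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	/* The caller needs no locking of its own, like the ioctl path below. */
	struct cache c = { PTHREAD_MUTEX_INITIALIZER, 10, 10 };

	cache_set_limit(&c, 4);
	printf("used=%u max=%u\n", c.used, c.max);
	return 0;
}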

Note: we do not need to protect kvm->arch.n_requested_mmu_pages with
mmu_lock, as can be seen from the fact that it is read locklessly.
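
(For illustration: the "written under one lock, read locklessly" pattern the
note relies on. In the kernel, writers to n_requested_mmu_pages are
serialized by slots_lock and the aligned word read is naturally atomic; a
strict userspace C11 analogue would spell that out with a relaxed atomic, as
in this hypothetical sketch.)

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint n_requested;	/* plays n_requested_mmu_pages */

/* Writers are serialized against each other by slots_lock. */
static void set_requested(unsigned int n)
{
	pthread_mutex_lock(&slots_lock);
	atomic_store_explicit(&n_requested, n, memory_order_relaxed);
	pthread_mutex_unlock(&slots_lock);
}

/* Readers take no lock at all, like kvm_arch_commit_memory_region(). */
static unsigned int get_requested(void)
{
	return atomic_load_explicit(&n_requested, memory_order_relaxed);
}

int main(void)
{
	set_requested(64);
	printf("requested=%u\n", get_requested());
	return 0;
}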

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent e12091ce
arch/x86/kvm/mmu.c  +4 −0
@@ -2143,6 +2143,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	 * change the value
 	 */
 
+	spin_lock(&kvm->mmu_lock);
+
 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
 		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
 			!list_empty(&kvm->arch.active_mmu_pages)) {
@@ -2157,6 +2159,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	}
 
 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
+
+	spin_unlock(&kvm->mmu_lock);
 }
 
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
arch/x86/kvm/x86.c  +4 −5
@@ -3270,12 +3270,10 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 		return -EINVAL;
 
 	mutex_lock(&kvm->slots_lock);
-	spin_lock(&kvm->mmu_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-	spin_unlock(&kvm->mmu_lock);
 	mutex_unlock(&kvm->slots_lock);
 	return 0;
 }
@@ -6894,7 +6892,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	if (!kvm->arch.n_requested_mmu_pages)
 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
-	spin_lock(&kvm->mmu_lock);
 	if (nr_mmu_pages)
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	/*
@@ -6902,9 +6899,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	 * Existing largepage mappings are destroyed here and new ones will
 	 * not be created until the end of the logging.
 	 */
-	if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+	if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
-	spin_unlock(&kvm->mmu_lock);
+		spin_unlock(&kvm->mmu_lock);
+	}
 	/*
 	 * If memory slot is created, or moved, we need to clear all
 	 * mmio sptes.