Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e7c1d14e authored by Alexander Graf, committed by Avi Kivity
Browse files

KVM: PPC: Make invalidation code more reliable



There is a race condition in the pte invalidation code path where we can't
be sure if a pte was invalidated already. So let's move the spin lock around
to get rid of the race.

Signed-off-by: Alexander Graf <agraf@suse.de>
parent 2e602847
Loading
Loading
Loading
Loading
+8 −6
Original line number Diff line number Diff line
@@ -92,10 +92,6 @@ static void free_pte_rcu(struct rcu_head *head)

static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	/* pte already invalidated? */
	if (hlist_unhashed(&pte->list_pte))
		return;

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
@@ -103,18 +99,24 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)

	spin_lock(&vcpu->arch.mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu->arch.mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	spin_unlock(&vcpu->arch.mmu_lock);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	spin_unlock(&vcpu->arch.mmu_lock);

	vcpu->arch.hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}