Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ea4114bc authored by Junaid Shahid, committed by Radim Krčmář
Browse files

kvm: x86: mmu: Rename spte_is_locklessly_modifiable()



This change renames spte_is_locklessly_modifiable() to
spte_can_locklessly_be_made_writable() to distinguish it from other
forms of lockless modifications. The full set of lockless modifications
is covered by spte_has_volatile_bits().

Signed-off-by: Junaid Shahid <junaids@google.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 27959a44
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -474,7 +474,7 @@ static u64 __get_spte_lockless(u64 *sptep)
}
#endif

static bool spte_is_locklessly_modifiable(u64 spte)
static bool spte_can_locklessly_be_made_writable(u64 spte)
{
	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
@@ -488,7 +488,7 @@ static bool spte_has_volatile_bits(u64 spte)
	 * also, it can help us to get a stable is_writable_pte()
	 * to ensure tlb flush is not missed.
	 */
	if (spte_is_locklessly_modifiable(spte))
	if (spte_can_locklessly_be_made_writable(spte))
		return true;

	if (!shadow_accessed_mask)
@@ -557,7 +557,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
	 * we always atomically update it, see the comments in
	 * spte_has_volatile_bits().
	 */
	if (spte_is_locklessly_modifiable(old_spte) &&
	if (spte_can_locklessly_be_made_writable(old_spte) &&
	      !is_writable_pte(new_spte))
		ret = true;

@@ -1213,7 +1213,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
	u64 spte = *sptep;

	if (!is_writable_pte(spte) &&
	      !(pt_protect && spte_is_locklessly_modifiable(spte)))
	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
		return false;

	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
@@ -2975,7 +2975,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
	 * Currently, to simplify the code, only the spte write-protected
	 * by dirty-log can be fast fixed.
	 */
	if (!spte_is_locklessly_modifiable(spte))
	if (!spte_can_locklessly_be_made_writable(spte))
		goto exit;

	/*