Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb3541f1 authored by Andrea Gelmini, committed by Paolo Bonzini
Browse files

KVM: x86: Fix typos

parent 960cb306
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -89,7 +89,7 @@ In mmu_spte_clear_track_bits():
   old_spte = *spte;

   /* 'if' condition is satisfied. */
-   if (old_spte.Accssed == 1 &&
+   if (old_spte.Accessed == 1 &&
        old_spte.W == 0)
      spte = 0ull;
                                         on fast page fault path:
@@ -102,7 +102,7 @@ In mmu_spte_clear_track_bits():
      old_spte = xchg(spte, 0ull)


-   if (old_spte.Accssed == 1)
+   if (old_spte.Accessed == 1)
      kvm_set_pfn_accessed(spte.pfn);
   if (old_spte.Dirty == 1)
      kvm_set_pfn_dirty(spte.pfn);
+1 −1
Original line number Diff line number Diff line
@@ -523,7 +523,7 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
}

/* Rules for using mmu_spte_update:
- * Update the state bits, it means the mapped pfn is not changged.
+ * Update the state bits, it means the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
+1 −1
Original line number Diff line number Diff line
@@ -93,7 +93,7 @@ static unsigned intel_find_fixed_event(int idx)
	return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

-/* check if a PMC is enabled by comparising it with globl_ctrl bits. */
+/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+1 −1
Original line number Diff line number Diff line
@@ -1572,7 +1572,7 @@ static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
       /*
-        * Any change of EFLAGS.VM is accompained by a reload of SS
+        * Any change of EFLAGS.VM is accompanied by a reload of SS
        * (caused by either a task switch or an inter-privilege IRET),
        * so we do not need to update the CPL here.
        */
+1 −1
Original line number Diff line number Diff line
@@ -3364,7 +3364,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)

	/*
	 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
-	 * but due to arrata below it can't be used. Workaround is to use
+	 * but due to errata below it can't be used. Workaround is to use
	 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
	 *
	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
Loading