Documentation/virtual/kvm/locking.txt (+2 −2)

@@ -89,7 +89,7 @@ In mmu_spte_clear_track_bits():
    old_spte = *spte;
 
    /* 'if' condition is satisfied. */
-   if (old_spte.Accssed == 1 &&
+   if (old_spte.Accessed == 1 &&
         old_spte.W == 0)
       spte = 0ull;
 
 on fast page fault path:
@@ -102,7 +102,7 @@ In mmu_spte_clear_track_bits():
 
    old_spte = xchg(spte, 0ull)
 
-   if (old_spte.Accssed == 1)
+   if (old_spte.Accessed == 1)
       kvm_set_pfn_accessed(spte.pfn);
    if (old_spte.Dirty == 1)
      kvm_set_pfn_dirty(spte.pfn);
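These two hunks sit in the walk-through of why mmu_spte_clear_track_bits() must fetch the old SPTE with an atomic xchg instead of a plain read-then-store: the fast page fault path can set the Accessed and Dirty bits locklessly in between, and a non-atomic clear would drop them. A minimal user-space sketch of the pattern, with hypothetical bit positions standing in for the real SPTE layout:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical bit positions; the real SPTE layout differs. */
    #define SPTE_ACCESSED (1ull << 5)
    #define SPTE_DIRTY    (1ull << 6)

    static void clear_spte(_Atomic uint64_t *sptep)
    {
            /*
             * Exchange the SPTE with zero and inspect the value that
             * was really there.  "old = *sptep; *sptep = 0;" could miss
             * a bit set by the lock-free fast page fault path between
             * the read and the store.
             */
            uint64_t old_spte = atomic_exchange(sptep, 0ull);

            if (old_spte & SPTE_ACCESSED)
                    printf("mark pfn accessed\n"); /* kvm_set_pfn_accessed() */
            if (old_spte & SPTE_DIRTY)
                    printf("mark pfn dirty\n");    /* kvm_set_pfn_dirty() */
    }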
arch/x86/kvm/mmu.c (+1 −1)

@@ -523,7 +523,7 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
 }
 
 /* Rules for using mmu_spte_update:
- * Update the state bits, it means the mapped pfn is not changged.
+ * Update the state bits, it means the mapped pfn is not changed.
  *
  * Whenever we overwrite a writable spte with a read-only one we
  * should flush remote TLBs. Otherwise rmap_write_protect
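The rule this comment states reduces to a simple predicate: a remote TLB flush is required exactly when the update removes write permission, because another vCPU may still hold a stale writable translation. A hedged sketch of that check (the bit position and helper names are illustrative, not mmu.c's):

    #include <stdbool.h>
    #include <stdint.h>

    #define SPTE_WRITABLE (1ull << 1)  /* illustrative bit position */

    static bool is_writable(uint64_t spte)
    {
            return spte & SPTE_WRITABLE;
    }

    /* True when an SPTE update demands a remote TLB flush. */
    static bool spte_update_needs_flush(uint64_t old_spte, uint64_t new_spte)
    {
            /* Dropping the W bit: a remote vCPU may still cache a
             * writable translation, so flush before trusting R/O. */
            return is_writable(old_spte) && !is_writable(new_spte);
    }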
arch/x86/kvm/pmu_intel.c (+1 −1)

@@ -93,7 +93,7 @@ static unsigned intel_find_fixed_event(int idx)
 	return intel_arch_events[fixed_pmc_events[idx]].event_type;
 }
 
-/* check if a PMC is enabled by comparising it with globl_ctrl bits. */
+/* check if a PMC is enabled by comparing it with global_ctrl bits. */
 static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
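The check this comment describes tests the PMC's enable bit in the guest's IA32_PERF_GLOBAL_CTRL image: general-purpose counter i is gated by bit i, fixed counter i by bit 32 + i. A sketch with the index layout assumed rather than taken from pmu_intel.c:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the kvm_pmu field the comment names. */
    struct pmu_state {
            uint64_t global_ctrl;  /* mirrors guest IA32_PERF_GLOBAL_CTRL */
    };

    /* GP counter i uses bit i; fixed counter i uses bit 32 + i,
     * so a counter's global index maps straight onto its bit. */
    static bool pmc_is_enabled(const struct pmu_state *pmu, unsigned int idx)
    {
            return (pmu->global_ctrl >> idx) & 1;
    }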
arch/x86/kvm/svm.c (+1 −1)

@@ -1572,7 +1572,7 @@ static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
 	/*
-	 * Any change of EFLAGS.VM is accompained by a reload of SS
+	 * Any change of EFLAGS.VM is accompanied by a reload of SS
 	 * (caused by either a task switch or an inter-privilege IRET),
 	 * so we do not need to update the CPL here.
 	 */
arch/x86/kvm/vmx.c (+1 −1)

@@ -3364,7 +3364,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	/*
 	 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
-	 * but due to arrata below it can't be used. Workaround is to use
+	 * but due to errata below it can't be used. Workaround is to use
 	 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
 	 *
 	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
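The workaround this comment refers to is the generic VM-entry/VM-exit MSR-load area: rather than setting VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, an entry for the MSR is appended to the autoload lists so the CPU swaps the guest and host values on every transition. A structural sketch, assuming a simplified autoload table rather than vmx.c's real machinery (the entry layout follows the Intel SDM):

    #include <stdint.h>

    #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f  /* architectural MSR number */

    /* One VMX MSR autoload/autostore entry, per the Intel SDM. */
    struct vmx_msr_entry {
            uint32_t index;
            uint32_t reserved;
            uint64_t value;
    };

    #define NR_AUTOLOAD_MSRS 8

    struct msr_autoload {
            unsigned int nr;
            struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
            struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
    };

    /* Queue an MSR so hardware swaps it on every VM entry/exit,
     * sidestepping the buggy VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL bit. */
    static int add_switch_msr(struct msr_autoload *m, uint32_t msr,
                              uint64_t guest_val, uint64_t host_val)
    {
            if (m->nr >= NR_AUTOLOAD_MSRS)
                    return -1;
            m->guest[m->nr].index = msr;
            m->guest[m->nr].value = guest_val;
            m->host[m->nr].index  = msr;
            m->host[m->nr].value  = host_val;
            m->nr++;
            return 0;
    }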