arch/x86/include/asm/kvm_host.h +0 −24

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -652,20 +652,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 	return (struct kvm_mmu_page *)page_private(page);
 }
 
-static inline u16 kvm_read_fs(void)
-{
-	u16 seg;
-	asm("mov %%fs, %0" : "=g"(seg));
-	return seg;
-}
-
-static inline u16 kvm_read_gs(void)
-{
-	u16 seg;
-	asm("mov %%gs, %0" : "=g"(seg));
-	return seg;
-}
-
 static inline u16 kvm_read_ldt(void)
 {
 	u16 ldt;
@@ -673,16 +659,6 @@ static inline u16 kvm_read_ldt(void)
 	return ldt;
 }
 
-static inline void kvm_load_fs(u16 sel)
-{
-	asm("mov %0, %%fs" : : "rm"(sel));
-}
-
-static inline void kvm_load_gs(u16 sel)
-{
-	asm("mov %0, %%gs" : : "rm"(sel));
-}
-
 static inline void kvm_load_ldt(u16 sel)
 {
 	asm("lldt %0" : : "rm"(sel));
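The four helpers removed here reloaded %fs and %gs with a bare mov. That instruction takes a #GP if the saved selector points into the LDT and userspace has since rewritten or shrunk the LDT, so restoring host state after a vmexit could oops the kernel. The replacements used in the two callers below, savesegment() and loadsegment() from asm/segment.h, go through the kernel's exception-table machinery and fall back to loading a null selector when the load faults. A simplified sketch of that fixup pattern, written from memory rather than copied from segment.h, so treat the details as illustrative:

/*
 * Sketch of the fault-safe selector load behind loadsegment().
 * The real macro lives in arch/x86/include/asm/segment.h; names
 * and exact layout here are approximate.
 */
#define loadsegment_sketch(seg, value)				\
	asm volatile("1:	movl %k0, %%" #seg "\n"		\
		     "2:\n"					\
		     ".section .fixup, \"ax\"\n"		\
		     "3:	movl %k1, %%" #seg "\n"		\
		     "	jmp 2b\n"				\
		     ".previous\n"				\
		     _ASM_EXTABLE(1b, 3b)			\
		     : : "r" (value), "r" (0) : "memory")

If the mov at 1: faults, the fixup at 3: loads a null selector and execution resumes at 2:, instead of the unhandled #GP the open-coded kvm_load_fs()/kvm_load_gs() would take.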
arch/x86/kvm/svm.c +10 −5

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3163,8 +3163,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	sync_lapic_to_cr8(vcpu);
 
 	save_host_msrs(vcpu);
-	fs_selector = kvm_read_fs();
-	gs_selector = kvm_read_gs();
+	savesegment(fs, fs_selector);
+	savesegment(gs, gs_selector);
 	ldt_selector = kvm_read_ldt();
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 	/* required for live migration with NPT */
@@ -3251,10 +3251,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-	kvm_load_fs(fs_selector);
-	kvm_load_gs(gs_selector);
-	kvm_load_ldt(ldt_selector);
 	load_host_msrs(vcpu);
+	loadsegment(fs, fs_selector);
+#ifdef CONFIG_X86_64
+	load_gs_index(gs_selector);
+	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+	loadsegment(gs, gs_selector);
+#endif
+	kvm_load_ldt(ldt_selector);
 
 	reload_tss(vcpu);
 
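Two things change in svm_vcpu_run() beyond swapping kvm_read_*() for savesegment(). First, the segment reload moves after load_host_msrs(). Second, 64-bit hosts now reload %gs with load_gs_index(), which performs the load inside a swapgs pair so it is fault-safe; any %gs selector write still destroys the hidden base it loads over, and that base is the user's, parked in MSR_KERNEL_GS_BASE while the kernel runs, so it has to be rewritten from current->thread.gs afterwards. The sequence, pulled out of the patch into a hypothetical helper for illustration (the name reload_host_gs is mine, not the kernel's):

/* Hypothetical helper; the patch open-codes this inline. */
static inline void reload_host_gs(u16 gs_selector)
{
#ifdef CONFIG_X86_64
	/* Fault-safe selector load; clobbers the inactive GS base. */
	load_gs_index(gs_selector);
	/* Restore the user's GS base, which lives in
	 * MSR_KERNEL_GS_BASE while we execute in the kernel. */
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
#else
	/* 32-bit hosts have no GS base MSR to preserve. */
	loadsegment(gs, gs_selector);
#endif
}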
arch/x86/kvm/vmx.c +9 −15

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -803,7 +803,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	 */
 	vmx->host_state.ldt_sel = kvm_read_ldt();
 	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
-	vmx->host_state.fs_sel = kvm_read_fs();
+	savesegment(fs, vmx->host_state.fs_sel);
 	if (!(vmx->host_state.fs_sel & 7)) {
 		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
 		vmx->host_state.fs_reload_needed = 0;
@@ -811,7 +811,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 		vmcs_write16(HOST_FS_SELECTOR, 0);
 		vmx->host_state.fs_reload_needed = 1;
 	}
-	vmx->host_state.gs_sel = kvm_read_gs();
+	savesegment(gs, vmx->host_state.gs_sel);
 	if (!(vmx->host_state.gs_sel & 7))
 		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
 	else {
@@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 {
-	unsigned long flags;
-
 	if (!vmx->host_state.loaded)
 		return;
 
 	++vmx->vcpu.stat.host_state_reload;
 	vmx->host_state.loaded = 0;
 	if (vmx->host_state.fs_reload_needed)
-		kvm_load_fs(vmx->host_state.fs_sel);
+		loadsegment(fs, vmx->host_state.fs_sel);
 	if (vmx->host_state.gs_ldt_reload_needed) {
 		kvm_load_ldt(vmx->host_state.ldt_sel);
-		/*
-		 * If we have to reload gs, we must take care to
-		 * preserve our gs base.
-		 */
-		local_irq_save(flags);
-		kvm_load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
-		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+		load_gs_index(vmx->host_state.gs_sel);
+		wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+		loadsegment(gs, vmx->host_state.gs_sel);
 #endif
-		local_irq_restore(flags);
 	}
 	reload_tss();
 #ifdef CONFIG_X86_64
@@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-	vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
-	vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
+	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
+	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
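The VMX changes mirror the SVM ones, with two points worth calling out. The "& 7" tests in vmx_save_host_state() look at the selector's low three bits, RPL (bits 0-1) and the TI/LDT flag (bit 2); only a selector with all three clear is a plain RPL-0 GDT selector that the CPU can restore from HOST_FS_SELECTOR/HOST_GS_SELECTOR on vmexit, and anything else is flagged for a manual reload. A restatement of that test as a hypothetical helper:

/*
 * Selector layout: | index (13 bits) | TI (1 bit) | RPL (2 bits) |.
 * Hypothetical helper restating the "& 7" test used above.
 */
static inline bool host_sel_reload_needed(u16 sel)
{
	/* TI=1 (LDT) or RPL!=0: not a plain kernel GDT selector, so
	 * the vmexit cannot restore it and we must reload by hand. */
	return (sel & 7) != 0;
}

In __vmx_load_host_state() the irq-disable dance also disappears: the old code had to mask interrupts while it reloaded %gs and rewrote MSR_GS_BASE, because an interrupt arriving between the two would run with a bogus kernel GS base. load_gs_index() performs the selector load inside its own swapgs pair, leaving the kernel's live GS base untouched, so only MSR_KERNEL_GS_BASE needs patching up afterwards and no interrupt masking is required. Finally, vmx_vcpu_setup() now seeds HOST_FS_SELECTOR/HOST_GS_SELECTOR with 0 rather than the current selectors, since vmx_save_host_state() writes the real values before the vcpu runs anyway.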