arch/powerpc/include/asm/kvm_book3s_asm.h +0 −4

@@ -104,10 +104,6 @@ struct kvmppc_host_state {
         u8 napping;
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-        /*
-         * hwthread_req/hwthread_state pair is used to pull sibling threads
-         * out of guest on pre-ISAv3.0B CPUs where threads share MMU.
-         */
         u8 hwthread_req;
         u8 hwthread_state;
         u8 host_ipi;
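Note for readers: the two fields this hunk touches carry a small request/acknowledge protocol between KVM and the platform idle code. KVM raises hwthread_req to ask a napping sibling thread to come to kvm_start_guest when it wakes (and clears it to release the thread), while the thread reports its own whereabouts in hwthread_state. The sketch below only restates that vocabulary: KVM_HWTHREAD_IN_KERNEL and KVM_HWTHREAD_IN_IDLE appear in the assembly further down, but the third name and all numeric values here are illustrative assumptions, not quotes from the header.

/* Hedged sketch of the hwthread handshake vocabulary (values assumed). */
enum kvm_hwthread_state_sketch {
        KVM_HWTHREAD_IN_KERNEL = 0,     /* running ordinary host kernel code */
        KVM_HWTHREAD_IN_IDLE   = 1,     /* parked in the platform idle code */
        KVM_HWTHREAD_IN_KVM    = 2,     /* handed over to KVM (assumed name) */
};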
arch/powerpc/kernel/idle_book3s.S +11 −24

@@ -319,20 +319,13 @@ enter_winkle:
 /*
  * r3 - PSSCR value corresponding to the requested stop state.
  */
+power_enter_stop:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-power_enter_stop_kvm_rm:
-        /*
-         * This is currently unused because POWER9 KVM does not have to
-         * gather secondary threads into sibling mode, but the code is
-         * here in case that function is required.
-         *
-         * Tell KVM we're entering idle.
-         */
+        /* Tell KVM we're entering idle */
         li      r4,KVM_HWTHREAD_IN_IDLE
         /* DO THIS IN REAL MODE! See comment above. */
         stb     r4,HSTATE_HWTHREAD_STATE(r13)
 #endif
-power_enter_stop:
         /*
         * Check if we are executing the lite variant with ESL=EC=0
         */

@@ -496,18 +489,6 @@ pnv_powersave_wakeup_mce:
         b       pnv_powersave_wakeup
 
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-kvm_start_guest_check:
-        li      r0,KVM_HWTHREAD_IN_KERNEL
-        stb     r0,HSTATE_HWTHREAD_STATE(r13)
-        /* Order setting hwthread_state vs. testing hwthread_req */
-        sync
-        lbz     r0,HSTATE_HWTHREAD_REQ(r13)
-        cmpwi   r0,0
-        beqlr
-        b       kvm_start_guest
-#endif
-
 /*
  * Called from reset vector for powersave wakeups.
  * cr3 - set to gt if waking up with partial/complete hypervisor state loss

@@ -532,9 +513,15 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
         mr      r3,r12
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-BEGIN_FTR_SECTION
-        bl      kvm_start_guest_check
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+        li      r0,KVM_HWTHREAD_IN_KERNEL
+        stb     r0,HSTATE_HWTHREAD_STATE(r13)
+        /* Order setting hwthread_state vs. testing hwthread_req */
+        sync
+        lbz     r0,HSTATE_HWTHREAD_REQ(r13)
+        cmpwi   r0,0
+        beq     1f
+        b       kvm_start_guest
+1:
 #endif
 
         /* Return SRR1 from power7_nap() */
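The last hunk drops the separate kvm_start_guest_check helper and inlines the wakeup-time handshake again: mark the thread as back in the kernel, execute a sync, then test hwthread_req and branch to kvm_start_guest if KVM wants the thread. The following is a hypothetical, self-contained C model of that sequence, not kernel code; it reuses the enum from the sketch above, a seq_cst fence stands in for the sync instruction, and a function call stands in for the branch to kvm_start_guest.

#include <stdatomic.h>

struct hstate_model {                   /* models the two PACA fields */
        atomic_uchar hwthread_state;    /* HSTATE_HWTHREAD_STATE(r13) */
        atomic_uchar hwthread_req;      /* HSTATE_HWTHREAD_REQ(r13) */
};

static void enter_kvm(void) { /* stand-in for kvm_start_guest */ }

static void wakeup_kvm_check(struct hstate_model *hs)
{
        /* Tell KVM this thread is back in the kernel... */
        atomic_store(&hs->hwthread_state, KVM_HWTHREAD_IN_KERNEL);

        /* ...and order that store against the load of hwthread_req
         * (the assembly uses a full "sync" here). */
        atomic_thread_fence(memory_order_seq_cst);

        /* If KVM asked for this thread while it napped, hand it over. */
        if (atomic_load(&hs->hwthread_req))
                enter_kvm();            /* in the kernel this path does not return */
}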
arch/powerpc/kvm/book3s_hv.c +1 −12

@@ -2117,15 +2117,6 @@ static int kvmppc_grab_hwthread(int cpu)
         struct paca_struct *tpaca;
         long timeout = 10000;
 
-        /*
-         * ISA v3.0 idle routines do not set hwthread_state or test
-         * hwthread_req, so they can not grab idle threads.
-         */
-        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-                WARN(1, "KVM: can not control sibling threads\n");
-                return -EBUSY;
-        }
-
         tpaca = &paca[cpu];
 
         /* Ensure the thread won't go into the kernel if it wakes */

@@ -2160,12 +2151,10 @@ static void kvmppc_release_hwthread(int cpu)
         struct paca_struct *tpaca;
 
         tpaca = &paca[cpu];
+        tpaca->kvm_hstate.hwthread_req = 0;
         tpaca->kvm_hstate.kvm_vcpu = NULL;
         tpaca->kvm_hstate.kvm_vcore = NULL;
         tpaca->kvm_hstate.kvm_split_mode = NULL;
-        if (!cpu_has_feature(CPU_FTR_ARCH_300))
-                tpaca->kvm_hstate.hwthread_req = 0;
 }
 
 static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
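With the CPU_FTR_ARCH_300 early return gone, kvmppc_grab_hwthread() again applies to every CPU: it raises hwthread_req and then waits, with a bounded timeout, for the sibling thread to stop running host kernel code, and kvmppc_release_hwthread() clears the request unconditionally. Below is a simplified model of that request-and-poll pattern, reusing the toy hstate_model from the sketch above; the helper names are made up and this is not the kernel's kvmppc_grab_hwthread()/kvmppc_release_hwthread().

/* Simplified model of the grab/release pattern restored for all CPUs. */
static int grab_sibling_thread(struct hstate_model *hs)
{
        long timeout = 10000;

        /* Ask the thread to come to KVM when it next wakes. */
        atomic_store(&hs->hwthread_req, 1);

        /* Order the request against the state poll (smp_mb() in the kernel),
         * then wait for the thread to report it has left the kernel. */
        atomic_thread_fence(memory_order_seq_cst);
        while (atomic_load(&hs->hwthread_state) == KVM_HWTHREAD_IN_KERNEL) {
                if (--timeout <= 0)
                        return -1;      /* the kernel reports an error (-EBUSY) here */
        }
        return 0;
}

static void release_sibling_thread(struct hstate_model *hs)
{
        /* After this patch the request is cleared on every CPU again. */
        atomic_store(&hs->hwthread_req, 0);
}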
arch/powerpc/kvm/book3s_hv_rmhandlers.S +0 −8

@@ -149,11 +149,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
         subf    r4, r4, r3
         mtspr   SPRN_DEC, r4
 
-BEGIN_FTR_SECTION
         /* hwthread_req may have got set by cede or no vcpu, so clear it */
         li      r0, 0
         stb     r0, HSTATE_HWTHREAD_REQ(r13)
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 
         /*
          * For external interrupts we need to call the Linux

@@ -316,7 +314,6 @@ kvm_novcpu_exit:
  * Relocation is off and most register values are lost.
  * r13 points to the PACA.
  * r3 contains the SRR1 wakeup value, SRR1 is trashed.
- * This is not used by ISAv3.0B processors.
  */
         .globl  kvm_start_guest
 kvm_start_guest:

@@ -435,9 +432,6 @@ kvm_secondary_got_guest:
  * While waiting we also need to check if we get given a vcpu to run.
  */
 kvm_no_guest:
-BEGIN_FTR_SECTION
-        twi     31,0,0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
         lbz     r3, HSTATE_HWTHREAD_REQ(r13)
         cmpwi   r3, 0
         bne     53f

@@ -2531,10 +2525,8 @@ kvm_do_nap:
         clrrdi  r0, r0, 1
         mtspr   SPRN_CTRLT, r0
 
-BEGIN_FTR_SECTION
         li      r0,1
         stb     r0,HSTATE_HWTHREAD_REQ(r13)
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
         mfspr   r5,SPRN_LPCR
         ori     r5,r5,LPCR_PECE0 | LPCR_PECE1
 BEGIN_FTR_SECTION
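These hunks drop the BEGIN_FTR_SECTION / END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) wrappers (and one _IFSET trap), so the hwthread_req handling runs on every CPU again, ISA v3.0 / POWER9 included. The feature-section macros are boot-time instruction fixups, not run-time branches, but as a rough C analogue the change amounts to the hedged sketch below, reusing the toy model from above with a made-up helper name.

/* Rough C analogue of dropping the CPU_FTR_ARCH_300 feature section;
 * illustrative only - the real macros patch the instructions at boot. */
static void clear_hwthread_req_before(struct hstate_model *hs, int cpu_is_isa_v3)
{
        if (!cpu_is_isa_v3)     /* END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) */
                atomic_store(&hs->hwthread_req, 0);
}

static void clear_hwthread_req_after(struct hstate_model *hs)
{
        /* After this patch: unconditional, as before the POWER9 special case. */
        atomic_store(&hs->hwthread_req, 0);
}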
arch/powerpc/kvm/powerpc.c +2 −2

@@ -644,8 +644,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                 break;
 #endif
         case KVM_CAP_PPC_HTM:
-                r = cpu_has_feature(CPU_FTR_TM_COMP) &&
-                        is_kvmppc_hv_enabled(kvm);
+                r = is_kvmppc_hv_enabled(kvm) &&
+                        (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
                 break;
         default:
                 r = 0;
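This hunk only changes what KVM reports for KVM_CAP_PPC_HTM: the HV-KVM requirement stays, but the HTM test now uses the userspace-visible PPC_FEATURE2_HTM_COMP bit instead of CPU_FTR_TM_COMP. A minimal sketch of how a VMM might query the capability follows; it uses the standard KVM_CHECK_EXTENSION ioctl, assumes a kernel that accepts it on a VM file descriptor, and trims all error handling for brevity.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);

        /* Per-VM query: a value > 0 means this VM (HV KVM on a host that
         * advertises HTM to userspace) can expose transactional memory. */
        int htm = ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM);
        printf("KVM_CAP_PPC_HTM: %d\n", htm);
        return 0;
}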