
Commit 432953b4 authored by Paul Mackerras

KVM: PPC: Book3S HV: Cosmetic post-merge cleanups



This rearranges the code in kvmppc_run_vcpu() and kvmppc_vcpu_run_hv()
to be neater and clearer.  Deeply indented code in kvmppc_run_vcpu()
is moved out to a helper function, kvmhv_setup_mmu().  In
kvmppc_vcpu_run_hv(), make use of the existing variable 'kvm' in
place of 'vcpu->kvm'.

No functional change.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 072df813
+25 −16
@@ -3120,6 +3120,25 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
 }
 
+static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
+{
+	int r = 0;
+	struct kvm *kvm = vcpu->kvm;
+
+	mutex_lock(&kvm->lock);
+	if (!kvm->arch.mmu_ready) {
+		if (!kvm_is_radix(kvm))
+			r = kvmppc_hv_setup_htab_rma(vcpu);
+		if (!r) {
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				kvmppc_setup_partition_table(kvm);
+			kvm->arch.mmu_ready = 1;
+		}
+	}
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int n_ceded, i, r;
@@ -3179,22 +3198,12 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		/* See if the MMU is ready to go */
 		if (!vcpu->kvm->arch.mmu_ready) {
 			spin_unlock(&vc->lock);
-			mutex_lock(&vcpu->kvm->lock);
-			r = 0;
-			if (!vcpu->kvm->arch.mmu_ready) {
-				if (!kvm_is_radix(vcpu->kvm))
-					r = kvmppc_hv_setup_htab_rma(vcpu);
-				if (!r) {
-					if (cpu_has_feature(CPU_FTR_ARCH_300))
-						kvmppc_setup_partition_table(vcpu->kvm);
-					vcpu->kvm->arch.mmu_ready = 1;
-				}
-			}
-			mutex_unlock(&vcpu->kvm->lock);
+			r = kvmhv_setup_mmu(vcpu);
 			spin_lock(&vc->lock);
 			if (r) {
 				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-				kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+				kvm_run->fail_entry.
+					hardware_entry_failure_reason = 0;
 				vcpu->arch.ret = r;
 				break;
 			}
@@ -3344,10 +3353,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			trace_kvm_hcall_exit(vcpu, r);
 			kvmppc_core_prepare_to_enter(vcpu);
 		} else if (r == RESUME_PAGE_FAULT) {
-			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+			srcu_idx = srcu_read_lock(&kvm->srcu);
 			r = kvmppc_book3s_hv_page_fault(run, vcpu,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
-			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+			srcu_read_unlock(&kvm->srcu, srcu_idx);
 		} else if (r == RESUME_PASSTHROUGH) {
 			if (WARN_ON(xive_enabled()))
 				r = H_SUCCESS;
@@ -3367,7 +3376,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	mtspr(SPRN_VRSAVE, user_vrsave);
 
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
-	atomic_dec(&vcpu->kvm->arch.vcpus_running);
+	atomic_dec(&kvm->arch.vcpus_running);
 	return r;
 }
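
Note: the extracted kvmhv_setup_mmu() keeps the double-checked shape of the code it replaces. kvmppc_run_vcpu() still tests mmu_ready without the lock as a fast path, and the helper re-tests it under kvm->lock so that two vCPUs racing into the slow path do not both perform the setup. Below is a minimal user-space sketch of that locking shape; the names (struct vm, vm_setup_mmu, vm_run) are invented for illustration and are not the kernel's API.

/*
 * User-space analogue of the kvmhv_setup_mmu() pattern: an unlocked
 * fast-path check in the caller, re-checked under a mutex in the
 * helper so only one thread performs the one-time setup.
 * Illustrative sketch only, not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

struct vm {
	pthread_mutex_t lock;
	int mmu_ready;		/* 0 until the one-time setup has run */
};

static int vm_setup_mmu(struct vm *vm)
{
	int r = 0;

	pthread_mutex_lock(&vm->lock);
	/* Re-check under the lock: another thread may have won the race. */
	if (!vm->mmu_ready) {
		printf("performing one-time MMU setup\n");
		/* ... expensive setup would go here; r reports failure ... */
		vm->mmu_ready = 1;
	}
	pthread_mutex_unlock(&vm->lock);
	return r;
}

static void *vm_run(void *arg)
{
	struct vm *vm = arg;

	/* Unlocked fast path, as in kvmppc_run_vcpu(). */
	if (!vm->mmu_ready && vm_setup_mmu(vm))
		return NULL;	/* setup failed; bail out */
	/* ... enter the guest here ... */
	return NULL;
}

int main(void)
{
	struct vm vm = { .lock = PTHREAD_MUTEX_INITIALIZER, .mmu_ready = 0 };
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, vm_run, &vm);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;		/* the setup message prints exactly once */
}

The unlocked read is tolerable in this shape because a stale zero merely sends the caller into the locked slow path, where the flag is read again under the mutex before any work is done.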