
Commit e0231715 authored by Joerg Roedel, committed by Avi Kivity

KVM: SVM: Coding style cleanup



This patch removes whitespace errors, fixes comment formatting
and addresses most of the checkpatch warnings. Vim no longer
shows c-space-errors in the file.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 83bf0002
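
Most of the hunks below convert old block comments to the multi-line
comment format preferred by the kernel coding style. As a minimal
illustration (not taken from this patch, variable-free and purely for
comparison), the conversion looks like this:

	/* before: continuation lines are not aligned on a leading
	   asterisk, which checkpatch warns about */

	/*
	 * after: the opening marker stands alone, every continuation
	 * line starts with a space, an asterisk and a space, and the
	 * closing marker gets its own line
	 */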
+81 −67
@@ -120,7 +120,7 @@ struct vcpu_svm {
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static bool npt_enabled = true;
 #else
-static bool npt_enabled = false;
+static bool npt_enabled;
 #endif
 static int npt = 1;
 
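The npt_enabled change in the hunk above is a standard checkpatch fix:
objects with static storage duration are zero-initialized by the C
standard, so an explicit "= false" (or "= 0") initializer is redundant.
A minimal sketch with hypothetical names, only to show what the
warning is about:

	#include <stdbool.h>

	static bool example_flag;		/* already false; no initializer needed */
	static bool redundant_flag = false;	/* redundant static initializer, the kind of thing checkpatch flags */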
@@ -290,8 +290,10 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	/* If we are within a nested VM we'd better #VMEXIT and let the
-	   guest handle the exception */
+	/*
+	 * If we are within a nested VM we'd better #VMEXIT and let the guest
+	 * handle the exception
+	 */
 	if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
 		return;
 
@@ -657,7 +659,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 	save->rip = 0x0000fff0;
 	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
 
-	/* This is the guest-visible cr0 value.
+	/*
+	 * This is the guest-visible cr0 value.
 	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
 	 */
 	svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
@@ -903,7 +906,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
 	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
 
-	/* AMD's VMCB does not have an explicit unusable field, so emulate it
+	/*
+	 * AMD's VMCB does not have an explicit unusable field, so emulate it
 	 * for cross vendor migration purposes by "not present"
 	 */
 	var->unusable = !var->present || (var->type == 0);
@@ -939,7 +943,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 			var->type |= 0x1;
 		break;
 	case VCPU_SREG_SS:
-		/* On AMD CPUs sometimes the DB bit in the segment
+		/*
+		 * On AMD CPUs sometimes the DB bit in the segment
 		 * descriptor is left as 1, although the whole segment has
 		 * been made unusable. Clear it here to pass an Intel VMX
 		 * entry check when cross vendor migrating.
@@ -1554,13 +1559,13 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
 	case SVM_EXIT_INTR:
 	case SVM_EXIT_NMI:
 		return NESTED_EXIT_HOST;
-		/* For now we are always handling NPFs when using them */
 	case SVM_EXIT_NPF:
+		/* For now we are always handling NPFs when using them */
 		if (npt_enabled)
 			return NESTED_EXIT_HOST;
 		break;
-	/* When we're shadowing, trap PFs */
 	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+		/* When we're shadowing, trap PFs */
 		if (!npt_enabled)
 			return NESTED_EXIT_HOST;
 		break;
@@ -1829,8 +1834,10 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	kvm_clear_exception_queue(&svm->vcpu);
 	kvm_clear_interrupt_queue(&svm->vcpu);
 
-	/* Save the old vmcb, so we don't need to pick what we save, but
-	   can restore everything when a VMEXIT occurs */
+	/*
+	 * Save the old vmcb, so we don't need to pick what we save, but can
+	 * restore everything when a VMEXIT occurs
+	 */
 	hsave->save.es     = vmcb->save.es;
 	hsave->save.cs     = vmcb->save.cs;
 	hsave->save.ss     = vmcb->save.ss;
@@ -1878,6 +1885,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+
 	/* In case we don't even reach vcpu_run, the fields are not updated */
 	svm->vmcb->save.rax = nested_vmcb->save.rax;
 	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
@@ -1909,8 +1917,10 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
 	}
 
-	/* We don't want a nested guest to be more powerful than the guest,
-	   so all intercepts are ORed */
+	/*
+	 * We don't want a nested guest to be more powerful than the guest, so
+	 * all intercepts are ORed
+	 */
 	svm->vmcb->control.intercept_cr_read |=
 		nested_vmcb->control.intercept_cr_read;
 	svm->vmcb->control.intercept_cr_write |=
@@ -2224,9 +2234,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	case MSR_IA32_SYSENTER_ESP:
 		*data = svm->sysenter_esp;
 		break;
-	/* Nobody will change the following 5 values in the VMCB so
-	   we can safely return them on rdmsr. They will always be 0
-	   until LBRV is implemented. */
+	/*
+	 * Nobody will change the following 5 values in the VMCB so we can
+	 * safely return them on rdmsr. They will always be 0 until LBRV is
+	 * implemented.
+	 */
 	case MSR_IA32_DEBUGCTLMSR:
 		*data = svm->vmcb->save.dbgctl;
 		break;
@@ -2441,7 +2453,6 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_SMI]				= nop_on_interception,
 	[SVM_EXIT_INIT]				= nop_on_interception,
 	[SVM_EXIT_VINTR]			= interrupt_window_interception,
-	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
 	[SVM_EXIT_CPUID]			= cpuid_interception,
 	[SVM_EXIT_IRET]                         = iret_interception,
 	[SVM_EXIT_INVD]                         = emulate_on_interception,
@@ -2650,10 +2661,12 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	/* In case GIF=0 we can't rely on the CPU to tell us when
-	 * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
-	 * The next time we get that intercept, this function will be
-	 * called again though and we'll get the vintr intercept. */
+	/*
+	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
+	 * 1, because that's a separate STGI/VMRUN intercept.  The next time we
+	 * get that intercept, this function will be called again though and
+	 * we'll get the vintr intercept.
+	 */
 	if (gif_set(svm) && nested_svm_intr(svm)) {
 		svm_set_vintr(svm);
 		svm_inject_irq(svm, 0x0);
@@ -2668,9 +2681,10 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	    == HF_NMI_MASK)
 		return; /* IRET will cause a vm exit */
 
-	/* Something prevents NMI from been injected. Single step over
-	   possible problem (IRET or exception injection or interrupt
-	   shadow) */
+	/*
+	 * Something prevents NMI from been injected. Single step over possible
+	 * problem (IRET or exception injection or interrupt shadow)
+	 */
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
 	update_db_intercept(vcpu);