
Commit b3e97833 authored by Linus Torvalds
Pull kvm fixes from Paolo Bonzini:
 "Fixes for ARM and x86, plus selftest patches and nicer structs for
  nested state save/restore"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: nVMX: reorganize initial steps of vmx_set_nested_state
  KVM: arm/arm64: Fix emulated ptimer irq injection
  tests: kvm: Check for a kernel warning
  kvm: tests: Sort tests in the Makefile alphabetically
  KVM: x86/mmu: Allocate PAE root array when using SVM's 32-bit NPT
  KVM: x86: Modify struct kvm_nested_state to have explicit fields for data
  KVM: fix typo in documentation
  KVM: nVMX: use correct clean fields when copying from eVMCS
  KVM: arm/arm64: vgic: Fix kvm_device leak in vgic_its_destroy
  KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST
  KVM: arm64: Implement vq_present() as a macro
parents e9293874 b21e31b2
+32 −16
@@ -1079,7 +1079,7 @@ yet and must be cleared on entry.

4.35 KVM_SET_USER_MEMORY_REGION

-Capability: KVM_CAP_USER_MEM
+Capability: KVM_CAP_USER_MEMORY
Architectures: all
Type: vm ioctl
Parameters: struct kvm_userspace_memory_region (in)
@@ -3857,43 +3857,59 @@ Type: vcpu ioctl
Parameters: struct kvm_nested_state (in/out)
Returns: 0 on success, -1 on error
Errors:
-  E2BIG:     the total state size (including the fixed-size part of struct
-             kvm_nested_state) exceeds the value of 'size' specified by
+  E2BIG:     the total state size exceeds the value of 'size' specified by
             the user; the size required will be written into size.

struct kvm_nested_state {
	__u16 flags;
	__u16 format;
	__u32 size;
+
	union {
-		struct kvm_vmx_nested_state vmx;
-		struct kvm_svm_nested_state svm;
+		struct kvm_vmx_nested_state_hdr vmx;
+		struct kvm_svm_nested_state_hdr svm;
+
+		/* Pad the header to 128 bytes.  */
		__u8 pad[120];
-	};
-	__u8 data[0];
+	} hdr;
+
+	union {
+		struct kvm_vmx_nested_state_data vmx[0];
+		struct kvm_svm_nested_state_data svm[0];
+	} data;
};

#define KVM_STATE_NESTED_GUEST_MODE	0x00000001
#define KVM_STATE_NESTED_RUN_PENDING	0x00000002
#define KVM_STATE_NESTED_EVMCS		0x00000004

-#define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
-#define KVM_STATE_NESTED_SMM_VMXON	0x00000002
+#define KVM_STATE_NESTED_FORMAT_VMX		0
+#define KVM_STATE_NESTED_FORMAT_SVM		1

-struct kvm_vmx_nested_state {
+#define KVM_STATE_NESTED_VMX_VMCS_SIZE		0x1000
+
+#define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE	0x00000001
+#define KVM_STATE_NESTED_VMX_SMM_VMXON		0x00000002
+
+struct kvm_vmx_nested_state_hdr {
	__u64 vmxon_pa;
-	__u64 vmcs_pa;
+	__u64 vmcs12_pa;

	struct {
		__u16 flags;
	} smm;
};

+struct kvm_vmx_nested_state_data {
+	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+};
+
This ioctl copies the vcpu's nested virtualization state from the kernel to
userspace.

-The maximum size of the state, including the fixed-size part of struct
-kvm_nested_state, can be retrieved by passing KVM_CAP_NESTED_STATE to
-the KVM_CHECK_EXTENSION ioctl().
+The maximum size of the state can be retrieved by passing KVM_CAP_NESTED_STATE
+to the KVM_CHECK_EXTENSION ioctl().

4.115 KVM_SET_NESTED_STATE

@@ -3903,8 +3919,8 @@ Type: vcpu ioctl
Parameters: struct kvm_nested_state (in)
Returns: 0 on success, -1 on error

-This copies the vcpu's kvm_nested_state struct from userspace to the kernel.  For
-the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
+This copies the vcpu's kvm_nested_state struct from userspace to the kernel.
+For the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.

4.116 KVM_(UN)REGISTER_COALESCED_MMIO

+43 −22
@@ -70,10 +70,8 @@ static u64 core_reg_offset_from_id(u64 id)
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

-static int validate_core_offset(const struct kvm_vcpu *vcpu,
-				const struct kvm_one_reg *reg)
+static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
-	u64 off = core_reg_offset_from_id(reg->id);
	int size;

	switch (off) {
@@ -103,8 +101,7 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
		return -EINVAL;
	}

-	if (KVM_REG_SIZE(reg->id) != size ||
-	    !IS_ALIGNED(off, size / sizeof(__u32)))
+	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
@@ -115,6 +112,21 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

+	return size;
+}
+
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+				const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size = core_reg_size_from_offset(vcpu, off);
+
+	if (size < 0)
+		return -EINVAL;
+
+	if (KVM_REG_SIZE(reg->id) != size)
+		return -EINVAL;
+
	return 0;
}

@@ -207,13 +219,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
-
-static bool vq_present(
-	const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
-	unsigned int vq)
-{
-	return (*vqs)[vq_word(vq)] & vq_mask(vq);
-}
+#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
@@ -258,7 +264,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
-		if (vq_present(&vqs, vq))
+		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
@@ -272,7 +278,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
-		if (vq_present(&vqs, vq) != sve_vq_available(vq))
+		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
@@ -453,19 +459,34 @@ static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
{
	unsigned int i;
	int n = 0;
-	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
-		/*
-		 * The KVM_REG_ARM64_SVE regs must be used instead of
-		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
-		 * SVE-enabled vcpus:
-		 */
-		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
+		int size = core_reg_size_from_offset(vcpu, i);
+
+		if (size < 0)
			continue;

+		switch (size) {
+		case sizeof(__u32):
+			reg |= KVM_REG_SIZE_U32;
+			break;
+
+		case sizeof(__u64):
+			reg |= KVM_REG_SIZE_U64;
+			break;
+
+		case sizeof(__uint128_t):
+			reg |= KVM_REG_SIZE_U128;
+			break;
+
+		default:
+			WARN_ON(1);
+			continue;
+		}
+
		if (uindices) {
-			if (put_user(core_reg | i, uindices))
+			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
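
The vq_present() rewrite above changes only how the bitmap is addressed (the macro takes the array directly, so the &vqs indirection at the call sites goes away); the encoding itself is unchanged. A standalone illustration of that encoding, with SVE_VQ_MIN and KVM_ARM64_SVE_VLS_WORDS stubbed to their kernel values (an assumption for this sketch, not code from the commit):

/*
 * Standalone illustration (not kernel code) of the SVE vector-length
 * bitmap: one bit per vector quantum (vq), packed into 64-bit words.
 */
#include <stdint.h>
#include <stdio.h>

#define SVE_VQ_MIN		1
#define KVM_ARM64_SVE_VLS_WORDS	8

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((uint64_t)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))

int main(void)
{
	uint64_t vqs[KVM_ARM64_SVE_VLS_WORDS] = { 0 };

	vqs[vq_word(2)] |= vq_mask(2);	/* mark vq 2, i.e. 256-bit vectors */

	printf("vq 2 present: %d\n", vq_present(vqs, 2) ? 1 : 0);
	printf("vq 3 present: %d\n", vq_present(vqs, 3) ? 1 : 0);
	return 0;
}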
+22 −11
@@ -383,6 +383,9 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE	(1 << 2)
#define KVM_X86_QUIRK_OUT_7E_INC_RIP	(1 << 3)

+#define KVM_STATE_NESTED_FORMAT_VMX	0
+#define KVM_STATE_NESTED_FORMAT_SVM	1	/* unused */
+
#define KVM_STATE_NESTED_GUEST_MODE	0x00000001
#define KVM_STATE_NESTED_RUN_PENDING	0x00000002
#define KVM_STATE_NESTED_EVMCS		0x00000004
@@ -390,9 +393,16 @@ struct kvm_sync_regs {
#define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
#define KVM_STATE_NESTED_SMM_VMXON	0x00000002

-struct kvm_vmx_nested_state {
+#define KVM_STATE_NESTED_VMX_VMCS_SIZE	0x1000
+
+struct kvm_vmx_nested_state_data {
+	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+};
+
+struct kvm_vmx_nested_state_hdr {
	__u64 vmxon_pa;
-	__u64 vmcs_pa;
+	__u64 vmcs12_pa;

	struct {
		__u16 flags;
@@ -401,24 +411,25 @@ struct kvm_vmx_nested_state {

/* for KVM_CAP_NESTED_STATE */
struct kvm_nested_state {
-	/* KVM_STATE_* flags */
	__u16 flags;
-
-	/* 0 for VMX, 1 for SVM.  */
	__u16 format;
-
-	/* 128 for SVM, 128 + VMCS size for VMX.  */
	__u32 size;

	union {
-		/* VMXON, VMCS */
-		struct kvm_vmx_nested_state vmx;
+		struct kvm_vmx_nested_state_hdr vmx;

		/* Pad the header to 128 bytes.  */
		__u8 pad[120];
-	};
+	} hdr;

-	__u8 data[0];
+	/*
+	 * Define data region as 0 bytes to preserve backwards-compatability
+	 * to old definition of kvm_nested_state in order to avoid changing
+	 * KVM_{GET,PUT}_NESTED_STATE ioctl values.
+	 */
+	union {
+		struct kvm_vmx_nested_state_data vmx[0];
+	} data;
};

#endif /* _ASM_X86_KVM_H */
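
As the comment in the new header explains, old userspace may still treat the data region as a bare byte array at offset 128, so the explicit fields must not move anything. A small layout sanity sketch (an assumption for illustration, not part of this commit; note that zero-length arrays are a GNU extension, as in the kernel header):

/*
 * Layout sanity sketch (not from this commit): the header is padded to
 * 128 bytes and the data region starts immediately after it, so the
 * explicit hdr/data fields keep the old ABI offsets.
 */
#include <linux/kvm.h>
#include <assert.h>
#include <stddef.h>

int main(void)
{
	/* flags + format + size (8 bytes) + 120-byte hdr union = 128 */
	static_assert(sizeof(struct kvm_nested_state) == 128,
		      "header must stay 128 bytes");
	static_assert(offsetof(struct kvm_nested_state, data) == 128,
		      "data region must start right after the header");
	return 0;
}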
+10 −6
@@ -5602,14 +5602,18 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
	struct page *page;
	int i;

-	if (tdp_enabled)
-		return 0;
-
	/*
-	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
-	 * Therefore we need to allocate shadow page tables in the first
-	 * 4GB of memory, which happens to fit the DMA32 zone.
+	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
+	 * while the PDP table is a per-vCPU construct that's allocated at MMU
+	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
+	 * x86_64.  Therefore we need to allocate the PDP table in the first
+	 * 4GB of memory, which happens to fit the DMA32 zone.  Except for
+	 * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
+	 * skip allocating the PDP table.
	 */
+	if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+		return 0;
+
	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
	if (!page)
		return -ENOMEM;
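
Restated outside the kernel as a hypothetical helper (not from the commit), the new condition says that only shadow paging and SVM's 32-bit NPT, i.e. any configuration whose root level is at or below PT32E_ROOT_LEVEL, still needs the DMA32 PDP table:

#include <stdbool.h>

#define PT32E_ROOT_LEVEL 3	/* kernel value for 3-level PAE paging */

/*
 * Hypothetical restatement of the check above: with TDP enabled and a
 * root deeper than PAE's 3 levels, no PDP table is needed; SVM's
 * 32-bit NPT (tdp_level == PT32E_ROOT_LEVEL) still requires it.
 */
static bool need_pae_root(bool tdp_enabled, int tdp_level)
{
	return !(tdp_enabled && tdp_level > PT32E_ROOT_LEVEL);
}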
+58 −45
@@ -1397,7 +1397,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
	}

	if (unlikely(!(evmcs->hv_clean_fields &
-		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
+		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

@@ -1437,7 +1437,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
	}

	if (unlikely(!(evmcs->hv_clean_fields &
-		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
+		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
@@ -5226,14 +5226,16 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
-		.format = 0,
+		.format = KVM_STATE_NESTED_FORMAT_VMX,
		.size = sizeof(kvm_state),
-		.vmx.vmxon_pa = -1ull,
-		.vmx.vmcs_pa = -1ull,
+		.hdr.vmx.vmxon_pa = -1ull,
+		.hdr.vmx.vmcs12_pa = -1ull,
	};
+	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
+		&user_kvm_nested_state->data.vmx[0];

	if (!vcpu)
-		return kvm_state.size + 2 * VMCS12_SIZE;
+		return kvm_state.size + sizeof(*user_vmx_nested_state);

	vmx = to_vmx(vcpu);
	vmcs12 = get_vmcs12(vcpu);
@@ -5243,23 +5245,23 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,

	if (nested_vmx_allowed(vcpu) &&
	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
-		kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
-		kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
+		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
+		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;

		if (vmx_has_valid_vmcs12(vcpu)) {
-			kvm_state.size += VMCS12_SIZE;
+			kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);

			if (is_guest_mode(vcpu) &&
			    nested_cpu_has_shadow_vmcs(vmcs12) &&
			    vmcs12->vmcs_link_pointer != -1ull)
-				kvm_state.size += VMCS12_SIZE;
+				kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
		}

		if (vmx->nested.smm.vmxon)
-			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
+			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;

		if (vmx->nested.smm.guest_mode)
-			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
+			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;

		if (is_guest_mode(vcpu)) {
			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
@@ -5294,16 +5296,19 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
			copy_shadow_to_vmcs12(vmx);
	}

+	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
+	BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
+
	/*
	 * Copy over the full allocated size of vmcs12 rather than just the size
	 * of the struct.
	 */
-	if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
+	if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
		return -EFAULT;

	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
-		if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
+		if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
			return -EFAULT;
	}
@@ -5331,33 +5336,35 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	u32 exit_qual;
+	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
+		&user_kvm_nested_state->data.vmx[0];
	int ret;

-	if (kvm_state->format != 0)
+	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
		return -EINVAL;

-	if (!nested_vmx_allowed(vcpu))
-		return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
-
-	if (kvm_state->vmx.vmxon_pa == -1ull) {
-		if (kvm_state->vmx.smm.flags)
+	if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
+		if (kvm_state->hdr.vmx.smm.flags)
			return -EINVAL;

-		if (kvm_state->vmx.vmcs_pa != -1ull)
+		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
			return -EINVAL;

-		vmx_leave_nested(vcpu);
-		return 0;
-	}
+		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
+			return -EINVAL;
+	} else {
+		if (!nested_vmx_allowed(vcpu))
+			return -EINVAL;
+
-	if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
+		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
			return -EINVAL;
+	}

-	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

-	if (kvm_state->vmx.smm.flags &
+	if (kvm_state->hdr.vmx.smm.flags &
	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

@@ -5366,21 +5373,25 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
	 * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
	 * must be zero.
	 */
-	if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
+	if (is_smm(vcpu) ? kvm_state->flags : kvm_state->hdr.vmx.smm.flags)
		return -EINVAL;

-	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
-	    !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
+	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	vmx_leave_nested(vcpu);
-	if (kvm_state->vmx.vmxon_pa == -1ull)
-		return 0;
+	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
+		if (!nested_vmx_allowed(vcpu))
+			return -EINVAL;
+
-	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
		nested_enable_evmcs(vcpu, NULL);
+	}
+
+	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
+		return 0;

-	vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
+	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;
@@ -5389,12 +5400,12 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
		return 0;

-	if (kvm_state->vmx.vmcs_pa != -1ull) {
-		if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
-		    !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
+		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
+		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
			return -EINVAL;

-		set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		/*
		 * Sync eVMCS upon entry as we may not have
@@ -5405,16 +5416,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
		return -EINVAL;
	}

-	if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
+	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
		vmx->nested.smm.vmxon = true;
		vmx->nested.vmxon = false;

-		if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
+		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
			vmx->nested.smm.guest_mode = true;
	}

	vmcs12 = get_vmcs12(vcpu);
-	if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
+	if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
		return -EFAULT;

	if (vmcs12->hdr.revision_id != VMCS12_REVISION)
@@ -5431,12 +5442,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
	    vmcs12->vmcs_link_pointer != -1ull) {
		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);

-		if (kvm_state->size < sizeof(*kvm_state) + VMCS12_SIZE + sizeof(*vmcs12))
+		if (kvm_state->size <
+		    sizeof(*kvm_state) +
+		    sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
			goto error_guest_mode;

		if (copy_from_user(shadow_vmcs12,
-				   user_kvm_nested_state->data + VMCS12_SIZE,
-				   sizeof(*vmcs12))) {
+				   user_vmx_nested_state->shadow_vmcs12,
+				   sizeof(*shadow_vmcs12))) {
			ret = -EFAULT;
			goto error_guest_mode;
		}
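
The size checks above pair with the E2BIG contract documented for KVM_GET_NESTED_STATE: when the supplied buffer is too small, the kernel writes the required size back into 'size'. As an alternative to the KVM_CHECK_EXTENSION probe shown earlier, userspace can size the buffer by probing (again a sketch, not part of this merge):

/*
 * Sketch (not from this commit): size a kvm_nested_state buffer by
 * probing with the bare 128-byte header and retrying on E2BIG.
 */
#include <errno.h>
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct kvm_nested_state *get_nested_state(int vcpu_fd)
{
	struct kvm_nested_state probe = { .size = sizeof(probe) };
	struct kvm_nested_state *state;

	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &probe) < 0 &&
	    errno != E2BIG)
		return NULL;

	/* On E2BIG the kernel wrote the required size into probe.size. */
	state = calloc(1, probe.size);
	if (!state)
		return NULL;

	state->size = probe.size;
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);
		return NULL;
	}
	return state;
}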