
Commit b666a4b6 authored by Marc Orr, committed by Paolo Bonzini

kvm: x86: Dynamically allocate guest_fpu



Previously, the guest_fpu field was embedded in the kvm_vcpu_arch
struct. Unfortunately, the field is quite large (e.g., 4352 bytes on my
current setup). This bloats the kvm_vcpu_arch struct for x86 into an
order 3 memory allocation, which can become a problem on overcommitted
machines. Thus, this patch moves the fpu state outside of the
kvm_vcpu_arch struct.

With this patch applied, the kvm_vcpu_arch struct is reduced to 15168
bytes for vmx on my setup when building the kernel with kvmconfig.

Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Marc Orr <marcorr@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 240c35a3
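
As a rough, editor-added illustration of the allocation-order arithmetic the commit message refers to, the sketch below (plain userspace C, not part of the commit) uses the sizes quoted in the message and assumes a 4096-byte page. It only shows the kvm_vcpu_arch contribution; the real allocation is the containing vcpu_vmx/vcpu_svm struct, so the numbers are indicative of the trend rather than exact.

/*
 * Editor's sketch, not part of the commit: how a byte count maps to a
 * power-of-two page allocation order. Sizes are the ones quoted in the
 * commit message (4352-byte guest_fpu, 15168-byte kvm_vcpu_arch after
 * the patch); a 4096-byte page is assumed.
 */
#include <stdio.h>

static unsigned int alloc_order(size_t bytes, size_t page_size)
{
	size_t pages = (bytes + page_size - 1) / page_size;	/* round up to whole pages */
	unsigned int order = 0;

	while (((size_t)1 << order) < pages)	/* smallest power-of-two page count */
		order++;
	return order;
}

int main(void)
{
	const size_t page = 4096;

	/* ~19520 bytes with guest_fpu embedded -> 5 pages -> order 3 */
	printf("embedded guest_fpu: order %u\n", alloc_order(15168 + 4352, page));
	/* 15168 bytes once guest_fpu moves out -> 4 pages -> order 2 */
	printf("separate arch:      order %u\n", alloc_order(15168, page));
	/* the fpu object itself comes from its own x86_fpu kmem cache */
	printf("guest_fpu alone:    order %u\n", alloc_order(4352, page));
	return 0;
}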
+2 −1
@@ -610,7 +610,7 @@ struct kvm_vcpu_arch {
	 * "guest_fpu" state here contains the guest FPU context, with the
	 * host PRKU bits.
	 */
-	struct fpu guest_fpu;
+	struct fpu *guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;
@@ -1196,6 +1196,7 @@ struct kvm_arch_async_pf {
};

extern struct kvm_x86_ops *kvm_x86_ops;
+extern struct kmem_cache *x86_fpu_cache;

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
+10 −0
@@ -2125,6 +2125,13 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
		goto out;
	}

+	svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+	if (!svm->vcpu.arch.guest_fpu) {
+		printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
+		err = -ENOMEM;
+		goto free_partial_svm;
+	}
+
	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;
@@ -2184,6 +2191,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
+	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
+free_partial_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
@@ -2213,6 +2222,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
+	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

+10 −0
@@ -6349,6 +6349,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
	free_loaded_vmcs(vmx->loaded_vmcs);
	kfree(vmx->guest_msrs);
	kvm_vcpu_uninit(vcpu);
+	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
	kmem_cache_free(kvm_vcpu_cache, vmx);
}

@@ -6362,6 +6363,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
	if (!vmx)
		return ERR_PTR(-ENOMEM);

+	vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+	if (!vmx->vcpu.arch.guest_fpu) {
+		printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
+		err = -ENOMEM;
+		goto free_partial_vcpu;
+	}
+
	vmx->vpid = allocate_vpid();

	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
@@ -6454,6 +6462,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
	kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
	free_vpid(vmx->vpid);
+	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
+free_partial_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vmx);
	return ERR_PTR(err);
}
+38 −13
@@ -213,6 +213,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {

u64 __read_mostly host_xcr0;

+struct kmem_cache *x86_fpu_cache;
+EXPORT_SYMBOL_GPL(x86_fpu_cache);
+
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
@@ -3630,7 +3633,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,

static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
{
-	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
+	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
	u64 xstate_bv = xsave->header.xfeatures;
	u64 valid;

@@ -3672,7 +3675,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)

static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
{
-	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
+	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
	u64 valid;

@@ -3720,7 +3723,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
		fill_xsave((u8 *) guest_xsave->region, vcpu);
	} else {
		memcpy(guest_xsave->region,
-			&vcpu->arch.guest_fpu.state.fxsave,
+			&vcpu->arch.guest_fpu->state.fxsave,
			sizeof(struct fxregs_state));
		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
			XFEATURE_MASK_FPSSE;
@@ -3750,7 +3753,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
			mxcsr & ~mxcsr_feature_mask)
			return -EINVAL;
-		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
+		memcpy(&vcpu->arch.guest_fpu->state.fxsave,
			guest_xsave->region, sizeof(struct fxregs_state));
	}
	return 0;
@@ -6852,11 +6855,30 @@ int kvm_arch_init(void *opaque)
		goto out;
	}

+	/*
+	 * KVM explicitly assumes that the guest has an FPU and
+	 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the
+	 * vCPU's FPU state as a fxregs_state struct.
+	 */
+	if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
+		printk(KERN_ERR "kvm: inadequate fpu\n");
+		r = -EOPNOTSUPP;
+		goto out;
+	}
+
	r = -ENOMEM;
+	x86_fpu_cache = kmem_cache_create("x86_fpu", fpu_kernel_xstate_size,
+					  __alignof__(struct fpu), SLAB_ACCOUNT,
+					  NULL);
+	if (!x86_fpu_cache) {
+		printk(KERN_ERR "kvm: failed to allocate cache for x86 fpu\n");
+		goto out;
+	}
+
	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
	if (!shared_msrs) {
		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
-		goto out;
+		goto out_free_x86_fpu_cache;
	}

	r = kvm_mmu_module_init();
@@ -6889,6 +6911,8 @@ int kvm_arch_init(void *opaque)

out_free_percpu:
	free_percpu(shared_msrs);
+out_free_x86_fpu_cache:
+	kmem_cache_destroy(x86_fpu_cache);
out:
	return r;
}
@@ -6912,6 +6936,7 @@ void kvm_arch_exit(void)
	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
	free_percpu(shared_msrs);
+	kmem_cache_destroy(x86_fpu_cache);
}

int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
@@ -8037,7 +8062,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
	preempt_disable();
	copy_fpregs_to_fpstate(&current->thread.fpu);
	/* PKRU is separately restored in kvm_x86_ops->run.  */
-	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
				~XFEATURE_MASK_PKRU);
	preempt_enable();
	trace_kvm_fpu(1);
@@ -8047,7 +8072,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
-	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
+	copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
	copy_kernel_to_fpregs(&current->thread.fpu.state);
	preempt_enable();
	++vcpu->stat.fpu_reload;
@@ -8542,7 +8567,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

	vcpu_load(vcpu);

-	fxsave = &vcpu->arch.guest_fpu.state.fxsave;
+	fxsave = &vcpu->arch.guest_fpu->state.fxsave;
	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
@@ -8562,7 +8587,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

	vcpu_load(vcpu);

-	fxsave = &vcpu->arch.guest_fpu.state.fxsave;
+	fxsave = &vcpu->arch.guest_fpu->state.fxsave;

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
@@ -8618,9 +8643,9 @@ static int sync_regs(struct kvm_vcpu *vcpu)

static void fx_init(struct kvm_vcpu *vcpu)
{
-	fpstate_init(&vcpu->arch.guest_fpu.state);
+	fpstate_init(&vcpu->arch.guest_fpu->state);
	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
+		vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv =
			host_xcr0 | XSTATE_COMPACTION_ENABLED;

	/*
@@ -8745,11 +8770,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
		 */
		if (init_event)
			kvm_put_guest_fpu(vcpu);
-		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
+		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
					XFEATURE_MASK_BNDREGS);
		if (mpx_state_buffer)
			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
-		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
+		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
					XFEATURE_MASK_BNDCSR);
		if (mpx_state_buffer)
			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));