Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c16f862d authored by Rusty Russell, committed by Avi Kivity
Browse files

KVM: Use kmem cache for allocating vcpus



Avi wants the allocations of vcpus centralized again.  The easiest way
is to add a "size" arg to kvm_init_arch, and expose the thus-prepared
cache to the modules.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent e7d5d76c
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -141,6 +141,7 @@ struct kvm_mmu_page {
};

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
@@ -483,7 +484,8 @@ extern struct kvm_arch_ops *kvm_arch_ops;
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
		  struct module *module);
void kvm_exit_arch(void);

int kvm_mmu_module_init(void);
+15 −1
Original line number Diff line number Diff line
@@ -53,6 +53,8 @@ static LIST_HEAD(vm_list);
static cpumask_t cpus_hardware_enabled;

struct kvm_arch_ops *kvm_arch_ops;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

@@ -3104,7 +3106,8 @@ static void kvm_sched_out(struct preempt_notifier *pn,
	kvm_arch_ops->vcpu_put(vcpu);
}

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
		  struct module *module)
{
	int r;

@@ -3142,6 +3145,14 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
	if (r)
		goto out_free_3;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu), 0, 0);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_4;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
@@ -3156,6 +3167,8 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
	return r;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
@@ -3173,6 +3186,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
void kvm_exit_arch(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
+3 −2
Original line number Diff line number Diff line
@@ -577,7 +577,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
	struct page *page;
	int err;

	svm = kzalloc(sizeof *svm, GFP_KERNEL);
	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
@@ -1849,7 +1849,8 @@ static struct kvm_arch_ops svm_arch_ops = {

static int __init svm_init(void)
{
	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
	return kvm_init_arch(&svm_arch_ops, sizeof(struct vcpu_svm),
			      THIS_MODULE);
}

static void __exit svm_exit(void)
+2 −2
Original line number Diff line number Diff line
@@ -2365,7 +2365,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
	int err;
	struct vcpu_vmx *vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	int cpu;

	if (!vmx)
@@ -2490,7 +2490,7 @@ static int __init vmx_init(void)
	memset(iova, 0xff, PAGE_SIZE);
	kunmap(vmx_io_bitmap_b);

	r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
	r = kvm_init_arch(&vmx_arch_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
	if (r)
		goto out1;