Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d89f5eff authored by Jan Kiszka, committed by Avi Kivity
Browse files

KVM: Clean up vm creation and release



IA64 support forces us to abstract the allocation of the kvm structure.
But instead of mixing this up with arch-specific initialization and
doing the same on destruction, split both steps. This allows moving
generic destruction calls into generic code.

It also fixes error clean-up on failures of kvm_create_vm for IA64.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 9d893c6b
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -590,6 +590,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu);

#define __KVM_HAVE_ARCH_VM_ALLOC 1
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

#endif /* __ASSEMBLY__*/

#endif
+7 −21
Original line number Diff line number Diff line
@@ -749,7 +749,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	return r;
}

static struct kvm *kvm_alloc_kvm(void)
struct kvm *kvm_arch_alloc_vm(void)
{

	struct kvm *kvm;
@@ -760,7 +760,7 @@ static struct kvm *kvm_alloc_kvm(void)
	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));

	if (!vm_base)
		return ERR_PTR(-ENOMEM);
		return NULL;

	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
	kvm = (struct kvm *)(vm_base +
@@ -806,10 +806,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660

static void kvm_init_vm(struct kvm *kvm)
int kvm_arch_init_vm(struct kvm *kvm)
{
	BUG_ON(!kvm);

	kvm->arch.is_sn2 = ia64_platform_is("sn2");

	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
	kvm->arch.vmm_init_rr = VMM_INIT_RR;
@@ -823,21 +825,8 @@ static void kvm_init_vm(struct kvm *kvm)

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
}

struct  kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kvm_alloc_kvm();

	if (IS_ERR(kvm))
		return ERR_PTR(-ENOMEM);

	kvm->arch.is_sn2 = ia64_platform_is("sn2");

	kvm_init_vm(kvm);

	return kvm;

	return 0;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
@@ -1357,7 +1346,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
	return -EINVAL;
}

static void free_kvm(struct kvm *kvm)
void kvm_arch_free_vm(struct kvm *kvm)
{
	unsigned long vm_base = kvm->arch.vm_base;

@@ -1399,9 +1388,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
#endif
	kfree(kvm->arch.vioapic);
	kvm_release_vm_pages(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	free_kvm(kvm);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+3 −17
Original line number Diff line number Diff line
@@ -145,18 +145,12 @@ void kvm_arch_check_processor_compat(void *rtn)
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

struct kvm *kvm_arch_create_vm(void)
int kvm_arch_init_vm(struct kvm *kvm)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
	return 0;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;
@@ -176,14 +170,6 @@ void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;
+6 −17
Original line number Diff line number Diff line
@@ -164,24 +164,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
	return r;
}

struct kvm *kvm_arch_create_vm(void)
int kvm_arch_init_vm(struct kvm *kvm)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;
		goto out_err;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

@@ -195,13 +189,11 @@ struct kvm *kvm_arch_create_vm(void)
	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
	return 0;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -240,11 +232,8 @@ void kvm_arch_sync_events(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

/* Section: vcpu related */
+2 −10
Original line number Diff line number Diff line
@@ -5961,13 +5961,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
	free_page((unsigned long)vcpu->arch.pio_data);
}

struct  kvm *kvm_arch_create_vm(void)
int kvm_arch_init_vm(struct kvm *kvm)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

@@ -5976,7 +5971,7 @@ struct kvm *kvm_arch_create_vm(void)

	spin_lock_init(&kvm->arch.tsc_write_lock);

	return kvm;
	return 0;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@ -6021,13 +6016,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
Loading