Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f7784b8e authored by Marcelo Tosatti
Browse files

KVM: split kvm_arch_set_memory_region into prepare and commit



Required for SRCU conversion later.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent fef9cce0
Loading
Loading
Loading
Loading
+12 −4
Original line number Diff line number Diff line
@@ -1578,15 +1578,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
	return r;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem,
int kvm_arch_prepare_memory_region(struct kvm *kvm,
		struct kvm_memory_slot *memslot,
		struct kvm_memory_slot old,
		struct kvm_userspace_memory_region *mem,
		int user_alloc)
{
	unsigned long i;
	unsigned long pfn;
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
	int npages = memslot->npages;
	unsigned long base_gfn = memslot->base_gfn;

	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1610,6 +1610,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
	return 0;
}

/*
 * Commit phase of a memory-slot update.
 *
 * On this architecture there is no post-update work to perform, so the
 * hook is intentionally empty; all parameters are accepted only to
 * satisfy the arch-independent kvm_arch_commit_memory_region contract.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem,
		struct kvm_memory_slot old,
		int user_alloc)
{
	/* Nothing to do. */
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_flush_remote_tlbs(kvm);
+14 −4
Original line number Diff line number Diff line
@@ -165,14 +165,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
/*
 * Prepare phase of a memory-slot update.
 *
 * No architecture-specific validation or setup is required here, so the
 * hook unconditionally reports success.
 *
 * Returns 0 (always succeeds).
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
	return 0;
}

/*
 * Commit phase of a memory-slot update.
 *
 * This architecture needs no follow-up work after a slot change, so the
 * hook body is empty; the parameters exist only to match the
 * arch-independent callback signature.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
               struct kvm_userspace_memory_region *mem,
               struct kvm_memory_slot old,
               int user_alloc)
{
	/* Nothing to do. */
}


void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
+16 −9
Original line number Diff line number Diff line
@@ -690,14 +690,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
@@ -720,14 +718,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
	if (!user_alloc)
		return -EINVAL;

	return 0;
}

/*
 * Commit phase of a memory-slot update: after the slot has changed,
 * ask every vcpu to reload its MMU state.
 *
 * NOTE(review): assumes kvm_for_each_vcpu / kvm_s390_inject_sigp_stop
 * behave as elsewhere in this file — defined outside this view.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* Skip vcpus that already have a reload request pending. */
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
	/*
	 * Fix: the displayed body ended with "return 0;", which is a C
	 * constraint violation in a void function (ISO C 6.8.6.4) — a
	 * leftover from the old int-returning kvm_arch_set_memory_region.
	 * It is removed here; a void function simply falls off the end.
	 */
}

void kvm_arch_flush_shadow(struct kvm *kvm)
+29 −22
Original line number Diff line number Diff line
@@ -5228,13 +5228,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
	kfree(kvm);
}

int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
	int npages = memslot->npages;

	/*To keep backward compatibility with older userspace,
	 *x86 needs to hanlde !user_alloc case.
@@ -5254,12 +5254,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			/* set userspace_addr atomically for kvm_hva_to_rmapp */
			spin_lock(&kvm->mmu_lock);
			memslot->userspace_addr = userspace_addr;
			spin_unlock(&kvm->mmu_lock);
		} else {
			if (!old.user_alloc && old.rmap) {
		}
	}


	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{

	int npages = mem->memory_size >> PAGE_SHIFT;

	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
		int ret;

		down_write(&current->mm->mmap_sem);
@@ -5271,8 +5282,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}
		}
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
@@ -5282,8 +5291,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
+6 −1
Original line number Diff line number Diff line
@@ -253,7 +253,12 @@ int kvm_set_memory_region(struct kvm *kvm,
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
Loading