Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7a905b14 authored by Takuya Yoshikawa, committed by Gleb Natapov
Browse files

KVM: Remove user_alloc from struct kvm_memory_slot



This field was needed to differentiate memory slots created by the new
API, KVM_SET_USER_MEMORY_REGION, from those by the old equivalent,
KVM_SET_MEMORY_REGION, whose support was dropped long before:

  commit b74a07be
  KVM: Remove kernel-allocated memory regions

Although we also have private memory slots to which KVM allocates
memory with vm_mmap(), !user_alloc slots in other words, the slot id
should be enough for differentiating them.

Note: corresponding function parameters will be removed later.

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent 257090f7
Loading
Loading
Loading
Loading
+16 −21
Original line number Diff line number Diff line
@@ -6897,32 +6897,27 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
				bool user_alloc)
{
	int npages = memslot->npages;
	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;

	/* Prevent internal slot pages from being moved by fork()/COW. */
	if (memslot->id >= KVM_USER_MEM_SLOTS)
		map_flags = MAP_SHARED | MAP_ANONYMOUS;

	/*To keep backward compatibility with older userspace,
	 *x86 needs to handle !user_alloc case.
	/*
	 * Only private memory slots need to be mapped here since
	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
	 */
	if (!user_alloc) {
		if (npages && !old.npages) {
	if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
		unsigned long userspace_addr;

			userspace_addr = vm_mmap(NULL, 0,
						 npages * PAGE_SIZE,
		/*
		 * MAP_SHARED to prevent internal slot pages from being moved
		 * by fork()/COW.
		 */
		userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
					 PROT_READ | PROT_WRITE,
						 map_flags,
						 0);
					 MAP_SHARED | MAP_ANONYMOUS, 0);

		if (IS_ERR((void *)userspace_addr))
			return PTR_ERR((void *)userspace_addr);

		memslot->userspace_addr = userspace_addr;
	}
	}


	return 0;
}
@@ -6935,7 +6930,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,

	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;

	if (!user_alloc && !old.user_alloc && old.npages && !npages) {
	if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) {
		int ret;

		ret = vm_munmap(old.userspace_addr,
+0 −1
Original line number Diff line number Diff line
@@ -273,7 +273,6 @@ struct kvm_memory_slot {
	unsigned long userspace_addr;
	u32 flags;
	short id;
	bool user_alloc;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+0 −1
Original line number Diff line number Diff line
@@ -839,7 +839,6 @@ int __kvm_set_memory_region(struct kvm *kvm,

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(&new, npages))