
Commit a64f273a authored by Takuya Yoshikawa, committed by Avi Kivity

KVM: Split lpage_info creation out from __kvm_set_memory_region()



This makes it easy to make lpage_info architecture specific.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent fb03cb6f
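
The hunks below move the per-slot large-page bookkeeping out of __kvm_set_memory_region() and into a dedicated helper. As a rough illustration of that pattern only (not kernel code), here is a minimal userspace sketch of splitting per-level metadata allocation into a helper that either fully succeeds or frees everything it allocated and returns -ENOMEM; the names NR_LEVELS, struct slot, and create_level_info are hypothetical stand-ins.

/*
 * Minimal userspace sketch of the refactoring pattern: the caller delegates
 * per-level metadata allocation to one helper with all-or-nothing cleanup.
 * Names here are hypothetical and only mirror the shape of the kernel change.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define NR_LEVELS 3	/* hypothetical stand-in for KVM_NR_PAGE_SIZES - 1 */

struct slot {
	unsigned long npages;
	int *level_info[NR_LEVELS];	/* stand-in for lpage_info */
};

/* Allocate per-level arrays; on failure, undo partial work and report -ENOMEM. */
static int create_level_info(struct slot *slot, unsigned long npages)
{
	int i;

	for (i = 0; i < NR_LEVELS; ++i) {
		/* Each coarser level covers more base pages, so it needs fewer entries. */
		unsigned long entries = (npages >> (i + 1)) + 1;

		slot->level_info[i] = calloc(entries, sizeof(*slot->level_info[i]));
		if (!slot->level_info[i])
			goto out_free;
	}
	return 0;

out_free:
	for (i = 0; i < NR_LEVELS; ++i) {
		free(slot->level_info[i]);
		slot->level_info[i] = NULL;
	}
	return -ENOMEM;
}

int main(void)
{
	struct slot s;

	memset(&s, 0, sizeof(s));
	s.npages = 1024;

	/* The caller shrinks to a single call plus an error branch. */
	if (create_level_info(&s, s.npages)) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	printf("per-level metadata allocated for %lu pages\n", s.npages);
	return 0;
}

With the allocation behind a single helper like this, an architecture could later provide its own implementation without touching the generic caller, which is what the commit message means by making lpage_info creation easy to specialize per architecture.
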
+52 −31
@@ -685,6 +685,56 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
 	slots->generation++;
 }
 
+#ifndef CONFIG_S390
+static int create_lpage_info(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	int i;
+
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		unsigned long ugfn;
+		int lpages;
+		int level = i + 2;
+
+		if (slot->lpage_info[i])
+			continue;
+
+		lpages = gfn_to_index(slot->base_gfn + npages - 1,
+				      slot->base_gfn, level) + 1;
+
+		slot->lpage_info[i] = vzalloc(lpages * sizeof(*slot->lpage_info[i]));
+		if (!slot->lpage_info[i])
+			goto out_free;
+
+		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
+			slot->lpage_info[i][0].write_count = 1;
+		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
+			slot->lpage_info[i][lpages - 1].write_count = 1;
+		ugfn = slot->userspace_addr >> PAGE_SHIFT;
+		/*
+		 * If the gfn and userspace address are not aligned wrt each
+		 * other, or if explicitly asked to, disable large page
+		 * support for this slot
+		 */
+		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
+		    !largepages_enabled) {
+			unsigned long j;
+
+			for (j = 0; j < lpages; ++j)
+				slot->lpage_info[i][j].write_count = 1;
+		}
+	}
+
+	return 0;
+
+out_free:
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		vfree(slot->lpage_info[i]);
+		slot->lpage_info[i] = NULL;
+	}
+	return -ENOMEM;
+}
+#endif /* not defined CONFIG_S390 */
+
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -778,38 +828,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (!npages)
 		goto skip_lpage;
 
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		unsigned long ugfn;
-		unsigned long j;
-		int lpages;
-		int level = i + 2;
-
-		if (new.lpage_info[i])
-			continue;
-
-		lpages = gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1;
-
-		new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
-
-		if (!new.lpage_info[i])
-			goto out_free;
-
-		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-			new.lpage_info[i][0].write_count = 1;
-		if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-			new.lpage_info[i][lpages - 1].write_count = 1;
-		ugfn = new.userspace_addr >> PAGE_SHIFT;
-		/*
-		 * If the gfn and userspace address are not aligned wrt each
-		 * other, or if explicitly asked to, disable large page
-		 * support for this slot
-		 */
-		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
-		    !largepages_enabled)
-			for (j = 0; j < lpages; ++j)
-				new.lpage_info[i][j].write_count = 1;
-	}
+	if (create_lpage_info(&new, npages))
+		goto out_free;
 
 skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */