Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5972e953 authored by Markus Rechberger, committed by Avi Kivity
Browse files

KVM: Use page_private()/set_page_private() apis



Besides using an established api, this allows using kvm in older kernels.

Signed-off-by: Markus Rechberger <markus.rechberger@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 9d8f549d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -523,7 +523,7 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page->private;
	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
+1 −1
Original line number Diff line number Diff line
@@ -670,7 +670,7 @@ static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
						     | __GFP_ZERO);
			if (!new.phys_mem[i])
				goto out_free;
 			new.phys_mem[i]->private = 0;
			set_page_private(new.phys_mem[i],0);
		}
	}

+18 −18
Original line number Diff line number Diff line
@@ -298,18 +298,18 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page->private) {
	if (!page_private(page)) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		page->private = (unsigned long)spte;
	} else if (!(page->private & 1)) {
		set_page_private(page,(unsigned long)spte);
	} else if (!(page_private(page) & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)page->private;
		desc->shadow_ptes[0] = (u64 *)page_private(page);
		desc->shadow_ptes[1] = spte;
		page->private = (unsigned long)desc | 1;
		set_page_private(page,(unsigned long)desc | 1);
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
@@ -337,12 +337,12 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		page->private = (unsigned long)desc->shadow_ptes[0];
		set_page_private(page,(unsigned long)desc->shadow_ptes[0]);
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			page->private = (unsigned long)desc->more | 1;
			set_page_private(page,(unsigned long)desc->more | 1);
	mmu_free_rmap_desc(vcpu, desc);
}

@@ -356,20 +356,20 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page->private) {
	if (!page_private(page)) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(page->private & 1)) {
	} else if (!(page_private(page) & 1)) {
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)page->private != spte) {
		if ((u64 *)page_private(page) != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		page->private = 0;
		set_page_private(page,0);
	} else {
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
@@ -398,11 +398,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
	BUG_ON(!slot);
	page = gfn_to_page(slot, gfn);

	while (page->private) {
		if (!(page->private & 1))
			spte = (u64 *)page->private;
	while (page_private(page)) {
		if (!(page_private(page) & 1))
			spte = (u64 *)page_private(page);
		else {
			desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
			desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
			spte = desc->shadow_ptes[0];
		}
		BUG_ON(!spte);
@@ -1218,7 +1218,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
		INIT_LIST_HEAD(&page_header->link);
		if ((page = alloc_page(GFP_KERNEL)) == NULL)
			goto error_1;
		page->private = (unsigned long)page_header;
		set_page_private(page, (unsigned long)page_header);
		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
		list_add(&page_header->link, &vcpu->free_pages);