
Commit bc6678a3 authored by Marcelo Tosatti

KVM: introduce kvm->srcu and convert kvm_set_memory_region to SRCU update



Use two steps for memslot deletion: mark the slot invalid (which stops
instantiation of new shadow pages for that slot, but allows destruction),
then instantiate the new empty slot.

Also simplifies kvm_handle_hva locking.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 3ad26d81
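For orientation before the hunks below: the update side follows a publish-then-synchronize pattern on kvm->memslots. The sketch that follows is illustrative only, not the patch's __kvm_set_memory_region; the helper name delete_memslot_example() and its error handling are assumptions, and it presumes the caller already holds slots_lock for write.

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/srcu.h>

/*
 * Illustrative sketch only (not the patch itself): two-step memslot
 * deletion under kvm->srcu. Assumes slots_lock is held for write.
 */
static int delete_memslot_example(struct kvm *kvm, int slot_id)
{
	struct kvm_memslots *old, *invalid, *final;

	old = kvm->memslots;

	/* Step 1: publish a copy with the target slot marked invalid. */
	invalid = kmemdup(old, sizeof(*old), GFP_KERNEL);
	if (!invalid)
		return -ENOMEM;
	invalid->memslots[slot_id].flags |= KVM_MEMSLOT_INVALID;
	rcu_assign_pointer(kvm->memslots, invalid);

	/* Wait until no SRCU reader can still see the old array. */
	synchronize_srcu(&kvm->srcu);

	/* Step 2: publish the final layout with the slot emptied. */
	final = kmemdup(invalid, sizeof(*invalid), GFP_KERNEL);
	if (!final) {
		/* sketch-level fallback: the invalid copy stays installed */
		kfree(old);
		return -ENOMEM;
	}
	memset(&final->memslots[slot_id], 0, sizeof(final->memslots[slot_id]));
	rcu_assign_pointer(kvm->memslots, final);
	synchronize_srcu(&kvm->srcu);

	kfree(old);
	kfree(invalid);
	return 0;
}

A reader that dereferences kvm->memslots inside an srcu_read_lock(&kvm->srcu) section sees either the old array, the invalid copy (and so stops instantiating shadow pages for the slot being deleted), or the final layout; synchronize_srcu() guarantees no reader still holds an array that is about to be freed.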
+3 −1
@@ -1382,7 +1382,7 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 	int i, j;
 	unsigned long base_gfn;
 
-	slots = kvm->memslots;
+	slots = rcu_dereference(kvm->memslots);
 	for (i = 0; i < slots->nmemslots; i++) {
 		memslot = &slots->memslots[i];
 		base_gfn = memslot->base_gfn;
@@ -1837,6 +1837,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
+	down_write(&kvm->slots_lock);
 	spin_lock(&kvm->arch.dirty_log_lock);
 
 	r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1856,6 +1857,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	}
 	r = 0;
 out:
+	up_write(&kvm->slots_lock);
 	spin_unlock(&kvm->arch.dirty_log_lock);
 	return r;
 }
+14 −14
@@ -29,6 +29,7 @@
 #include <linux/swap.h>
 #include <linux/hugetlb.h>
 #include <linux/compiler.h>
+#include <linux/srcu.h>
 
 #include <asm/page.h>
 #include <asm/cmpxchg.h>
@@ -807,21 +808,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 {
 	int i, j;
 	int retval = 0;
-	struct kvm_memslots *slots = kvm->memslots;
+	struct kvm_memslots *slots;
+
+	slots = rcu_dereference(kvm->memslots);
 
-	/*
-	 * If mmap_sem isn't taken, we can look the memslots with only
-	 * the mmu_lock by skipping over the slots with userspace_addr == 0.
-	 */
 	for (i = 0; i < slots->nmemslots; i++) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
 		unsigned long start = memslot->userspace_addr;
 		unsigned long end;
 
-		/* mmu_lock protects userspace_addr */
-		if (!start)
-			continue;
-
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
@@ -1617,7 +1612,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 {
-	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
+	int slot = memslot_id(kvm, gfn);
 	struct kvm_mmu_page *sp = page_header(__pa(pte));
 
 	__set_bit(slot, sp->slot_bitmap);
@@ -3021,9 +3016,11 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 	int i;
 	unsigned int nr_mmu_pages;
 	unsigned int  nr_pages = 0;
+	struct kvm_memslots *slots;
 
-	for (i = 0; i < kvm->memslots->nmemslots; i++)
-		nr_pages += kvm->memslots->memslots[i].npages;
+	slots = rcu_dereference(kvm->memslots);
+	for (i = 0; i < slots->nmemslots; i++)
+		nr_pages += slots->memslots[i].npages;
 
 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
 	nr_mmu_pages = max(nr_mmu_pages,
@@ -3293,10 +3290,12 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
 static int count_rmaps(struct kvm_vcpu *vcpu)
 {
 	int nmaps = 0;
-	int i, j, k;
+	int i, j, k, idx;
 
+	idx = srcu_read_lock(&kvm->srcu);
+	slots = rcu_dereference(kvm->memslots);
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-		struct kvm_memory_slot *m = &vcpu->kvm->memslots->memslots[i];
+		struct kvm_memory_slot *m = &slots->memslots[i];
 		struct kvm_rmap_desc *d;
 
 		for (j = 0; j < m->npages; ++j) {
@@ -3319,6 +3318,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
 			}
 		}
 	}
+	srcu_read_unlock(&kvm->srcu, idx);
 	return nmaps;
 }
 
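The count_rmaps() hunks above show the reader-side convention this patch introduces: take srcu_read_lock(&kvm->srcu), fetch the array with rcu_dereference(kvm->memslots), and drop the SRCU read lock before the array can be freed. A minimal self-contained sketch of that pattern; the helper name count_slot_pages_example() is made up for illustration:

/*
 * Sketch of the SRCU reader pattern used above; not part of the patch.
 */
static unsigned long count_slot_pages_example(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	unsigned long pages = 0;
	int i, idx;

	idx = srcu_read_lock(&kvm->srcu);		/* enter read-side section */
	slots = rcu_dereference(kvm->memslots);		/* fetch current array */
	for (i = 0; i < slots->nmemslots; i++)
		pages += slots->memslots[i].npages;
	srcu_read_unlock(&kvm->srcu, idx);		/* slots may be freed after this */

	return pages;
}

Callers that already run under a lock which excludes memslot updates (for example slots_lock held for write) can use rcu_dereference() without opening their own SRCU section, which is why some hunks add only the dereference.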
+5 −1
@@ -1503,7 +1503,11 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 static gva_t rmode_tss_base(struct kvm *kvm)
 {
 	if (!kvm->arch.tss_addr) {
-		gfn_t base_gfn = kvm->memslots->memslots[0].base_gfn +
+		struct kvm_memslots *slots;
+		gfn_t base_gfn;
+
+		slots = rcu_dereference(kvm->memslots);
+		base_gfn = kvm->memslots->memslots[0].base_gfn +
 				 kvm->memslots->memslots[0].npages - 3;
 		return base_gfn << PAGE_SHIFT;
 	}
+1 −1
@@ -103,7 +103,7 @@ struct kvm_userspace_memory_region {
 
 /* for kvm_memory_region::flags */
 #define KVM_MEM_LOG_DIRTY_PAGES  1UL
-
+#define KVM_MEMSLOT_INVALID      (1UL << 1)
 
 /* for KVM_IRQ_LINE */
 struct kvm_irq_level {
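KVM_MEMSLOT_INVALID is the flag step one of the deletion sets on the published copy: the slot's pages remain reachable for teardown, but fault paths must stop creating new mappings through it. A hedged sketch of the kind of check a fault path can apply; the helper name is an assumption, not part of this patch:

/*
 * Illustrative only: treat a slot marked KVM_MEMSLOT_INVALID as unusable
 * for new mappings, while leaving it visible for teardown.
 */
static int memslot_is_usable(struct kvm_memory_slot *slot)
{
	return slot && slot->npages && !(slot->flags & KVM_MEMSLOT_INVALID);
}

A fault handler would bail out or retry when this returns zero, instead of instantiating a shadow page for a slot that is in the middle of being deleted.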
+2 −5
@@ -162,6 +162,7 @@ struct kvm {
 	struct rw_semaphore slots_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
 	struct kvm_memslots *memslots;
+	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
 	u32 bsp_vcpu_id;
 	struct kvm_vcpu *bsp_vcpu;
@@ -275,6 +276,7 @@ void kvm_set_page_accessed(struct page *page);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn);
+int memslot_id(struct kvm *kvm, gfn_t gfn);
 void kvm_release_pfn_dirty(pfn_t);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
@@ -490,11 +492,6 @@ static inline void kvm_guest_exit(void)
 	current->flags &= ~PF_VCPU;
 }
 
-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
-	return slot - kvm->memslots->memslots;
-}
-
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
 {
 	return (gpa_t)gfn << PAGE_SHIFT;