arch/ia64/include/asm/kvm_host.h  +2 −1

@@ -235,7 +235,8 @@ struct kvm_vm_data {
 #define KVM_REQ_PTC_G		32
 #define KVM_REQ_RESUME		33
 
-#define KVM_PAGES_PER_HPAGE	1
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	1
 
 struct kvm;
 struct kvm_vcpu;
arch/powerpc/include/asm/kvm_host.h  +2 −1

@@ -34,7 +34,8 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
-#define KVM_PAGES_PER_HPAGE (1UL << 31)
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
 
 struct kvm;
 struct kvm_run;
arch/s390/include/asm/kvm_host.h  +5 −1

@@ -40,7 +40,11 @@ struct sca_block {
 	struct sca_entry cpu[64];
 } __attribute__((packed));
 
-#define KVM_PAGES_PER_HPAGE 256
+#define KVM_NR_PAGE_SIZES 2
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
+#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
 #define CPUSTAT_HOST	0x80000000
 #define CPUSTAT_WAIT	0x10000000
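The s390 step of 8 bits per level makes KVM_HPAGE_SIZE(2) a 1 MiB segment, so the new per-level macro reproduces the old hardcoded value of 256 pages. A minimal sketch of that arithmetic, assuming a 4 KiB base page (PAGE_SHIFT of 12) and keeping only the macros needed for the check:

#include <assert.h>

/* Assumption: 4 KiB base pages, i.e. PAGE_SHIFT of 12. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* The definitions introduced by the s390 hunk above. */
#define KVM_HPAGE_SHIFT(x)     (PAGE_SHIFT + ((x) - 1) * 8)
#define KVM_HPAGE_SIZE(x)      (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

int main(void)
{
	/* Level 1 is the base page size: one page per "hugepage". */
	assert(KVM_PAGES_PER_HPAGE(1) == 1);
	/* Level 2 is a 1 MiB segment: 256 base pages, the value the
	 * old scalar KVM_PAGES_PER_HPAGE hardcoded. */
	assert(KVM_PAGES_PER_HPAGE(2) == 256);
	return 0;
}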
arch/x86/include/asm/kvm_host.h  +6 −6

@@ -54,12 +54,12 @@
 #define INVALID_PAGE (~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
 
-/* shadow tables are PAE even on non-PAE hosts */
-#define KVM_HPAGE_SHIFT 21
-#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
-#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
-#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+/* KVM Hugepage definitions for x86 */
+#define KVM_NR_PAGE_SIZES	2
+#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
+#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
 #define DE_VECTOR 0
 #define DB_VECTOR 1
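On x86 the step is 9 bits per level (one page-table level of 512 entries), so level 2 yields a shift of 21 and the 2 MiB large page that the old scalar KVM_HPAGE_SHIFT encoded. A quick sanity check of the new macros, again assuming PAGE_SHIFT is 12:

#include <assert.h>

/* Assumption: 4 KiB base pages (PAGE_SHIFT of 12). */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* The definitions introduced by the x86 hunk above. */
#define KVM_HPAGE_SHIFT(x)     (PAGE_SHIFT + (((x) - 1) * 9))
#define KVM_HPAGE_SIZE(x)      (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

int main(void)
{
	/* Level 2 reproduces the old hardcoded shift of 21. */
	assert(KVM_HPAGE_SHIFT(2) == 21);
	assert(KVM_HPAGE_SIZE(2) == 2UL * 1024 * 1024);	/* 2 MiB	 */
	assert(KVM_PAGES_PER_HPAGE(2) == 512);		/* pages per PDE */
	return 0;
}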
arch/x86/kvm/mmu.c  +16 −14

@@ -394,9 +394,9 @@ static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
 {
 	unsigned long idx;
 
-	idx = (gfn / KVM_PAGES_PER_HPAGE) -
-	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
-	return &slot->lpage_info[idx].write_count;
+	idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
+	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
+	return &slot->lpage_info[0][idx].write_count;
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)

@@ -485,10 +485,10 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
 	if (!lpage)
 		return &slot->rmap[gfn - slot->base_gfn];
 
-	idx = (gfn / KVM_PAGES_PER_HPAGE) -
-	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+	idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
+	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
 
-	return &slot->lpage_info[idx].rmap_pde;
+	return &slot->lpage_info[0][idx].rmap_pde;
 }
 
 /*

@@ -731,11 +731,11 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+			int idx = gfn_offset /
+				  KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL);
 			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
 			retval |= handler(kvm,
-					  &memslot->lpage_info[
-						gfn_offset /
-						KVM_PAGES_PER_HPAGE].rmap_pde);
+					  &memslot->lpage_info[0][idx].rmap_pde);
 		}
 	}

@@ -1876,8 +1876,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	pfn_t pfn;
 	unsigned long mmu_seq;
 
-	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
-		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+	if (is_largepage_backed(vcpu, gfn &
+			~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		largepage = 1;
 	}

@@ -2082,8 +2083,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	if (r)
 		return r;
 
-	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
-		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+	if (is_largepage_backed(vcpu, gfn &
+			~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		largepage = 1;
 	}
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;

@@ -2485,7 +2487,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
 	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
-		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		vcpu->arch.update_pte.largepage = 1;
 	}
 	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
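The mmu.c changes keep the index arithmetic as it was but name the hugepage level explicitly and give lpage_info a leading level dimension; with only PT_DIRECTORY_LEVEL wired up so far, that first index is the literal 0. A stand-alone sketch of the slot indexing, using hypothetical simplified types (lpage_info, memslot) in place of the kernel's kvm_memory_slot:

#include <stdio.h>

typedef unsigned long gfn_t;

#define PAGE_SHIFT 12
#define PT_DIRECTORY_LEVEL 2
#define KVM_HPAGE_SHIFT(x)     (PAGE_SHIFT + (((x) - 1) * 9))
#define KVM_PAGES_PER_HPAGE(x) ((1UL << KVM_HPAGE_SHIFT(x)) >> PAGE_SHIFT)

/* Hypothetical stand-ins; the real kvm_memory_slot carries more state. */
struct lpage_info { int write_count; };
struct memslot {
	gfn_t base_gfn;
	struct lpage_info *lpage_info[1];	/* [0] = 2 MiB level */
};

/* Same index computation as slot_largepage_idx() after the patch:
 * the first index picks the hugepage level, the second the large
 * frame within the slot. */
static int *slot_largepage_idx(gfn_t gfn, struct memslot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
	return &slot->lpage_info[0][idx].write_count;
}

int main(void)
{
	struct lpage_info info[4] = { { 0 } };
	struct memslot slot = { .base_gfn = 0x1000, .lpage_info = { info } };

	/* gfn 0x1400 lies two 512-page frames past the slot base. */
	(*slot_largepage_idx(0x1400, &slot))++;
	printf("write_count of frame 2: %d\n", info[2].write_count); /* 1 */
	return 0;
}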