Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c45b810 authored by Aneesh Kumar K.V, committed by Alexander Graf
Browse files

powerpc/kvm: Contiguous memory allocator based RMA allocation



Older versions of the POWER architecture use the Real Mode Offset register and the Real Mode Limit
Selector for mapping the guest Real Mode Area. The guest RMA should be physically
contiguous since we use the range when address translation is not enabled.

This patch switches the RMA allocation code to use the contiguous memory allocator. The patch
also removes the linear allocator, which is not used any more.

Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent fa61a4e3
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -37,6 +37,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)

#ifdef CONFIG_KVM_BOOK3S_64_HV
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
extern unsigned long kvm_rma_pages;
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
+4 −8
Original line number Diff line number Diff line
@@ -183,13 +183,9 @@ struct kvmppc_spapr_tce_table {
	struct page *pages[0];
};

struct kvmppc_linear_info {
	void		*base_virt;
	unsigned long	 base_pfn;
	unsigned long	 npages;
	struct list_head list;
struct kvm_rma_info {
	atomic_t use_count;
	int		 type;
	unsigned long base_pfn;
};

/* XICS components, defined in book3s_xics.c */
@@ -246,7 +242,7 @@ struct kvm_arch {
	int tlbie_lock;
	unsigned long lpcr;
	unsigned long rmor;
	struct kvmppc_linear_info *rma;
	struct kvm_rma_info *rma;
	unsigned long vrma_slb_v;
	int rma_setup_done;
	int using_mmu_notifiers;
+2 −6
Original line number Diff line number Diff line
@@ -137,8 +137,8 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
				struct kvm_allocate_rma *rma);
extern struct kvmppc_linear_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvmppc_linear_info *ri);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
@@ -282,7 +282,6 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
}

extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
extern void kvm_linear_init(void);

#else
static inline void __init kvm_cma_reserve(void)
@@ -291,9 +290,6 @@ static inline void __init kvm_cma_reserve(void)
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvm_linear_init(void)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
+0 −2
Original line number Diff line number Diff line
@@ -611,8 +611,6 @@ void __init setup_arch(char **cmdline_p)
	/* Initialize the MMU context management stuff */
	mmu_context_init();

	kvm_linear_init();

	/* Interrupt code needs to be 64K-aligned */
	if ((unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
+18 −9
Original line number Diff line number Diff line
@@ -1511,10 +1511,10 @@ static inline int lpcr_rmls(unsigned long rma_size)

static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
	struct page *page;
	struct kvm_rma_info *ri = vma->vm_file->private_data;

	if (vmf->pgoff >= ri->npages)
	if (vmf->pgoff >= kvm_rma_pages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
@@ -1536,7 +1536,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_linear_info *ri = filp->private_data;
	struct kvm_rma_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
@@ -1549,8 +1549,17 @@ static const struct file_operations kvm_rma_fops = {

long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_linear_info *ri;
	long fd;
	struct kvm_rma_info *ri;
	/*
	 * Only do this on PPC970 in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return -EINVAL;

	if (!kvm_rma_pages)
		return -EINVAL;

	ri = kvm_alloc_rma();
	if (!ri)
@@ -1560,7 +1569,7 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
	if (fd < 0)
		kvm_release_rma(ri);

	ret->rma_size = ri->npages << PAGE_SHIFT;
	ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
	return fd;
}

@@ -1725,7 +1734,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_linear_info *ri = NULL;
	struct kvm_rma_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
@@ -1803,7 +1812,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)

	} else {
		/* Set up to use an RMO region */
		rma_size = ri->npages;
		rma_size = kvm_rma_pages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
@@ -1831,14 +1840,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
			kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = ri->npages;
		npages = kvm_rma_pages;
		porder = __ilog2(npages);
		physp = memslot->arch.slot_phys;
		if (physp) {
Loading