
Commit a9f6cf96 authored by Gleb Natapov

Merge branch 'kvm-ppc-next' of git://github.com/agraf/linux-2.6 into queue

* 'kvm-ppc-next' of git://github.com/agraf/linux-2.6:
  KVM: PPC: Book3S PR: Rework kvmppc_mmu_book3s_64_xlate()
  KVM: PPC: Book3S PR: Make instruction fetch fallback work for system calls
  KVM: PPC: Book3S PR: Don't corrupt guest state when kernel uses VMX
  KVM: PPC: Book3S: Fix compile error in XICS emulation
  KVM: PPC: Book3S PR: return appropriate error when allocation fails
  arch: powerpc: kvm: add signed type cast for comparation
  powerpc/kvm: Copy the pvr value after memset
  KVM: PPC: Book3S PR: Load up SPRG3 register with guest value on guest entry
  kvm/ppc/booke: Don't call kvm_guest_enter twice
  kvm/ppc: Call trace_hardirqs_on before entry
  KVM: PPC: Book3S HV: Allow negative offsets to real-mode hcall handlers
  KVM: PPC: Book3S HV: Correct tlbie usage
  powerpc/kvm: Use 256K chunk to track both RMA and hash page table allocation.
  powerpc/kvm: Contiguous memory allocator based RMA allocation
  powerpc/kvm: Contiguous memory allocator based hash page table allocation
  KVM: PPC: Book3S: Ignore DABR register
  mm/cma: Move dma contiguous changes into a seperate config
parents e5552fd2 bf550fc9
arch/arm/include/asm/dma-contiguous.h +1 −1
@@ -2,7 +2,7 @@
 #define ASMARM_DMA_CONTIGUOUS_H

 #ifdef __KERNEL__
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA

 #include <linux/types.h>
 #include <asm-generic/dma-contiguous.h>
arch/arm/mm/dma-mapping.c +3 −3
@@ -358,7 +358,7 @@ static int __init atomic_pool_init(void)
 	if (!pages)
 		goto no_pages;

-	if (IS_ENABLED(CONFIG_CMA))
+	if (IS_ENABLED(CONFIG_DMA_CMA))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
 					      atomic_pool_init);
 	else
@@ -670,7 +670,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!IS_ENABLED(CONFIG_CMA))
+	else if (!IS_ENABLED(CONFIG_DMA_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
 	else
 		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
@@ -759,7 +759,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		__dma_free_buffer(page, size);
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
-	} else if (!IS_ENABLED(CONFIG_CMA)) {
+	} else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
 		__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
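The hunks above follow from the last commit in the shortlog, which gives the DMA contiguous allocator its own config symbol (CONFIG_DMA_CMA) instead of overloading CONFIG_CMA. Because IS_ENABLED() evaluates to a compile-time constant, the compiler discards the untaken allocation branch while both branches still get syntax-checked. A minimal standalone sketch of that pattern; the ENABLE_DMA_CMA macro and both allocator names are illustrative stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(): here simply a config macro
 * defined to 1 or 0. The real implementation in <linux/kconfig.h>
 * also copes with the macro being entirely undefined. */
#define ENABLE_DMA_CMA 1

static void *alloc_from_contiguous(size_t size)	/* stand-in for the CMA path */
{
	printf("allocating %zu bytes from the contiguous pool\n", size);
	return NULL;	/* sketch only */
}

static void *alloc_remap_buffer(size_t size)	/* stand-in for the non-CMA path */
{
	printf("allocating %zu bytes via remap\n", size);
	return NULL;	/* sketch only */
}

static void *dma_alloc_sketch(size_t size)
{
	/* Constant condition: the dead branch is eliminated at compile
	 * time, which is the advantage of IS_ENABLED() over #ifdef. */
	if (ENABLE_DMA_CMA)
		return alloc_from_contiguous(size);
	else
		return alloc_remap_buffer(size);
}

int main(void)
{
	dma_alloc_sketch(4096);
	return 0;
}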
arch/powerpc/include/asm/kvm_book3s.h +38 −0
@@ -334,6 +334,27 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 	return r;
 }

+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu) - 4;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+	r = svcpu->last_inst;
+	svcpu_put(svcpu);
+	return r;
+}
+
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
@@ -446,6 +467,23 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 	return vcpu->arch.last_inst;
 }

+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu) - 4;
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+	return vcpu->arch.last_inst;
+}
+
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault_dar;
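For context, a sketch of the call site this helper exists for, modeled on the Book3S PR syscall exit path; the case label and locals are paraphrased, not quoted from the series:

case BOOK3S_INTERRUPT_SYSCALL:
	/* A PAPR guest makes hypercalls with "sc 1", whose encoding is
	 * 0x44000022. SRR0 points past the sc, so kvmppc_get_last_sc()
	 * fetches from pc - 4, re-reading guest memory whenever the
	 * exit path failed to capture the instruction. */
	if (vcpu->arch.papr_enabled &&
	    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		/* privileged "sc 1": emulate the hypercall */
		ulong cmd = kvmppc_get_gpr(vcpu, 3);
		r = kvmppc_h_pr(vcpu, cmd);
	}
	break;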
arch/powerpc/include/asm/kvm_book3s_64.h +2 −2
@@ -37,7 +37,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)

 #ifdef CONFIG_KVM_BOOK3S_64_HV
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
-extern int kvm_hpt_order;		/* order of preallocated HPTs */
+extern unsigned long kvm_rma_pages;
 #endif

 #define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
@@ -100,7 +100,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 			/* (masks depend on page size) */
 			rb |= 0x1000;		/* page encoding in LP field */
 			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
+			rb |= ((va_low << 4) & 0xf0);	/* AVAL field (P7 doesn't seem to care) */
 		}
 	} else {
 		/* 4kB page */
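The second hunk changes what compute_tlbie_rb() contributes to the AVAL bits for a 64kB page: (va_low & 0xfe) kept bits 7:1 of va_low in place, while ((va_low << 4) & 0xf0) moves the low nibble of va_low up into bits 7:4. A small userspace check of the two expressions over a few sample values:

#include <stdio.h>

/* Compare the old and new AVAL contributions from compute_tlbie_rb()
 * for a sample of va_low values. Pure arithmetic; no kernel context
 * required. */
int main(void)
{
	unsigned long samples[] = { 0x00, 0x5b, 0x7f, 0xfe };
	for (int i = 0; i < 4; i++) {
		unsigned long va_low = samples[i];
		unsigned long old_bits = va_low & 0xfe;		/* before the fix */
		unsigned long new_bits = (va_low << 4) & 0xf0;	/* after the fix */
		printf("va_low=0x%02lx old=0x%02lx new=0x%02lx\n",
		       va_low, old_bits, new_bits);
	}
	return 0;
}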
arch/powerpc/include/asm/kvm_host.h +5 −9
@@ -183,13 +183,9 @@ struct kvmppc_spapr_tce_table {
 	struct page *pages[0];
 };

-struct kvmppc_linear_info {
-	void		*base_virt;
-	unsigned long	 base_pfn;
-	unsigned long	 npages;
-	struct list_head list;
-	atomic_t	 use_count;
-	int		 type;
+struct kvm_rma_info {
+	atomic_t use_count;
+	unsigned long base_pfn;
 };

 /* XICS components, defined in book3s_xics.c */
@@ -246,7 +242,7 @@ struct kvm_arch {
 	int tlbie_lock;
 	unsigned long lpcr;
 	unsigned long rmor;
-	struct kvmppc_linear_info *rma;
+	struct kvm_rma_info *rma;
 	unsigned long vrma_slb_v;
 	int rma_setup_done;
 	int using_mmu_notifiers;
@@ -259,7 +255,7 @@ struct kvm_arch {
 	spinlock_t slot_phys_lock;
 	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
-	struct kvmppc_linear_info *hpt_li;
+	int hpt_cma_alloc;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
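The replacement type is deliberately minimal: with CMA doing the bookkeeping, an RMA region reduces to a refcount plus a base page frame. A hedged usage sketch, assuming the series' kvm_alloc_rma()/kvm_release_rma() entry points and a hypothetical map_rma_for_guest() helper:

/* Illustrative only: how a Book3S HV setup path might obtain and
 * release a CMA-backed RMA region. map_rma_for_guest() is a made-up
 * helper; the alloc/release pair is the real API. */
static int setup_rma_sketch(struct kvm *kvm)
{
	struct kvm_rma_info *ri;

	ri = kvm_alloc_rma();	/* NULL when the CMA pool is exhausted */
	if (!ri)
		return -ENOMEM;

	/* ri->base_pfn is the first page frame of the region;
	 * kvm_rma_pages (declared above) is its fixed size in pages. */
	if (map_rma_for_guest(kvm, ri->base_pfn, kvm_rma_pages) < 0) {
		kvm_release_rma(ri);	/* drops use_count, frees at zero */
		return -EIO;
	}
	kvm->arch.rma = ri;	/* matches the struct kvm_arch field above */
	return 0;
}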