
Commit 2c4aa55a authored by Paolo Bonzini

Merge tag 'signed-kvm-ppc-next' of git://github.com/agraf/linux-2.6 into HEAD

Patch queue for ppc - 2014-12-18

Highlights this time around:

  - Removal of HV support for 970. It became a maintenance burden and received
    practically no testing. POWER8 with HV is available now, so just grab one
    of those boxes if PR isn't enough for you.
  - Some bug fixes and performance improvements
  - Tracepoints for book3s_hv
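The last highlight refers to new tracepoints for the HV (hypervisor-mode) backend. As a rough illustration only — the event name and fields below are hypothetical assumptions, not taken from the trace header this series actually adds — a book3s_hv-style tracepoint is declared with the kernel's standard TRACE_EVENT() machinery:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM kvm_hv

	#if !defined(_TRACE_KVM_HV_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_KVM_HV_EXAMPLE_H

	#include <linux/tracepoint.h>

	/* Hypothetical event: record which vcpu exited and with what trap. */
	TRACE_EVENT(kvm_hv_guest_exit_example,
		TP_PROTO(unsigned int vcpu_id, unsigned long trap),
		TP_ARGS(vcpu_id, trap),

		TP_STRUCT__entry(
			__field(unsigned int,  vcpu_id)
			__field(unsigned long, trap)
		),

		TP_fast_assign(
			__entry->vcpu_id = vcpu_id;
			__entry->trap    = trap;
		),

		TP_printk("VCPU %u: trap=%#lx", __entry->vcpu_id, __entry->trap)
	);

	#endif /* _TRACE_KVM_HV_EXAMPLE_H */

	#include <trace/define_trace.h>

Call sites would then invoke the generated trace_kvm_hv_guest_exit_example() helper, and the event typically shows up under /sys/kernel/debug/tracing/events/kvm_hv/ once enabled.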
parents cb5281a5 476ce5ef
+0 −2
@@ -170,8 +170,6 @@ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
+1 −2
@@ -37,7 +37,6 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
extern unsigned long kvm_rma_pages;
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
@@ -148,7 +147,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
	/* This covers 14..54 bits of va*/
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	rb |= v >> (62 - 8);			/*  B field */
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/*  B field */
	/*
	 * AVA in v had cleared lower 23 bits. We need to derive
	 * that from pteg index
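The two "B field" lines above are the tlbie-operand fix mentioned in the highlights: the old expression shifts the whole doubleword right, so bits of v below the segment-size field can leak into the low bits of rb, while the new expression isolates only the top two bits and places them at rb bits 9:8. A minimal stand-alone sketch of the arithmetic, assuming HPTE_V_SSIZE_SHIFT is 62 as in the powerpc MMU headers (the sample value of v is made up):

	#include <stdio.h>

	#define HPTE_V_SSIZE_SHIFT	62	/* segment-size field lives in bits 63:62 */

	int main(void)
	{
		/* made-up HPTE first doubleword: SSIZE bits plus some bits at 61:54 */
		unsigned long v = 0x40ff000000000000UL;

		unsigned long b_old = v >> (62 - 8);			/* also drags bits 61:54 into rb */
		unsigned long b_new = (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* only bits 63:62, at rb 9:8 */

		printf("old: %#lx  new: %#lx\n", b_old, b_new);
		return 0;
	}

With that input the old expression yields 0x103 while the new one yields 0x100; only the latter leaves the non-B bits of rb clear.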
+4 −14
@@ -180,11 +180,6 @@ struct kvmppc_spapr_tce_table {
	struct page *pages[0];
};

struct kvm_rma_info {
	atomic_t use_count;
	unsigned long base_pfn;
};

/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
@@ -214,16 +209,9 @@ struct revmap_entry {
#define KVMPPC_RMAP_PRESENT	0x100000000ul
#define KVMPPC_RMAP_INDEX	0xfffffffful

/* Low-order bits in memslot->arch.slot_phys[] */
#define KVMPPC_PAGE_ORDER_MASK	0x1f
#define KVMPPC_PAGE_NO_CACHE	HPTE_R_I	/* 0x20 */
#define KVMPPC_PAGE_WRITETHRU	HPTE_R_W	/* 0x40 */
#define KVMPPC_GOT_PAGE		0x80

struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	unsigned long *rmap;
	unsigned long *slot_phys;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};

@@ -242,14 +230,12 @@ struct kvm_arch {
	struct kvm_rma_info *rma;
	unsigned long vrma_slb_v;
	int rma_setup_done;
	int using_mmu_notifiers;
	u32 hpt_order;
	atomic_t vcpus_running;
	u32 online_vcores;
	unsigned long hpt_npte;
	unsigned long hpt_mask;
	atomic_t hpte_mod_interest;
	spinlock_t slot_phys_lock;
	cpumask_t need_tlb_flush;
	int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
@@ -297,6 +283,7 @@ struct kvmppc_vcore {
	struct list_head runnable_threads;
	spinlock_t lock;
	wait_queue_head_t wq;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
@@ -308,6 +295,7 @@ struct kvmppc_vcore {
	ulong dpdes;		/* doorbell state (POWER8) */
	void *mpp_buffer; /* Micro Partition Prefetch buffer */
	bool mpp_buffer_is_valid;
	ulong conferring_threads;
};

#define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -664,6 +652,8 @@ struct kvm_vcpu_arch {
	spinlock_t tbacct_lock;
	u64 busy_stolen;
	u64 busy_preempt;

	u32 emul_inst;
#endif
};

+0 −2
@@ -170,8 +170,6 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
+1 −1
@@ -489,7 +489,6 @@ int main(void)
	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
	DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
	DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
@@ -499,6 +498,7 @@ int main(void)
	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
	DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst));
#endif
#ifdef CONFIG_PPC_BOOK3S
	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));