
Commit b44a1dd3 authored by Linus Torvalds
Pull KVM fixes from Paolo Bonzini:
 "Fixes for PPC and s390"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Restore SPRG3 in kvmhv_p9_guest_entry()
  KVM: PPC: Book3S HV: Fix lockdep warning when entering guest on POWER9
  KVM: PPC: Book3S HV: XIVE: Fix page offset when clearing ESB pages
  KVM: PPC: Book3S HV: XIVE: Take the srcu read lock when accessing memslots
  KVM: PPC: Book3S HV: XIVE: Do not clear IRQ data of passthrough interrupts
  KVM: PPC: Book3S HV: XIVE: Introduce a new mutex for the XIVE device
  KVM: PPC: Book3S HV: XIVE: Fix the enforced limit on the vCPU identifier
  KVM: PPC: Book3S HV: XIVE: Do not test the EQ flag validity when resetting
  KVM: PPC: Book3S HV: XIVE: Clear file mapping when device is released
  KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu
  KVM: PPC: Book3S: Use new mutex to synchronize access to rtas token list
  KVM: PPC: Book3S HV: Use new mutex to synchronize MMU setup
  KVM: PPC: Book3S HV: Avoid touching arch.mmu_ready in XIVE release functions
  KVM: s390: Do not report unusabled IDs via KVM_CAP_MAX_VCPU_ID
  kvm: fix compile on s390 part 2
parents 38baf0bb f8d221d2
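
The common thread in the PPC changes below is replacing uses of the VM-wide kvm->lock with narrower, purpose-specific mutexes (mmu_setup_lock for MMU setup, rtas_token_lock for the RTAS token list), so that paths which already hold vcpu mutexes no longer need the VM-wide lock. The following is only a minimal sketch of that pattern (it is not part of the diff below and not the kernel's code; my_arch and my_setup are hypothetical names):

#include <linux/mutex.h>

/* Sketch: protect one piece of state with its own small mutex instead
 * of the VM-wide lock, so the lock can safely nest inside vcpu mutexes.
 */
struct my_arch {
	struct mutex setup_lock;	/* nests inside vcpu mutexes */
	int setup_done;			/* protected by setup_lock */
};

static void my_arch_init(struct my_arch *a)
{
	mutex_init(&a->setup_lock);
	a->setup_done = 0;
}

static int my_setup(struct my_arch *a)
{
	int r = 0;

	mutex_lock(&a->setup_lock);	/* not the VM-wide lock */
	if (!a->setup_done) {
		/* one-time, possibly slow setup would go here */
		a->setup_done = 1;
	}
	mutex_unlock(&a->setup_lock);
	return r;
}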
+3 −0
@@ -1122,6 +1122,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
+	case KVM_CAP_MAX_VCPU_ID:
+		r = KVM_MAX_VCPU_ID;
+		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
+2 −0
@@ -309,6 +309,7 @@ struct kvm_arch {
#ifdef CONFIG_PPC_BOOK3S_64
	struct list_head spapr_tce_tables;
	struct list_head rtas_tokens;
+	struct mutex rtas_token_lock;
	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC
@@ -325,6 +326,7 @@ struct kvm_arch {
#endif
	struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	struct mutex mmu_setup_lock;	/* nests inside vcpu mutexes */
	u64 l1_ptcr;
	int max_nested_lpid;
	struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
+1 −0
@@ -902,6 +902,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
+18 −18
@@ -63,7 +63,7 @@ struct kvm_resize_hpt {
	struct work_struct work;
	u32 order;

-	/* These fields protected by kvm->lock */
+	/* These fields protected by kvm->arch.mmu_setup_lock */

	/* Possible values and their usage:
	 *  <0     an error occurred during allocation,
@@ -73,7 +73,7 @@ struct kvm_resize_hpt {
	int error;

	/* Private to the work thread, until error != -EBUSY,
-	 * then protected by kvm->lock.
+	 * then protected by kvm->arch.mmu_setup_lock.
	 */
	struct kvm_hpt_info hpt;
};
@@ -139,7 +139,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
	long err = -EBUSY;
	struct kvm_hpt_info info;

-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
	if (kvm->arch.mmu_ready) {
		kvm->arch.mmu_ready = 0;
		/* order mmu_ready vs. vcpus_running */
@@ -183,7 +183,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);

-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return err;
}

@@ -1447,7 +1447,7 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)

static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
{
-	if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+	if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
		return;

	if (!resize)
@@ -1474,14 +1474,14 @@ static void resize_hpt_prepare_work(struct work_struct *work)
	if (WARN_ON(resize->error != -EBUSY))
		return;

-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);

	/* Request is still current? */
	if (kvm->arch.resize_hpt == resize) {
		/* We may request large allocations here:
-		 * do not sleep with kvm->lock held for a while.
+		 * do not sleep with kvm->arch.mmu_setup_lock held for a while.
		 */
-		mutex_unlock(&kvm->lock);
+		mutex_unlock(&kvm->arch.mmu_setup_lock);

		resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
				 resize->order);
@@ -1494,9 +1494,9 @@ static void resize_hpt_prepare_work(struct work_struct *work)
		if (WARN_ON(err == -EBUSY))
			err = -EINPROGRESS;

-		mutex_lock(&kvm->lock);
+		mutex_lock(&kvm->arch.mmu_setup_lock);
		/* It is possible that kvm->arch.resize_hpt != resize
-		 * after we grab kvm->lock again.
+		 * after we grab kvm->arch.mmu_setup_lock again.
		 */
	}

@@ -1505,7 +1505,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
	if (kvm->arch.resize_hpt != resize)
		resize_hpt_release(kvm, resize);

-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
}

long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
@@ -1522,7 +1522,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;

-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);

	resize = kvm->arch.resize_hpt;

@@ -1565,7 +1565,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
	ret = 100; /* estimated time in ms */

out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return ret;
}

@@ -1588,7 +1588,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;

-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);

	resize = kvm->arch.resize_hpt;

@@ -1625,7 +1625,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
	smp_mb();
out_no_hpt:
	resize_hpt_release(kvm, resize);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return ret;
}

@@ -1868,7 +1868,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
		return -EINVAL;

	/* lock out vcpus from running while we're doing this */
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
	mmu_ready = kvm->arch.mmu_ready;
	if (mmu_ready) {
		kvm->arch.mmu_ready = 0;	/* temporarily */
@@ -1876,7 +1876,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.mmu_ready = 1;
-			mutex_unlock(&kvm->lock);
+			mutex_unlock(&kvm->arch.mmu_setup_lock);
			return -EBUSY;
		}
	}
@@ -1963,7 +1963,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
	/* Order HPTE updates vs. mmu_ready */
	smp_wmb();
	kvm->arch.mmu_ready = mmu_ready;
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);

	if (err)
		return err;
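The resize_hpt_prepare_work() hunks above keep the file's existing pattern of dropping the lock around the large HPT allocation and re-checking that the request is still the current one after retaking it; only the lock changes from kvm->lock to kvm->arch.mmu_setup_lock. Below is only a compressed sketch of that drop-and-revalidate pattern, using hypothetical my_vm/my_request types and a stub do_large_allocation(), not the kernel's real code.

#include <linux/errno.h>
#include <linux/mutex.h>

struct my_request {
	int error;
};

struct my_vm {
	struct mutex setup_lock;
	struct my_request *current_req;	/* protected by setup_lock */
};

/* Stand-in for the slow, sleeping allocation. */
static int do_large_allocation(struct my_request *req)
{
	return 0;
}

static void prepare_work(struct my_vm *vm, struct my_request *req)
{
	int err = -EBUSY;

	mutex_lock(&vm->setup_lock);
	if (vm->current_req == req) {
		/* Do not sleep in the allocator with setup_lock held. */
		mutex_unlock(&vm->setup_lock);

		err = do_large_allocation(req);

		mutex_lock(&vm->setup_lock);
		/* vm->current_req may have changed while we slept. */
	}

	if (vm->current_req == req)
		req->error = err;	/* publish only if still current */
	mutex_unlock(&vm->setup_lock);
}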
+30 −18
@@ -446,12 +446,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)

static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
-	struct kvm_vcpu *ret;
-
-	mutex_lock(&kvm->lock);
-	ret = kvm_get_vcpu_by_id(kvm, id);
-	mutex_unlock(&kvm->lock);
-	return ret;
+	return kvm_get_vcpu_by_id(kvm, id);
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1583,7 +1578,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

-	mutex_lock(&kvm->lock);
	spin_lock(&vc->lock);
	/*
	 * If ILE (interrupt little-endian) has changed, update the
@@ -1623,7 +1617,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
		mask &= 0xFFFFFFFF;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
-	mutex_unlock(&kvm->lock);
}

static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
@@ -2338,11 +2331,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
			pr_devel("KVM: collision on id %u", id);
			vcore = NULL;
		} else if (!vcore) {
+			/*
+			 * Take mmu_setup_lock for mutual exclusion
+			 * with kvmppc_update_lpcr().
+			 */
			err = -ENOMEM;
			vcore = kvmppc_vcore_create(kvm,
					id & ~(kvm->arch.smt_mode - 1));
+			mutex_lock(&kvm->arch.mmu_setup_lock);
			kvm->arch.vcores[core] = vcore;
			kvm->arch.online_vcores++;
+			mutex_unlock(&kvm->arch.mmu_setup_lock);
		}
	}
	mutex_unlock(&kvm->lock);
@@ -3663,6 +3662,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
	vc->in_guest = 0;

	mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
+	mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);

	kvmhv_load_host_pmu();

@@ -3859,7 +3859,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
	int r = 0;
	struct kvm *kvm = vcpu->kvm;

-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
	if (!kvm->arch.mmu_ready) {
		if (!kvm_is_radix(kvm))
			r = kvmppc_hv_setup_htab_rma(vcpu);
@@ -3869,7 +3869,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
			kvm->arch.mmu_ready = 1;
		}
	}
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return r;
}

@@ -4091,16 +4091,20 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
	}

-	trace_hardirqs_on();
	guest_enter_irqoff();

	srcu_idx = srcu_read_lock(&kvm->srcu);

	this_cpu_disable_ftrace();

+	/* Tell lockdep that we're about to enable interrupts */
+	trace_hardirqs_on();
+
	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
	vcpu->arch.trap = trap;

+	trace_hardirqs_off();
+
	this_cpu_enable_ftrace();

	srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -4110,7 +4114,6 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
		isync();
	}

-	trace_hardirqs_off();
	set_irq_happened(trap);

	kvmppc_set_host_core(pcpu);
@@ -4478,7 +4481,8 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,

/*
 * Update LPCR values in kvm->arch and in vcores.
- * Caller must hold kvm->lock.
+ * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
+ * of kvm->arch.lpcr update).
 */
void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
{
@@ -4530,7 +4534,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm)

/*
 * Set up HPT (hashed page table) and RMA (real-mode area).
- * Must be called with kvm->lock held.
+ * Must be called with kvm->arch.mmu_setup_lock held.
 */
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
@@ -4618,7 +4622,10 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
	goto out_srcu;
}

-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
{
	if (nesting_enabled(kvm))
@@ -4635,7 +4642,10 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
	return 0;
}

-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
{
	int err;
@@ -4740,6 +4750,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
	char buf[32];
	int ret;

+	mutex_init(&kvm->arch.mmu_setup_lock);
+
	/* Allocate the guest's logical partition ID */

	lpid = kvmppc_alloc_lpid();
@@ -5265,7 +5277,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
	if (kvmhv_on_pseries() && !radix)
		return -EINVAL;

-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
	if (radix != kvm_is_radix(kvm)) {
		if (kvm->arch.mmu_ready) {
			kvm->arch.mmu_ready = 0;
@@ -5293,7 +5305,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
	err = 0;

 out_unlock:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return err;
}

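The guest-entry hunks above move the irq-tracing annotations so that lockdep's view matches what actually happens: trace_hardirqs_on() is now called immediately before the point where interrupts are effectively enabled for the guest, and trace_hardirqs_off() immediately after control returns with interrupts hard-disabled again. A bare sketch of that ordering, with a hypothetical low_level_entry() standing in for kvmhv_p9_guest_entry(); this is an illustration, not the kernel's code.

#include <linux/irqflags.h>

/* Hypothetical entry helper; runs with interrupts effectively enabled. */
static int low_level_entry(void)
{
	return 0;
}

static int enter_guest_context(void)
{
	int trap;

	/* Interrupts are hard-disabled here. */

	trace_hardirqs_on();	/* tell lockdep interrupts are about to be enabled */
	trap = low_level_entry();
	trace_hardirqs_off();	/* back with interrupts hard-disabled */

	return trap;
}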