Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7cd91804 authored by Radim Krčmář
Browse files
KVM: s390: Fixes and features for 4.16

- add the virtio-ccw transport for kvmconfig
- more debug tracing for cpu model
- cleanups and fixes
parents 6b697711 a9f6c9a9
Loading
Loading
Loading
Loading
+0 −3
Original line number Original line Diff line number Diff line
@@ -515,9 +515,6 @@ struct kvm_s390_irq_payload {


struct kvm_s390_local_interrupt {
struct kvm_s390_local_interrupt {
	spinlock_t lock;
	spinlock_t lock;
	struct kvm_s390_float_interrupt *float_int;
	struct swait_queue_head *wq;
	atomic_t *cpuflags;
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_irq_payload irq;
	struct kvm_s390_irq_payload irq;
	unsigned long pending_irqs;
	unsigned long pending_irqs;
+11 −16
Original line number Original line Diff line number Diff line
@@ -107,12 +107,11 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)


static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc, expect;
	int rc, expect;


	if (!kvm_s390_use_sca_entries())
	if (!kvm_s390_use_sca_entries())
		return;
		return;
	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
	atomic_andnot(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	read_lock(&vcpu->kvm->arch.sca_lock);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -279,13 +278,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
{
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}
}


static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}
}


static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
@@ -1228,7 +1227,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)


	li->irq.ext = irq->u.ext;
	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	return 0;
	return 0;
}
}


@@ -1253,7 +1252,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
		return -EBUSY;
	*extcall = irq->u.extcall;
	*extcall = irq->u.extcall;
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	return 0;
	return 0;
}
}


@@ -1329,7 +1328,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,


	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	return 0;
	return 0;
}
}


@@ -1373,7 +1372,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
				   0, 0);
				   0, 0);


	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	return 0;
	return 0;
}
}


@@ -1386,7 +1385,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
				   0, 0);
				   0, 0);


	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	return 0;
	return 0;
}
}


@@ -1546,7 +1545,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
static void __floating_irq_kick(struct kvm *kvm, u64 type)
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;
	int sigcpu, online_vcpus, nr_tries = 0;


@@ -1568,20 +1566,17 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);


	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	switch (type) {
	case KVM_S390_MCHK:
	case KVM_S390_MCHK:
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		__set_cpuflag(dst_vcpu, CPUSTAT_STOP_INT);
		break;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		__set_cpuflag(dst_vcpu, CPUSTAT_IO_INT);
		break;
		break;
	default:
	default:
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		__set_cpuflag(dst_vcpu, CPUSTAT_EXT_INT);
		break;
		break;
	}
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}
}


+19 −10
Original line number Original line Diff line number Diff line
@@ -573,7 +573,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
	case KVM_CAP_S390_GS:
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
		if (kvm->created_vcpus) {
			r = -EBUSY;
			r = -EBUSY;
		} else if (test_facility(133)) {
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
@@ -1084,7 +1084,6 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
				       struct kvm_device_attr *attr)
{
{
	struct kvm_s390_vm_cpu_feat data;
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;


	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
		return -EFAULT;
@@ -1094,13 +1093,18 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
		return -EINVAL;
		return -EINVAL;


	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	mutex_unlock(&kvm->lock);
	return ret;
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}
}


static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
@@ -1202,6 +1206,10 @@ static int kvm_s390_get_processor_feat(struct kvm *kvm,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
	return 0;
}
}


@@ -1215,6 +1223,10 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
	return 0;
}
}


@@ -2497,9 +2509,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,


	vcpu->arch.sie_block->icpua = id;
	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);
	seqcount_init(&vcpu->arch.cputm_seqcount);


	rc = kvm_vcpu_init(vcpu, kvm, id);
	rc = kvm_vcpu_init(vcpu, kvm, id);
+1 −1
Original line number Original line Diff line number Diff line
@@ -54,7 +54,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)


static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
{
	return test_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
	return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}
}


static inline int kvm_is_ucontrol(struct kvm *kvm)
static inline int kvm_is_ucontrol(struct kvm *kvm)
+4 −8
Original line number Original line Diff line number Diff line
@@ -20,14 +20,11 @@
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
			u64 *reg)
{
{
	struct kvm_s390_local_interrupt *li;
	int cpuflags;
	int cpuflags;
	int rc;
	int rc;
	int ext_call_pending;
	int ext_call_pending;


	li = &dst_vcpu->arch.local_int;
	cpuflags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);

	cpuflags = atomic_read(li->cpuflags);
	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
@@ -211,7 +208,7 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
	int flags;
	int flags;
	int rc;
	int rc;


	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	if (!(flags & CPUSTAT_STOPPED)) {
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -231,7 +228,6 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
static int __sigp_sense_running(struct kvm_vcpu *vcpu,
static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
{
	struct kvm_s390_local_interrupt *li;
	int rc;
	int rc;


	if (!test_kvm_facility(vcpu->kvm, 9)) {
	if (!test_kvm_facility(vcpu->kvm, 9)) {
@@ -240,8 +236,8 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu,
		return SIGP_CC_STATUS_STORED;
		return SIGP_CC_STATUS_STORED;
	}
	}


	li = &dst_vcpu->arch.local_int;
	if (atomic_read(&dst_vcpu->arch.sie_block->cpuflags) &
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
	    CPUSTAT_RUNNING) {
		/* running */
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
	} else {
Loading