Commit f958ee74 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-4.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fixes and features for kvm/next (4.6) part 2

- add watchdog diagnose to trace event decoder
- better handle the cpu timer when not inside the guest
- only provide STFLE if the CPU model has STFLE
- reduce DMA page usage
parents 5a5fbdc0 c54f0d6a
+25 −8
@@ -20,6 +20,7 @@
 #include <linux/kvm_types.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
+#include <linux/seqlock.h>
 #include <asm/debug.h>
 #include <asm/cpu.h>
 #include <asm/fpu/api.h>
@@ -552,6 +553,15 @@ struct kvm_vcpu_arch {
 	unsigned long pfault_token;
 	unsigned long pfault_select;
 	unsigned long pfault_compare;
+	bool cputm_enabled;
+	/*
+	 * The seqcount protects updates to cputm_start and sie_block.cputm,
+	 * this way we can have non-blocking reads with consistent values.
+	 * Only the owning VCPU thread (vcpu->cpu) is allowed to change these
+	 * values and to start/stop/enable/disable cpu timer accounting.
+	 */
+	seqcount_t cputm_seqcount;
+	__u64 cputm_start;
 };
 
 struct kvm_vm_stat {
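The three new fields implement a seqcount-protected snapshot of the guest CPU timer: only the owning VCPU thread writes, while any thread may read and retry until it sees a consistent pair. A minimal sketch of the pattern (illustrative only, not part of the patch; the real writer and reader are kvm_s390_set_cpu_timer()/kvm_s390_get_cpu_timer() in the kvm-s390.c hunks below):

#include <linux/seqlock.h>

static seqcount_t demo_seq = SEQCNT_ZERO(demo_seq);
static u64 demo_start;	/* plays the role of cputm_start */
static u64 demo_base;	/* plays the role of sie_block.cputm */

/* writer: only the owning VCPU thread, with preemption disabled */
static void demo_fold_elapsed(u64 now)
{
	raw_write_seqcount_begin(&demo_seq);
	demo_base -= now - demo_start;	/* fold elapsed time into the base */
	demo_start = now;		/* open a new accounting interval */
	raw_write_seqcount_end(&demo_seq);
}

/* reader: any thread; loops until it saw an even, unchanged count */
static u64 demo_read(u64 now)
{
	unsigned int seq;
	u64 value;

	do {
		seq = raw_read_seqcount(&demo_seq);
		value = demo_base - (now - demo_start);
	} while (read_seqcount_retry(&demo_seq, seq & ~1));
	return value;
}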
@@ -590,15 +600,11 @@ struct s390_io_adapter {
 #define S390_ARCH_FAC_MASK_SIZE_U64 \
 	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
 
-struct kvm_s390_fac {
-	/* facility list requested by guest */
-	__u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
-	/* facility mask supported by kvm & hosting machine */
-	__u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
-};
-
 struct kvm_s390_cpu_model {
-	struct kvm_s390_fac *fac;
+	/* facility mask supported by kvm & hosting machine */
+	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
+	/* facility list requested by guest (in dma page) */
+	__u64 *fac_list;
 	struct cpuid cpu_id;
 	unsigned short ibc;
 };
@@ -617,6 +623,16 @@ struct kvm_s390_crypto_cb {
 	__u8    reserved80[128];                /* 0x0080 */
 };
 
+/*
+ * sie_page2 has to be allocated as DMA because fac_list and crycb need
+ * 31bit addresses in the sie control block.
+ */
+struct sie_page2 {
+	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
+	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
+	u8 reserved900[0x1000 - 0x900];			/* 0x0900 */
+} __packed;
+
 struct kvm_arch{
 	void *sca;
 	int use_esca;
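The offset comments in sie_page2 are load-bearing: the SIE control block holds 31-bit addresses of fac_list and crycb, and the crycb is expected 2 kbyte into the page (S390_ARCH_FAC_LIST_SIZE_U64 __u64 entries = 0x800 bytes). A hedged sketch of compile-time checks one could drop into any init function to pin the layout down (not part of the patch):

static inline void sie_page2_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct sie_page2, crycb) != 0x800);
	BUILD_BUG_ON(offsetof(struct sie_page2, reserved900) != 0x900);
	BUILD_BUG_ON(sizeof(struct sie_page2) != 0x1000);
}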
@@ -637,6 +653,7 @@ struct kvm_arch{
 	int ipte_lock_count;
 	struct mutex ipte_mutex;
 	spinlock_t start_stop_lock;
+	struct sie_page2 *sie_page2;
 	struct kvm_s390_cpu_model model;
 	struct kvm_s390_crypto crypto;
 	u64 epoch;
+1 −0
@@ -7,6 +7,7 @@
 	{ 0x9c, "DIAG (0x9c) time slice end directed" },	\
 	{ 0x204, "DIAG (0x204) logical-cpu utilization" },	\
 	{ 0x258, "DIAG (0x258) page-reference services" },	\
+	{ 0x288, "DIAG (0x288) watchdog functions" },		\
 	{ 0x308, "DIAG (0x308) ipl functions" },		\
 	{ 0x500, "DIAG (0x500) KVM virtio functions" },		\
 	{ 0x501, "DIAG (0x501) KVM breakpoint" }
+38 −15
@@ -182,8 +182,9 @@ static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
 {
-	return (vcpu->arch.sie_block->cputm >> 63) &&
-	       cpu_timer_interrupts_enabled(vcpu);
+	if (!cpu_timer_interrupts_enabled(vcpu))
+		return 0;
+	return kvm_s390_get_cpu_timer(vcpu) >> 63;
 }
 
 static inline int is_ioirq(unsigned long irq_type)
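The repeated `>> 63` checks rely on the s390 CPU timer being a decrementing 64-bit register: once it counts down past zero the value reads as negative, so a set bit 63 means the timer has already expired. Spelled out as a helper (illustrative, not in the patch):

static inline int cputm_expired(__u64 cputm)
{
	return cputm >> 63;	/* sign bit set => timer ran out */
}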
@@ -908,9 +909,35 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
 }
 
+static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
+{
+	u64 now, cputm, sltime = 0;
+
+	if (ckc_interrupts_enabled(vcpu)) {
+		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+		/* already expired or overflow? */
+		if (!sltime || vcpu->arch.sie_block->ckc <= now)
+			return 0;
+		if (cpu_timer_interrupts_enabled(vcpu)) {
+			cputm = kvm_s390_get_cpu_timer(vcpu);
+			/* already expired? */
+			if (cputm >> 63)
+				return 0;
+			return min(sltime, tod_to_ns(cputm));
+		}
+	} else if (cpu_timer_interrupts_enabled(vcpu)) {
+		sltime = kvm_s390_get_cpu_timer(vcpu);
+		/* already expired? */
+		if (sltime >> 63)
+			return 0;
+	}
+	return sltime;
+}
+
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 {
-	u64 now, sltime;
+	u64 sltime;
 
 	vcpu->stat.exit_wait_state++;
 
@@ -923,22 +950,20 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP; /* disabled wait */
 	}
 
-	if (!ckc_interrupts_enabled(vcpu)) {
+	if (!ckc_interrupts_enabled(vcpu) &&
+	    !cpu_timer_interrupts_enabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
 		__set_cpu_idle(vcpu);
 		goto no_timer;
 	}
 
-	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-
-	/* underflow */
-	if (vcpu->arch.sie_block->ckc < now)
+	sltime = __calculate_sltime(vcpu);
+	if (!sltime)
 		return 0;
 
 	__set_cpu_idle(vcpu);
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
-	VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
+	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	kvm_vcpu_block(vcpu);
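__calculate_sltime() returns the nanoseconds until the earliest enabled wakeup source, or 0 when a wakeup is already due, which is why kvm_s390_handle_wait() now needs only the single !sltime check. A worked example:

/*
 * Illustrative numbers, not from the patch: with the clock comparator
 * due in 5,000,000 ns and the CPU timer expiring in 2,000,000 ns,
 * __calculate_sltime() returns
 *
 *	min(5000000, 2000000) == 2000000
 *
 * so the hrtimer fires when the nearer deadline, the CPU timer, runs
 * out. If either enabled source had already expired (sltime == 0, or
 * bit 63 set in cputm), it returns 0 and the vcpu does not block.
 */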
@@ -965,18 +990,16 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
-	u64 now, sltime;
+	u64 sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+	sltime = __calculate_sltime(vcpu);
 
 	/*
 	 * If the monotonic clock runs faster than the tod clock we might be
 	 * woken up too early and have to go back to sleep to avoid deadlocks.
 	 */
-	if (vcpu->arch.sie_block->ckc > now &&
-	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
+	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
 		return HRTIMER_RESTART;
 	kvm_s390_vcpu_wakeup(vcpu);
 	return HRTIMER_NORESTART;
+135 −45
@@ -158,6 +158,8 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
 		kvm->arch.epoch -= *delta;
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			vcpu->arch.sie_block->epoch -= *delta;
+			if (vcpu->arch.cputm_enabled)
+				vcpu->arch.cputm_start += *delta;
 		}
 	}
 	return NOTIFY_OK;
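Shifting cputm_start along with the TOD delta keeps the accounted guest CPU time stable across a clock sync. The invariant, written out (illustrative, not from the patch):

/*
 * kvm_s390_get_cpu_timer() reports  cputm - (tod_now - cputm_start).
 * A TOD sync adds *delta to tod_now, so
 *
 *	cputm - ((tod_now + delta) - (cputm_start + delta))
 *
 * is unchanged: bumping cputm_start by *delta hides the host-side
 * clock jump from the guest timer.
 */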
@@ -353,8 +355,8 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		if (atomic_read(&kvm->online_vcpus)) {
 			r = -EBUSY;
 		} else if (MACHINE_HAS_VX) {
-			set_kvm_facility(kvm->arch.model.fac->mask, 129);
-			set_kvm_facility(kvm->arch.model.fac->list, 129);
+			set_kvm_facility(kvm->arch.model.fac_mask, 129);
+			set_kvm_facility(kvm->arch.model.fac_list, 129);
 			r = 0;
 		} else
 			r = -EINVAL;
@@ -368,8 +370,8 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		if (atomic_read(&kvm->online_vcpus)) {
 			r = -EBUSY;
 		} else if (test_facility(64)) {
-			set_kvm_facility(kvm->arch.model.fac->mask, 64);
-			set_kvm_facility(kvm->arch.model.fac->list, 64);
+			set_kvm_facility(kvm->arch.model.fac_mask, 64);
+			set_kvm_facility(kvm->arch.model.fac_list, 64);
 			r = 0;
 		}
 		mutex_unlock(&kvm->lock);
@@ -652,7 +654,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
 		       sizeof(struct cpuid));
 		kvm->arch.model.ibc = proc->ibc;
-		memcpy(kvm->arch.model.fac->list, proc->fac_list,
+		memcpy(kvm->arch.model.fac_list, proc->fac_list,
 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	} else
 		ret = -EFAULT;
@@ -686,7 +688,8 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 	}
 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
 	proc->ibc = kvm->arch.model.ibc;
-	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
+	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
 		ret = -EFAULT;
 	kfree(proc);
@@ -706,7 +709,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
 	}
 	get_cpu_id((struct cpuid *) &mach->cpuid);
 	mach->ibc = sclp.ibc;
-	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
@@ -1083,16 +1086,12 @@ static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
 	cpu_id->version = 0xff;
 }
 
-static int kvm_s390_crypto_init(struct kvm *kvm)
+static void kvm_s390_crypto_init(struct kvm *kvm)
 {
 	if (!test_kvm_facility(kvm, 76))
-		return 0;
-
-	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
-					 GFP_KERNEL | GFP_DMA);
-	if (!kvm->arch.crypto.crycb)
-		return -ENOMEM;
+		return;
 
+	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
 	kvm_s390_set_crycb_format(kvm);
 
 	/* Enable AES/DEA protected key functions by default */
@@ -1102,8 +1101,6 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
-
-	return 0;
 }
 
 static void sca_dispose(struct kvm *kvm)
@@ -1157,37 +1154,30 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (!kvm->arch.dbf)
 		goto out_err;
 
-	/*
-	 * The architectural maximum amount of facilities is 16 kbit. To store
-	 * this amount, 2 kbyte of memory is required. Thus we need a full
-	 * page to hold the guest facility list (arch.model.fac->list) and the
-	 * facility mask (arch.model.fac->mask). Its address size has to be
-	 * 31 bits and word aligned.
-	 */
-	kvm->arch.model.fac =
-		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!kvm->arch.model.fac)
+	kvm->arch.sie_page2 =
+	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!kvm->arch.sie_page2)
 		goto out_err;
 
 	/* Populate the facility mask initially. */
-	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
 		if (i < kvm_s390_fac_list_mask_size())
-			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
+			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
 		else
-			kvm->arch.model.fac->mask[i] = 0UL;
+			kvm->arch.model.fac_mask[i] = 0UL;
 	}
 
 	/* Populate the facility list initially. */
-	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
+	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 
 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
 
-	if (kvm_s390_crypto_init(kvm) < 0)
-		goto out_err;
+	kvm_s390_crypto_init(kvm);
 
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
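This hunk is the "reduce DMA page usage" item from the commit message: the facility lists and the crypto control block previously came from two separate DMA allocations and now share the single sie_page2 page. Simplified before/after (illustrative summary of the hunk above):

/*
 *	before:	fac page  = get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *		crycb     = kzalloc(sizeof(*crycb), GFP_KERNEL | GFP_DMA);
 *	after:	sie_page2 = get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *		fac_list  = sie_page2->fac_list;
 *		crycb     = &sie_page2->crycb;
 *
 * As a side effect kvm_s390_crypto_init() can no longer fail, hence its
 * new void return type.
 */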
@@ -1223,8 +1213,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	return 0;
 out_err:
-	kfree(kvm->arch.crypto.crycb);
-	free_page((unsigned long)kvm->arch.model.fac);
+	free_page((unsigned long)kvm->arch.sie_page2);
 	debug_unregister(kvm->arch.dbf);
 	sca_dispose(kvm);
 	KVM_EVENT(3, "creation of vm failed: %d", rc);
@@ -1270,10 +1259,9 @@ static void kvm_free_vcpus(struct kvm *kvm)
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_free_vcpus(kvm);
-	free_page((unsigned long)kvm->arch.model.fac);
 	sca_dispose(kvm);
 	debug_unregister(kvm->arch.dbf);
-	kfree(kvm->arch.crypto.crycb);
+	free_page((unsigned long)kvm->arch.sie_page2);
 	if (!kvm_is_ucontrol(kvm))
 		gmap_free(kvm->arch.gmap);
 	kvm_s390_destroy_adapters(kvm);
@@ -1429,6 +1417,93 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
+	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
+	vcpu->arch.cputm_start = get_tod_clock_fast();
+	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
+}
+
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
+	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
+	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
+	vcpu->arch.cputm_start = 0;
+	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
+}
+
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
+	vcpu->arch.cputm_enabled = true;
+	__start_cpu_timer_accounting(vcpu);
+}
+
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
+	__stop_cpu_timer_accounting(vcpu);
+	vcpu->arch.cputm_enabled = false;
+}
+
+static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+	__enable_cpu_timer_accounting(vcpu);
+	preempt_enable();
+}
+
+static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+	__disable_cpu_timer_accounting(vcpu);
+	preempt_enable();
+}
+
+/* set the cpu timer - may only be called from the VCPU thread itself */
+void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
+{
+	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
+	if (vcpu->arch.cputm_enabled)
+		vcpu->arch.cputm_start = get_tod_clock_fast();
+	vcpu->arch.sie_block->cputm = cputm;
+	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
+	preempt_enable();
+}
+
+/* update and get the cpu timer - can also be called from other VCPU threads */
+__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
+{
+	unsigned int seq;
+	__u64 value;
+
+	if (unlikely(!vcpu->arch.cputm_enabled))
+		return vcpu->arch.sie_block->cputm;
+
+	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+	do {
+		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
+		/*
+		 * If the writer would ever execute a read in the critical
+		 * section, e.g. in irq context, we have a deadlock.
+		 */
+		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
+		value = vcpu->arch.sie_block->cputm;
+		/* if cputm_start is 0, accounting is being started/stopped */
+		if (likely(vcpu->arch.cputm_start))
+			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
+	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
+	preempt_enable();
+	return value;
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	/* Save host register state */
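One subtlety in kvm_s390_get_cpu_timer() above: it samples with raw_read_seqcount(), which, unlike read_seqcount_begin(), does not spin while a write is in flight. Masking the low bit before the retry check is what keeps the loop correct (a hedged reading of the code, not new code):

/*
 * seq = raw_read_seqcount(&sc) may return an odd value (write in
 * progress). read_seqcount_retry(&sc, seq & ~1) then compares against
 * an even number that cannot match, forcing a retry; for an even
 * sample it degenerates to the usual "has the count moved?" check.
 * The WARN_ON_ONCE flags the one case where the retry could spin
 * forever: the owning VCPU reading from a context (e.g. irq) that
 * interrupted its own write-side critical section.
 */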
@@ -1449,10 +1524,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
 	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
+		__start_cpu_timer_accounting(vcpu);
+	vcpu->cpu = cpu;
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	vcpu->cpu = -1;
+	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
+		__stop_cpu_timer_accounting(vcpu);
 	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 
@@ -1474,7 +1555,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gpsw.mask = 0UL;
 	vcpu->arch.sie_block->gpsw.addr = 0UL;
 	kvm_s390_set_prefix(vcpu, 0);
-	vcpu->arch.sie_block->cputm     = 0UL;
+	kvm_s390_set_cpu_timer(vcpu, 0);
 	vcpu->arch.sie_block->ckc       = 0UL;
 	vcpu->arch.sie_block->todpr     = 0;
 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
@@ -1544,7 +1625,8 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.cpu_id = model->cpu_id;
 	vcpu->arch.sie_block->ibc = model->ibc;
-	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
+	if (test_kvm_facility(vcpu->kvm, 7))
+		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
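This hunk implements the "only provide STFLE if the CPU model has STFLE" item from the commit message: facility bit 7 is the store-facility-list-extended (STFLE) facility itself, so the facility-list origin is only installed in the SIE block when the guest CPU model actually includes STFLE.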
@@ -1622,6 +1704,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
 	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+	seqcount_init(&vcpu->arch.cputm_seqcount);
 
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
@@ -1721,7 +1804,7 @@ static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
 			     (u64 __user *)reg->addr);
 		break;
 	case KVM_REG_S390_CPU_TIMER:
-		r = put_user(vcpu->arch.sie_block->cputm,
+		r = put_user(kvm_s390_get_cpu_timer(vcpu),
 			     (u64 __user *)reg->addr);
 		break;
 	case KVM_REG_S390_CLOCK_COMP:
@@ -1759,6 +1842,7 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
 					   struct kvm_one_reg *reg)
 {
 	int r = -EINVAL;
+	__u64 val;
 
 	switch (reg->id) {
 	case KVM_REG_S390_TODPR:
@@ -1770,8 +1854,9 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
 			     (u64 __user *)reg->addr);
 		break;
 	case KVM_REG_S390_CPU_TIMER:
-		r = get_user(vcpu->arch.sie_block->cputm,
-			     (u64 __user *)reg->addr);
+		r = get_user(val, (u64 __user *)reg->addr);
+		if (!r)
+			kvm_s390_set_cpu_timer(vcpu, val);
 		break;
 	case KVM_REG_S390_CLOCK_COMP:
 		r = get_user(vcpu->arch.sie_block->ckc,
@@ -2261,10 +2346,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		 */
 		local_irq_disable();
 		__kvm_guest_enter();
+		__disable_cpu_timer_accounting(vcpu);
 		local_irq_enable();
 		exit_reason = sie64a(vcpu->arch.sie_block,
 				     vcpu->run->s.regs.gprs);
 		local_irq_disable();
+		__enable_cpu_timer_accounting(vcpu);
 		__kvm_guest_exit();
 		local_irq_enable();
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
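While the guest executes under SIE, the hardware steps the CPU timer in the SIE control block itself, so host-side accounting is switched off just before sie64a() and back on right after it returns; the host then only accounts the stretches where the vcpu is loaded but not actually running guest code. As a timeline (illustrative):

/*
 *	vcpu_load   ── host accounting ON  (cputm_start ticking)
 *	sie64a in   ── host accounting OFF (hardware steps the timer)
 *	sie64a out  ── host accounting ON
 *	vcpu_put    ── host accounting OFF
 */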
@@ -2288,7 +2375,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	}
 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
-		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
+		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
@@ -2310,7 +2397,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
-	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
+	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
@@ -2342,6 +2429,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	sync_regs(vcpu, kvm_run);
+	enable_cpu_timer_accounting(vcpu);
 
 	might_fault();
 	rc = __vcpu_run(vcpu);
@@ -2361,6 +2449,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		rc = 0;
 	}
 
+	disable_cpu_timer_accounting(vcpu);
 	store_regs(vcpu, kvm_run);
 
 	if (vcpu->sigset_active)
@@ -2381,7 +2470,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 	unsigned char archmode = 1;
 	freg_t fprs[NUM_FPRS];
 	unsigned int px;
-	u64 clkcomp;
+	u64 clkcomp, cputm;
 	int rc;
 
 	px = kvm_s390_get_prefix(vcpu);
@@ -2415,8 +2504,9 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 			      &vcpu->run->s.regs.fpc, 4);
 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
 			      &vcpu->arch.sie_block->todpr, 4);
+	cputm = kvm_s390_get_cpu_timer(vcpu);
 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
-			      &vcpu->arch.sie_block->cputm, 8);
+			      &cputm, 8);
 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
 			      &clkcomp, 8);
+9 −2
@@ -54,6 +54,11 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
 }
 
+static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
+{
+	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_WAIT;
+}
+
 static inline int kvm_is_ucontrol(struct kvm *kvm)
 {
 #ifdef CONFIG_KVM_S390_UCONTROL
@@ -155,8 +160,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
 /* test availability of facility in a kvm instance */
 static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
 {
-	return __test_facility(nr, kvm->arch.model.fac->mask) &&
-		__test_facility(nr, kvm->arch.model.fac->list);
+	return __test_facility(nr, kvm->arch.model.fac_mask) &&
+		__test_facility(nr, kvm->arch.model.fac_list);
 }
 
 static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
@@ -263,6 +268,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
 unsigned long kvm_s390_fac_list_mask_size(void);
 extern unsigned long kvm_s390_fac_list_mask[];
+void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
+__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
 
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
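Usage of the new pair, per the contracts documented in kvm-s390.c above (a hedged sketch, not part of the patch): reads are safe from any thread, writes must come from the owning VCPU thread.

static void demo_reset_expired_timer(struct kvm_vcpu *vcpu)
{
	__u64 left = kvm_s390_get_cpu_timer(vcpu);	/* any thread */

	if (left >> 63)					/* already expired? */
		kvm_s390_set_cpu_timer(vcpu, 0);	/* VCPU thread only */
}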