Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c9f0a2b8 authored by Janosch Frank's avatar Janosch Frank Committed by Christian Borntraeger
Browse files

KVM: s390: Refactor host cmma and pfmfi interpretation controls



use_cmma in kvm_arch means that the KVM hypervisor is allowed to use
cmma, whereas use_cmma in the mm context means cmm has been used before.
Let's rename the mm-context flag to uses_cmm, since the VM itself uses
collaborative memory management (CMM), while the host only uses the CMM
assist (the interpretation facility).

Also, let's introduce use_pfmfi, so that instead of disabling pfmfi after
cmma has been activated, we simply never enable it in the first place.

Signed-off-by: default avatarJanosch Frank <frankja@linux.vnet.ibm.com>
Message-Id: <1518779775-256056-2-git-send-email-frankja@linux.vnet.ibm.com>
Reviewed-by: default avatarDavid Hildenbrand <david@redhat.com>
Reviewed-by: default avatarChristian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: default avatarChristian Borntraeger <borntraeger@de.ibm.com>
parent c3b9e3e1
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -792,6 +792,7 @@ struct kvm_arch{
	int css_support;
	int use_irqchip;
	int use_cmma;
	int use_pfmfi;
	int user_cpu_state_ctrl;
	int user_sigp;
	int user_stsi;
+2 −2
Original line number Diff line number Diff line
@@ -22,8 +22,8 @@ typedef struct {
	unsigned int has_pgste:1;
	/* The mmu context uses storage keys. */
	unsigned int use_skey:1;
	/* The mmu context uses CMMA. */
	unsigned int use_cmma:1;
	/* The mmu context uses CMM. */
	unsigned int uses_cmm:1;
} mm_context_t;

#define INIT_MM_CONTEXT(name)						   \
+1 −1
Original line number Diff line number Diff line
@@ -31,7 +31,7 @@ static inline int init_new_context(struct task_struct *tsk,
		(current->mm && current->mm->context.alloc_pgste);
	mm->context.has_pgste = 0;
	mm->context.use_skey = 0;
	mm->context.use_cmma = 0;
	mm->context.uses_cmm = 0;
#endif
	switch (mm->context.asce_limit) {
	case _REGION2_SIZE:
+12 −11
Original line number Diff line number Diff line
@@ -699,6 +699,8 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
@@ -1603,7 +1605,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.use_cmma) {
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
@@ -1680,7 +1682,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.use_cmma flag is set.
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
@@ -1730,9 +1732,9 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.use_cmma) {
	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.use_cmma = 1;
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
@@ -2043,6 +2045,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
@@ -2469,8 +2472,6 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
	return 0;
}

@@ -2506,7 +2507,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
@@ -3038,7 +3039,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMMA virtualization; we will emulate the ESSA
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
@@ -3048,11 +3049,11 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMMA virtualization if CMMA is available and
		 * was used.
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.use_cmma))
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}
+2 −2
Original line number Diff line number Diff line
@@ -1078,9 +1078,9 @@ static int handle_essa(struct kvm_vcpu *vcpu)
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.use_cmma == 0) {
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			down_write(&vcpu->kvm->mm->mmap_sem);
			vcpu->kvm->mm->context.use_cmma = 1;
			vcpu->kvm->mm->context.uses_cmm = 1;
			up_write(&vcpu->kvm->mm->mmap_sem);
		}
		/*