Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5102ee87 authored by Tony Krowiak's avatar Tony Krowiak Committed by Christian Borntraeger
Browse files

KVM: CPACF: Enable MSA4 instructions for kvm guest



We have to provide a per-guest crypto block for the CPUs to
enable MSA4 instructions. According to icainfo, on z196 or
later this enables CCM-AES-128, CMAC-AES-128, CMAC-AES-192
and CMAC-AES-256.

Signed-off-by: default avatarTony Krowiak <akrowiak@linux.vnet.ibm.com>
Reviewed-by: default avatarDavid Hildenbrand <dahi@linux.vnet.ibm.com>
Reviewed-by: default avatarCornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: default avatarMichael Mueller <mimu@linux.vnet.ibm.com>
Signed-off-by: default avatarChristian Borntraeger <borntraeger@de.ibm.com>
[split MSA4/protected key into two patches]
parent fd275235
Loading
Loading
Loading
Loading
+13 −1
Original line number Diff line number Diff line
@@ -157,7 +157,9 @@ struct kvm_s390_sie_block {
	__u8	armid;			/* 0x00e3 */
	__u8	reservede4[4];		/* 0x00e4 */
	__u64	tecmc;			/* 0x00e8 */
	__u8	reservedf0[16];		/* 0x00f0 */
	__u8	reservedf0[12];		/* 0x00f0 */
#define CRYCB_FORMAT1 0x00000001
	__u32	crycbd;			/* 0x00fc */
	__u64	gcr[16];		/* 0x0100 */
	__u64	gbea;			/* 0x0180 */
	__u8	reserved188[24];	/* 0x0188 */
@@ -410,6 +412,15 @@ struct s390_io_adapter {
#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256

/*
 * Per-VM crypto state: the guest crypto control block (CRYCB) plus the
 * precomputed descriptor word that is copied into each vcpu's SIE block.
 */
struct kvm_s390_crypto {
	struct kvm_s390_crypto_cb *crycb;	/* CRYCB origin; allocated with GFP_DMA */
	__u32 crycbd;				/* CRYCB address ORed with format bits (CRYCB_FORMAT1) */
};

/*
 * Guest crypto control block referenced by the SIE block's crycbd field.
 * Currently all fields are reserved; the block only needs to exist (and be
 * addressable) for the hardware to enable MSA4 instructions for the guest.
 */
struct kvm_s390_crypto_cb {
	__u8    reserved00[128];                /* 0x0000 */
};

struct kvm_arch{
	struct sca_block *sca;
	debug_info_t *dbf;
@@ -423,6 +434,7 @@ struct kvm_arch{
	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
	wait_queue_head_t ipte_wq;
	spinlock_t start_stop_lock;
	struct kvm_s390_crypto crypto;
};

#define KVM_HVA_ERR_BAD		(-1UL)
+33 −0
Original line number Diff line number Diff line
@@ -392,6 +392,22 @@ long kvm_arch_vm_ioctl(struct file *filp,
	return r;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
@@ -429,6 +445,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
@@ -453,6 +472,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
@@ -507,6 +528,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
@@ -588,6 +610,14 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
	return 0;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_vfacility(76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
@@ -634,6 +664,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}