
Commit c7d4d259 authored by Martin Schwidefsky

s390/crypto: cleanup and move the header with the cpacf definitions



The CPACF instructions are going to be used in KVM as well. Move the
defines and the inline functions from arch/s390/crypto/crypt_s390.h
to arch/s390/include/asm. Rename the header to cpacf.h and replace
the crypt_s390_xxx names with cpacf_xxx.
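
For example, a typical call site changes like this (taken verbatim from
the ecb_aes_crypt() hunk below; the KM_xxx function-code names are
renamed the same way, e.g. KM_AES_128_ENCRYPT becomes CPACF_KM_AES_128_ENC):

-		ret = crypt_s390_km(func, param, out, in, n);
+		ret = cpacf_km(func, param, out, in, n);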

While we are at it, clean up the header as well. The encoding for
the CPACF operations is odd: there is an enum for each of the CPACF
instructions, with the hardware function code in the lower 8 bits of
each entry and a software-defined number for the CPACF instruction
in the upper 8 bits. Remove the superfluous software number and
replace the enums with simple defines.
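
For illustration, a minimal before/after sketch of that encoding change.
The old values come from the deleted crypt_s390.h below; the new names
follow the diff, while the exact new values are an assumption based on
the hardware function codes (0x80 being the decipher modifier bit):

/* old: software instruction number (upper 8 bits) fused with the
 * hardware function code (lower 8 bits) */
enum crypt_s390_km_func {
	KM_AES_128_ENCRYPT = CRYPT_S390_KM | 0x12,	  /* 0x0112 */
	KM_AES_128_DECRYPT = CRYPT_S390_KM | 0x12 | 0x80, /* 0x0192 */
};

/* new (assumed values): plain defines carrying only the hardware
 * function code */
#define CPACF_KM_AES_128_ENC	0x12
#define CPACF_KM_AES_128_DEC	(0x12 | 0x80)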

The crypt_s390_func_available() function tests for the presence
of a specific CPACF operation. The new name of the function is
cpacf_query, and it works slightly differently than before. It gets
passed the opcode of a CPACF instruction and a function code for
this instruction. The facility_mask parameter is gone; the opcode
is used to find the correct MSA facility bit to check whether the
CPACF instruction itself is available. If it is, the query function
of the given instruction is used to test whether the requested CPACF
operation is present.
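
As a rough sketch of what such a query amounts to (the new cpacf.h
internals are not shown on this page, so this is an approximation pieced
together from the old crypt_s390_func_available() in the deleted header
below): the instruction's query function fills a 128-bit status block in
which bit n, counted from the most significant bit, is set when function
code n is installed, and the availability test reduces to a bit lookup:

unsigned char status[16];

/* run the instruction's query function (function code 0) into
 * 'status', then test the bit for the requested function code;
 * the 0x80 decipher modifier bit is masked off first */
func &= 0x7f;
return (status[func >> 3] & (0x80 >> (func & 7))) != 0;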

Acked-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent f9dc447e
arch/s390/crypto/aes_s390.c
+56 −61
@@ -28,7 +28,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <crypto/xts.h>
-#include "crypt_s390.h"
+#include <asm/cpacf.h>
 
 #define AES_KEYLEN_128		1
 #define AES_KEYLEN_192		2
@@ -145,15 +145,15 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	switch (sctx->key_len) {
 	case 16:
-		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
+		cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
 			 AES_BLOCK_SIZE);
 		break;
 	case 24:
-		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
+		cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
 			 AES_BLOCK_SIZE);
 		break;
 	case 32:
-		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
+		cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
 			 AES_BLOCK_SIZE);
 		break;
 	}
@@ -170,15 +170,15 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	switch (sctx->key_len) {
 	case 16:
-		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
+		cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
 			 AES_BLOCK_SIZE);
 		break;
 	case 24:
-		crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
+		cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
 			 AES_BLOCK_SIZE);
 		break;
 	case 32:
-		crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
+		cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
 			 AES_BLOCK_SIZE);
 		break;
 	}
@@ -212,7 +212,7 @@ static void fallback_exit_cip(struct crypto_tfm *tfm)
 static struct crypto_alg aes_alg = {
 	.cra_name		=	"aes",
 	.cra_driver_name	=	"aes-s390",
-	.cra_priority		=	CRYPT_S390_PRIORITY,
+	.cra_priority		=	300,
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -298,16 +298,16 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 	switch (key_len) {
 	case 16:
-		sctx->enc = KM_AES_128_ENCRYPT;
-		sctx->dec = KM_AES_128_DECRYPT;
+		sctx->enc = CPACF_KM_AES_128_ENC;
+		sctx->dec = CPACF_KM_AES_128_DEC;
 		break;
 	case 24:
-		sctx->enc = KM_AES_192_ENCRYPT;
-		sctx->dec = KM_AES_192_DECRYPT;
+		sctx->enc = CPACF_KM_AES_192_ENC;
+		sctx->dec = CPACF_KM_AES_192_DEC;
 		break;
 	case 32:
-		sctx->enc = KM_AES_256_ENCRYPT;
-		sctx->dec = KM_AES_256_DECRYPT;
+		sctx->enc = CPACF_KM_AES_256_ENC;
+		sctx->dec = CPACF_KM_AES_256_DEC;
 		break;
 	}
 
@@ -326,7 +326,7 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_km(func, param, out, in, n);
+		ret = cpacf_km(func, param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
@@ -393,7 +393,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
 static struct crypto_alg ecb_aes_alg = {
 	.cra_name		=	"ecb(aes)",
 	.cra_driver_name	=	"ecb-aes-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: aes + ecb */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -427,16 +427,16 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 	switch (key_len) {
 	case 16:
-		sctx->enc = KMC_AES_128_ENCRYPT;
-		sctx->dec = KMC_AES_128_DECRYPT;
+		sctx->enc = CPACF_KMC_AES_128_ENC;
+		sctx->dec = CPACF_KMC_AES_128_DEC;
 		break;
 	case 24:
-		sctx->enc = KMC_AES_192_ENCRYPT;
-		sctx->dec = KMC_AES_192_DECRYPT;
+		sctx->enc = CPACF_KMC_AES_192_ENC;
+		sctx->dec = CPACF_KMC_AES_192_DEC;
 		break;
 	case 32:
-		sctx->enc = KMC_AES_256_ENCRYPT;
-		sctx->dec = KMC_AES_256_DECRYPT;
+		sctx->enc = CPACF_KMC_AES_256_ENC;
+		sctx->dec = CPACF_KMC_AES_256_DEC;
 		break;
 	}
 
@@ -465,7 +465,7 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, &param, out, in, n);
+		ret = cpacf_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
@@ -509,7 +509,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 static struct crypto_alg cbc_aes_alg = {
 	.cra_name		=	"cbc(aes)",
 	.cra_driver_name	=	"cbc-aes-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: aes + cbc */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -596,8 +596,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 	switch (key_len) {
 	case 32:
-		xts_ctx->enc = KM_XTS_128_ENCRYPT;
-		xts_ctx->dec = KM_XTS_128_DECRYPT;
+		xts_ctx->enc = CPACF_KM_XTS_128_ENC;
+		xts_ctx->dec = CPACF_KM_XTS_128_DEC;
 		memcpy(xts_ctx->key + 16, in_key, 16);
 		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
 		break;
@@ -607,8 +607,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		xts_fallback_setkey(tfm, in_key, key_len);
 		break;
 	case 64:
-		xts_ctx->enc = KM_XTS_256_ENCRYPT;
-		xts_ctx->dec = KM_XTS_256_DECRYPT;
+		xts_ctx->enc = CPACF_KM_XTS_256_ENC;
+		xts_ctx->dec = CPACF_KM_XTS_256_DEC;
 		memcpy(xts_ctx->key, in_key, 32);
 		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
 		break;
@@ -643,7 +643,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
 	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
 	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
 	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
-	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
+	/* remove decipher modifier bit from 'func' and call PCC */
+	ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
 	if (ret < 0)
 		return -EIO;
 
@@ -655,7 +656,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 
-		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
+		ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
@@ -721,7 +722,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
 static struct crypto_alg xts_aes_alg = {
 	.cra_name		=	"xts(aes)",
 	.cra_driver_name	=	"xts-aes-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: aes + xts */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -751,16 +752,16 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 	switch (key_len) {
 	case 16:
-		sctx->enc = KMCTR_AES_128_ENCRYPT;
-		sctx->dec = KMCTR_AES_128_DECRYPT;
+		sctx->enc = CPACF_KMCTR_AES_128_ENC;
+		sctx->dec = CPACF_KMCTR_AES_128_DEC;
 		break;
 	case 24:
-		sctx->enc = KMCTR_AES_192_ENCRYPT;
-		sctx->dec = KMCTR_AES_192_DECRYPT;
+		sctx->enc = CPACF_KMCTR_AES_192_ENC;
+		sctx->dec = CPACF_KMCTR_AES_192_DEC;
 		break;
 	case 32:
-		sctx->enc = KMCTR_AES_256_ENCRYPT;
-		sctx->dec = KMCTR_AES_256_DECRYPT;
+		sctx->enc = CPACF_KMCTR_AES_256_ENC;
+		sctx->dec = CPACF_KMCTR_AES_256_DEC;
 		break;
 	}
 
@@ -804,8 +805,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 				n = __ctrblk_init(ctrptr, nbytes);
 			else
 				n = AES_BLOCK_SIZE;
-			ret = crypt_s390_kmctr(func, sctx->key, out, in,
-					       n, ctrptr);
+			ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
 			if (ret < 0 || ret != n) {
 				if (ctrptr == ctrblk)
 					spin_unlock(&ctrblk_lock);
@@ -837,7 +837,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 	if (nbytes) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
-		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+		ret = cpacf_kmctr(func, sctx->key, buf, in,
 				  AES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != AES_BLOCK_SIZE)
 			return -EIO;
@@ -875,7 +875,7 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
 static struct crypto_alg ctr_aes_alg = {
 	.cra_name		=	"ctr(aes)",
 	.cra_driver_name	=	"ctr-aes-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: aes + ctr */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	1,
 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
@@ -899,11 +899,11 @@ static int __init aes_s390_init(void)
 {
 	int ret;
 
-	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
+	if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
 		keylen_flag |= AES_KEYLEN_128;
-	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
+	if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
 		keylen_flag |= AES_KEYLEN_192;
-	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
+	if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
 		keylen_flag |= AES_KEYLEN_256;
 
 	if (!keylen_flag)
@@ -926,22 +926,17 @@ static int __init aes_s390_init(void)
 	if (ret)
 		goto cbc_aes_err;
 
-	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
-			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
-	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
-			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+	if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
+	    cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
 		ret = crypto_register_alg(&xts_aes_alg);
 		if (ret)
 			goto xts_aes_err;
 		xts_aes_alg_reg = 1;
 	}
 
-	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
-				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
-	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
-				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
-	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
-				CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+	if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
+	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
+	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
 		if (!ctrblk) {
 			ret = -ENOMEM;

arch/s390/crypto/crypt_s390.h
deleted file mode 100644
+0 −493
/*
 * Cryptographic API.
 *
 * Support for s390 cryptographic instructions.
 *
 *   Copyright IBM Corp. 2003, 2015
 *   Author(s): Thomas Spatzier
 *		Jan Glauber (jan.glauber@de.ibm.com)
 *		Harald Freudenberger (freude@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H
#define _CRYPTO_ARCH_S390_CRYPT_S390_H

#include <asm/errno.h>
#include <asm/facility.h>

#define CRYPT_S390_OP_MASK 0xFF00
#define CRYPT_S390_FUNC_MASK 0x00FF

#define CRYPT_S390_PRIORITY 300
#define CRYPT_S390_COMPOSITE_PRIORITY 400

#define CRYPT_S390_MSA	0x1
#define CRYPT_S390_MSA3	0x2
#define CRYPT_S390_MSA4	0x4
#define CRYPT_S390_MSA5	0x8

/* s390 cryptographic operations */
enum crypt_s390_operations {
	CRYPT_S390_KM	 = 0x0100,
	CRYPT_S390_KMC	 = 0x0200,
	CRYPT_S390_KIMD  = 0x0300,
	CRYPT_S390_KLMD  = 0x0400,
	CRYPT_S390_KMAC  = 0x0500,
	CRYPT_S390_KMCTR = 0x0600,
	CRYPT_S390_PPNO  = 0x0700
};

/*
 * function codes for KM (CIPHER MESSAGE) instruction
 * 0x80 is the decipher modifier bit
 */
enum crypt_s390_km_func {
	KM_QUERY	    = CRYPT_S390_KM | 0x0,
	KM_DEA_ENCRYPT      = CRYPT_S390_KM | 0x1,
	KM_DEA_DECRYPT      = CRYPT_S390_KM | 0x1 | 0x80,
	KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 0x2,
	KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 0x2 | 0x80,
	KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 0x3,
	KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 0x3 | 0x80,
	KM_AES_128_ENCRYPT  = CRYPT_S390_KM | 0x12,
	KM_AES_128_DECRYPT  = CRYPT_S390_KM | 0x12 | 0x80,
	KM_AES_192_ENCRYPT  = CRYPT_S390_KM | 0x13,
	KM_AES_192_DECRYPT  = CRYPT_S390_KM | 0x13 | 0x80,
	KM_AES_256_ENCRYPT  = CRYPT_S390_KM | 0x14,
	KM_AES_256_DECRYPT  = CRYPT_S390_KM | 0x14 | 0x80,
	KM_XTS_128_ENCRYPT  = CRYPT_S390_KM | 0x32,
	KM_XTS_128_DECRYPT  = CRYPT_S390_KM | 0x32 | 0x80,
	KM_XTS_256_ENCRYPT  = CRYPT_S390_KM | 0x34,
	KM_XTS_256_DECRYPT  = CRYPT_S390_KM | 0x34 | 0x80,
};

/*
 * function codes for KMC (CIPHER MESSAGE WITH CHAINING)
 * instruction
 */
enum crypt_s390_kmc_func {
	KMC_QUERY            = CRYPT_S390_KMC | 0x0,
	KMC_DEA_ENCRYPT      = CRYPT_S390_KMC | 0x1,
	KMC_DEA_DECRYPT      = CRYPT_S390_KMC | 0x1 | 0x80,
	KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 0x2,
	KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 0x2 | 0x80,
	KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 0x3,
	KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 0x3 | 0x80,
	KMC_AES_128_ENCRYPT  = CRYPT_S390_KMC | 0x12,
	KMC_AES_128_DECRYPT  = CRYPT_S390_KMC | 0x12 | 0x80,
	KMC_AES_192_ENCRYPT  = CRYPT_S390_KMC | 0x13,
	KMC_AES_192_DECRYPT  = CRYPT_S390_KMC | 0x13 | 0x80,
	KMC_AES_256_ENCRYPT  = CRYPT_S390_KMC | 0x14,
	KMC_AES_256_DECRYPT  = CRYPT_S390_KMC | 0x14 | 0x80,
	KMC_PRNG	     = CRYPT_S390_KMC | 0x43,
};

/*
 * function codes for KMCTR (CIPHER MESSAGE WITH COUNTER)
 * instruction
 */
enum crypt_s390_kmctr_func {
	KMCTR_QUERY            = CRYPT_S390_KMCTR | 0x0,
	KMCTR_DEA_ENCRYPT      = CRYPT_S390_KMCTR | 0x1,
	KMCTR_DEA_DECRYPT      = CRYPT_S390_KMCTR | 0x1 | 0x80,
	KMCTR_TDEA_128_ENCRYPT = CRYPT_S390_KMCTR | 0x2,
	KMCTR_TDEA_128_DECRYPT = CRYPT_S390_KMCTR | 0x2 | 0x80,
	KMCTR_TDEA_192_ENCRYPT = CRYPT_S390_KMCTR | 0x3,
	KMCTR_TDEA_192_DECRYPT = CRYPT_S390_KMCTR | 0x3 | 0x80,
	KMCTR_AES_128_ENCRYPT  = CRYPT_S390_KMCTR | 0x12,
	KMCTR_AES_128_DECRYPT  = CRYPT_S390_KMCTR | 0x12 | 0x80,
	KMCTR_AES_192_ENCRYPT  = CRYPT_S390_KMCTR | 0x13,
	KMCTR_AES_192_DECRYPT  = CRYPT_S390_KMCTR | 0x13 | 0x80,
	KMCTR_AES_256_ENCRYPT  = CRYPT_S390_KMCTR | 0x14,
	KMCTR_AES_256_DECRYPT  = CRYPT_S390_KMCTR | 0x14 | 0x80,
};

/*
 * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
 * instruction
 */
enum crypt_s390_kimd_func {
	KIMD_QUERY   = CRYPT_S390_KIMD | 0,
	KIMD_SHA_1   = CRYPT_S390_KIMD | 1,
	KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
	KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
	KIMD_GHASH   = CRYPT_S390_KIMD | 65,
};

/*
 * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
 * instruction
 */
enum crypt_s390_klmd_func {
	KLMD_QUERY   = CRYPT_S390_KLMD | 0,
	KLMD_SHA_1   = CRYPT_S390_KLMD | 1,
	KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
	KLMD_SHA_512 = CRYPT_S390_KLMD | 3,
};

/*
 * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
 * instruction
 */
enum crypt_s390_kmac_func {
	KMAC_QUERY    = CRYPT_S390_KMAC | 0,
	KMAC_DEA      = CRYPT_S390_KMAC | 1,
	KMAC_TDEA_128 = CRYPT_S390_KMAC | 2,
	KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
};

/*
 * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER
 * OPERATION) instruction
 */
enum crypt_s390_ppno_func {
	PPNO_QUERY	      = CRYPT_S390_PPNO | 0,
	PPNO_SHA512_DRNG_GEN  = CRYPT_S390_PPNO | 3,
	PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83
};

/**
 * crypt_s390_km:
 * @func: the function code passed to KM; see crypt_s390_km_func
 * @param: address of parameter block; see POP for details on each func
 * @dest: address of destination memory area
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KM (CIPHER MESSAGE) operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for encryption/decryption funcs
 */
static inline int crypt_s390_km(long func, void *param,
				u8 *dest, const u8 *src, long src_len)
{
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	register u8 *__dest asm("4") = dest;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb92e0000,%3,%1\n" /* KM opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_kmc:
 * @func: the function code passed to KMC; see crypt_s390_kmc_func
 * @param: address of parameter block; see POP for details on each func
 * @dest: address of destination memory area
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for encryption/decryption funcs
 */
static inline int crypt_s390_kmc(long func, void *param,
				 u8 *dest, const u8 *src, long src_len)
{
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	register u8 *__dest asm("4") = dest;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb92f0000,%3,%1\n" /* KMC opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_kimd:
 * @func: the function code passed to KIMD; see crypt_s390_kimd_func
 * @param: address of parameter block; see POP for details on each func
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
 * of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for digest funcs
 */
static inline int crypt_s390_kimd(long func, void *param,
				  const u8 *src, long src_len)
{
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb93e0000,%1,%1\n" /* KIMD opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_klmd:
 * @func: the function code passed to KLMD; see crypt_s390_klmd_func
 * @param: address of parameter block; see POP for details on each func
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for digest funcs
 */
static inline int crypt_s390_klmd(long func, void *param,
				  const u8 *src, long src_len)
{
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb93f0000,%1,%1\n" /* KLMD opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_kmac:
 * @func: the function code passed to KMAC; see crypt_s390_kmac_func
 * @param: address of parameter block; see POP for details on each func
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
 * of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for digest funcs
 */
static inline int crypt_s390_kmac(long func, void *param,
				  const u8 *src, long src_len)
{
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb91e0000,%1,%1\n" /* KLAC opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_kmctr:
 * @func: the function code passed to KMCTR; see crypt_s390_kmctr_func
 * @param: address of parameter block; see POP for details on each func
 * @dest: address of destination memory area
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 * @counter: address of counter value
 *
 * Executes the KMCTR (CIPHER MESSAGE WITH COUNTER) operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for encryption/decryption funcs
 */
static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
				 const u8 *src, long src_len, u8 *counter)
{
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	register u8 *__dest asm("4") = dest;
	register u8 *__ctr asm("6") = counter;
	int ret = -1;

	asm volatile(
		"0:	.insn	rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
		  "+a" (__ctr)
		: "d" (__func), "a" (__param) : "cc", "memory");
	if (ret < 0)
		return ret;
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_ppno:
 * @func: the function code passed to PPNO; see crypt_s390_ppno_func
 * @param: address of parameter block; see POP for details on each func
 * @dest: address of destination memory area
 * @dest_len: size of destination memory area in bytes
 * @seed: address of seed data
 * @seed_len: size of seed data in bytes
 *
 * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
 * operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of random
 * bytes stored in dest buffer for generate function
 */
static inline int crypt_s390_ppno(long func, void *param,
				  u8 *dest, long dest_len,
				  const u8 *seed, long seed_len)
{
	register long  __func	  asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param	  asm("1") = param;    /* param block (240 bytes) */
	register u8   *__dest	  asm("2") = dest;     /* buf for recv random bytes */
	register long  __dest_len asm("3") = dest_len; /* requested random bytes */
	register const u8 *__seed asm("4") = seed;     /* buf with seed data */
	register long  __seed_len asm("5") = seed_len; /* bytes in seed buf */
	int ret = -1;

	asm volatile (
		"0:	.insn	rre,0xb93c0000,%1,%5\n"	/* PPNO opcode */
		"1:	brc	1,0b\n"	  /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "+d" (ret), "+a"(__dest), "+d"(__dest_len)
		: "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len)
		: "cc", "memory");
	if (ret < 0)
		return ret;
	return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0;
}

/**
 * crypt_s390_func_available:
 * @func: the function code of the specific function; 0 if op in general
 *
 * Tests if a specific crypto function is implemented on the machine.
 *
 * Returns 1 if func available; 0 if func or op in general not available
 */
static inline int crypt_s390_func_available(int func,
					    unsigned int facility_mask)
{
	unsigned char status[16];
	int ret;

	if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
		return 0;
	if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
		return 0;
	if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
		return 0;
	if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57))
		return 0;

	switch (func & CRYPT_S390_OP_MASK) {
	case CRYPT_S390_KM:
		ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
		break;
	case CRYPT_S390_KMC:
		ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
		break;
	case CRYPT_S390_KIMD:
		ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
		break;
	case CRYPT_S390_KLMD:
		ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
		break;
	case CRYPT_S390_KMAC:
		ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
		break;
	case CRYPT_S390_KMCTR:
		ret = crypt_s390_kmctr(KMCTR_QUERY, &status,
				       NULL, NULL, 0, NULL);
		break;
	case CRYPT_S390_PPNO:
		ret = crypt_s390_ppno(PPNO_QUERY, &status,
				      NULL, 0, NULL, 0);
		break;
	default:
		return 0;
	}
	if (ret < 0)
		return 0;
	func &= CRYPT_S390_FUNC_MASK;
	func &= 0x7f;		/* mask modifier bit */
	return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}

/**
 * crypt_s390_pcc:
 * @func: the function code passed to PCC; see crypt_s390_km_func
 * @param: address of parameter block; see POP for details on each func
 *
 * Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
 *
 * Returns -1 for failure, 0 for success.
 */
static inline int crypt_s390_pcc(long func, void *param)
{
	register long __func asm("0") = func & 0x7f; /* encrypt or decrypt */
	register void *__param asm("1") = param;
	int ret = -1;

	asm volatile(
		"0:	.insn	rre,0xb92c0000,0,0\n" /* PCC opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "+d" (ret)
		: "d" (__func), "a" (__param) : "cc", "memory");
	return ret;
}

#endif	/* _CRYPTO_ARCH_S390_CRYPT_S390_H */
+34 −38
File changed; diff collapsed (preview size limit exceeded).

arch/s390/crypto/ghash_s390.c
+7 −9
@@ -10,8 +10,7 @@
 #include <crypto/internal/hash.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
-
-#include "crypt_s390.h"
+#include <asm/cpacf.h>
 
 #define GHASH_BLOCK_SIZE	16
 #define GHASH_DIGEST_SIZE	16
@@ -72,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
 		src += n;
 
 		if (!dctx->bytes) {
-			ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
+			ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
 					 GHASH_BLOCK_SIZE);
 			if (ret != GHASH_BLOCK_SIZE)
 				return -EIO;
@@ -81,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
 
 	n = srclen & ~(GHASH_BLOCK_SIZE - 1);
 	if (n) {
-		ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
+		ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
 		if (ret != n)
 			return -EIO;
 		src += n;
@@ -106,7 +105,7 @@ static int ghash_flush(struct ghash_desc_ctx *dctx)
 
 		memset(pos, 0, dctx->bytes);
 
-		ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
+		ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
 		if (ret != GHASH_BLOCK_SIZE)
 			return -EIO;
 
@@ -137,7 +136,7 @@ static struct shash_alg ghash_alg = {
 	.base		= {
 		.cra_name		= "ghash",
 		.cra_driver_name	= "ghash-s390",
-		.cra_priority		= CRYPT_S390_PRIORITY,
+		.cra_priority		= 300,
 		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= GHASH_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct ghash_ctx),
@@ -147,8 +146,7 @@ static struct shash_alg ghash_alg = {
 
 static int __init ghash_mod_init(void)
 {
-	if (!crypt_s390_func_available(KIMD_GHASH,
-				       CRYPT_S390_MSA | CRYPT_S390_MSA4))
+	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
 		return -EOPNOTSUPP;
 
 	return crypto_register_shash(&ghash_alg);
+25 −35
File changed; diff collapsed (preview size limit exceeded).
