Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f61a657f authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull s390 updates from Martin Schwidefsky:
 "The s390 patches for the 4.7 merge window have the usual bug fixes and
  cleanups, and the following new features:

   - An interface for dasd driver to query if a volume is online to
     another operating system

   - A new ioctl for the dasd driver to verify the format for a range of
     tracks

   - Following the example of x86 the struct fpu is now allocated with
     the task_struct

   - The 'report_error' interface for the PCI bus to send an
     adapter-error notification from user space to the service element
     of the machine"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (29 commits)
  s390/vmem: remove unused function parameter
  s390/vmem: fix identity mapping
  s390: add missing include statements
  s390: add missing declarations
  s390: make couple of variables and functions static
  s390/cache: remove superfluous locking
  s390/cpuinfo: simplify locking and skip offline cpus early
  s390/3270: hangup the 3270 tty after a disconnect
  s390/3270: handle reconnect of a tty with a different size
  s390/3270: avoid endless I/O loop with disconnected 3270 terminals
  s390/3270: fix garbled output on 3270 tty view
  s390/3270: fix view reference counting
  s390/3270: add missing tty_kref_put
  s390/dumpstack: implement and use return_address()
  s390/cpum_sf: Remove superfluous SMP function call
  s390/cpum_cf: Remove superfluous SMP function call
  s390/Kconfig: make z196 the default processor type
  s390/sclp: avoid compile warning in sclp_pci_report
  s390/fpu: allocate 'struct fpu' with the task_struct
  s390/crypto: cleanup and move the header with the cpacf definitions
  ...
parents 0e034f5c c53db522
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -2558,6 +2558,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

	nohugeiomap	[KNL,x86] Disable kernel huge I/O mappings.

	nosmt		[KNL,S390] Disable symmetric multithreading (SMT).
			Equivalent to smt=1.

	noxsave		[BUGS=X86] Disables x86 extended register state save
			and restore using xsave. The kernel will fallback to
			enabling legacy floating-point and sse state.
@@ -3753,6 +3756,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
				1: Fast pin select (default)
				2: ATC IRMode

	smt		[KNL,S390] Set the maximum number of threads (logical
			CPUs) to use per physical CPU on systems capable of
			symmetric multithreading (SMT). Will be capped to the
			actual hardware limit.
			Format: <integer>
			Default: -1 (no limit)

	softlockup_panic=
			[KNL] Should the soft-lockup detector generate panics.
			Format: <integer>
+2 −1
Original line number Diff line number Diff line
@@ -107,6 +107,7 @@ config S390
	select ARCH_SUPPORTS_NUMA_BALANCING
	select ARCH_USE_BUILTIN_BSWAP
	select ARCH_USE_CMPXCHG_LOCKREF
	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
	select ARCH_WANTS_PROT_NUMA_PROT_NONE
	select ARCH_WANT_IPC_PARSE_VERSION
	select BUILDTIME_EXTABLE_SORT
@@ -210,7 +211,7 @@ config HAVE_MARCH_Z13_FEATURES

choice
	prompt "Processor type"
	default MARCH_Z900
	default MARCH_Z196

config MARCH_Z900
	bool "IBM zSeries model z800 and z900"
+56 −61
Original line number Diff line number Diff line
@@ -28,7 +28,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>

#define AES_KEYLEN_128		1
#define AES_KEYLEN_192		2
@@ -145,15 +145,15 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
		cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
			 AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
		cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
			 AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
		cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
			 AES_BLOCK_SIZE);
		break;
	}
@@ -170,15 +170,15 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
		cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
			 AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
		cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
			 AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
		cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
			 AES_BLOCK_SIZE);
		break;
	}
@@ -212,7 +212,7 @@ static void fallback_exit_cip(struct crypto_tfm *tfm)
static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-s390",
	.cra_priority		=	CRYPT_S390_PRIORITY,
	.cra_priority		=	300,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -298,16 +298,16 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,

	switch (key_len) {
	case 16:
		sctx->enc = KM_AES_128_ENCRYPT;
		sctx->dec = KM_AES_128_DECRYPT;
		sctx->enc = CPACF_KM_AES_128_ENC;
		sctx->dec = CPACF_KM_AES_128_DEC;
		break;
	case 24:
		sctx->enc = KM_AES_192_ENCRYPT;
		sctx->dec = KM_AES_192_DECRYPT;
		sctx->enc = CPACF_KM_AES_192_ENC;
		sctx->dec = CPACF_KM_AES_192_DEC;
		break;
	case 32:
		sctx->enc = KM_AES_256_ENCRYPT;
		sctx->dec = KM_AES_256_DECRYPT;
		sctx->enc = CPACF_KM_AES_256_ENC;
		sctx->dec = CPACF_KM_AES_256_DEC;
		break;
	}

@@ -326,7 +326,7 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_km(func, param, out, in, n);
		ret = cpacf_km(func, param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

@@ -393,7 +393,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_priority		=	400,	/* combo: aes + ecb */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -427,16 +427,16 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,

	switch (key_len) {
	case 16:
		sctx->enc = KMC_AES_128_ENCRYPT;
		sctx->dec = KMC_AES_128_DECRYPT;
		sctx->enc = CPACF_KMC_AES_128_ENC;
		sctx->dec = CPACF_KMC_AES_128_DEC;
		break;
	case 24:
		sctx->enc = KMC_AES_192_ENCRYPT;
		sctx->dec = KMC_AES_192_DECRYPT;
		sctx->enc = CPACF_KMC_AES_192_ENC;
		sctx->dec = CPACF_KMC_AES_192_DEC;
		break;
	case 32:
		sctx->enc = KMC_AES_256_ENCRYPT;
		sctx->dec = KMC_AES_256_DECRYPT;
		sctx->enc = CPACF_KMC_AES_256_ENC;
		sctx->dec = CPACF_KMC_AES_256_DEC;
		break;
	}

@@ -465,7 +465,7 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_kmc(func, &param, out, in, n);
		ret = cpacf_kmc(func, &param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

@@ -509,7 +509,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_priority		=	400,	/* combo: aes + cbc */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -596,8 +596,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,

	switch (key_len) {
	case 32:
		xts_ctx->enc = KM_XTS_128_ENCRYPT;
		xts_ctx->dec = KM_XTS_128_DECRYPT;
		xts_ctx->enc = CPACF_KM_XTS_128_ENC;
		xts_ctx->dec = CPACF_KM_XTS_128_DEC;
		memcpy(xts_ctx->key + 16, in_key, 16);
		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
		break;
@@ -607,8 +607,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		xts_fallback_setkey(tfm, in_key, key_len);
		break;
	case 64:
		xts_ctx->enc = KM_XTS_256_ENCRYPT;
		xts_ctx->dec = KM_XTS_256_DECRYPT;
		xts_ctx->enc = CPACF_KM_XTS_256_ENC;
		xts_ctx->dec = CPACF_KM_XTS_256_DEC;
		memcpy(xts_ctx->key, in_key, 32);
		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
		break;
@@ -643,7 +643,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
	/* remove decipher modifier bit from 'func' and call PCC */
	ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
	if (ret < 0)
		return -EIO;

@@ -655,7 +656,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;

		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
		ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

@@ -721,7 +722,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
static struct crypto_alg xts_aes_alg = {
	.cra_name		=	"xts(aes)",
	.cra_driver_name	=	"xts-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_priority		=	400,	/* combo: aes + xts */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -751,16 +752,16 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,

	switch (key_len) {
	case 16:
		sctx->enc = KMCTR_AES_128_ENCRYPT;
		sctx->dec = KMCTR_AES_128_DECRYPT;
		sctx->enc = CPACF_KMCTR_AES_128_ENC;
		sctx->dec = CPACF_KMCTR_AES_128_DEC;
		break;
	case 24:
		sctx->enc = KMCTR_AES_192_ENCRYPT;
		sctx->dec = KMCTR_AES_192_DECRYPT;
		sctx->enc = CPACF_KMCTR_AES_192_ENC;
		sctx->dec = CPACF_KMCTR_AES_192_DEC;
		break;
	case 32:
		sctx->enc = KMCTR_AES_256_ENCRYPT;
		sctx->dec = KMCTR_AES_256_DECRYPT;
		sctx->enc = CPACF_KMCTR_AES_256_ENC;
		sctx->dec = CPACF_KMCTR_AES_256_DEC;
		break;
	}

@@ -804,8 +805,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
				n = __ctrblk_init(ctrptr, nbytes);
			else
				n = AES_BLOCK_SIZE;
			ret = crypt_s390_kmctr(func, sctx->key, out, in,
					       n, ctrptr);
			ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
			if (ret < 0 || ret != n) {
				if (ctrptr == ctrblk)
					spin_unlock(&ctrblk_lock);
@@ -837,7 +837,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
		ret = cpacf_kmctr(func, sctx->key, buf, in,
				  AES_BLOCK_SIZE, ctrbuf);
		if (ret < 0 || ret != AES_BLOCK_SIZE)
			return -EIO;
@@ -875,7 +875,7 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ctr_aes_alg = {
	.cra_name		=	"ctr(aes)",
	.cra_driver_name	=	"ctr-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_priority		=	400,	/* combo: aes + ctr */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	1,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
@@ -899,11 +899,11 @@ static int __init aes_s390_init(void)
{
	int ret;

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
	if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
	if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
	if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
@@ -926,22 +926,17 @@ static int __init aes_s390_init(void)
	if (ret)
		goto cbc_aes_err;

	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
	if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
	    cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
		ret = crypto_register_alg(&xts_aes_alg);
		if (ret)
			goto xts_aes_err;
		xts_aes_alg_reg = 1;
	}

	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
				CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
	if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;

arch/s390/crypto/crypt_s390.h

deleted100644 → 0
+0 −493
Original line number Diff line number Diff line
/*
 * Cryptographic API.
 *
 * Support for s390 cryptographic instructions.
 *
 *   Copyright IBM Corp. 2003, 2015
 *   Author(s): Thomas Spatzier
 *		Jan Glauber (jan.glauber@de.ibm.com)
 *		Harald Freudenberger (freude@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H
#define _CRYPTO_ARCH_S390_CRYPT_S390_H

#include <asm/errno.h>
#include <asm/facility.h>

/* a function code is (operation << 8) | cpacf function; these masks split it */
#define CRYPT_S390_OP_MASK 0xFF00
#define CRYPT_S390_FUNC_MASK 0x00FF

/* crypto_alg registration priorities: plain cipher vs. combined mode */
#define CRYPT_S390_PRIORITY 300
#define CRYPT_S390_COMPOSITE_PRIORITY 400

/* bit flags naming the message-security-assist extensions a function needs */
#define CRYPT_S390_MSA	0x1
#define CRYPT_S390_MSA3	0x2
#define CRYPT_S390_MSA4	0x4
#define CRYPT_S390_MSA5	0x8

/* s390 cryptographic operations (instruction selector in the high byte) */
enum crypt_s390_operations {
	CRYPT_S390_KM	 = 0x0100,
	CRYPT_S390_KMC	 = 0x0200,
	CRYPT_S390_KIMD  = 0x0300,
	CRYPT_S390_KLMD  = 0x0400,
	CRYPT_S390_KMAC  = 0x0500,
	CRYPT_S390_KMCTR = 0x0600,
	CRYPT_S390_PPNO  = 0x0700
};

/*
 * function codes for KM (CIPHER MESSAGE) instruction
 * 0x80 is the decipher modifier bit
 * (low byte is the CPACF function code, high byte selects the KM operation)
 */
enum crypt_s390_km_func {
	KM_QUERY	    = CRYPT_S390_KM | 0x0,
	KM_DEA_ENCRYPT      = CRYPT_S390_KM | 0x1,
	KM_DEA_DECRYPT      = CRYPT_S390_KM | 0x1 | 0x80,
	KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 0x2,
	KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 0x2 | 0x80,
	KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 0x3,
	KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 0x3 | 0x80,
	KM_AES_128_ENCRYPT  = CRYPT_S390_KM | 0x12,
	KM_AES_128_DECRYPT  = CRYPT_S390_KM | 0x12 | 0x80,
	KM_AES_192_ENCRYPT  = CRYPT_S390_KM | 0x13,
	KM_AES_192_DECRYPT  = CRYPT_S390_KM | 0x13 | 0x80,
	KM_AES_256_ENCRYPT  = CRYPT_S390_KM | 0x14,
	KM_AES_256_DECRYPT  = CRYPT_S390_KM | 0x14 | 0x80,
	KM_XTS_128_ENCRYPT  = CRYPT_S390_KM | 0x32,
	KM_XTS_128_DECRYPT  = CRYPT_S390_KM | 0x32 | 0x80,
	KM_XTS_256_ENCRYPT  = CRYPT_S390_KM | 0x34,
	KM_XTS_256_DECRYPT  = CRYPT_S390_KM | 0x34 | 0x80,
};

/*
 * function codes for KMC (CIPHER MESSAGE WITH CHAINING)
 * instruction
 * 0x80 is the decipher modifier bit
 */
enum crypt_s390_kmc_func {
	KMC_QUERY            = CRYPT_S390_KMC | 0x0,
	KMC_DEA_ENCRYPT      = CRYPT_S390_KMC | 0x1,
	KMC_DEA_DECRYPT      = CRYPT_S390_KMC | 0x1 | 0x80,
	KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 0x2,
	KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 0x2 | 0x80,
	KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 0x3,
	KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 0x3 | 0x80,
	KMC_AES_128_ENCRYPT  = CRYPT_S390_KMC | 0x12,
	KMC_AES_128_DECRYPT  = CRYPT_S390_KMC | 0x12 | 0x80,
	KMC_AES_192_ENCRYPT  = CRYPT_S390_KMC | 0x13,
	KMC_AES_192_DECRYPT  = CRYPT_S390_KMC | 0x13 | 0x80,
	KMC_AES_256_ENCRYPT  = CRYPT_S390_KMC | 0x14,
	KMC_AES_256_DECRYPT  = CRYPT_S390_KMC | 0x14 | 0x80,
	KMC_PRNG	     = CRYPT_S390_KMC | 0x43,	/* pseudo-random number generation */
};

/*
 * function codes for KMCTR (CIPHER MESSAGE WITH COUNTER)
 * instruction
 * 0x80 is the decipher modifier bit
 */
enum crypt_s390_kmctr_func {
	KMCTR_QUERY            = CRYPT_S390_KMCTR | 0x0,
	KMCTR_DEA_ENCRYPT      = CRYPT_S390_KMCTR | 0x1,
	KMCTR_DEA_DECRYPT      = CRYPT_S390_KMCTR | 0x1 | 0x80,
	KMCTR_TDEA_128_ENCRYPT = CRYPT_S390_KMCTR | 0x2,
	KMCTR_TDEA_128_DECRYPT = CRYPT_S390_KMCTR | 0x2 | 0x80,
	KMCTR_TDEA_192_ENCRYPT = CRYPT_S390_KMCTR | 0x3,
	KMCTR_TDEA_192_DECRYPT = CRYPT_S390_KMCTR | 0x3 | 0x80,
	KMCTR_AES_128_ENCRYPT  = CRYPT_S390_KMCTR | 0x12,
	KMCTR_AES_128_DECRYPT  = CRYPT_S390_KMCTR | 0x12 | 0x80,
	KMCTR_AES_192_ENCRYPT  = CRYPT_S390_KMCTR | 0x13,
	KMCTR_AES_192_DECRYPT  = CRYPT_S390_KMCTR | 0x13 | 0x80,
	KMCTR_AES_256_ENCRYPT  = CRYPT_S390_KMCTR | 0x14,
	KMCTR_AES_256_DECRYPT  = CRYPT_S390_KMCTR | 0x14 | 0x80,
};

/*
 * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
 * instruction (digests of full blocks; last/partial block goes via KLMD)
 */
enum crypt_s390_kimd_func {
	KIMD_QUERY   = CRYPT_S390_KIMD | 0,
	KIMD_SHA_1   = CRYPT_S390_KIMD | 1,
	KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
	KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
	KIMD_GHASH   = CRYPT_S390_KIMD | 65,
};

/*
 * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
 * instruction (finalizes a digest started with KIMD)
 */
enum crypt_s390_klmd_func {
	KLMD_QUERY   = CRYPT_S390_KLMD | 0,
	KLMD_SHA_1   = CRYPT_S390_KLMD | 1,
	KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
	KLMD_SHA_512 = CRYPT_S390_KLMD | 3,
};

/*
 * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
 * instruction
 */
enum crypt_s390_kmac_func {
	KMAC_QUERY    = CRYPT_S390_KMAC | 0,
	KMAC_DEA      = CRYPT_S390_KMAC | 1,
	KMAC_TDEA_128 = CRYPT_S390_KMAC | 2,
	KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
};

/*
 * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER
 * OPERATION) instruction
 * 0x80 selects the seed variant of the SHA-512 DRNG function
 */
enum crypt_s390_ppno_func {
	PPNO_QUERY	      = CRYPT_S390_PPNO | 0,
	PPNO_SHA512_DRNG_GEN  = CRYPT_S390_PPNO | 3,
	PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83
};

/**
 * crypt_s390_km:
 * @func: the function code passed to KM; see crypt_s390_km_func
 * @param: address of parameter block; see POP for details on each func
 * @dest: address of destination memory area
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KM (CIPHER MESSAGE) operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for encryption/decryption funcs
 */
static inline int crypt_s390_km(long func, void *param,
				u8 *dest, const u8 *src, long src_len)
{
	/* KM takes the function code in GR0 and the parameter block in GR1 */
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	register u8 *__dest asm("4") = dest;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb92e0000,%3,%1\n" /* KM opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	/* query func (code 0) returns the status length, others bytes done */
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_kmc:
 * @func: the function code passed to KMC; see crypt_s390_kmc_func
 * @param: address of parameter block; see POP for details on each func
 * @dest: address of destination memory area
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for encryption/decryption funcs
 */
static inline int crypt_s390_kmc(long func, void *param,
				 u8 *dest, const u8 *src, long src_len)
{
	/* KMC takes the function code in GR0 and the parameter block in GR1 */
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	register u8 *__dest asm("4") = dest;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb92f0000,%3,%1\n" /* KMC opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	/* query func (code 0) returns the status length, others bytes done */
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_kimd:
 * @func: the function code passed to KIMD; see crypt_s390_kimd_func
 * @param: address of parameter block; see POP for details on each func
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
 * of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for digest funcs
 */
static inline int crypt_s390_kimd(long func, void *param,
				  const u8 *src, long src_len)
{
	/* KIMD takes the function code in GR0 and the parameter block in GR1;
	 * the digest state lives in the parameter block, so there is no dest */
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb93e0000,%1,%1\n" /* KIMD opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	/* query func (code 0) returns the status length, others bytes done */
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_klmd:
 * @func: the function code passed to KLMD; see crypt_s390_klmd_func
 * @param: address of parameter block; see POP for details on each func
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for digest funcs
 */
static inline int crypt_s390_klmd(long func, void *param,
				  const u8 *src, long src_len)
{
	/* KLMD takes the function code in GR0 and the parameter block in GR1;
	 * the digest state lives in the parameter block, so there is no dest */
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb93f0000,%1,%1\n" /* KLMD opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	/* query func (code 0) returns the status length, others bytes done */
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_kmac:
 * @func: the function code passed to KMAC; see crypt_s390_kmac_func
 * @param: address of parameter block; see POP for details on each func
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 *
 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
 * of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for the MAC funcs
 */
static inline int crypt_s390_kmac(long func, void *param,
				  const u8 *src, long src_len)
{
	/* KMAC takes the function code in GR0 and the parameter block in GR1;
	 * the MAC state lives in the parameter block, so there is no dest */
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	int ret;

	asm volatile(
		"0:	.insn	rre,0xb91e0000,%1,%1\n" /* KMAC opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "=d" (ret), "+a" (__src), "+d" (__src_len)
		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
	if (ret < 0)
		return ret;
	/* query func (code 0) returns the status length, others bytes done */
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_kmctr:
 * @func: the function code passed to KMCTR; see crypt_s390_kmctr_func
 * @param: address of parameter block; see POP for details on each func
 * @dest: address of destination memory area
 * @src: address of source memory area
 * @src_len: length of src operand in bytes
 * @counter: address of counter value
 *
 * Executes the KMCTR (CIPHER MESSAGE WITH COUNTER) operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of processed
 * bytes for encryption/decryption funcs
 */
static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
				 const u8 *src, long src_len, u8 *counter)
{
	/* KMCTR takes the function code in GR0, the parameter block in GR1
	 * and the counter operand in a third register (here GR6) */
	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param asm("1") = param;
	register const u8 *__src asm("2") = src;
	register long __src_len asm("3") = src_len;
	register u8 *__dest asm("4") = dest;
	register u8 *__ctr asm("6") = counter;
	int ret = -1;

	asm volatile(
		"0:	.insn	rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
		  "+a" (__ctr)
		: "d" (__func), "a" (__param) : "cc", "memory");
	if (ret < 0)
		return ret;
	/* query func (code 0) returns the status length, others bytes done */
	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}

/**
 * crypt_s390_ppno:
 * @func: the function code passed to PPNO; see crypt_s390_ppno_func
 * @param: address of parameter block; see POP for details on each func
 * @dest: address of destination memory area
 * @dest_len: size of destination memory area in bytes
 * @seed: address of seed data
 * @seed_len: size of seed data in bytes
 *
 * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
 * operation of the CPU.
 *
 * Returns -1 for failure, 0 for the query func, number of random
 * bytes stored in dest buffer for generate function
 */
static inline int crypt_s390_ppno(long func, void *param,
				  u8 *dest, long dest_len,
				  const u8 *seed, long seed_len)
{
	register long  __func	  asm("0") = func & CRYPT_S390_FUNC_MASK;
	register void *__param	  asm("1") = param;    /* param block (240 bytes) */
	register u8   *__dest	  asm("2") = dest;     /* buf for recv random bytes */
	register long  __dest_len asm("3") = dest_len; /* requested random bytes */
	register const u8 *__seed asm("4") = seed;     /* buf with seed data */
	register long  __seed_len asm("5") = seed_len; /* bytes in seed buf */
	int ret = -1;

	asm volatile (
		"0:	.insn	rre,0xb93c0000,%1,%5\n"	/* PPNO opcode */
		"1:	brc	1,0b\n"	  /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "+d" (ret), "+a"(__dest), "+d"(__dest_len)
		: "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len)
		: "cc", "memory");
	if (ret < 0)
		return ret;
	/* query/seed funcs return 0, generate returns bytes produced */
	return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0;
}

/**
 * crypt_s390_func_available:
 * @func: the function code of the specific function; 0 if op in general
 * @facility_mask: bitmap of required MSA extensions (CRYPT_S390_MSA*)
 *
 * Tests if a specific crypto function is implemented on the machine.
 *
 * Returns 1 if func available; 0 if func or op in general not available
 */
static inline int crypt_s390_func_available(int func,
					    unsigned int facility_mask)
{
	unsigned char status[16];	/* 128-bit function status block */
	int ret;

	/* bail out early if a required facility bit is not installed */
	if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
		return 0;
	if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
		return 0;
	if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
		return 0;
	if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57))
		return 0;

	/* run the query function of the matching instruction family; it
	 * stores a bitmap of the supported function codes into 'status' */
	switch (func & CRYPT_S390_OP_MASK) {
	case CRYPT_S390_KM:
		ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
		break;
	case CRYPT_S390_KMC:
		ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
		break;
	case CRYPT_S390_KIMD:
		ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
		break;
	case CRYPT_S390_KLMD:
		ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
		break;
	case CRYPT_S390_KMAC:
		ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
		break;
	case CRYPT_S390_KMCTR:
		ret = crypt_s390_kmctr(KMCTR_QUERY, &status,
				       NULL, NULL, 0, NULL);
		break;
	case CRYPT_S390_PPNO:
		ret = crypt_s390_ppno(PPNO_QUERY, &status,
				      NULL, 0, NULL, 0);
		break;
	default:
		return 0;
	}
	if (ret < 0)
		return 0;
	func &= CRYPT_S390_FUNC_MASK;
	func &= 0x7f;		/* mask modifier bit */
	/* test bit 'func' (MSB first) in the returned status block */
	return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}

/**
 * crypt_s390_pcc:
 * @func: a KM function code (see crypt_s390_km_func); only the low
 *	  7 bits are used, the decipher modifier bit is masked off
 * @param: address of parameter block; see POP for details on each func
 *
 * Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
 *
 * Returns -1 for failure, 0 for success.
 */
static inline int crypt_s390_pcc(long func, void *param)
{
	register long __func asm("0") = func & 0x7f; /* encrypt or decrypt */
	register void *__param asm("1") = param;
	int ret = -1;

	asm volatile(
		"0:	.insn	rre,0xb92c0000,0,0\n" /* PCC opcode */
		"1:	brc	1,0b\n" /* handle partial completion */
		"	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: "+d" (ret)
		: "d" (__func), "a" (__param) : "cc", "memory");
	return ret;
}

#endif	/* _CRYPTO_ARCH_S390_CRYPT_S390_H */
+34 −38

File changed.

Preview size limit exceeded, changes collapsed.

Loading