Commit f61a657f authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "The s390 patches for the 4.7 merge window have the usual bug fixes and
  cleanups, and the following new features:

   - An interface for the dasd driver to query whether a volume is online
     to another operating system

   - A new ioctl for the dasd driver to verify the format for a range of
     tracks

   - Following the example of x86, the struct fpu is now allocated with
     the task_struct

   - The 'report_error' interface for the PCI bus to send an
     adapter-error notification from user space to the service element
     of the machine"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (29 commits)
  s390/vmem: remove unused function parameter
  s390/vmem: fix identity mapping
  s390: add missing include statements
  s390: add missing declarations
  s390: make couple of variables and functions static
  s390/cache: remove superfluous locking
  s390/cpuinfo: simplify locking and skip offline cpus early
  s390/3270: hangup the 3270 tty after a disconnect
  s390/3270: handle reconnect of a tty with a different size
  s390/3270: avoid endless I/O loop with disconnected 3270 terminals
  s390/3270: fix garbled output on 3270 tty view
  s390/3270: fix view reference counting
  s390/3270: add missing tty_kref_put
  s390/dumpstack: implement and use return_address()
  s390/cpum_sf: Remove superfluous SMP function call
  s390/cpum_cf: Remove superfluous SMP function call
  s390/Kconfig: make z196 the default processor type
  s390/sclp: avoid compile warning in sclp_pci_report
  s390/fpu: allocate 'struct fpu' with the task_struct
  s390/crypto: cleanup and move the header with the cpacf definitions
  ...
parents 0e034f5c c53db522
@@ -2558,6 +2558,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nohugeiomap [KNL,x86] Disable kernel huge I/O mappings.
nosmt [KNL,S390] Disable symmetric multithreading (SMT).
Equivalent to smt=1.
noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fall back to
enabling legacy floating-point and SSE state.
@@ -3753,6 +3756,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1: Fast pin select (default)
2: ATC IRMode
smt [KNL,S390] Set the maximum number of threads (logical
CPUs) to use per physical CPU on systems capable of
symmetric multithreading (SMT). Will be capped to the
actual hardware limit.
Format: <integer>
Default: -1 (no limit)
softlockup_panic=
[KNL] Should the soft-lockup detector generate panics.
Format: <integer>
......
@@ -107,6 +107,7 @@ config S390
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANTS_PROT_NUMA_PROT_NONE
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
@@ -210,7 +211,7 @@ config HAVE_MARCH_Z13_FEATURES
choice
prompt "Processor type"
default MARCH_Z900
default MARCH_Z196
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
......
@@ -28,7 +28,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
#define AES_KEYLEN_128 1
#define AES_KEYLEN_192 2
@@ -145,15 +145,15 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
switch (sctx->key_len) {
case 16:
crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 24:
crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 32:
crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
}
@@ -170,15 +170,15 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
switch (sctx->key_len) {
case 16:
crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 24:
crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 32:
crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
}
@@ -212,7 +212,7 @@ static void fallback_exit_cip(struct crypto_tfm *tfm)
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -298,16 +298,16 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 16:
sctx->enc = KM_AES_128_ENCRYPT;
sctx->dec = KM_AES_128_DECRYPT;
sctx->enc = CPACF_KM_AES_128_ENC;
sctx->dec = CPACF_KM_AES_128_DEC;
break;
case 24:
sctx->enc = KM_AES_192_ENCRYPT;
sctx->dec = KM_AES_192_DECRYPT;
sctx->enc = CPACF_KM_AES_192_ENC;
sctx->dec = CPACF_KM_AES_192_DEC;
break;
case 32:
sctx->enc = KM_AES_256_ENCRYPT;
sctx->dec = KM_AES_256_DECRYPT;
sctx->enc = CPACF_KM_AES_256_ENC;
sctx->dec = CPACF_KM_AES_256_DEC;
break;
}
@@ -326,7 +326,7 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
ret = cpacf_km(func, param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -393,7 +393,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
static struct crypto_alg ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -427,16 +427,16 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 16:
sctx->enc = KMC_AES_128_ENCRYPT;
sctx->dec = KMC_AES_128_DECRYPT;
sctx->enc = CPACF_KMC_AES_128_ENC;
sctx->dec = CPACF_KMC_AES_128_DEC;
break;
case 24:
sctx->enc = KMC_AES_192_ENCRYPT;
sctx->dec = KMC_AES_192_DECRYPT;
sctx->enc = CPACF_KMC_AES_192_ENC;
sctx->dec = CPACF_KMC_AES_192_DEC;
break;
case 32:
sctx->enc = KMC_AES_256_ENCRYPT;
sctx->dec = KMC_AES_256_DECRYPT;
sctx->enc = CPACF_KMC_AES_256_ENC;
sctx->dec = CPACF_KMC_AES_256_DEC;
break;
}
@@ -465,7 +465,7 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, &param, out, in, n);
ret = cpacf_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -509,7 +509,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -596,8 +596,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 32:
xts_ctx->enc = KM_XTS_128_ENCRYPT;
xts_ctx->dec = KM_XTS_128_DECRYPT;
xts_ctx->enc = CPACF_KM_XTS_128_ENC;
xts_ctx->dec = CPACF_KM_XTS_128_DEC;
memcpy(xts_ctx->key + 16, in_key, 16);
memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
break;
@@ -607,8 +607,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_fallback_setkey(tfm, in_key, key_len);
break;
case 64:
xts_ctx->enc = KM_XTS_256_ENCRYPT;
xts_ctx->dec = KM_XTS_256_DECRYPT;
xts_ctx->enc = CPACF_KM_XTS_256_ENC;
xts_ctx->dec = CPACF_KM_XTS_256_DEC;
memcpy(xts_ctx->key, in_key, 32);
memcpy(xts_ctx->pcc_key, in_key + 32, 32);
break;
@@ -643,7 +643,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
/* remove decipher modifier bit from 'func' and call PCC */
ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
if (ret < 0)
return -EIO;
@@ -655,7 +656,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -721,7 +722,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
static struct crypto_alg xts_aes_alg = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + xts */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -751,16 +752,16 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 16:
sctx->enc = KMCTR_AES_128_ENCRYPT;
sctx->dec = KMCTR_AES_128_DECRYPT;
sctx->enc = CPACF_KMCTR_AES_128_ENC;
sctx->dec = CPACF_KMCTR_AES_128_DEC;
break;
case 24:
sctx->enc = KMCTR_AES_192_ENCRYPT;
sctx->dec = KMCTR_AES_192_DECRYPT;
sctx->enc = CPACF_KMCTR_AES_192_ENC;
sctx->dec = CPACF_KMCTR_AES_192_DEC;
break;
case 32:
sctx->enc = KMCTR_AES_256_ENCRYPT;
sctx->dec = KMCTR_AES_256_DECRYPT;
sctx->enc = CPACF_KMCTR_AES_256_ENC;
sctx->dec = CPACF_KMCTR_AES_256_DEC;
break;
}
@@ -804,8 +805,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
n = __ctrblk_init(ctrptr, nbytes);
else
n = AES_BLOCK_SIZE;
ret = crypt_s390_kmctr(func, sctx->key, out, in,
n, ctrptr);
ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
if (ret < 0 || ret != n) {
if (ctrptr == ctrblk)
spin_unlock(&ctrblk_lock);
@@ -837,7 +837,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
ret = cpacf_kmctr(func, sctx->key, buf, in,
AES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != AES_BLOCK_SIZE)
return -EIO;
@@ -875,7 +875,7 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ctr_aes_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
@@ -899,11 +899,11 @@ static int __init aes_s390_init(void)
{
int ret;
if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
keylen_flag |= AES_KEYLEN_128;
if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
keylen_flag |= AES_KEYLEN_192;
if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
keylen_flag |= AES_KEYLEN_256;
if (!keylen_flag)
@@ -926,22 +926,17 @@ static int __init aes_s390_init(void)
if (ret)
goto cbc_aes_err;
if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KM_XTS_256_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
ret = crypto_register_alg(&xts_aes_alg);
if (ret)
goto xts_aes_err;
xts_aes_alg_reg = 1;
}
if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
......
/*
* Cryptographic API.
*
* Support for s390 cryptographic instructions.
*
* Copyright IBM Corp. 2003, 2015
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
* Harald Freudenberger (freude@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H
#define _CRYPTO_ARCH_S390_CRYPT_S390_H
#include <asm/errno.h>
#include <asm/facility.h>
#define CRYPT_S390_OP_MASK 0xFF00
#define CRYPT_S390_FUNC_MASK 0x00FF
#define CRYPT_S390_PRIORITY 300
#define CRYPT_S390_COMPOSITE_PRIORITY 400
#define CRYPT_S390_MSA 0x1
#define CRYPT_S390_MSA3 0x2
#define CRYPT_S390_MSA4 0x4
#define CRYPT_S390_MSA5 0x8
/* s390 cryptographic operations */
enum crypt_s390_operations {
CRYPT_S390_KM = 0x0100,
CRYPT_S390_KMC = 0x0200,
CRYPT_S390_KIMD = 0x0300,
CRYPT_S390_KLMD = 0x0400,
CRYPT_S390_KMAC = 0x0500,
CRYPT_S390_KMCTR = 0x0600,
CRYPT_S390_PPNO = 0x0700
};
/*
* function codes for KM (CIPHER MESSAGE) instruction
* 0x80 is the decipher modifier bit
*/
enum crypt_s390_km_func {
KM_QUERY = CRYPT_S390_KM | 0x0,
KM_DEA_ENCRYPT = CRYPT_S390_KM | 0x1,
KM_DEA_DECRYPT = CRYPT_S390_KM | 0x1 | 0x80,
KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 0x2,
KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 0x2 | 0x80,
KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 0x3,
KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 0x3 | 0x80,
KM_AES_128_ENCRYPT = CRYPT_S390_KM | 0x12,
KM_AES_128_DECRYPT = CRYPT_S390_KM | 0x12 | 0x80,
KM_AES_192_ENCRYPT = CRYPT_S390_KM | 0x13,
KM_AES_192_DECRYPT = CRYPT_S390_KM | 0x13 | 0x80,
KM_AES_256_ENCRYPT = CRYPT_S390_KM | 0x14,
KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
KM_XTS_128_ENCRYPT = CRYPT_S390_KM | 0x32,
KM_XTS_128_DECRYPT = CRYPT_S390_KM | 0x32 | 0x80,
KM_XTS_256_ENCRYPT = CRYPT_S390_KM | 0x34,
KM_XTS_256_DECRYPT = CRYPT_S390_KM | 0x34 | 0x80,
};
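A worked example of this encoding, using the values defined above:

/*
 * KM_AES_128_ENCRYPT == CRYPT_S390_KM | 0x12 == 0x0112
 *   & CRYPT_S390_OP_MASK   -> 0x0100  (selects the KM instruction)
 *   & CRYPT_S390_FUNC_MASK -> 0x0012  (function code loaded into register 0)
 */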
/*
* function codes for KMC (CIPHER MESSAGE WITH CHAINING)
* instruction
*/
enum crypt_s390_kmc_func {
KMC_QUERY = CRYPT_S390_KMC | 0x0,
KMC_DEA_ENCRYPT = CRYPT_S390_KMC | 0x1,
KMC_DEA_DECRYPT = CRYPT_S390_KMC | 0x1 | 0x80,
KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 0x2,
KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 0x2 | 0x80,
KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 0x3,
KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 0x3 | 0x80,
KMC_AES_128_ENCRYPT = CRYPT_S390_KMC | 0x12,
KMC_AES_128_DECRYPT = CRYPT_S390_KMC | 0x12 | 0x80,
KMC_AES_192_ENCRYPT = CRYPT_S390_KMC | 0x13,
KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80,
KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14,
KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80,
KMC_PRNG = CRYPT_S390_KMC | 0x43,
};
/*
* function codes for KMCTR (CIPHER MESSAGE WITH COUNTER)
* instruction
*/
enum crypt_s390_kmctr_func {
KMCTR_QUERY = CRYPT_S390_KMCTR | 0x0,
KMCTR_DEA_ENCRYPT = CRYPT_S390_KMCTR | 0x1,
KMCTR_DEA_DECRYPT = CRYPT_S390_KMCTR | 0x1 | 0x80,
KMCTR_TDEA_128_ENCRYPT = CRYPT_S390_KMCTR | 0x2,
KMCTR_TDEA_128_DECRYPT = CRYPT_S390_KMCTR | 0x2 | 0x80,
KMCTR_TDEA_192_ENCRYPT = CRYPT_S390_KMCTR | 0x3,
KMCTR_TDEA_192_DECRYPT = CRYPT_S390_KMCTR | 0x3 | 0x80,
KMCTR_AES_128_ENCRYPT = CRYPT_S390_KMCTR | 0x12,
KMCTR_AES_128_DECRYPT = CRYPT_S390_KMCTR | 0x12 | 0x80,
KMCTR_AES_192_ENCRYPT = CRYPT_S390_KMCTR | 0x13,
KMCTR_AES_192_DECRYPT = CRYPT_S390_KMCTR | 0x13 | 0x80,
KMCTR_AES_256_ENCRYPT = CRYPT_S390_KMCTR | 0x14,
KMCTR_AES_256_DECRYPT = CRYPT_S390_KMCTR | 0x14 | 0x80,
};
/*
* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
* instruction
*/
enum crypt_s390_kimd_func {
KIMD_QUERY = CRYPT_S390_KIMD | 0,
KIMD_SHA_1 = CRYPT_S390_KIMD | 1,
KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
KIMD_GHASH = CRYPT_S390_KIMD | 65,
};
/*
* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
* instruction
*/
enum crypt_s390_klmd_func {
KLMD_QUERY = CRYPT_S390_KLMD | 0,
KLMD_SHA_1 = CRYPT_S390_KLMD | 1,
KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
KLMD_SHA_512 = CRYPT_S390_KLMD | 3,
};
/*
* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
* instruction
*/
enum crypt_s390_kmac_func {
KMAC_QUERY = CRYPT_S390_KMAC | 0,
KMAC_DEA = CRYPT_S390_KMAC | 1,
KMAC_TDEA_128 = CRYPT_S390_KMAC | 2,
KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
};
/*
* function codes for PPNO (PERFORM PSEUDORANDOM NUMBER
* OPERATION) instruction
*/
enum crypt_s390_ppno_func {
PPNO_QUERY = CRYPT_S390_PPNO | 0,
PPNO_SHA512_DRNG_GEN = CRYPT_S390_PPNO | 3,
PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83
};
/**
* crypt_s390_km:
* @func: the function code passed to KM; see crypt_s390_km_func
* @param: address of parameter block; see POP for details on each func
* @dest: address of destination memory area
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Executes the KM (CIPHER MESSAGE) operation of the CPU.
*
* Returns -1 for failure, 0 for the query func, number of processed
* bytes for encryption/decryption funcs
*/
static inline int crypt_s390_km(long func, void *param,
u8 *dest, const u8 *src, long src_len)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void *__param asm("1") = param;
register const u8 *__src asm("2") = src;
register long __src_len asm("3") = src_len;
register u8 *__dest asm("4") = dest;
int ret;
asm volatile(
"0: .insn rre,0xb92e0000,%3,%1\n" /* KM opcode */
"1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
return ret;
return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/**
* crypt_s390_kmc:
* @func: the function code passed to KMC; see crypt_s390_kmc_func
* @param: address of parameter block; see POP for details on each func
* @dest: address of destination memory area
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
*
* Returns -1 for failure, 0 for the query func, number of processed
* bytes for encryption/decryption funcs
*/
static inline int crypt_s390_kmc(long func, void *param,
u8 *dest, const u8 *src, long src_len)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void *__param asm("1") = param;
register const u8 *__src asm("2") = src;
register long __src_len asm("3") = src_len;
register u8 *__dest asm("4") = dest;
int ret;
asm volatile(
"0: .insn rre,0xb92f0000,%3,%1\n" /* KMC opcode */
"1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
return ret;
return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/**
* crypt_s390_kimd:
* @func: the function code passed to KIMD; see crypt_s390_kimd_func
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
* of the CPU.
*
* Returns -1 for failure, 0 for the query func, number of processed
* bytes for digest funcs
*/
static inline int crypt_s390_kimd(long func, void *param,
const u8 *src, long src_len)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void *__param asm("1") = param;
register const u8 *__src asm("2") = src;
register long __src_len asm("3") = src_len;
int ret;
asm volatile(
"0: .insn rre,0xb93e0000,%1,%1\n" /* KIMD opcode */
"1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
return ret;
return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/**
* crypt_s390_klmd:
* @func: the function code passed to KLMD; see crypt_s390_klmd_func
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
*
* Returns -1 for failure, 0 for the query func, number of processed
* bytes for digest funcs
*/
static inline int crypt_s390_klmd(long func, void *param,
const u8 *src, long src_len)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void *__param asm("1") = param;
register const u8 *__src asm("2") = src;
register long __src_len asm("3") = src_len;
int ret;
asm volatile(
"0: .insn rre,0xb93f0000,%1,%1\n" /* KLMD opcode */
"1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
return ret;
return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/**
* crypt_s390_kmac:
* @func: the function code passed to KMAC; see crypt_s390_kmac_func
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
* of the CPU.
*
* Returns -1 for failure, 0 for the query func, number of processed
* bytes for digest funcs
*/
static inline int crypt_s390_kmac(long func, void *param,
const u8 *src, long src_len)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void *__param asm("1") = param;
register const u8 *__src asm("2") = src;
register long __src_len asm("3") = src_len;
int ret;
asm volatile(
"0: .insn rre,0xb91e0000,%1,%1\n" /* KLAC opcode */
"1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
return ret;
return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/**
* crypt_s390_kmctr:
* @func: the function code passed to KMCTR; see crypt_s390_kmctr_func
* @param: address of parameter block; see POP for details on each func
* @dest: address of destination memory area
* @src: address of source memory area
* @src_len: length of src operand in bytes
* @counter: address of counter value
*
* Executes the KMCTR (CIPHER MESSAGE WITH COUNTER) operation of the CPU.
*
* Returns -1 for failure, 0 for the query func, number of processed
* bytes for encryption/decryption funcs
*/
static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
const u8 *src, long src_len, u8 *counter)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void *__param asm("1") = param;
register const u8 *__src asm("2") = src;
register long __src_len asm("3") = src_len;
register u8 *__dest asm("4") = dest;
register u8 *__ctr asm("6") = counter;
int ret = -1;
asm volatile(
"0: .insn rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */
"1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
"+a" (__ctr)
: "d" (__func), "a" (__param) : "cc", "memory");
if (ret < 0)
return ret;
return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/**
* crypt_s390_ppno:
* @func: the function code passed to PPNO; see crypt_s390_ppno_func
* @param: address of parameter block; see POP for details on each func
* @dest: address of destination memory area
* @dest_len: size of destination memory area in bytes
* @seed: address of seed data
* @seed_len: size of seed data in bytes
*
* Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
* operation of the CPU.
*
* Returns -1 for failure, 0 for the query func, number of random
* bytes stored in dest buffer for generate function
*/
static inline int crypt_s390_ppno(long func, void *param,
u8 *dest, long dest_len,
const u8 *seed, long seed_len)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void *__param asm("1") = param; /* param block (240 bytes) */
register u8 *__dest asm("2") = dest; /* buf for recv random bytes */
register long __dest_len asm("3") = dest_len; /* requested random bytes */
register const u8 *__seed asm("4") = seed; /* buf with seed data */
register long __seed_len asm("5") = seed_len; /* bytes in seed buf */
int ret = -1;
asm volatile (
"0: .insn rre,0xb93c0000,%1,%5\n" /* PPNO opcode */
"1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (ret), "+a"(__dest), "+d"(__dest_len)
: "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len)
: "cc", "memory");
if (ret < 0)
return ret;
return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0;
}
/**
* crypt_s390_func_available:
* @func: the function code of the specific function; 0 to test the op in general
* @facility_mask: the CRYPT_S390_MSAx facility bits required for @func
*
* Tests if a specific crypto function is implemented on the machine.
*
* Returns 1 if func available; 0 if func or op in general not available
*/
static inline int crypt_s390_func_available(int func,
unsigned int facility_mask)
{
unsigned char status[16];
int ret;
if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
return 0;
if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
return 0;
if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
return 0;
if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57))
return 0;
switch (func & CRYPT_S390_OP_MASK) {
case CRYPT_S390_KM:
ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
break;
case CRYPT_S390_KMC:
ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
break;
case CRYPT_S390_KIMD:
ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
break;
case CRYPT_S390_KLMD:
ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
break;
case CRYPT_S390_KMAC:
ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
break;
case CRYPT_S390_KMCTR:
ret = crypt_s390_kmctr(KMCTR_QUERY, &status,
NULL, NULL, 0, NULL);
break;
case CRYPT_S390_PPNO:
ret = crypt_s390_ppno(PPNO_QUERY, &status,
NULL, 0, NULL, 0);
break;
default:
return 0;
}
if (ret < 0)
return 0;
func &= CRYPT_S390_FUNC_MASK;
func &= 0x7f; /* mask modifier bit */
return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}
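A worked instance of the bit test above:

/*
 * func = KM_AES_128_ENCRYPT (0x0112) is masked down to 0x12, so the test
 * reads status[0x12 >> 3] = status[2] against 0x80 >> (0x12 & 7) = 0x20:
 * KM AES-128 is available iff bit 0x20 of status byte 2 is set.
 */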
/**
* crypt_s390_pcc:
* @func: the function code passed to PCC; see crypt_s390_km_func
* @param: address of parameter block; see POP for details on each func
*
* Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
*
* Returns -1 for failure, 0 for success.
*/
static inline int crypt_s390_pcc(long func, void *param)
{
register long __func asm("0") = func & 0x7f; /* encrypt or decrypt */
register void *__param asm("1") = param;
int ret = -1;
asm volatile(
"0: .insn rre,0xb92c0000,0,0\n" /* PCC opcode */
"1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (ret)
: "d" (__func), "a" (__param) : "cc", "memory");
return ret;
}
#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
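For orientation, the mechanical translation this commit applies at every call site; both halves are lifted from the aes_s390.c hunks above:

/* old API (this header): */
if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
	crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, AES_BLOCK_SIZE);
/* new API (<asm/cpacf.h>): */
if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
	cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in, AES_BLOCK_SIZE);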
@@ -20,8 +20,7 @@
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
@@ -54,20 +53,20 @@ static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_ENCRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_DEA_ENC, ctx->key, out, in, DES_BLOCK_SIZE);
}
static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_DEA_DEC, ctx->key, out, in, DES_BLOCK_SIZE);
}
static struct crypto_alg des_alg = {
.cra_name = "des",
.cra_driver_name = "des-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -95,7 +94,7 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, key, out, in, n);
ret = cpacf_km(func, key, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -128,7 +127,7 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, &param, out, in, n);
ret = cpacf_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -149,7 +148,7 @@ static int ecb_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_DEA_ENC, ctx->key, &walk);
}
static int ecb_des_decrypt(struct blkcipher_desc *desc,
@@ -160,13 +159,13 @@ static int ecb_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_DEA_DEC, ctx->key, &walk);
}
static struct crypto_alg ecb_des_alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -190,7 +189,7 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_DEA_ENC, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
@@ -200,13 +199,13 @@ static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_DEA_DEC, &walk);
}
static struct crypto_alg cbc_des_alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -258,20 +257,20 @@ static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_ENCRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_TDEA_192_ENC, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_TDEA_192_DEC, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static struct crypto_alg des3_alg = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -295,7 +294,7 @@ static int ecb_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_ENC, ctx->key, &walk);
}
static int ecb_des3_decrypt(struct blkcipher_desc *desc,
@@ -306,13 +305,13 @@ static int ecb_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_DEC, ctx->key, &walk);
}
static struct crypto_alg ecb_des3_alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des3 + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -336,7 +335,7 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_ENC, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
@@ -346,13 +345,13 @@ static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_DEC, &walk);
}
static struct crypto_alg cbc_des3_alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des3 + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -407,8 +406,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
n = __ctrblk_init(ctrptr, nbytes);
else
n = DES_BLOCK_SIZE;
ret = crypt_s390_kmctr(func, ctx->key, out, in,
n, ctrptr);
ret = cpacf_kmctr(func, ctx->key, out, in, n, ctrptr);
if (ret < 0 || ret != n) {
if (ctrptr == ctrblk)
spin_unlock(&ctrblk_lock);
@@ -438,7 +436,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
ret = cpacf_kmctr(func, ctx->key, buf, in,
DES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != DES_BLOCK_SIZE)
return -EIO;
@@ -458,7 +456,7 @@ static int ctr_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_ENC, ctx, &walk);
}
static int ctr_des_decrypt(struct blkcipher_desc *desc,
@@ -469,13 +467,13 @@ static int ctr_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_DEC, ctx, &walk);
}
static struct crypto_alg ctr_des_alg = {
.cra_name = "ctr(des)",
.cra_driver_name = "ctr-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -501,7 +499,7 @@ static int ctr_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_ENC, ctx, &walk);
}
static int ctr_des3_decrypt(struct blkcipher_desc *desc,
@@ -512,13 +510,13 @@ static int ctr_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_DEC, ctx, &walk);
}
static struct crypto_alg ctr_des3_alg = {
.cra_name = "ctr(des3_ede)",
.cra_driver_name = "ctr-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des3 + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -540,8 +538,8 @@ static int __init des_s390_init(void)
{
int ret;
if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) ||
!crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KM, CPACF_KM_DEA_ENC) ||
!cpacf_query(CPACF_KM, CPACF_KM_TDEA_192_ENC))
return -EOPNOTSUPP;
ret = crypto_register_alg(&des_alg);
@@ -563,10 +561,8 @@ static int __init des_s390_init(void)
if (ret)
goto cbc_des3_err;
if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_DEA_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_TDEA_192_ENC)) {
ret = crypto_register_alg(&ctr_des_alg);
if (ret)
goto ctr_des_err;
......
@@ -10,8 +10,7 @@
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
@@ -72,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
src += n;
if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
@@ -81,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
if (ret != n)
return -EIO;
src += n;
@@ -106,7 +105,7 @@ static int ghash_flush(struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
@@ -137,7 +136,7 @@ static struct shash_alg ghash_alg = {
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
@@ -147,8 +146,7 @@ static struct shash_alg ghash_alg = {
static int __init ghash_mod_init(void)
{
if (!crypt_s390_func_available(KIMD_GHASH,
CRYPT_S390_MSA | CRYPT_S390_MSA4))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
return -EOPNOTSUPP;
return crypto_register_shash(&ghash_alg);
......
@@ -23,8 +23,7 @@
#include <asm/debug.h>
#include <asm/uaccess.h>
#include <asm/timex.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
@@ -136,7 +135,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
else
h = ebuf;
/* generate sha256 from this page */
if (crypt_s390_kimd(KIMD_SHA_256, h,
if (cpacf_kimd(CPACF_KIMD_SHA_256, h,
pg, PAGE_SIZE) != PAGE_SIZE) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
ret = -EIO;
@@ -164,7 +163,7 @@ static void prng_tdes_add_entropy(void)
int ret;
for (i = 0; i < 16; i++) {
ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
ret = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
(char *)entropy, (char *)entropy,
sizeof(entropy));
BUG_ON(ret < 0 || ret != sizeof(entropy));
@@ -311,8 +310,7 @@ static int __init prng_sha512_selftest(void)
memset(&ws, 0, sizeof(ws));
/* initial seed */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
&ws, NULL, 0,
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED, &ws, NULL, 0,
seed, sizeof(seed));
if (ret < 0) {
pr_err("The prng self test seed operation for the "
@@ -331,18 +329,16 @@ static int __init prng_sha512_selftest(void)
}
/* generate random bytes */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf),
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf), NULL, 0);
if (ret < 0) {
pr_err("The prng self test generate operation for "
"the SHA-512 mode failed with rc=%d\n", ret);
prng_errorflag = PRNG_SELFTEST_FAILED;
return -EIO;
}
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf),
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf), NULL, 0);
if (ret < 0) {
pr_err("The prng self test generate operation for "
"the SHA-512 mode failed with rc=%d\n", ret);
@@ -396,9 +392,8 @@ static int __init prng_sha512_instantiate(void)
get_tod_clock_ext(seed + 48);
/* initial seed of the ppno drng */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0,
seed, sizeof(seed));
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0, seed, sizeof(seed));
if (ret < 0) {
prng_errorflag = PRNG_SEED_FAILED;
ret = -EIO;
@@ -409,11 +404,9 @@ static int __init prng_sha512_instantiate(void)
bytes for the FIPS 140-2 Conditional Self Test */
if (fips_enabled) {
prng_data->prev = prng_data->buf + prng_chunk_size;
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows,
prng_data->prev,
prng_chunk_size,
NULL, 0);
prng_data->prev, prng_chunk_size, NULL, 0);
if (ret < 0 || ret != prng_chunk_size) {
prng_errorflag = PRNG_GEN_FAILED;
ret = -EIO;
@@ -447,9 +440,8 @@ static int prng_sha512_reseed(void)
return ret;
/* do a reseed of the ppno drng with this bytestring */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0,
seed, sizeof(seed));
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0, seed, sizeof(seed));
if (ret) {
prng_errorflag = PRNG_RESEED_FAILED;
return -EIO;
@@ -471,9 +463,8 @@ static int prng_sha512_generate(u8 *buf, size_t nbytes)
}
/* PPNO generate */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows, buf, nbytes,
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows, buf, nbytes, NULL, 0);
if (ret < 0 || ret != nbytes) {
prng_errorflag = PRNG_GEN_FAILED;
return -EIO;
@@ -555,7 +546,7 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
* Note: you can still get strict X9.17 conformity by setting
* prng_chunk_size to 8 bytes.
*/
tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
tmp = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
prng_data->buf, prng_data->buf, n);
if (tmp < 0 || tmp != n) {
ret = -EIO;
@@ -815,14 +806,13 @@ static int __init prng_init(void)
int ret;
/* check if the CPU has a PRNG */
if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KMC, CPACF_KMC_PRNG))
return -EOPNOTSUPP;
/* choose prng mode */
if (prng_mode != PRNG_MODE_TDES) {
/* check for MSA5 support for PPNO operations */
if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN,
CRYPT_S390_MSA5)) {
if (!cpacf_query(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
if (prng_mode == PRNG_MODE_SHA512) {
pr_err("The prng module cannot "
"start in SHA-512 mode\n");
......
@@ -28,8 +28,8 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <asm/cpacf.h>
#include "crypt_s390.h"
#include "sha.h"
static int sha1_init(struct shash_desc *desc)
@@ -42,7 +42,7 @@ static int sha1_init(struct shash_desc *desc)
sctx->state[3] = SHA1_H3;
sctx->state[4] = SHA1_H4;
sctx->count = 0;
sctx->func = KIMD_SHA_1;
sctx->func = CPACF_KIMD_SHA_1;
return 0;
}
@@ -66,7 +66,7 @@ static int sha1_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
sctx->func = KIMD_SHA_1;
sctx->func = CPACF_KIMD_SHA_1;
return 0;
}
@@ -82,7 +82,7 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -91,7 +91,7 @@ static struct shash_alg alg = {
static int __init sha1_s390_init(void)
{
if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_1))
return -EOPNOTSUPP;
return crypto_register_shash(&alg);
}
......
@@ -18,8 +18,8 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <asm/cpacf.h>
#include "crypt_s390.h"
#include "sha.h"
static int sha256_init(struct shash_desc *desc)
@@ -35,7 +35,7 @@ static int sha256_init(struct shash_desc *desc)
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
sctx->count = 0;
sctx->func = KIMD_SHA_256;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
@@ -59,7 +59,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = KIMD_SHA_256;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
@@ -75,7 +75,7 @@ static struct shash_alg sha256_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -95,7 +95,7 @@ static int sha224_init(struct shash_desc *desc)
sctx->state[6] = SHA224_H6;
sctx->state[7] = SHA224_H7;
sctx->count = 0;
sctx->func = KIMD_SHA_256;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
@@ -112,7 +112,7 @@ static struct shash_alg sha224_alg = {
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -123,7 +123,7 @@ static int __init sha256_s390_init(void)
{
int ret;
if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
return -EOPNOTSUPP;
ret = crypto_register_shash(&sha256_alg);
if (ret < 0)
......
@@ -19,9 +19,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <asm/cpacf.h>
#include "sha.h"
#include "crypt_s390.h"
static int sha512_init(struct shash_desc *desc)
{
@@ -36,7 +36,7 @@ static int sha512_init(struct shash_desc *desc)
*(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL;
*(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL;
ctx->count = 0;
ctx->func = KIMD_SHA_512;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
}
@@ -64,7 +64,7 @@ static int sha512_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = KIMD_SHA_512;
sctx->func = CPACF_KIMD_SHA_512;
return 0;
}
@@ -80,7 +80,7 @@ static struct shash_alg sha512_alg = {
.base = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -102,7 +102,7 @@ static int sha384_init(struct shash_desc *desc)
*(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL;
*(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL;
ctx->count = 0;
ctx->func = KIMD_SHA_512;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
}
@@ -119,7 +119,7 @@ static struct shash_alg sha384_alg = {
.base = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
@@ -133,7 +133,7 @@ static int __init init(void)
{
int ret;
if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_512))
return -EOPNOTSUPP;
if ((ret = crypto_register_shash(&sha512_alg)) < 0)
goto out;
......
@@ -15,8 +15,8 @@
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <asm/cpacf.h>
#include "sha.h"
#include "crypt_s390.h"
int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
@@ -35,7 +35,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
if (ret != bsize)
return -EIO;
data += bsize - index;
@@ -45,7 +45,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process as many blocks as possible */
if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data,
ret = cpacf_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
if (ret != (len & ~(bsize - 1)))
return -EIO;
@@ -89,7 +89,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
bits = ctx->count * 8;
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
if (ret != end)
return -EIO;
......
/*
* CP Assist for Cryptographic Functions (CPACF)
*
* Copyright IBM Corp. 2003, 2016
* Author(s): Thomas Spatzier
* Jan Glauber
* Harald Freudenberger (freude@de.ibm.com)
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _ASM_S390_CPACF_H
#define _ASM_S390_CPACF_H
#include <asm/facility.h>
/*
* Instruction opcodes for the CPACF instructions
*/
#define CPACF_KMAC 0xb91e /* MSA */
#define CPACF_KM 0xb92e /* MSA */
#define CPACF_KMC 0xb92f /* MSA */
#define CPACF_KIMD 0xb93e /* MSA */
#define CPACF_KLMD 0xb93f /* MSA */
#define CPACF_PCC 0xb92c /* MSA4 */
#define CPACF_KMCTR 0xb92d /* MSA4 */
#define CPACF_PPNO 0xb93c /* MSA5 */
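These opcodes are the high halfword of each instruction; the .insn templates below build the full opcode as `%[opc] << 16`, which reproduces the constants the old crypt_s390.h hard-coded, e.g.:

/* CPACF_KM << 16 == 0xb92e0000, the KM opcode crypt_s390_km() used */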
/*
* Function codes for the KM (CIPHER MESSAGE)
* instruction (0x80 is the decipher modifier bit)
*/
#define CPACF_KM_QUERY 0x00
#define CPACF_KM_DEA_ENC 0x01
#define CPACF_KM_DEA_DEC 0x81
#define CPACF_KM_TDEA_128_ENC 0x02
#define CPACF_KM_TDEA_128_DEC 0x82
#define CPACF_KM_TDEA_192_ENC 0x03
#define CPACF_KM_TDEA_192_DEC 0x83
#define CPACF_KM_AES_128_ENC 0x12
#define CPACF_KM_AES_128_DEC 0x92
#define CPACF_KM_AES_192_ENC 0x13
#define CPACF_KM_AES_192_DEC 0x93
#define CPACF_KM_AES_256_ENC 0x14
#define CPACF_KM_AES_256_DEC 0x94
#define CPACF_KM_XTS_128_ENC 0x32
#define CPACF_KM_XTS_128_DEC 0xb2
#define CPACF_KM_XTS_256_ENC 0x34
#define CPACF_KM_XTS_256_DEC 0xb4
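Each decipher code is the encipher code with the 0x80 modifier bit set; xts_aes_crypt() in the hunk above relies on this when it masks `func & 0x7f` before calling cpacf_pcc():

/* CPACF_KM_XTS_128_DEC & 0x7f == 0xb2 & 0x7f == 0x32 == CPACF_KM_XTS_128_ENC */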
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
* instruction (0x80 is the decipher modifier bit)
*/
#define CPACF_KMC_QUERY 0x00
#define CPACF_KMC_DEA_ENC 0x01
#define CPACF_KMC_DEA_DEC 0x81
#define CPACF_KMC_TDEA_128_ENC 0x02
#define CPACF_KMC_TDEA_128_DEC 0x82
#define CPACF_KMC_TDEA_192_ENC 0x03
#define CPACF_KMC_TDEA_192_DEC 0x83
#define CPACF_KMC_AES_128_ENC 0x12
#define CPACF_KMC_AES_128_DEC 0x92
#define CPACF_KMC_AES_192_ENC 0x13
#define CPACF_KMC_AES_192_DEC 0x93
#define CPACF_KMC_AES_256_ENC 0x14
#define CPACF_KMC_AES_256_DEC 0x94
#define CPACF_KMC_PRNG 0x43
/*
* Function codes for the KMCTR (CIPHER MESSAGE WITH COUNTER)
* instruction (0x80 is the decipher modifier bit)
*/
#define CPACF_KMCTR_QUERY 0x00
#define CPACF_KMCTR_DEA_ENC 0x01
#define CPACF_KMCTR_DEA_DEC 0x81
#define CPACF_KMCTR_TDEA_128_ENC 0x02
#define CPACF_KMCTR_TDEA_128_DEC 0x82
#define CPACF_KMCTR_TDEA_192_ENC 0x03
#define CPACF_KMCTR_TDEA_192_DEC 0x83
#define CPACF_KMCTR_AES_128_ENC 0x12
#define CPACF_KMCTR_AES_128_DEC 0x92
#define CPACF_KMCTR_AES_192_ENC 0x13
#define CPACF_KMCTR_AES_192_DEC 0x93
#define CPACF_KMCTR_AES_256_ENC 0x14
#define CPACF_KMCTR_AES_256_DEC 0x94
/*
* Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
* instruction
*/
#define CPACF_KIMD_QUERY 0x00
#define CPACF_KIMD_SHA_1 0x01
#define CPACF_KIMD_SHA_256 0x02
#define CPACF_KIMD_SHA_512 0x03
#define CPACF_KIMD_GHASH 0x41
/*
* Function codes for the KLMD (COMPUTE LAST MESSAGE DIGEST)
* instruction
*/
#define CPACF_KLMD_QUERY 0x00
#define CPACF_KLMD_SHA_1 0x01
#define CPACF_KLMD_SHA_256 0x02
#define CPACF_KLMD_SHA_512 0x03
/*
* Function codes for the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
* instruction
*/
#define CPACF_KMAC_QUERY 0x00
#define CPACF_KMAC_DEA 0x01
#define CPACF_KMAC_TDEA_128 0x02
#define CPACF_KMAC_TDEA_192 0x03
/*
* Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
* instruction (0x80 is the modifier bit: 0x03 generates, 0x83 seeds)
*/
#define CPACF_PPNO_QUERY 0x00
#define CPACF_PPNO_SHA512_DRNG_GEN 0x03
#define CPACF_PPNO_SHA512_DRNG_SEED 0x83
/**
* cpacf_query() - check if a specific CPACF function is available
* @opcode: the opcode of the crypto instruction
* @func: the function code to test for
*
* Executes the query function for the given crypto instruction @opcode
* and checks if @func is available
*
* Returns 1 if @func is available for @opcode, 0 otherwise
*/
static inline void __cpacf_query(unsigned int opcode, unsigned char *status)
{
typedef struct { unsigned char _[16]; } status_type;
register unsigned long r0 asm("0") = 0; /* query function */
register unsigned long r1 asm("1") = (unsigned long) status;
asm volatile(
/* Parameter registers are ignored, but may not be 0 */
"0: .insn rrf,%[opc] << 16,2,2,2,0\n"
" brc 1,0b\n" /* handle partial completion */
: "=m" (*(status_type *) status)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (opcode)
: "cc");
}
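The status block filled in by the query is a 128-bit map, most significant bit first: bit n is set when function code n is installed. For example:

/* CPACF_KIMD_GHASH = 0x41: tested as status[0x41 >> 3] = status[8] against 0x80 >> (0x41 & 7) = 0x40 */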
static inline int cpacf_query(unsigned int opcode, unsigned int func)
{
unsigned char status[16];
switch (opcode) {
case CPACF_KMAC:
case CPACF_KM:
case CPACF_KMC:
case CPACF_KIMD:
case CPACF_KLMD:
if (!test_facility(17)) /* check for MSA */
return 0;
break;
case CPACF_PCC:
case CPACF_KMCTR:
if (!test_facility(77)) /* check for MSA4 */
return 0;
break;
case CPACF_PPNO:
if (!test_facility(57)) /* check for MSA5 */
return 0;
break;
default:
BUG();
}
__cpacf_query(opcode, status);
return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}
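A minimal sketch of the resulting driver-init pattern (the function name is illustrative; compare sha256_s390_init() in the hunks above):

static int __init example_init(void)
{
	/* bail out when the machine lacks the required CPACF function */
	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
		return -EOPNOTSUPP;
	return 0;	/* register the crypto algorithms here */
}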
/**
* cpacf_km() - executes the KM (CIPHER MESSAGE) instruction
* @func: the function code passed to KM; see CPACF_KM_xxx defines
* @param: address of parameter block; see POP for details on each func
* @dest: address of destination memory area
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for
* encryption/decryption funcs
*/
static inline int cpacf_km(long func, void *param,
u8 *dest, const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
register unsigned long r2 asm("2") = (unsigned long) src;
register unsigned long r3 asm("3") = (unsigned long) src_len;
register unsigned long r4 asm("4") = (unsigned long) dest;
asm volatile(
"0: .insn rre,%[opc] << 16,%[dst],%[src]\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KM)
: "cc", "memory");
return src_len - r3;
}
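And a sketch of the data path, mirroring ecb_aes_crypt() above. cpacf_km() itself loops until the instruction completes, but the drivers keep a defensive length check; AES_BLOCK_SIZE comes from <crypto/aes.h>, and the function name is illustrative:

static int example_km_one_block(void *key, u8 *out, const u8 *in)
{
	/* encrypt one 16-byte block with AES-128 in ECB mode via KM */
	int ret = cpacf_km(CPACF_KM_AES_128_ENC, key, out, in, AES_BLOCK_SIZE);

	return (ret == AES_BLOCK_SIZE) ? 0 : -EIO;
}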
/**
* cpacf_kmc() - executes the KMC (CIPHER MESSAGE WITH CHAINING) instruction
* @func: the function code passed to KMC; see CPACF_KMC_xxx defines
* @param: address of parameter block; see POP for details on each func
* @dest: address of destination memory area
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for
* encryption/decryption funcs
*/
static inline int cpacf_kmc(long func, void *param,
u8 *dest, const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
register unsigned long r2 asm("2") = (unsigned long) src;
register unsigned long r3 asm("3") = (unsigned long) src_len;
register unsigned long r4 asm("4") = (unsigned long) dest;
asm volatile(
"0: .insn rre,%[opc] << 16,%[dst],%[src]\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMC)
: "cc", "memory");
return src_len - r3;
}
/**
* cpacf_kimd() - executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
* instruction
* @func: the function code passed to KIMD; see CPACF_KIMD_xxx defines
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
static inline int cpacf_kimd(long func, void *param,
const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
register unsigned long r2 asm("2") = (unsigned long) src;
register unsigned long r3 asm("3") = (unsigned long) src_len;
asm volatile(
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+a" (r2), [len] "+d" (r3)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KIMD)
: "cc", "memory");
return src_len - r3;
}
/**
* cpacf_klmd() - executes the KLMD (COMPUTE LAST MESSAGE DIGEST) instruction
* @func: the function code passed to KLMD; see CPACF_KLMD_xxx defines
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
static inline int cpacf_klmd(long func, void *param,
const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
register unsigned long r2 asm("2") = (unsigned long) src;
register unsigned long r3 asm("3") = (unsigned long) src_len;
asm volatile(
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+a" (r2), [len] "+d" (r3)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KLMD)
: "cc", "memory");
return src_len - r3;
}
/**
* cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
* instruction
* @func: the function code passed to KMAC; see CPACF_KMAC_xxx defines
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
static inline int cpacf_kmac(long func, void *param,
const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
register unsigned long r2 asm("2") = (unsigned long) src;
register unsigned long r3 asm("3") = (unsigned long) src_len;
asm volatile(
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+a" (r2), [len] "+d" (r3)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMAC)
: "cc", "memory");
return src_len - r3;
}
/**
* cpacf_kmctr() - executes the KMCTR (CIPHER MESSAGE WITH COUNTER) instruction
* @func: the function code passed to KMCTR; see CPACF_KMCTR_xxx defines
* @param: address of parameter block; see POP for details on each func
* @dest: address of destination memory area
* @src: address of source memory area
* @src_len: length of src operand in bytes
* @counter: address of counter value
*
* Returns 0 for the query func, number of processed bytes for
* encryption/decryption funcs
*/
static inline int cpacf_kmctr(long func, void *param, u8 *dest,
const u8 *src, long src_len, u8 *counter)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
register unsigned long r2 asm("2") = (unsigned long) src;
register unsigned long r3 asm("3") = (unsigned long) src_len;
register unsigned long r4 asm("4") = (unsigned long) dest;
register unsigned long r6 asm("6") = (unsigned long) counter;
asm volatile(
"0: .insn rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+a" (r2), [len] "+d" (r3),
[dst] "+a" (r4), [ctr] "+a" (r6)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMCTR)
: "cc", "memory");
return src_len - r3;
}
/**
* cpacf_ppno() - executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
* instruction
* @func: the function code passed to PPNO; see CPACF_PPNO_xxx defines
* @param: address of parameter block; see POP for details on each func
* @dest: address of destination memory area
* @dest_len: size of destination memory area in bytes
* @seed: address of seed data
* @seed_len: size of seed data in bytes
*
* Returns 0 for the query func, number of random bytes stored in
* dest buffer for generate function
*/
static inline int cpacf_ppno(long func, void *param,
u8 *dest, long dest_len,
const u8 *seed, long seed_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
register unsigned long r2 asm("2") = (unsigned long) dest;
register unsigned long r3 asm("3") = (unsigned long) dest_len;
register unsigned long r4 asm("4") = (unsigned long) seed;
register unsigned long r5 asm("5") = (unsigned long) seed_len;
asm volatile (
"0: .insn rre,%[opc] << 16,%[dst],%[seed]\n"
" brc 1,0b\n" /* handle partial completion */
: [dst] "+a" (r2), [dlen] "+d" (r3)
: [fc] "d" (r0), [pba] "a" (r1),
[seed] "a" (r4), [slen] "d" (r5), [opc] "i" (CPACF_PPNO)
: "cc", "memory");
return dest_len - r3;
}
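/*
 * Illustrative usage sketch (not part of this patch): the PPNO
 * generate functions fill dest with dest_len random bytes and return
 * the number of bytes stored. CPACF_PPNO_SHA512_DRNG_GEN stands for
 * one of the CPACF_PPNO_xxx defines and is an assumption here; the
 * parameter block holds the DRNG state described in the POP.
 */
static int ppno_generate(u8 *drng_state, u8 *buf, long len)
{
	int n;

	n = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN, drng_state,
		       buf, len, NULL, 0);
	return (n != len) ? -EIO : 0;
}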
/**
* cpacf_pcc() - executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION)
* instruction
* @func: the function code passed to PCC; see CPACF_KM_xxx defines
* @param: address of parameter block; see POP for details on each func
*
* Returns 0.
*/
static inline int cpacf_pcc(long func, void *param)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
asm volatile(
"0: .insn rre,%[opc] << 16,0,0\n" /* PCC opcode */
" brc 1,0b\n" /* handle partial completion */
:
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCC)
: "cc", "memory");
return 0;
}
#endif /* _ASM_S390_CPACF_H */
......@@ -12,10 +12,12 @@
struct fpu {
__u32 fpc; /* Floating-point control */
void *regs; /* Pointer to the current save area */
union {
void *regs;
freg_t *fprs; /* Floating-point register save area */
__vector128 *vxrs; /* Vector register save area */
/* Floating-point register save area */
freg_t fprs[__NUM_FPRS];
/* Vector register save area */
__vector128 vxrs[__NUM_VXRS];
};
};
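/*
 * Illustrative note (not part of this patch): the save area is now
 * embedded in the structure and 'regs' points into the union, e.g.
 *
 *	tsk->thread.fpu.regs = tsk->thread.fpu.fprs;
 *
 * so the per-task kzalloc()/kfree() of a separate save area (removed
 * further down in this series) is no longer needed.
 */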
......
......@@ -12,7 +12,9 @@
#ifndef __ASSEMBLY__
#define ftrace_return_address(n) __builtin_return_address(n)
unsigned long return_address(int depth);
#define ftrace_return_address(n) return_address(n)
void _mcount(void);
void ftrace_caller(void);
......
......@@ -31,20 +31,41 @@ int pci_proc_domain(struct pci_bus *);
#define ZPCI_FC_BLOCKED 0x20
#define ZPCI_FC_DMA_ENABLED 0x10
#define ZPCI_FMB_DMA_COUNTER_VALID (1 << 23)
struct zpci_fmb_fmt0 {
u64 dma_rbytes;
u64 dma_wbytes;
};
struct zpci_fmb_fmt1 {
u64 rx_bytes;
u64 rx_packets;
u64 tx_bytes;
u64 tx_packets;
};
struct zpci_fmb_fmt2 {
u64 consumed_work_units;
u64 max_work_units;
};
struct zpci_fmb {
u32 format : 8;
u32 dma_valid : 1;
u32 : 23;
u32 fmt_ind : 24;
u32 samples;
u64 last_update;
/* hardware counters */
/* common counters */
u64 ld_ops;
u64 st_ops;
u64 stb_ops;
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
u64 pad[2];
/* format specific counters */
union {
struct zpci_fmb_fmt0 fmt0;
struct zpci_fmb_fmt1 fmt1;
struct zpci_fmb_fmt2 fmt2;
};
} __packed __aligned(128);
enum zpci_state {
......
......@@ -105,7 +105,6 @@ typedef struct {
* Thread structure
*/
struct thread_struct {
struct fpu fpu; /* FP and VX register save area */
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
mm_segment_t mm_segment;
......@@ -120,6 +119,11 @@ struct thread_struct {
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
/*
* Warning: 'fpu' is dynamically-sized. It *MUST* be at
* the end.
*/
struct fpu fpu; /* FP and VX register save area */
};
/* Flag to disable transactions. */
......@@ -155,10 +159,9 @@ struct stack_frame {
#define ARCH_MIN_TASKALIGN 8
extern __vector128 init_task_fpu_regs[__NUM_VXRS];
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
.fpu.regs = (void *)&init_task_fpu_regs, \
.fpu.regs = (void *) init_task.thread.fpu.fprs, \
}
/*
......
......@@ -72,6 +72,18 @@ struct sclp_info {
};
extern struct sclp_info sclp;
struct zpci_report_error_header {
u8 version; /* Interface version byte */
u8 action; /* Action qualifier byte
* 1: Deconfigure and repair action requested
* (OpenCrypto Problem Call Home)
* 2: Informational Report
* (OpenCrypto Successful Diagnostics Execution)
*/
u16 length; /* Length of Subsequent Data (up to 4K - SCLP header) */
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;
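/*
 * Illustrative user-space sketch (not part of this patch): user space
 * prepends this header to the adapter-error data it writes into the
 * new per-function "report_error" sysfs attribute (added later in this
 * series). The sysfs path and the version value are assumptions, and
 * the structure layout above is assumed to be mirrored in user space.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int report_error_example(const void *data, unsigned short len)
{
	unsigned char buf[4096];
	struct zpci_report_error_header *hdr = (void *) buf;
	int fd, rc;

	if (len > sizeof(buf) - sizeof(*hdr))
		return -1;
	hdr->version = 1;	/* assumed interface version */
	hdr->action = 2;	/* 2: informational report */
	hdr->length = len;
	memcpy(hdr->data, data, len);

	fd = open("/sys/bus/pci/devices/0000:00:00.0/report_error", O_WRONLY);
	if (fd < 0)
		return -1;
	rc = write(fd, buf, sizeof(*hdr) + len);
	close(fd);
	return rc < 0 ? -1 : 0;
}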
int sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);
......@@ -83,6 +95,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
void sclp_early_detect(void);
......
......@@ -62,6 +62,7 @@ static inline struct thread_info *current_thread_info(void)
}
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define THREAD_SIZE_ORDER THREAD_ORDER
......
......@@ -187,6 +187,36 @@ typedef struct format_data_t {
#define DASD_FMT_INT_INVAL 4 /* invalidate tracks */
#define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */
/*
* struct format_check_t
* represents all data necessary to evaluate the format of
* different tracks of a dasd
*/
typedef struct format_check_t {
/* Input */
struct format_data_t expect;
/* Output */
unsigned int result; /* Error indication (DASD_FMT_ERR_*) */
unsigned int unit; /* Track that is in error */
unsigned int rec; /* Record that is in error */
unsigned int num_records; /* Records in the track in error */
unsigned int blksize; /* Blocksize of first record in error */
unsigned int key_length; /* Key length of first record in error */
} format_check_t;
/* Values returned in format_check_t when a format error is detected: */
/* Too few records were found on a single track */
#define DASD_FMT_ERR_TOO_FEW_RECORDS 1
/* Too many records were found on a single track */
#define DASD_FMT_ERR_TOO_MANY_RECORDS 2
/* Blocksize/data-length of a record was wrong */
#define DASD_FMT_ERR_BLKSIZE 3
/* A record ID is defined by cylinder, head, and record number (CHR). */
/* On mismatch, this error is set */
#define DASD_FMT_ERR_RECORD_ID 4
/* If key-length was != 0 */
#define DASD_FMT_ERR_KEY_LENGTH 5
/*
* struct attrib_data_t
......@@ -288,6 +318,8 @@ struct dasd_snid_ioctl_data {
/* Get Sense Path Group ID (SNID) data */
#define BIODASDSNID _IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data)
/* Check device format according to format_check_t */
#define BIODASDCHECKFMT _IOWR(DASD_IOCTL_LETTER, 2, format_check_t)
#define BIODASDSYMMIO _IOWR(DASD_IOCTL_LETTER, 240, dasd_symmio_parms_t)
......
......@@ -72,7 +72,6 @@ void show_cacheinfo(struct seq_file *m)
if (!test_facility(34))
return;
get_online_cpus();
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
cache = this_cpu_ci->info_list + idx;
......@@ -86,7 +85,6 @@ void show_cacheinfo(struct seq_file *m)
seq_printf(m, "associativity=%d", cache->ways_of_associativity);
seq_puts(m, "\n");
}
put_online_cpus();
}
static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
......
......@@ -173,7 +173,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
/*
* Copy memory of the old, dumped system to a user space virtual address
*/
int copy_oldmem_user(void __user *dst, void *src, size_t count)
static int copy_oldmem_user(void __user *dst, void *src, size_t count)
{
unsigned long from, len;
int rc;
......
......@@ -89,6 +89,30 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
}
EXPORT_SYMBOL_GPL(dump_trace);
struct return_address_data {
unsigned long address;
int depth;
};
static int __return_address(void *data, unsigned long address)
{
struct return_address_data *rd = data;
if (rd->depth--)
return 0;
rd->address = address;
return 1;
}
unsigned long return_address(int depth)
{
struct return_address_data rd = { .depth = depth + 2 };
dump_trace(__return_address, &rd, NULL, current_stack_pointer());
return rd.address;
}
EXPORT_SYMBOL_GPL(return_address);
static int show_address(void *data, unsigned long address)
{
printk("([<%016lx>] %pSR)\n", address, (void *)address);
......
#ifndef _ENTRY_H
#define _ENTRY_H
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/signal.h>
#include <asm/ptrace.h>
......@@ -75,4 +76,7 @@ long sys_s390_personality(unsigned int personality);
long sys_s390_runtime_instr(int command, int signum);
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
DECLARE_PER_CPU(u64, mt_cycles[8]);
#endif /* _ENTRY_H */
......@@ -665,18 +665,21 @@ static struct pmu cpumf_pmu = {
static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
void *hcpu)
{
unsigned int cpu = (long) hcpu;
int flags;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
flags = PMC_INIT;
smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
case CPU_DOWN_PREPARE:
flags = PMC_RELEASE;
smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
default:
break;
......
......@@ -1510,7 +1510,6 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
static int cpumf_pmu_notifier(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (long) hcpu;
int flags;
/* Ignore the notification if no events are scheduled on the PMU.
......@@ -1523,11 +1522,15 @@ static int cpumf_pmu_notifier(struct notifier_block *self,
case CPU_ONLINE:
case CPU_DOWN_FAILED:
flags = PMC_INIT;
smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
case CPU_DOWN_PREPARE:
flags = PMC_RELEASE;
smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
default:
break;
......
......@@ -7,6 +7,7 @@
* Denis Joseph Barrow,
*/
#include <linux/elf-randomize.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
......@@ -37,9 +38,6 @@
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
/* FPU save area for the init task */
__vector128 init_task_fpu_regs[__NUM_VXRS] __init_task_data;
/*
* Return saved PC of a blocked thread. used in kernel/sched.
* resume in entry.S does not create a new stack frame, it
......@@ -85,35 +83,19 @@ void release_thread(struct task_struct *dead_task)
void arch_release_task_struct(struct task_struct *tsk)
{
/* Free either the floating-point or the vector register save area */
kfree(tsk->thread.fpu.regs);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
size_t fpu_regs_size;
*dst = *src;
/*
* If the vector extension is available, it is enabled for all tasks,
* and, thus, the FPU register save area must be allocated accordingly.
*/
fpu_regs_size = MACHINE_HAS_VX ? sizeof(__vector128) * __NUM_VXRS
: sizeof(freg_t) * __NUM_FPRS;
dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT);
if (!dst->thread.fpu.regs)
return -ENOMEM;
/*
* Save the floating-point or vector register state of the current
* task and set the CIF_FPU flag to lazy restore the FPU register
* state when returning to user space.
*/
save_fpu_regs();
dst->thread.fpu.fpc = current->thread.fpu.fpc;
memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size);
memcpy(dst, src, arch_task_struct_size);
dst->thread.fpu.regs = dst->thread.fpu.fprs;
return 0;
}
......
......@@ -6,6 +6,7 @@
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/seq_file.h>
......@@ -84,7 +85,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_puts(m, "\n");
show_cacheinfo(m);
}
get_online_cpus();
if (cpu_online(n)) {
struct cpuid *id = &per_cpu(cpu_id, n);
seq_printf(m, "processor %li: "
......@@ -93,23 +93,31 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"machine = %04X\n",
n, id->version, id->ident, id->machine);
}
put_online_cpus();
return 0;
}
static inline void *c_update(loff_t *pos)
{
if (*pos)
*pos = cpumask_next(*pos - 1, cpu_online_mask);
return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
get_online_cpus();
return c_update(pos);
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
return c_update(pos);
}
static void c_stop(struct seq_file *m, void *v)
{
put_online_cpus();
}
const struct seq_operations cpuinfo_op = {
......
......@@ -808,6 +808,22 @@ static void __init setup_randomness(void)
free_page((unsigned long) vmms);
}
/*
* Find the correct size for the task_struct. This depends on
* the size of the struct fpu at the end of the thread_struct,
* which is embedded in the task_struct.
*/
static void __init setup_task_size(void)
{
int task_size = sizeof(struct task_struct);
if (!MACHINE_HAS_VX) {
task_size -= sizeof(__vector128) * __NUM_VXRS;
task_size += sizeof(freg_t) * __NUM_FPRS;
}
arch_task_struct_size = task_size;
}
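/*
 * Illustrative note (not part of this patch): with
 * ARCH_WANTS_DYNAMIC_TASK_STRUCT selected (see the Kconfig hunk above),
 * the generic fork code sizes the task_struct slab from
 * arch_task_struct_size instead of sizeof(struct task_struct),
 * roughly:
 *
 *	task_struct_cachep = kmem_cache_create("task_struct",
 *			arch_task_struct_size, ARCH_MIN_TASKALIGN,
 *			SLAB_PANIC | SLAB_NOTRACK, NULL);
 *
 * so shrinking arch_task_struct_size here saves memory for every task
 * on machines without the vector facility.
 */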
/*
* Setup function called from init/main.c just after the banner
* was printed.
......@@ -846,6 +862,7 @@ void __init setup_arch(char **cmdline_p)
os_info_init();
setup_ipl();
setup_task_size();
/* Do some memory reservations *before* memory is added to memblock */
reserve_memory_end();
......
......@@ -18,6 +18,8 @@
#include <asm/cpu_mf.h>
#include <asm/smp.h>
#include "entry.h"
static void virt_timer_expire(void);
static LIST_HEAD(virt_timer_list);
......
......@@ -631,6 +631,29 @@ void pfault_fini(void)
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);
#define PF_COMPLETE 0x0080
/*
* The mechanism of our pfault code: if Linux is running as a guest, runs a
* user space process, and the user space process accesses a page that the
* host has paged out, we get a pfault interrupt.
*
* This allows us, within the guest, to schedule a different process. Without
* this mechanism the host would have to suspend the whole virtual cpu until
* the page has been paged in.
*
* So when we get such an interrupt, we set the state of the current task
* to uninterruptible and also set the need_resched flag. Both happen within
* interrupt context(!). If we later want to return to user space, we
* recognize the need_resched flag and then call schedule().  It's not very
* obvious how this works...
*
* Of course we have a lot of additional fun with the completion interrupt (->
* host signals that a page of a process has been paged in and the process can
* continue to run). This interrupt can arrive on any cpu and, since we have
* virtual cpus, actually appear before the interrupt that signals that a page
* is missing.
*/
static void pfault_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
......@@ -639,10 +662,9 @@ static void pfault_interrupt(struct ext_code ext_code,
pid_t pid;
/*
* Get the external interruption subcode & pfault
* initial/completion signal bit. VM stores this
* in the 'cpu address' field associated with the
* external interrupt.
* Get the external interruption subcode & pfault initial/completion
* signal bit. VM stores this in the 'cpu address' field associated
* with the external interrupt.
*/
subcode = ext_code.subcode;
if ((subcode & 0xff00) != __SUBCODE_MASK)
......@@ -658,7 +680,7 @@ static void pfault_interrupt(struct ext_code ext_code,
if (!tsk)
return;
spin_lock(&pfault_lock);
if (subcode & 0x0080) {
if (subcode & PF_COMPLETE) {
/* signal bit is set -> a page has been swapped in by VM */
if (tsk->thread.pfault_wait == 1) {
/* Initial interrupt was faster than the completion
......@@ -687,8 +709,7 @@ static void pfault_interrupt(struct ext_code ext_code,
goto out;
if (tsk->thread.pfault_wait == 1) {
/* Already on the list with a reference: put to sleep */
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
goto block;
} else if (tsk->thread.pfault_wait == -1) {
/* Completion interrupt was faster than the initial
* interrupt (pfault_wait == -1). Set pfault_wait
......@@ -703,7 +724,11 @@ static void pfault_interrupt(struct ext_code ext_code,
get_task_struct(tsk);
tsk->thread.pfault_wait = 1;
list_add(&tsk->thread.list, &pfault_list);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
block:
/* Since this must be a userspace fault, there
* is no kernel task state to trample. Rely on the
* return to userspace schedule() to block. */
__set_current_state(TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
}
}
......
......@@ -22,6 +22,7 @@
* Started by Ingo Molnar <mingo@elte.hu>
*/
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
......
......@@ -56,7 +56,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
return pmd;
}
static pte_t __ref *vmem_pte_alloc(unsigned long address)
static pte_t __ref *vmem_pte_alloc(void)
{
pte_t *pte;
......@@ -121,7 +121,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
continue;
}
if (pmd_none(*pm_dir)) {
pt_dir = vmem_pte_alloc(address);
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
......@@ -233,7 +233,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pt_dir = vmem_pte_alloc(address);
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
......@@ -370,7 +370,7 @@ void __init vmem_map_init(void)
ro_end = (unsigned long)&_eshared & PAGE_MASK;
for_each_memblock(memory, reg) {
start = reg->base;
end = reg->base + reg->size - 1;
end = reg->base + reg->size;
if (start >= ro_end || end <= ro_start)
vmem_add_mem(start, end - start, 0);
else if (start >= ro_start && end <= ro_end)
......
/*
* Copyright IBM Corp. 2012
* Copyright IBM Corp. 2012,2015
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
......@@ -23,22 +23,45 @@ EXPORT_SYMBOL_GPL(pci_debug_msg_id);
debug_info_t *pci_debug_err_id;
EXPORT_SYMBOL_GPL(pci_debug_err_id);
static char *pci_perf_names[] = {
/* hardware counters */
static char *pci_common_names[] = {
"Load operations",
"Store operations",
"Store block operations",
"Refresh operations",
};
static char *pci_fmt0_names[] = {
"DMA read bytes",
"DMA write bytes",
};
static char *pci_fmt1_names[] = {
"Received bytes",
"Received packets",
"Transmitted bytes",
"Transmitted packets",
};
static char *pci_fmt2_names[] = {
"Consumed work units",
"Maximum work units",
};
static char *pci_sw_names[] = {
"Allocated pages",
"Mapped pages",
"Unmapped pages",
};
static void pci_fmb_show(struct seq_file *m, char *name[], int length,
u64 *data)
{
int i;
for (i = 0; i < length; i++, data++)
seq_printf(m, "%26s:\t%llu\n", name[i], *data);
}
static void pci_sw_counter_show(struct seq_file *m)
{
struct zpci_dev *zdev = m->private;
......@@ -53,8 +76,6 @@ static void pci_sw_counter_show(struct seq_file *m)
static int pci_perf_show(struct seq_file *m, void *v)
{
struct zpci_dev *zdev = m->private;
u64 *stat;
int i;
if (!zdev)
return 0;
......@@ -72,15 +93,27 @@ static int pci_perf_show(struct seq_file *m, void *v)
seq_printf(m, "Samples: %u\n", zdev->fmb->samples);
seq_printf(m, "Last update TOD: %Lx\n", zdev->fmb->last_update);
/* hardware counters */
stat = (u64 *) &zdev->fmb->ld_ops;
for (i = 0; i < 4; i++)
seq_printf(m, "%26s:\t%llu\n",
pci_perf_names[i], *(stat + i));
if (zdev->fmb->dma_valid)
for (i = 4; i < 6; i++)
seq_printf(m, "%26s:\t%llu\n",
pci_perf_names[i], *(stat + i));
pci_fmb_show(m, pci_common_names, ARRAY_SIZE(pci_common_names),
&zdev->fmb->ld_ops);
switch (zdev->fmb->format) {
case 0:
if (!(zdev->fmb->fmt_ind & ZPCI_FMB_DMA_COUNTER_VALID))
break;
pci_fmb_show(m, pci_fmt0_names, ARRAY_SIZE(pci_fmt0_names),
&zdev->fmb->fmt0.dma_rbytes);
break;
case 1:
pci_fmb_show(m, pci_fmt1_names, ARRAY_SIZE(pci_fmt1_names),
&zdev->fmb->fmt1.rx_bytes);
break;
case 2:
pci_fmb_show(m, pci_fmt2_names, ARRAY_SIZE(pci_fmt2_names),
&zdev->fmb->fmt2.consumed_work_units);
break;
default:
seq_puts(m, "Unknown format\n");
}
pci_sw_counter_show(m);
mutex_unlock(&zdev->lock);
......
......@@ -12,6 +12,8 @@
#include <linux/stat.h>
#include <linux/pci.h>
#include <asm/sclp.h>
#define zpci_attr(name, fmt, member) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
......@@ -77,8 +79,29 @@ static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
sizeof(zdev->util_str));
}
static BIN_ATTR_RO(util_string, CLP_UTIL_STR_LEN);
static ssize_t report_error_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct zpci_report_error_header *report = (void *) buf;
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
struct zpci_dev *zdev = to_zpci(pdev);
int ret;
if (off || (count < sizeof(*report)))
return -EINVAL;
ret = sclp_pci_report(report, zdev->fh, zdev->fid);
return ret ? ret : count;
}
static BIN_ATTR(report_error, S_IWUSR, NULL, report_error_write, PAGE_SIZE);
static struct bin_attribute *zpci_bin_attrs[] = {
&bin_attr_util_string,
&bin_attr_report_error,
NULL,
};
......
......@@ -75,6 +75,8 @@ static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);
/*
* SECTION: Operations on the device structure.
......@@ -267,6 +269,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
dasd_debugfs_setup(dev_name(&device->cdev->dev),
dasd_debugfs_root_entry);
dasd_profile_init(&device->profile, device->debugfs_dentry);
dasd_hosts_init(device->debugfs_dentry, device);
/* register 'device' debug area, used for all DBF_DEV_XXX calls */
device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
......@@ -304,6 +307,7 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
return rc;
dasd_device_clear_timer(device);
dasd_profile_exit(&device->profile);
dasd_hosts_exit(device);
debugfs_remove(device->debugfs_dentry);
DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
if (device->debug_area != NULL) {
......@@ -1150,6 +1154,58 @@ int dasd_profile_on(struct dasd_profile *profile)
#endif /* CONFIG_DASD_PROFILE */
static int dasd_hosts_show(struct seq_file *m, void *v)
{
struct dasd_device *device;
int rc = -EOPNOTSUPP;
device = m->private;
dasd_get_device(device);
if (device->discipline->hosts_print)
rc = device->discipline->hosts_print(device, m);
dasd_put_device(device);
return rc;
}
static int dasd_hosts_open(struct inode *inode, struct file *file)
{
struct dasd_device *device = inode->i_private;
return single_open(file, dasd_hosts_show, device);
}
static const struct file_operations dasd_hosts_fops = {
.owner = THIS_MODULE,
.open = dasd_hosts_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void dasd_hosts_exit(struct dasd_device *device)
{
debugfs_remove(device->hosts_dentry);
device->hosts_dentry = NULL;
}
static void dasd_hosts_init(struct dentry *base_dentry,
struct dasd_device *device)
{
struct dentry *pde;
umode_t mode;
if (!base_dentry)
return;
mode = S_IRUSR | S_IFREG;
pde = debugfs_create_file("host_access_list", mode, base_dentry,
device, &dasd_hosts_fops);
if (pde && !IS_ERR(pde))
device->hosts_dentry = pde;
}
/*
* Allocate memory for a channel program with 'cplength' channel
* command words and 'datasize' additional space. There are two
......@@ -1582,6 +1638,9 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
unsigned long long now;
int nrf_suppressed = 0;
int fp_suppressed = 0;
u8 *sense = NULL;
int expires;
if (IS_ERR(irb)) {
......@@ -1617,7 +1676,23 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
dasd_put_device(device);
return;
}
/*
* In some cases 'File Protected' or 'No Record Found' errors
* might be expected and debug log messages for the
* corresponding interrupts shouldn't be written then.
* Check if either of the corresponding suppress bits is set.
*/
sense = dasd_get_sense(irb);
if (sense) {
fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
}
if (!(fp_suppressed || nrf_suppressed))
device->discipline->dump_sense_dbf(device, irb, "int");
if (device->features & DASD_FEATURE_ERPLOG)
device->discipline->dump_sense(device, cqr, irb);
device->discipline->check_for_device_change(device, cqr, irb);
......@@ -2256,6 +2331,7 @@ static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
struct dasd_device *device;
struct dasd_ccw_req *cqr, *n;
u8 *sense = NULL;
int rc;
retry:
......@@ -2301,6 +2377,20 @@ static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
rc = 0;
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
/*
* In some cases the 'File Protected' or 'Incorrect Length'
* error might be expected and error recovery would be
* unnecessary in these cases. Check if the corresponding suppress
* bit is set.
*/
sense = dasd_get_sense(&cqr->irb);
if (sense && sense[1] & SNS1_FILE_PROTECTED &&
test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
continue;
if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
continue;
/*
* for alias devices simplify error recovery and
* return to upper layer
......
......@@ -1367,6 +1367,12 @@ dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
struct dasd_device *device = default_erp->startdev;
/*
* In some cases the 'No Record Found' error might be expected and
* log messages shouldn't be written then.
* Check if the corresponding suppress bit is set.
*/
if (!test_bit(DASD_CQR_SUPPRESS_NRF, &default_erp->flags))
dev_err(&device->cdev->dev,
"The specified record was not found\n");
......@@ -1393,8 +1399,14 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
struct dasd_device *device = erp->startdev;
dev_err(&device->cdev->dev, "Accessing the DASD failed because of "
"a hardware error\n");
/*
* In some cases the 'File Protected' error might be expected and
* log messages shouldn't be written then.
* Check if the corresponding suppress bit is set.
*/
if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
dev_err(&device->cdev->dev,
"Accessing the DASD failed because of a hardware error\n");
return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
......
......@@ -981,6 +981,32 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
static ssize_t
dasd_access_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct dasd_device *device;
int count;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return PTR_ERR(device);
if (device->discipline->host_access_count)
count = device->discipline->host_access_count(device);
else
count = -EOPNOTSUPP;
dasd_put_device(device);
if (count < 0)
return count;
return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
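/*
 * Illustrative user-space sketch (not part of this patch): the number
 * of hosts that currently have the volume online (grouped) can now be
 * read from the new sysfs attribute; the device bus-ID in the path is
 * an assumption.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int host_count_example(void)
{
	char buf[16];
	int fd, n;

	fd = open("/sys/bus/ccw/devices/0.0.1234/host_access_count",
		  O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return -1;
	buf[n] = '\0';
	return atoi(buf);	/* hosts with the device grouped */
}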
static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf)
......@@ -1471,6 +1497,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
&dev_attr_safe_offline.attr,
&dev_attr_host_access_count.attr,
&dev_attr_path_masks.attr,
NULL,
};
......
......@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/css_chars.h>
#include <asm/debug.h>
......@@ -120,6 +121,11 @@ struct check_attention_work_data {
__u8 lpum;
};
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
struct dasd_device *, struct dasd_device *,
unsigned int, int, unsigned int, unsigned int,
unsigned int, unsigned int);
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
static int
......@@ -256,10 +262,13 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
case DASD_ECKD_CCW_READ_CKD_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
case DASD_ECKD_CCW_READ_COUNT:
data->mask.perm = 0x1;
data->attributes.operation = private->attrib.operation;
break;
case DASD_ECKD_CCW_READ_COUNT:
data->mask.perm = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
......@@ -528,10 +537,13 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
case DASD_ECKD_CCW_READ_CKD_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
case DASD_ECKD_CCW_READ_COUNT:
dedata->mask.perm = 0x1;
dedata->attributes.operation = basepriv->attrib.operation;
break;
case DASD_ECKD_CCW_READ_COUNT:
dedata->mask.perm = 0x1;
dedata->attributes.operation = DASD_BYPASS_CACHE;
break;
case DASD_ECKD_CCW_READ_TRACK:
case DASD_ECKD_CCW_READ_TRACK_DATA:
dedata->mask.perm = 0x1;
......@@ -2095,6 +2107,180 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
return 0;
}
/*
* Build the TCW request for the format check
*/
static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
int enable_pav, struct eckd_count *fmt_buffer,
int rpt)
{
struct dasd_eckd_private *start_priv;
struct dasd_device *startdev = NULL;
struct tidaw *last_tidaw = NULL;
struct dasd_ccw_req *cqr;
struct itcw *itcw;
int itcw_size;
int count;
int rc;
int i;
if (enable_pav)
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
start_priv = startdev->private;
count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
/*
* We're adding 'count' TIDAWs to the ITCW;
* calculate the corresponding itcw_size.
*/
itcw_size = itcw_calc_size(0, count, 0);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
if (IS_ERR(cqr))
return cqr;
start_priv->count++;
itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
if (IS_ERR(itcw)) {
rc = -EINVAL;
goto out_err;
}
cqr->cpaddr = itcw_get_tcw(itcw);
rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
sizeof(struct eckd_count),
count * sizeof(struct eckd_count), 0, rpt);
if (rc)
goto out_err;
for (i = 0; i < count; i++) {
last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
sizeof(struct eckd_count));
if (IS_ERR(last_tidaw)) {
rc = -EINVAL;
goto out_err;
}
}
last_tidaw->flags |= TIDAW_FLAGS_LAST;
itcw_finalize(itcw);
cqr->cpmode = 1;
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->basedev = base;
cqr->retries = startdev->default_retries;
cqr->expires = startdev->default_expires * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
return cqr;
out_err:
dasd_sfree_request(cqr, startdev);
return ERR_PTR(rc);
}
/*
* Build the CCW request for the format check
*/
static struct dasd_ccw_req *
dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
int enable_pav, struct eckd_count *fmt_buffer, int rpt)
{
struct dasd_eckd_private *start_priv;
struct dasd_eckd_private *base_priv;
struct dasd_device *startdev = NULL;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
void *data;
int cplength, datasize;
int use_prefix;
int count;
int i;
if (enable_pav)
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
start_priv = startdev->private;
base_priv = base->private;
count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
use_prefix = base_priv->features.feature[8] & 0x01;
if (use_prefix) {
cplength = 1;
datasize = sizeof(struct PFX_eckd_data);
} else {
cplength = 2;
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data);
}
cplength += count;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
startdev);
if (IS_ERR(cqr))
return cqr;
start_priv->count++;
data = cqr->data;
ccw = cqr->cpaddr;
if (use_prefix) {
prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
count, 0, 0);
} else {
define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_READ_COUNT, startdev);
data += sizeof(struct DE_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, data, fdata->start_unit, 0, count,
DASD_ECKD_CCW_READ_COUNT, base, 0);
}
for (i = 0; i < count; i++) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)(addr_t) fmt_buffer;
ccw++;
fmt_buffer++;
}
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->basedev = base;
cqr->retries = DASD_RETRIES;
cqr->expires = startdev->default_expires * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
return cqr;
}
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base,
struct format_data_t *fdata,
......@@ -2362,9 +2548,24 @@ dasd_eckd_build_format(struct dasd_device *base,
*/
static struct dasd_ccw_req *
dasd_eckd_format_build_ccw_req(struct dasd_device *base,
struct format_data_t *fdata, int enable_pav)
struct format_data_t *fdata, int enable_pav,
int tpm, struct eckd_count *fmt_buffer, int rpt)
{
return dasd_eckd_build_format(base, fdata, enable_pav);
struct dasd_ccw_req *ccw_req;
if (!fmt_buffer) {
ccw_req = dasd_eckd_build_format(base, fdata, enable_pav);
} else {
if (tpm)
ccw_req = dasd_eckd_build_check_tcw(base, fdata,
enable_pav,
fmt_buffer, rpt);
else
ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
fmt_buffer, rpt);
}
return ccw_req;
}
/*
......@@ -2409,12 +2610,15 @@ static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
*/
static int dasd_eckd_format_process_data(struct dasd_device *base,
struct format_data_t *fdata,
int enable_pav)
int enable_pav, int tpm,
struct eckd_count *fmt_buffer, int rpt,
struct irb *irb)
{
struct dasd_eckd_private *private = base->private;
struct dasd_ccw_req *cqr, *n;
struct list_head format_queue;
struct dasd_device *device;
char *sense = NULL;
int old_start, old_stop, format_step;
int step, retry;
int rc;
......@@ -2428,8 +2632,18 @@ static int dasd_eckd_format_process_data(struct dasd_device *base,
old_start = fdata->start_unit;
old_stop = fdata->stop_unit;
format_step = DASD_CQR_MAX_CCW / recs_per_track(&private->rdc_data, 0,
fdata->blksize);
if (!tpm && fmt_buffer != NULL) {
/* Command Mode / Format Check */
format_step = 1;
} else if (tpm && fmt_buffer != NULL) {
/* Transport Mode / Format Check */
format_step = DASD_CQR_MAX_CCW / rpt;
} else {
/* Normal Formatting */
format_step = DASD_CQR_MAX_CCW /
recs_per_track(&private->rdc_data, 0, fdata->blksize);
}
do {
retry = 0;
while (fdata->start_unit <= old_stop) {
......@@ -2440,7 +2654,8 @@ static int dasd_eckd_format_process_data(struct dasd_device *base,
}
cqr = dasd_eckd_format_build_ccw_req(base, fdata,
enable_pav);
enable_pav, tpm,
fmt_buffer, rpt);
if (IS_ERR(cqr)) {
rc = PTR_ERR(cqr);
if (rc == -ENOMEM) {
......@@ -2458,6 +2673,10 @@ static int dasd_eckd_format_process_data(struct dasd_device *base,
}
list_add_tail(&cqr->blocklist, &format_queue);
if (fmt_buffer) {
step = fdata->stop_unit - fdata->start_unit + 1;
fmt_buffer += rpt * step;
}
fdata->start_unit = fdata->stop_unit + 1;
fdata->stop_unit = old_stop;
}
......@@ -2468,15 +2687,41 @@ static int dasd_eckd_format_process_data(struct dasd_device *base,
list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
device = cqr->startdev;
private = device->private;
if (cqr->status == DASD_CQR_FAILED)
if (cqr->status == DASD_CQR_FAILED) {
/*
* Only get sense data if called by format
* check
*/
if (fmt_buffer && irb) {
sense = dasd_get_sense(&cqr->irb);
memcpy(irb, &cqr->irb, sizeof(*irb));
}
rc = -EIO;
}
list_del_init(&cqr->blocklist);
dasd_sfree_request(cqr, device);
private->count--;
}
if (rc)
if (rc && rc != -EIO)
goto out;
if (rc == -EIO) {
/*
* In case fewer than the expected records are on the
* track, we will most likely get a 'No Record Found'
* error (in command mode) or a 'File Protected' error
* (in transport mode). Those particular cases shouldn't
* pass the -EIO to the IOCTL, therefore reset the rc
* and continue.
*/
if (sense &&
(sense[1] & SNS1_NO_REC_FOUND ||
sense[1] & SNS1_FILE_PROTECTED))
retry = 1;
else
goto out;
}
} while (retry);
......@@ -2490,7 +2735,225 @@ static int dasd_eckd_format_process_data(struct dasd_device *base,
static int dasd_eckd_format_device(struct dasd_device *base,
struct format_data_t *fdata, int enable_pav)
{
return dasd_eckd_format_process_data(base, fdata, enable_pav);
return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
0, NULL);
}
/*
* Helper function to count consecutive records of a single track.
*/
static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
int max)
{
int head;
int i;
head = fmt_buffer[start].head;
/*
* There are 3 conditions where we stop counting:
* - if data reoccurs (same head and record may reoccur), which may
* happen due to the way DASD_ECKD_CCW_READ_COUNT works
* - when the head changes, because we're iterating over several tracks
* then (DASD_ECKD_CCW_READ_COUNT_MT)
* - when we've reached the end of sensible data in the buffer (the
* record will be 0 then)
*/
for (i = start; i < max; i++) {
if (i > start) {
if ((fmt_buffer[i].head == head &&
fmt_buffer[i].record == 1) ||
fmt_buffer[i].head != head ||
fmt_buffer[i].record == 0)
break;
}
}
return i - start;
}
/*
* Evaluate a given range of tracks. Data like number of records, blocksize,
* record ids, and key length are compared with expected data.
*
* If a mismatch occurs, the corresponding error code is stored, along
* with additional information depending on the error.
*/
static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
struct format_check_t *cdata,
int rpt_max, int rpt_exp,
int trk_per_cyl, int tpm)
{
struct ch_t geo;
int max_entries;
int count = 0;
int trkcount;
int blksize;
int pos = 0;
int i, j;
int kl;
trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
max_entries = trkcount * rpt_max;
for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
/* Calculate the correct next starting position in the buffer */
if (tpm) {
while (fmt_buffer[pos].record == 0 &&
fmt_buffer[pos].dl == 0) {
if (pos++ > max_entries)
break;
}
} else {
if (i != cdata->expect.start_unit)
pos += rpt_max - count;
}
/* Calculate the expected geo values for the current track */
set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
/* Count and check number of records */
count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
if (count < rpt_exp) {
cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
break;
}
if (count > rpt_exp) {
cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
break;
}
for (j = 0; j < count; j++, pos++) {
blksize = cdata->expect.blksize;
kl = 0;
/*
* Set special values when checking CDL formatted
* devices.
*/
if ((cdata->expect.intensity & 0x08) &&
geo.cyl == 0 && geo.head == 0) {
if (j < 3) {
blksize = sizes_trk0[j] - 4;
kl = 4;
}
}
if ((cdata->expect.intensity & 0x08) &&
geo.cyl == 0 && geo.head == 1) {
blksize = LABEL_SIZE - 44;
kl = 44;
}
/* Check blocksize */
if (fmt_buffer[pos].dl != blksize) {
cdata->result = DASD_FMT_ERR_BLKSIZE;
goto out;
}
/* Check if key length is 0 */
if (fmt_buffer[pos].kl != kl) {
cdata->result = DASD_FMT_ERR_KEY_LENGTH;
goto out;
}
/* Check if record_id is correct */
if (fmt_buffer[pos].cyl != geo.cyl ||
fmt_buffer[pos].head != geo.head ||
fmt_buffer[pos].record != (j + 1)) {
cdata->result = DASD_FMT_ERR_RECORD_ID;
goto out;
}
}
}
out:
/*
* In case of no errors, we need to decrease by one
* to get the correct positions.
*/
if (!cdata->result) {
i--;
pos--;
}
cdata->unit = i;
cdata->num_records = count;
cdata->rec = fmt_buffer[pos].record;
cdata->blksize = fmt_buffer[pos].dl;
cdata->key_length = fmt_buffer[pos].kl;
}
/*
* Check the format of a range of tracks of a DASD.
*/
static int dasd_eckd_check_device_format(struct dasd_device *base,
struct format_check_t *cdata,
int enable_pav)
{
struct dasd_eckd_private *private = base->private;
struct eckd_count *fmt_buffer;
struct irb irb;
int rpt_max, rpt_exp;
int fmt_buffer_size;
int trk_per_cyl;
int trkcount;
int tpm = 0;
int rc;
trk_per_cyl = private->rdc_data.trk_per_cyl;
/* Get maximum and expected amount of records per track */
rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
if (!fmt_buffer)
return -ENOMEM;
/*
* A certain FICON feature subset is needed to operate in transport
* mode. Additionally, the support for transport mode is implicitly
* checked by comparing the buffer size with fcx_max_data. As long as
* the buffer size is smaller we can operate in transport mode and
* process multiple tracks. If not, only one track at a time is
* processed using command mode.
*/
if ((private->features.feature[40] & 0x04) &&
fmt_buffer_size <= private->fcx_max_data)
tpm = 1;
rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
tpm, fmt_buffer, rpt_max, &irb);
if (rc && rc != -EIO)
goto out;
if (rc == -EIO) {
/*
* If our first attempt with transport mode enabled comes back
* with an incorrect length error, we're going to retry the
* check with command mode.
*/
if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
tpm = 0;
rc = dasd_eckd_format_process_data(base, &cdata->expect,
enable_pav, tpm,
fmt_buffer, rpt_max,
&irb);
if (rc)
goto out;
} else {
goto out;
}
}
dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
trk_per_cyl, tpm);
out:
kfree(fmt_buffer);
return rc;
}
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
......@@ -3037,6 +3500,16 @@ static int prepare_itcw(struct itcw *itcw,
lredata->auxiliary.check_bytes = 0x2;
pfx_cmd = DASD_ECKD_CCW_PFX;
break;
case DASD_ECKD_CCW_READ_COUNT_MT:
dedata->mask.perm = 0x1;
dedata->attributes.operation = DASD_BYPASS_CACHE;
dedata->ga_extended |= 0x42;
dedata->blk_size = blksize;
lredata->operation.orientation = 0x2;
lredata->operation.operation = 0x16;
lredata->auxiliary.check_bytes = 0x01;
pfx_cmd = DASD_ECKD_CCW_PFX_READ;
break;
default:
DBF_DEV_EVENT(DBF_ERR, basedev,
"prepare itcw, unknown opcode 0x%x", cmd);
......@@ -3084,13 +3557,19 @@ static int prepare_itcw(struct itcw *itcw,
}
}
if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
lredata->auxiliary.length_valid = 0;
lredata->auxiliary.length_scope = 0;
lredata->sector = 0xff;
} else {
lredata->auxiliary.length_valid = 1;
lredata->auxiliary.length_scope = 1;
lredata->sector = sector;
}
lredata->auxiliary.imbedded_ccw_valid = 1;
lredata->length = tlf;
lredata->imbedded_ccw = cmd;
lredata->count = count;
lredata->sector = sector;
set_ch_t(&lredata->seek_addr, begcyl, beghead);
lredata->search_arg.cyl = lredata->seek_addr.cyl;
lredata->search_arg.head = lredata->seek_addr.head;
......@@ -4412,10 +4891,34 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
static void dasd_eckd_dump_sense(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
if (scsw_is_tm(&irb->scsw))
u8 *sense = dasd_get_sense(irb);
if (scsw_is_tm(&irb->scsw)) {
/*
* In some cases the 'File Protected' or 'Incorrect Length'
* error might be expected and log messages shouldn't be written
* then. Check if the corresponding suppress bit is set.
*/
if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
return;
if (scsw_cstat(&irb->scsw) == 0x40 &&
test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
return;
dasd_eckd_dump_sense_tcw(device, req, irb);
else
} else {
/*
* In some cases the 'No Record Found' error might be expected
* and log messages shouldn't be written then. Check if the
* corresponding suppress bit is set.
*/
if (sense && sense[1] & SNS1_NO_REC_FOUND &&
test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
return;
dasd_eckd_dump_sense_ccw(device, req, irb);
}
}
static int dasd_eckd_pm_freeze(struct dasd_device *device)
......@@ -4627,6 +5130,167 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
return rc;
}
static int dasd_eckd_query_host_access(struct dasd_device *device,
struct dasd_psf_query_host_access *data)
{
struct dasd_eckd_private *private = device->private;
struct dasd_psf_query_host_access *host_access;
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
/* not available for HYPER PAV alias devices */
if (!device->block && private->lcu->pav == HYPER_PAV)
return -EOPNOTSUPP;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
sizeof(struct dasd_psf_prssd_data) + 1,
device);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate read message buffer request");
return PTR_ERR(cqr);
}
host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
if (!host_access) {
dasd_sfree_request(cqr, device);
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate host_access buffer");
return -ENOMEM;
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
/* LSS and Volume that will be queried */
prssdp->lss = private->ned->ID;
prssdp->volume = private->ned->unit_addr;
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)(addr_t) prssdp;
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_psf_query_host_access);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)(addr_t) host_access;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
if (rc == 0) {
*data = *host_access;
} else {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading host access data failed with rc=%d\n",
rc);
rc = -EOPNOTSUPP;
}
dasd_sfree_request(cqr, cqr->memdev);
kfree(host_access);
return rc;
}
/*
* return number of grouped devices
*/
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
struct dasd_psf_query_host_access *access;
struct dasd_ckd_path_group_entry *entry;
struct dasd_ckd_host_information *info;
int count = 0;
int rc, i;
access = kzalloc(sizeof(*access), GFP_NOIO);
if (!access) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate access buffer");
return -ENOMEM;
}
rc = dasd_eckd_query_host_access(device, access);
if (rc) {
kfree(access);
return rc;
}
info = (struct dasd_ckd_host_information *)
access->host_access_information;
for (i = 0; i < info->entry_count; i++) {
entry = (struct dasd_ckd_path_group_entry *)
(info->entry + i * info->entry_size);
if (entry->status_flags & DASD_ECKD_PG_GROUPED)
count++;
}
kfree(access);
return count;
}
/*
* write host access information to a sequential file
*/
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
struct dasd_psf_query_host_access *access;
struct dasd_ckd_path_group_entry *entry;
struct dasd_ckd_host_information *info;
char sysplex[9] = "";
int rc, i, j;
access = kzalloc(sizeof(*access), GFP_NOIO);
if (!access) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate access buffer");
return -ENOMEM;
}
rc = dasd_eckd_query_host_access(device, access);
if (rc) {
kfree(access);
return rc;
}
info = (struct dasd_ckd_host_information *)
access->host_access_information;
for (i = 0; i < info->entry_count; i++) {
entry = (struct dasd_ckd_path_group_entry *)
(info->entry + i * info->entry_size);
/* PGID */
seq_puts(m, "pgid ");
for (j = 0; j < 11; j++)
seq_printf(m, "%02x", entry->pgid[j]);
seq_putc(m, '\n');
/* FLAGS */
seq_printf(m, "status_flags %02x\n", entry->status_flags);
/* SYSPLEX NAME */
memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
EBCASC(sysplex, sizeof(sysplex));
seq_printf(m, "sysplex_name %8s\n", sysplex);
/* SUPPORTED CYLINDER */
seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
/* TIMESTAMP */
seq_printf(m, "timestamp %lu\n", (unsigned long)
entry->timestamp);
}
kfree(access);
return 0;
}
/*
* Perform Subsystem Function - CUIR response
*/
......@@ -5084,6 +5748,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.term_IO = dasd_term_IO,
.handle_terminated_request = dasd_eckd_handle_terminated_request,
.format_device = dasd_eckd_format_device,
.check_device_format = dasd_eckd_check_device_format,
.erp_action = dasd_eckd_erp_action,
.erp_postaction = dasd_eckd_erp_postaction,
.check_for_device_change = dasd_eckd_check_for_device_change,
......@@ -5099,6 +5764,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
.get_uid = dasd_eckd_get_uid,
.kick_validate = dasd_eckd_kick_validate_server,
.check_attention = dasd_eckd_check_attention,
.host_access_count = dasd_eckd_host_access_count,
.hosts_print = dasd_hosts_print,
};
static int __init
......
......@@ -35,6 +35,7 @@
#define DASD_ECKD_CCW_READ_MT 0x86
#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
#define DASD_ECKD_CCW_READ_KD_MT 0x8e
#define DASD_ECKD_CCW_READ_COUNT_MT 0x92
#define DASD_ECKD_CCW_RELEASE 0x94
#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
......@@ -53,6 +54,7 @@
*/
#define PSF_ORDER_PRSSD 0x18
#define PSF_ORDER_CUIR_RESPONSE 0x1A
#define PSF_SUBORDER_QHA 0x1C
#define PSF_ORDER_SSC 0x1D
/*
......@@ -81,6 +83,8 @@
#define ATTENTION_LENGTH_CUIR 0x0e
#define ATTENTION_FORMAT_CUIR 0x01
#define DASD_ECKD_PG_GROUPED 0x10
/*
* Size that is reported for large volumes in the old 16-bit no_cyl field
*/
......@@ -403,13 +407,41 @@ struct dasd_psf_cuir_response {
__u8 ssid;
} __packed;
struct dasd_ckd_path_group_entry {
__u8 status_flags;
__u8 pgid[11];
__u8 sysplex_name[8];
__u32 timestamp;
__u32 cylinder;
__u8 reserved[4];
} __packed;
struct dasd_ckd_host_information {
__u8 access_flags;
__u8 entry_size;
__u16 entry_count;
__u8 entry[16390];
} __packed;
struct dasd_psf_query_host_access {
__u8 access_flag;
__u8 version;
__u16 CKD_length;
__u16 SCSI_length;
__u8 unused[10];
__u8 host_access_information[16394];
} __packed;
/*
* Perform Subsystem Function - Prepare for Read Subsystem Data
*/
struct dasd_psf_prssd_data {
unsigned char order;
unsigned char flags;
unsigned char reserved[4];
unsigned char reserved1;
unsigned char reserved2;
unsigned char lss;
unsigned char volume;
unsigned char suborder;
unsigned char varies[5];
} __attribute__ ((packed));
......
......@@ -236,6 +236,13 @@ struct dasd_ccw_req {
* stolen. Should not be combined with
* DASD_CQR_FLAGS_USE_ERP
*/
/*
* The following flags are used to suppress output of certain errors.
* These flags should only be used for format checks!
*/
#define DASD_CQR_SUPPRESS_NRF 4 /* Suppress 'No Record Found' error */
#define DASD_CQR_SUPPRESS_FP 5 /* Suppress 'File Protected' error*/
#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
/* Signature for error recovery functions. */
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
......@@ -318,7 +325,8 @@ struct dasd_discipline {
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
* term_IO cancels it (e.g. in case of a timeout). format_device
* returns a ccw chain to be used to format the device.
* formats the device and check_device_format compares the format of
* a device with the expected format_data.
* handle_terminated_request allows examining a cqr and preparing
* it for retry.
*/
......@@ -329,7 +337,9 @@ struct dasd_discipline {
int (*term_IO) (struct dasd_ccw_req *);
void (*handle_terminated_request) (struct dasd_ccw_req *);
int (*format_device) (struct dasd_device *,
struct format_data_t *, int enable_pav);
struct format_data_t *, int);
int (*check_device_format)(struct dasd_device *,
struct format_check_t *, int);
int (*free_cp) (struct dasd_ccw_req *, struct request *);
/*
......@@ -365,6 +375,8 @@ struct dasd_discipline {
int (*get_uid) (struct dasd_device *, struct dasd_uid *);
void (*kick_validate) (struct dasd_device *);
int (*check_attention)(struct dasd_device *, __u8);
int (*host_access_count)(struct dasd_device *);
int (*hosts_print)(struct dasd_device *, struct seq_file *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
......@@ -487,6 +499,7 @@ struct dasd_device {
unsigned long blk_timeout;
struct dentry *debugfs_dentry;
struct dentry *hosts_dentry;
struct dasd_profile profile;
};
......
......@@ -238,6 +238,23 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata)
return rc;
}
static int dasd_check_format(struct dasd_block *block,
struct format_check_t *cdata)
{
struct dasd_device *base;
int rc;
base = block->base;
if (!base->discipline->check_device_format)
return -ENOTTY;
rc = base->discipline->check_device_format(base, cdata, 1);
if (rc == -EAGAIN)
rc = base->discipline->check_device_format(base, cdata, 0);
return rc;
}
/*
* Format device.
*/
......@@ -272,6 +289,47 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
}
rc = dasd_format(base->block, &fdata);
dasd_put_device(base);
return rc;
}
/*
* Check device format
*/
static int dasd_ioctl_check_format(struct block_device *bdev, void __user *argp)
{
struct format_check_t cdata;
struct dasd_device *base;
int rc = 0;
if (!argp)
return -EINVAL;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
if (bdev != bdev->bd_contains) {
pr_warn("%s: The specified DASD is a partition and cannot be checked\n",
dev_name(&base->cdev->dev));
rc = -EINVAL;
goto out_err;
}
if (copy_from_user(&cdata, argp, sizeof(cdata))) {
rc = -EFAULT;
goto out_err;
}
rc = dasd_check_format(base->block, &cdata);
if (rc)
goto out_err;
if (copy_to_user(argp, &cdata, sizeof(cdata)))
rc = -EFAULT;
out_err:
dasd_put_device(base);
return rc;
}
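For reference, a rough user-space sketch of driving the new check, assuming the format_check_t layout from this kernel's <asm/dasd.h> (an embedded format_data_t named expect describes what to verify; result and unit report the first mismatch). The device path, track range, and block size are examples only:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/dasd.h>

int main(void)
{
	format_check_t cdata = {0};
	int fd, rc;

	fd = open("/dev/dasda", O_RDONLY);	/* whole device, not a partition */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	cdata.expect.start_unit = 0;		/* first track to check */
	cdata.expect.stop_unit = 10;		/* last track to check */
	cdata.expect.blksize = 4096;		/* expected block size */
	rc = ioctl(fd, BIODASDCHECKFMT, &cdata);
	if (rc)
		perror("BIODASDCHECKFMT");
	else if (cdata.result)
		printf("format mismatch (code %u) at track %u\n",
		       cdata.result, cdata.unit);
	close(fd);
	return 0;
}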
......@@ -519,6 +577,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
case BIODASDFMT:
rc = dasd_ioctl_format(bdev, argp);
break;
case BIODASDCHECKFMT:
rc = dasd_ioctl_check_format(bdev, argp);
break;
case BIODASDINFO:
rc = dasd_ioctl_information(block, cmd, argp);
break;
......
......@@ -18,6 +18,8 @@ obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_PCI) += sclp_pci.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o
......
......@@ -400,7 +400,7 @@ con3270_deactivate(struct raw3270_view *view)
del_timer(&cp->timer);
}
static int
static void
con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
......@@ -418,7 +418,6 @@ con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
}
return RAW3270_IO_DONE;
}
/* Console view to a 3270 device. */
......
......@@ -217,7 +217,7 @@ fs3270_deactivate(struct raw3270_view *view)
fp->init->callback(fp->init, NULL);
}
static int
static void
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Set indication and wake waiters for attention. */
......@@ -233,7 +233,6 @@ fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
/*
......
......@@ -90,6 +90,8 @@ module_param(tubxcorrect, bool, 0);
*/
DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
static void __raw3270_disconnect(struct raw3270 *rp);
/*
* Encode array for 12 bit 3270 addresses.
*/
......@@ -228,29 +230,6 @@ raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
rq->ccw.flags |= CCW_FLAG_IDA;
}
/*
* Stop running ccw.
*/
static int
__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
{
int retries;
int rc;
if (raw3270_request_final(rq))
return 0;
/* Check if interrupt has already been processed */
for (retries = 0; retries < 5; retries++) {
if (retries < 2)
rc = ccw_device_halt(rp->cdev, (long) rq);
else
rc = ccw_device_clear(rp->cdev, (long) rq);
if (rc == 0)
break; /* termination successful */
}
return rc;
}
/*
* Add the request to the request queue, try to start it if the
* 3270 device is idle. Return without waiting for end of i/o.
......@@ -342,7 +321,6 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct raw3270 *rp;
struct raw3270_view *view;
struct raw3270_request *rq;
int rc;
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
......@@ -350,57 +328,31 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
rq = (struct raw3270_request *) intparm;
view = rq ? rq->view : rp->view;
if (IS_ERR(irb))
rc = RAW3270_IO_RETRY;
else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
rq->rc = -EIO;
rc = RAW3270_IO_DONE;
} else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
DEV_STAT_UNIT_EXCEP)) {
if (!IS_ERR(irb)) {
/* Handle CE-DE-UE and subsequent UDE */
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
rc = RAW3270_IO_BUSY;
} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
/* Wait for UDE if busy flag is set. */
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END)
clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
/* Got it, now retry. */
rc = RAW3270_IO_RETRY;
} else
rc = RAW3270_IO_BUSY;
} else if (view)
rc = view->fn->intv(view, rq, irb);
else
rc = RAW3270_IO_DONE;
if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END |
DEV_STAT_DEV_END |
DEV_STAT_UNIT_EXCEP))
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
/* Handle disconnected devices */
if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->ecw[0] & SNS0_INTERVENTION_REQ)) {
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
if (rp->state > RAW3270_STATE_RESET)
__raw3270_disconnect(rp);
}
/* Call interrupt handler of the view */
if (view)
view->fn->intv(view, rq, irb);
}
switch (rc) {
case RAW3270_IO_DONE:
break;
case RAW3270_IO_BUSY:
/*
* Intervention required by the operator. We have to wait
* for unsolicited device end.
*/
if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags))
/* Device busy, do not start I/O */
return;
case RAW3270_IO_RETRY:
if (!rq)
break;
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
(unsigned long) rq, 0, 0);
if (rq->rc == 0)
return; /* Successfully restarted. */
break;
case RAW3270_IO_STOP:
if (!rq)
break;
__raw3270_halt_io(rp, rq);
rq->rc = -EIO;
break;
default:
BUG();
}
if (rq) {
BUG_ON(list_empty(&rq->list));
if (rq && !list_empty(&rq->list)) {
/* The request completed, remove from queue and do callback. */
list_del_init(&rq->list);
if (rq->callback)
......@@ -408,6 +360,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
/* Do put_device for get_device in raw3270_start. */
raw3270_put_view(view);
}
/*
* Try to start each request on the request queue until one is
* started successfully.
......@@ -685,23 +638,34 @@ raw3270_reset(struct raw3270_view *view)
return rc;
}
static int
static void
__raw3270_disconnect(struct raw3270 *rp)
{
struct raw3270_request *rq;
struct raw3270_view *view;
rp->state = RAW3270_STATE_INIT;
rp->view = &rp->init_view;
/* Cancel all queued requests */
while (!list_empty(&rp->req_queue)) {
rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
view = rq->view;
rq->rc = -EACCES;
list_del_init(&rq->list);
if (rq->callback)
rq->callback(rq, rq->callback_data);
raw3270_put_view(view);
}
/* Start from scratch */
__raw3270_reset_device(rp);
}
static void
raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
struct irb *irb)
{
struct raw3270 *rp;
/*
* Unit-Check Processing:
* Expect Command Reject or Intervention Required.
*/
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
/* Request finished abnormally. */
if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
return RAW3270_IO_BUSY;
}
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
......@@ -715,7 +679,6 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
rp = view->dev;
raw3270_read_modified(rp);
}
return RAW3270_IO_DONE;
}
static struct raw3270_fn raw3270_init_fn = {
......
......@@ -125,19 +125,13 @@ raw3270_request_final(struct raw3270_request *rq)
void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
/* Return value of *intv (see raw3270_fn below) can be one of the following: */
#define RAW3270_IO_DONE 0 /* request finished */
#define RAW3270_IO_BUSY 1 /* request still active */
#define RAW3270_IO_RETRY 2 /* retry current request */
#define RAW3270_IO_STOP 3 /* kill current request */
/*
* Functions of a 3270 view.
*/
struct raw3270_fn {
int (*activate)(struct raw3270_view *);
void (*deactivate)(struct raw3270_view *);
int (*intv)(struct raw3270_view *,
void (*intv)(struct raw3270_view *,
struct raw3270_request *, struct irb *);
void (*release)(struct raw3270_view *);
void (*free)(struct raw3270_view *);
......
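With the RAW3270_IO_* return codes gone, restart, busy handling, and termination are decided centrally in raw3270_irq; a view's intv handler only inspects the irb and updates its own state. A minimal sketch under the new void signature (the view name is hypothetical):

static void
myview_irq(struct raw3270_view *view, struct raw3270_request *rq,
	   struct irb *irb)
{
	if (!rq)
		return;
	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
		rq->rc = -EIO;				/* mark request failed */
	else
		rq->rescnt = irb->scsw.cmd.count;	/* copy residual count */
}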
......@@ -17,33 +17,35 @@
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
#define SCLP_CONSOLE_PAGES 6
#define SCLP_EVTYP_MASK(T) (1U << (32 - (T)))
#define EVTYP_OPCMD 0x01
#define EVTYP_MSG 0x02
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_DIAG_TEST 0x07
#define EVTYP_STATECHANGE 0x08
#define EVTYP_PMSGCMD 0x09
#define EVTYP_CNTLPROGOPCMD 0x20
#define EVTYP_CNTLPROGIDENT 0x0B
#define EVTYP_SIGQUIESCE 0x1D
#define EVTYP_ASYNC 0x0A
#define EVTYP_CTLPROGIDENT 0x0B
#define EVTYP_ERRNOTIFY 0x18
#define EVTYP_VT220MSG 0x1A
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_SDIAS 0x1C
#define EVTYP_ASYNC 0x0A
#define EVTYP_SIGQUIESCE 0x1D
#define EVTYP_OCF 0x1E
#define EVTYP_OPCMD_MASK 0x80000000
#define EVTYP_MSG_MASK 0x40000000
#define EVTYP_DIAG_TEST_MASK 0x02000000
#define EVTYP_STATECHANGE_MASK 0x01000000
#define EVTYP_PMSGCMD_MASK 0x00800000
#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
#define EVTYP_CTLPROGIDENT_MASK 0x00200000
#define EVTYP_SIGQUIESCE_MASK 0x00000008
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_CONFMGMDATA_MASK 0x10000000
#define EVTYP_SDIAS_MASK 0x00000010
#define EVTYP_ASYNC_MASK 0x00400000
#define EVTYP_OCF_MASK 0x00000004
#define EVTYP_OPCMD_MASK SCLP_EVTYP_MASK(EVTYP_OPCMD)
#define EVTYP_MSG_MASK SCLP_EVTYP_MASK(EVTYP_MSG)
#define EVTYP_CONFMGMDATA_MASK SCLP_EVTYP_MASK(EVTYP_CONFMGMDATA)
#define EVTYP_DIAG_TEST_MASK SCLP_EVTYP_MASK(EVTYP_DIAG_TEST)
#define EVTYP_STATECHANGE_MASK SCLP_EVTYP_MASK(EVTYP_STATECHANGE)
#define EVTYP_PMSGCMD_MASK SCLP_EVTYP_MASK(EVTYP_PMSGCMD)
#define EVTYP_ASYNC_MASK SCLP_EVTYP_MASK(EVTYP_ASYNC)
#define EVTYP_CTLPROGIDENT_MASK SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT)
#define EVTYP_ERRNOTIFY_MASK SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY)
#define EVTYP_VT220MSG_MASK SCLP_EVTYP_MASK(EVTYP_VT220MSG)
#define EVTYP_SDIAS_MASK SCLP_EVTYP_MASK(EVTYP_SDIAS)
#define EVTYP_SIGQUIESCE_MASK SCLP_EVTYP_MASK(EVTYP_SIGQUIESCE)
#define EVTYP_OCF_MASK SCLP_EVTYP_MASK(EVTYP_OCF)
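A quick sanity check of the conversion, using nothing but the macro definition above: event types index the 32-bit send/receive mask from the most significant bit, so for example

	SCLP_EVTYP_MASK(EVTYP_MSG) == 1U << (32 - 0x02) == 0x40000000

which matches the hand-coded EVTYP_MSG_MASK that is removed above; the remaining masks work out the same way.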
#define GNRLMSGFLGS_DOM 0x8000
#define GNRLMSGFLGS_SNDALRM 0x4000
......
......@@ -575,67 +575,6 @@ __initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* PCI I/O adapter configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
#define SCLP_RECONFIG_PCI_ATPYE 2
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
u8 reserved1;
u16 reserved2;
u32 aid; /* adapter identifier */
} __packed;
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
struct pci_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_PCI_RECONFIG)
return -EOPNOTSUPP;
sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
sccb->aid = fid;
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
int sclp_pci_configure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);
int sclp_pci_deconfigure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
/*
* Channel path configuration related functions.
*/
......
......@@ -93,7 +93,7 @@ static struct sclp_req *cpi_prepare_req(void)
/* setup SCCB for Control-Program Identification */
sccb->header.length = sizeof(struct cpi_sccb);
sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
sccb->cpi_evbuf.header.type = 0x0b;
sccb->cpi_evbuf.header.type = EVTYP_CTLPROGIDENT;
evb = &sccb->cpi_evbuf;
/* set system type */
......
/*
* PCI I/O adapter configuration related functions.
*
* Copyright IBM Corp. 2016
*/
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <asm/sclp.h>
#include "sclp.h"
#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
#define SCLP_ATYPE_PCI 2
#define SCLP_ERRNOTIFY_AQ_REPAIR 1
#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
static DEFINE_MUTEX(sclp_pci_mutex);
static struct sclp_register sclp_pci_event = {
.send_mask = EVTYP_ERRNOTIFY_MASK,
};
struct err_notify_evbuf {
struct evbuf_header header;
u8 action;
u8 atype;
u32 fh;
u32 fid;
u8 data[0];
} __packed;
struct err_notify_sccb {
struct sccb_header header;
struct err_notify_evbuf evbuf;
} __packed;
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
u8 reserved1;
u16 reserved2;
u32 aid; /* adapter identifier */
} __packed;
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
struct pci_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_PCI_RECONFIG)
return -EOPNOTSUPP;
sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->atype = SCLP_ATYPE_PCI;
sccb->aid = fid;
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
int sclp_pci_configure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);
int sclp_pci_deconfigure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
static void sclp_pci_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
complete(completion);
}
static int sclp_pci_check_report(struct zpci_report_error_header *report)
{
if (report->version != 1)
return -EINVAL;
if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR &&
report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG)
return -EINVAL;
if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
return -EINVAL;
return 0;
}
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct err_notify_sccb *sccb;
struct sclp_req req;
int ret;
ret = sclp_pci_check_report(report);
if (ret)
return ret;
mutex_lock(&sclp_pci_mutex);
ret = sclp_register(&sclp_pci_event);
if (ret)
goto out_unlock;
if (!(sclp_pci_event.sclp_receive_mask & EVTYP_ERRNOTIFY_MASK)) {
ret = -EOPNOTSUPP;
goto out_unregister;
}
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb) {
ret = -ENOMEM;
goto out_unregister;
}
memset(&req, 0, sizeof(req));
req.callback_data = &completion;
req.callback = sclp_pci_callback;
req.command = SCLP_CMDW_WRITE_EVENT_DATA;
req.status = SCLP_REQ_FILLED;
req.sccb = sccb;
sccb->evbuf.header.length = sizeof(sccb->evbuf) + report->length;
sccb->evbuf.header.type = EVTYP_ERRNOTIFY;
sccb->header.length = sizeof(sccb->header) + sccb->evbuf.header.length;
sccb->evbuf.action = report->action;
sccb->evbuf.atype = SCLP_ATYPE_PCI;
sccb->evbuf.fh = fh;
sccb->evbuf.fid = fid;
memcpy(sccb->evbuf.data, report->data, report->length);
ret = sclp_add_request(&req);
if (ret)
goto out_free_req;
wait_for_completion(&completion);
if (req.status != SCLP_REQ_DONE) {
pr_warn("request failed (status=0x%02x)\n",
req.status);
ret = -EIO;
goto out_free_req;
}
if (sccb->header.response_code != 0x0020) {
pr_warn("request failed with response code 0x%x\n",
sccb->header.response_code);
ret = -EIO;
}
out_free_req:
free_page((unsigned long) sccb);
out_unregister:
sclp_unregister(&sclp_pci_event);
out_unlock:
mutex_unlock(&sclp_pci_mutex);
return ret;
}
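sclp_pci_report is the backend for the zPCI 'report_error' interface mentioned in the merge summary. A hedged sketch of the shape of a caller that forwards a user-supplied report for a PCI function (the function name and validation policy are invented; zpci_report_error_header and the fh/fid members follow this kernel's headers):

/* Hypothetical caller: forward an adapter-error report to the SE. */
static ssize_t report_error_sketch(struct zpci_dev *zdev,
				   const char *buf, size_t count)
{
	struct zpci_report_error_header *report =
		(struct zpci_report_error_header *) buf;
	int rc;

	if (count < sizeof(*report) ||
	    count != sizeof(*report) + report->length)
		return -EINVAL;	/* header plus payload must match count */
	rc = sclp_pci_report(report, zdev->fh, zdev->fid);
	return rc ? rc : count;
}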
......@@ -92,6 +92,7 @@ struct tty3270 {
unsigned char inattr; /* Visible/invisible input. */
int throttle, attn; /* tty throttle/unthrottle. */
struct tasklet_struct readlet; /* Tasklet to issue read request. */
struct tasklet_struct hanglet; /* Tasklet to hang up the tty. */
struct kbd_data *kbd; /* key_maps stuff. */
/* Escape sequence parsing. */
......@@ -318,6 +319,27 @@ tty3270_blank_line(struct tty3270 *tp)
tp->nr_up++;
}
/*
* Create a blank screen and remove all lines from the history.
*/
static void
tty3270_blank_screen(struct tty3270 *tp)
{
struct string *s, *n;
int i;
for (i = 0; i < tp->view.rows - 2; i++)
tp->screen[i].len = 0;
tp->nr_up = 0;
list_for_each_entry_safe(s, n, &tp->lines, list) {
list_del(&s->list);
if (!list_empty(&s->update))
list_del(&s->update);
tp->nr_lines--;
free_string(&tp->freemem, s);
}
}
/*
* Write request completion callback.
*/
......@@ -405,7 +427,10 @@ tty3270_update(struct tty3270 *tp)
if (raw3270_request_add_data(wrq, str, len) != 0)
break;
list_del_init(&s->update);
if (s->string[s->len - 4] == TO_RA)
sba = s->string + s->len - 3;
else
sba = invalid_sba;
}
if (list_empty(&tp->update))
updated |= TTY_UPDATE_LIST;
......@@ -621,6 +646,16 @@ tty3270_issue_read(struct tty3270 *tp, int lock)
}
}
/*
* Hang up the tty
*/
static void
tty3270_hangup_tasklet(struct tty3270 *tp)
{
tty_port_tty_hangup(&tp->port, true);
raw3270_put_view(&tp->view);
}
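The hangup tasklet runs after the interrupt handler has returned, so the two sides must pair their view references. A comment-only sketch of the contract, matching the code above and the irq path below:

/*
 * Reference contract (illustration): tty3270_irq takes an extra view
 * reference via raw3270_get_view() before tasklet_schedule(&tp->hanglet);
 * tty3270_hangup_tasklet() drops it with raw3270_put_view() once the
 * hangup is delivered, so the view cannot go away while the tasklet
 * is pending.
 */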
/*
* Switch to the tty view.
*/
......@@ -642,7 +677,7 @@ tty3270_deactivate(struct raw3270_view *view)
del_timer(&tp->timer);
}
static int
static void
tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
......@@ -654,17 +689,19 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
rq->rc = -EIO;
else
raw3270_get_view(&tp->view);
tasklet_schedule(&tp->hanglet);
} else {
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
/* Interrupt without an outstanding request -> update all */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
return RAW3270_IO_DONE;
}
/*
......@@ -716,6 +753,9 @@ tty3270_alloc_view(void)
tasklet_init(&tp->readlet,
(void (*)(unsigned long)) tty3270_read_tasklet,
(unsigned long) tp->read);
tasklet_init(&tp->hanglet,
(void (*)(unsigned long)) tty3270_hangup_tasklet,
(unsigned long) tp);
INIT_WORK(&tp->resize_work, tty3270_resize_work);
return tp;
......@@ -814,6 +854,7 @@ static void tty3270_resize_work(struct work_struct *work)
return;
/* Switch to new output size */
spin_lock_bh(&tp->view.lock);
tty3270_blank_screen(tp);
oscreen = tp->screen;
orows = tp->view.rows;
tp->view.model = tp->n_model;
......@@ -824,7 +865,6 @@ static void tty3270_resize_work(struct work_struct *work)
free_string(&tp->freemem, tp->status);
tty3270_create_prompt(tp);
tty3270_create_status(tp);
tp->nr_up = 0;
while (tp->nr_lines < tp->view.rows - 2)
tty3270_blank_line(tp);
tp->update_flags = TTY_UPDATE_ALL;
......@@ -838,6 +878,7 @@ static void tty3270_resize_work(struct work_struct *work)
ws.ws_row = tp->view.rows - 2;
ws.ws_col = tp->view.cols;
tty_do_resize(tty, &ws);
tty_kref_put(tty);
}
static void
......@@ -845,6 +886,8 @@ tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
if (tp->n_model == model && tp->n_rows == rows && tp->n_cols == cols)
return;
tp->n_model = model;
tp->n_rows = rows;
tp->n_cols = cols;
......@@ -923,10 +966,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
tp->port.low_latency = 0;
/* why to reassign? */
tty_port_tty_set(&tp->port, tty);
tp->inattr = TF_INPUT;
return tty_port_install(&tp->port, driver, tty);
goto port_install;
}
if (tty3270_max_index < tty->index + 1)
tty3270_max_index = tty->index + 1;
......@@ -952,7 +993,6 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return rc;
}
tty_port_tty_set(&tp->port, tty);
tp->port.low_latency = 0;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
......@@ -974,6 +1014,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
raw3270_activate_view(&tp->view);
port_install:
rc = tty_port_install(&tp->port, driver, tty);
if (rc) {
raw3270_put_view(&tp->view);
......@@ -1010,18 +1051,18 @@ tty3270_close(struct tty_struct *tty, struct file * filp)
if (tty->count > 1)
return;
if (tp) {
tty->driver_data = NULL;
if (tp)
tty_port_tty_set(&tp->port, NULL);
}
}
static void tty3270_cleanup(struct tty_struct *tty)
{
struct tty3270 *tp = tty->driver_data;
if (tp)
if (tp) {
tty->driver_data = NULL;
raw3270_put_view(&tp->view);
}
}
/*
......@@ -1788,7 +1829,22 @@ tty3270_unthrottle(struct tty_struct * tty)
static void
tty3270_hangup(struct tty_struct *tty)
{
// FIXME: implement
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
spin_lock_bh(&tp->view.lock);
tp->cx = tp->saved_cx = 0;
tp->cy = tp->saved_cy = 0;
tp->highlight = tp->saved_highlight = TAX_RESET;
tp->f_color = tp->saved_f_color = TAC_RESET;
tty3270_blank_screen(tp);
while (tp->nr_lines < tp->view.rows - 2)
tty3270_blank_line(tp);
tp->update_flags = TTY_UPDATE_ALL;
spin_unlock_bh(&tp->view.lock);
tty3270_set_timer(tp, 1);
}
static void
......
......@@ -787,7 +787,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
/*
* AP state machine jump table
*/
ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_STATE_RESET_START] = {
[AP_EVENT_POLL] = ap_sm_reset,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
......