Commit f61a657f authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "The s390 patches for the 4.7 merge window have the usual bug fixes and
  cleanups, and the following new features:

   - An interface for the dasd driver to query if a volume is online to
     another operating system

   - A new ioctl for the dasd driver to verify the format for a range of
     tracks

   - Following the example of x86, the struct fpu is now allocated with
     the task_struct

   - The 'report_error' interface for the PCI bus to send an
     adapter-error notification from user space to the service element
     of the machine"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (29 commits)
  s390/vmem: remove unused function parameter
  s390/vmem: fix identity mapping
  s390: add missing include statements
  s390: add missing declarations
  s390: make couple of variables and functions static
  s390/cache: remove superfluous locking
  s390/cpuinfo: simplify locking and skip offline cpus early
  s390/3270: hangup the 3270 tty after a disconnect
  s390/3270: handle reconnect of a tty with a different size
  s390/3270: avoid endless I/O loop with disconnected 3270 terminals
  s390/3270: fix garbled output on 3270 tty view
  s390/3270: fix view reference counting
  s390/3270: add missing tty_kref_put
  s390/dumpstack: implement and use return_address()
  s390/cpum_sf: Remove superfluous SMP function call
  s390/cpum_cf: Remove superfluous SMP function call
  s390/Kconfig: make z196 the default processor type
  s390/sclp: avoid compile warning in sclp_pci_report
  s390/fpu: allocate 'struct fpu' with the task_struct
  s390/crypto: cleanup and move the header with the cpacf definitions
  ...
parents 0e034f5c c53db522
......@@ -2558,6 +2558,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nohugeiomap [KNL,x86] Disable kernel huge I/O mappings.
nosmt [KNL,S390] Disable symmetric multithreading (SMT).
Equivalent to smt=1.
noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fallback to
enabling legacy floating-point and sse state.
......@@ -3753,6 +3756,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1: Fast pin select (default)
2: ATC IRMode
smt [KNL,S390] Set the maximum number of threads (logical
CPUs) to use per physical CPU on systems capable of
symmetric multithreading (SMT). Will be capped to the
actual hardware limit.
Format: <integer>
Default: -1 (no limit)
softlockup_panic=
[KNL] Should the soft-lockup detector generate panics.
Format: <integer>
......
......@@ -107,6 +107,7 @@ config S390
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANTS_PROT_NUMA_PROT_NONE
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
......@@ -210,7 +211,7 @@ config HAVE_MARCH_Z13_FEATURES
choice
prompt "Processor type"
default MARCH_Z900
default MARCH_Z196
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
......
......@@ -28,7 +28,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
#define AES_KEYLEN_128 1
#define AES_KEYLEN_192 2
......@@ -145,15 +145,15 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
switch (sctx->key_len) {
case 16:
crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 24:
crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 32:
crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
}
......@@ -170,15 +170,15 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
switch (sctx->key_len) {
case 16:
crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 24:
crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 32:
crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
}
......@@ -212,7 +212,7 @@ static void fallback_exit_cip(struct crypto_tfm *tfm)
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
......@@ -298,16 +298,16 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 16:
sctx->enc = KM_AES_128_ENCRYPT;
sctx->dec = KM_AES_128_DECRYPT;
sctx->enc = CPACF_KM_AES_128_ENC;
sctx->dec = CPACF_KM_AES_128_DEC;
break;
case 24:
sctx->enc = KM_AES_192_ENCRYPT;
sctx->dec = KM_AES_192_DECRYPT;
sctx->enc = CPACF_KM_AES_192_ENC;
sctx->dec = CPACF_KM_AES_192_DEC;
break;
case 32:
sctx->enc = KM_AES_256_ENCRYPT;
sctx->dec = KM_AES_256_DECRYPT;
sctx->enc = CPACF_KM_AES_256_ENC;
sctx->dec = CPACF_KM_AES_256_DEC;
break;
}
......@@ -326,7 +326,7 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
ret = cpacf_km(func, param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
......@@ -393,7 +393,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
static struct crypto_alg ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
......@@ -427,16 +427,16 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 16:
sctx->enc = KMC_AES_128_ENCRYPT;
sctx->dec = KMC_AES_128_DECRYPT;
sctx->enc = CPACF_KMC_AES_128_ENC;
sctx->dec = CPACF_KMC_AES_128_DEC;
break;
case 24:
sctx->enc = KMC_AES_192_ENCRYPT;
sctx->dec = KMC_AES_192_DECRYPT;
sctx->enc = CPACF_KMC_AES_192_ENC;
sctx->dec = CPACF_KMC_AES_192_DEC;
break;
case 32:
sctx->enc = KMC_AES_256_ENCRYPT;
sctx->dec = KMC_AES_256_DECRYPT;
sctx->enc = CPACF_KMC_AES_256_ENC;
sctx->dec = CPACF_KMC_AES_256_DEC;
break;
}
......@@ -465,7 +465,7 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, &param, out, in, n);
ret = cpacf_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
......@@ -509,7 +509,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
......@@ -596,8 +596,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 32:
xts_ctx->enc = KM_XTS_128_ENCRYPT;
xts_ctx->dec = KM_XTS_128_DECRYPT;
xts_ctx->enc = CPACF_KM_XTS_128_ENC;
xts_ctx->dec = CPACF_KM_XTS_128_DEC;
memcpy(xts_ctx->key + 16, in_key, 16);
memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
break;
......@@ -607,8 +607,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_fallback_setkey(tfm, in_key, key_len);
break;
case 64:
xts_ctx->enc = KM_XTS_256_ENCRYPT;
xts_ctx->dec = KM_XTS_256_DECRYPT;
xts_ctx->enc = CPACF_KM_XTS_256_ENC;
xts_ctx->dec = CPACF_KM_XTS_256_DEC;
memcpy(xts_ctx->key, in_key, 32);
memcpy(xts_ctx->pcc_key, in_key + 32, 32);
break;
......@@ -643,7 +643,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
/* remove decipher modifier bit from 'func' and call PCC */
ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
if (ret < 0)
return -EIO;
......@@ -655,7 +656,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
if (ret < 0 || ret != n)
return -EIO;
......@@ -721,7 +722,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
static struct crypto_alg xts_aes_alg = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + xts */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
......@@ -751,16 +752,16 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 16:
sctx->enc = KMCTR_AES_128_ENCRYPT;
sctx->dec = KMCTR_AES_128_DECRYPT;
sctx->enc = CPACF_KMCTR_AES_128_ENC;
sctx->dec = CPACF_KMCTR_AES_128_DEC;
break;
case 24:
sctx->enc = KMCTR_AES_192_ENCRYPT;
sctx->dec = KMCTR_AES_192_DECRYPT;
sctx->enc = CPACF_KMCTR_AES_192_ENC;
sctx->dec = CPACF_KMCTR_AES_192_DEC;
break;
case 32:
sctx->enc = KMCTR_AES_256_ENCRYPT;
sctx->dec = KMCTR_AES_256_DECRYPT;
sctx->enc = CPACF_KMCTR_AES_256_ENC;
sctx->dec = CPACF_KMCTR_AES_256_DEC;
break;
}
......@@ -804,8 +805,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
n = __ctrblk_init(ctrptr, nbytes);
else
n = AES_BLOCK_SIZE;
ret = crypt_s390_kmctr(func, sctx->key, out, in,
n, ctrptr);
ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
if (ret < 0 || ret != n) {
if (ctrptr == ctrblk)
spin_unlock(&ctrblk_lock);
......@@ -837,7 +837,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
ret = cpacf_kmctr(func, sctx->key, buf, in,
AES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != AES_BLOCK_SIZE)
return -EIO;
......@@ -875,7 +875,7 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ctr_aes_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
......@@ -899,11 +899,11 @@ static int __init aes_s390_init(void)
{
int ret;
if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
keylen_flag |= AES_KEYLEN_128;
if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
keylen_flag |= AES_KEYLEN_192;
if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
keylen_flag |= AES_KEYLEN_256;
if (!keylen_flag)
......@@ -926,22 +926,17 @@ static int __init aes_s390_init(void)
if (ret)
goto cbc_aes_err;
if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KM_XTS_256_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
ret = crypto_register_alg(&xts_aes_alg);
if (ret)
goto xts_aes_err;
xts_aes_alg_reg = 1;
}
if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
......
......@@ -20,8 +20,7 @@
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
......@@ -54,20 +53,20 @@ static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_ENCRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_DEA_ENC, ctx->key, out, in, DES_BLOCK_SIZE);
}
static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_DEA_DEC, ctx->key, out, in, DES_BLOCK_SIZE);
}
static struct crypto_alg des_alg = {
.cra_name = "des",
.cra_driver_name = "des-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
......@@ -95,7 +94,7 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, key, out, in, n);
ret = cpacf_km(func, key, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
......@@ -128,7 +127,7 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, &param, out, in, n);
ret = cpacf_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
......@@ -149,7 +148,7 @@ static int ecb_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_DEA_ENC, ctx->key, &walk);
}
static int ecb_des_decrypt(struct blkcipher_desc *desc,
......@@ -160,13 +159,13 @@ static int ecb_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_DEA_DEC, ctx->key, &walk);
}
static struct crypto_alg ecb_des_alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
......@@ -190,7 +189,7 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_DEA_ENC, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
......@@ -200,13 +199,13 @@ static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_DEA_DEC, &walk);
}
static struct crypto_alg cbc_des_alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
......@@ -258,20 +257,20 @@ static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_ENCRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_TDEA_192_ENC, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_TDEA_192_DEC, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static struct crypto_alg des3_alg = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
......@@ -295,7 +294,7 @@ static int ecb_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_ENC, ctx->key, &walk);
}
static int ecb_des3_decrypt(struct blkcipher_desc *desc,
......@@ -306,13 +305,13 @@ static int ecb_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_DEC, ctx->key, &walk);
}
static struct crypto_alg ecb_des3_alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des3 + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
......@@ -336,7 +335,7 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_ENC, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
......@@ -346,13 +345,13 @@ static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_DEC, &walk);
}
static struct crypto_alg cbc_des3_alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des3 + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
......@@ -407,8 +406,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
n = __ctrblk_init(ctrptr, nbytes);
else
n = DES_BLOCK_SIZE;
ret = crypt_s390_kmctr(func, ctx->key, out, in,
n, ctrptr);
ret = cpacf_kmctr(func, ctx->key, out, in, n, ctrptr);
if (ret < 0 || ret != n) {
if (ctrptr == ctrblk)
spin_unlock(&ctrblk_lock);
......@@ -438,7 +436,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
ret = cpacf_kmctr(func, ctx->key, buf, in,
DES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != DES_BLOCK_SIZE)
return -EIO;
......@@ -458,7 +456,7 @@ static int ctr_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_ENC, ctx, &walk);
}
static int ctr_des_decrypt(struct blkcipher_desc *desc,
......@@ -469,13 +467,13 @@ static int ctr_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_DEC, ctx, &walk);
}
static struct crypto_alg ctr_des_alg = {
.cra_name = "ctr(des)",
.cra_driver_name = "ctr-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_des_ctx),
......@@ -501,7 +499,7 @@ static int ctr_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_ENC, ctx, &walk);
}
static int ctr_des3_decrypt(struct blkcipher_desc *desc,
......@@ -512,13 +510,13 @@ static int ctr_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_DEC, ctx, &walk);
}
static struct crypto_alg ctr_des3_alg = {
.cra_name = "ctr(des3_ede)",
.cra_driver_name = "ctr-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des3 + ede */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_des_ctx),
......@@ -540,8 +538,8 @@ static int __init des_s390_init(void)
{
int ret;
if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) ||
!crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KM, CPACF_KM_DEA_ENC) ||
!cpacf_query(CPACF_KM, CPACF_KM_TDEA_192_ENC))
return -EOPNOTSUPP;
ret = crypto_register_alg(&des_alg);
......@@ -563,10 +561,8 @@ static int __init des_s390_init(void)
if (ret)
goto cbc_des3_err;
if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_DEA_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_TDEA_192_ENC)) {
ret = crypto_register_alg(&ctr_des_alg);
if (ret)
goto ctr_des_err;
......
......@@ -10,8 +10,7 @@
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
......@@ -72,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
src += n;
if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
......@@ -81,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
if (ret != n)
return -EIO;
src += n;
......@@ -106,7 +105,7 @@ static int ghash_flush(struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
......@@ -137,7 +136,7 @@ static struct shash_alg ghash_alg = {
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
......@@ -147,8 +146,7 @@ static struct shash_alg ghash_alg = {
static int __init ghash_mod_init(void)
{
if (!crypt_s390_func_available(KIMD_GHASH,
CRYPT_S390_MSA | CRYPT_S390_MSA4))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
return -EOPNOTSUPP;
return crypto_register_shash(&ghash_alg);
......
......@@ -23,8 +23,7 @@
#include <asm/debug.h>
#include <asm/uaccess.h>
#include <asm/timex.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
......@@ -136,7 +135,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
else
h = ebuf;
/* generate sha256 from this page */
if (crypt_s390_kimd(KIMD_SHA_256, h,
if (cpacf_kimd(CPACF_KIMD_SHA_256, h,
pg, PAGE_SIZE) != PAGE_SIZE) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
ret = -EIO;
......@@ -164,7 +163,7 @@ static void prng_tdes_add_entropy(void)
int ret;
for (i = 0; i < 16; i++) {
ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
ret = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
(char *)entropy, (char *)entropy,
sizeof(entropy));
BUG_ON(ret < 0 || ret != sizeof(entropy));
......@@ -311,8 +310,7 @@ static int __init prng_sha512_selftest(void)
memset(&ws, 0, sizeof(ws));
/* initial seed */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
&ws, NULL, 0,
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED, &ws, NULL, 0,
seed, sizeof(seed));
if (ret < 0) {
pr_err("The prng self test seed operation for the "
......@@ -331,18 +329,16 @@ static int __init prng_sha512_selftest(void)
}
/* generate random bytes */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf),
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf), NULL, 0);
if (ret < 0) {
pr_err("The prng self test generate operation for "
"the SHA-512 mode failed with rc=%d\n", ret);
prng_errorflag = PRNG_SELFTEST_FAILED;
return -EIO;
}
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf),
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf), NULL, 0);
if (ret < 0) {
pr_err("The prng self test generate operation for "
"the SHA-512 mode failed with rc=%d\n", ret);
......@@ -396,9 +392,8 @@ static int __init prng_sha512_instantiate(void)
get_tod_clock_ext(seed + 48);
/* initial seed of the ppno drng */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0,
seed, sizeof(seed));
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0, seed, sizeof(seed));
if (ret < 0) {
prng_errorflag = PRNG_SEED_FAILED;
ret = -EIO;
......@@ -409,11 +404,9 @@ static int __init prng_sha512_instantiate(void)
bytes for the FIPS 140-2 Conditional Self Test */
if (fips_enabled) {
prng_data->prev = prng_data->buf + prng_chunk_size;
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows,
prng_data->prev,
prng_chunk_size,
NULL, 0);
prng_data->prev, prng_chunk_size, NULL, 0);
if (ret < 0 || ret != prng_chunk_size) {
prng_errorflag = PRNG_GEN_FAILED;
ret = -EIO;
......@@ -447,9 +440,8 @@ static int prng_sha512_reseed(void)
return ret;
/* do a reseed of the ppno drng with this bytestring */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0,
seed, sizeof(seed));
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0, seed, sizeof(seed));
if (ret) {
prng_errorflag = PRNG_RESEED_FAILED;
return -EIO;
......@@ -471,9 +463,8 @@ static int prng_sha512_generate(u8 *buf, size_t nbytes)
}
/* PPNO generate */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows, buf, nbytes,
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows, buf, nbytes, NULL, 0);
if (ret < 0 || ret != nbytes) {
prng_errorflag = PRNG_GEN_FAILED;
return -EIO;
......@@ -555,7 +546,7 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
* Note: you can still get strict X9.17 conformity by setting
* prng_chunk_size to 8 bytes.
*/
tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
tmp = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
prng_data->buf, prng_data->buf, n);
if (tmp < 0 || tmp != n) {
ret = -EIO;
......@@ -815,14 +806,13 @@ static int __init prng_init(void)
int ret;
/* check if the CPU has a PRNG */
if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KMC, CPACF_KMC_PRNG))
return -EOPNOTSUPP;
/* choose prng mode */
if (prng_mode != PRNG_MODE_TDES) {
/* check for MSA5 support for PPNO operations */
if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN,
CRYPT_S390_MSA5)) {
if (!cpacf_query(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
if (prng_mode == PRNG_MODE_SHA512) {
pr_err("The prng module cannot "
"start in SHA-512 mode\n");
......
......@@ -28,8 +28,8 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <asm/cpacf.h>
#include "crypt_s390.h"
#include "sha.h"
static int sha1_init(struct shash_desc *desc)
......@@ -42,7 +42,7 @@ static int sha1_init(struct shash_desc *desc)
sctx->state[3] = SHA1_H3;
sctx->state[4] = SHA1_H4;
sctx->count = 0;
sctx->func = KIMD_SHA_1;
sctx->func = CPACF_KIMD_SHA_1;
return 0;
}
......@@ -66,7 +66,7 @@ static int sha1_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
sctx->func = KIMD_SHA_1;
sctx->func = CPACF_KIMD_SHA_1;
return 0;
}
......@@ -82,7 +82,7 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
......@@ -91,7 +91,7 @@ static struct shash_alg alg = {
static int __init sha1_s390_init(void)
{
if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_1))
return -EOPNOTSUPP;
return crypto_register_shash(&alg);
}
......
......@@ -18,8 +18,8 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <asm/cpacf.h>
#include "crypt_s390.h"
#include "sha.h"
static int sha256_init(struct shash_desc *desc)
......@@ -35,7 +35,7 @@ static int sha256_init(struct shash_desc *desc)
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
sctx->count = 0;
sctx->func = KIMD_SHA_256;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
......@@ -59,7 +59,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = KIMD_SHA_256;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
......@@ -75,7 +75,7 @@ static struct shash_alg sha256_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
......@@ -95,7 +95,7 @@ static int sha224_init(struct shash_desc *desc)
sctx->state[6] = SHA224_H6;
sctx->state[7] = SHA224_H7;
sctx->count = 0;
sctx->func = KIMD_SHA_256;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
......@@ -112,7 +112,7 @@ static struct shash_alg sha224_alg = {
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
......@@ -123,7 +123,7 @@ static int __init sha256_s390_init(void)
{
int ret;
if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
return -EOPNOTSUPP;
ret = crypto_register_shash(&sha256_alg);
if (ret < 0)
......
......@@ -19,9 +19,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <asm/cpacf.h>
#include "sha.h"
#include "crypt_s390.h"
static int sha512_init(struct shash_desc *desc)
{
......@@ -36,7 +36,7 @@ static int sha512_init(struct shash_desc *desc)
*(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL;
*(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL;
ctx->count = 0;
ctx->func = KIMD_SHA_512;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
}
......@@ -64,7 +64,7 @@ static int sha512_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = KIMD_SHA_512;
sctx->func = CPACF_KIMD_SHA_512;
return 0;
}
......@@ -80,7 +80,7 @@ static struct shash_alg sha512_alg = {
.base = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
......@@ -102,7 +102,7 @@ static int sha384_init(struct shash_desc *desc)
*(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL;
*(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL;
ctx->count = 0;
ctx->func = KIMD_SHA_512;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
}
......@@ -119,7 +119,7 @@ static struct shash_alg sha384_alg = {
.base = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
......@@ -133,7 +133,7 @@ static int __init init(void)
{
int ret;
if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_512))
return -EOPNOTSUPP;
if ((ret = crypto_register_shash(&sha512_alg)) < 0)
goto out;
......
......@@ -15,8 +15,8 @@
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <asm/cpacf.h>
#include "sha.h"
#include "crypt_s390.h"
int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
......@@ -35,7 +35,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
if (ret != bsize)
return -EIO;
data += bsize - index;
......@@ -45,7 +45,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process as many blocks as possible */
if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data,
ret = cpacf_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
if (ret != (len & ~(bsize - 1)))
return -EIO;
......@@ -89,7 +89,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
bits = ctx->count * 8;
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
if (ret != end)
return -EIO;
......
......@@ -12,10 +12,12 @@
struct fpu {
__u32 fpc; /* Floating-point control */
void *regs; /* Pointer to the current save area */
union {
void *regs;
freg_t *fprs; /* Floating-point register save area */
__vector128 *vxrs; /* Vector register save area */
/* Floating-point register save area */
freg_t fprs[__NUM_FPRS];
/* Vector register save area */
__vector128 vxrs[__NUM_VXRS];
};
};
......
......@@ -12,7 +12,9 @@
#ifndef __ASSEMBLY__
#define ftrace_return_address(n) __builtin_return_address(n)
unsigned long return_address(int depth);
#define ftrace_return_address(n) return_address(n)
void _mcount(void);
void ftrace_caller(void);
......
......@@ -31,20 +31,41 @@ int pci_proc_domain(struct pci_bus *);
#define ZPCI_FC_BLOCKED 0x20
#define ZPCI_FC_DMA_ENABLED 0x10
#define ZPCI_FMB_DMA_COUNTER_VALID (1 << 23)
struct zpci_fmb_fmt0 {
u64 dma_rbytes;
u64 dma_wbytes;
};
struct zpci_fmb_fmt1 {
u64 rx_bytes;
u64 rx_packets;
u64 tx_bytes;
u64 tx_packets;
};
struct zpci_fmb_fmt2 {
u64 consumed_work_units;
u64 max_work_units;
};
struct zpci_fmb {
u32 format : 8;
u32 dma_valid : 1;
u32 : 23;
u32 fmt_ind : 24;
u32 samples;
u64 last_update;
/* hardware counters */
/* common counters */
u64 ld_ops;
u64 st_ops;
u64 stb_ops;
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
u64 pad[2];
/* format specific counters */
union {
struct zpci_fmb_fmt0 fmt0;
struct zpci_fmb_fmt1 fmt1;
struct zpci_fmb_fmt2 fmt2;
};
} __packed __aligned(128);
enum zpci_state {
......
......@@ -105,7 +105,6 @@ typedef struct {
* Thread structure
*/
struct thread_struct {
struct fpu fpu; /* FP and VX register save area */
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
mm_segment_t mm_segment;
......@@ -120,6 +119,11 @@ struct thread_struct {
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
/*
* Warning: 'fpu' is dynamically-sized. It *MUST* be at
* the end.
*/
struct fpu fpu; /* FP and VX register save area */
};
/* Flag to disable transactions. */
......@@ -155,10 +159,9 @@ struct stack_frame {
#define ARCH_MIN_TASKALIGN 8
extern __vector128 init_task_fpu_regs[__NUM_VXRS];
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
.fpu.regs = (void *)&init_task_fpu_regs, \
.fpu.regs = (void *) init_task.thread.fpu.fprs, \
}
/*
......
......@@ -72,6 +72,18 @@ struct sclp_info {
};
extern struct sclp_info sclp;
struct zpci_report_error_header {
u8 version; /* Interface version byte */
u8 action; /* Action qualifier byte
* 1: Deconfigure and repair action requested
* (OpenCrypto Problem Call Home)
* 2: Informational Report
* (OpenCrypto Successful Diagnostics Execution)
*/
u16 length; /* Length of Subsequent Data (up to 4K – SCLP header */
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;
int sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);
......@@ -83,6 +95,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
void sclp_early_detect(void);
......
......@@ -62,6 +62,7 @@ static inline struct thread_info *current_thread_info(void)
}
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define THREAD_SIZE_ORDER THREAD_ORDER
......
......@@ -187,6 +187,36 @@ typedef struct format_data_t {
#define DASD_FMT_INT_INVAL 4 /* invalidate tracks */
#define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */
/*
* struct format_check_t
* represents all data necessary to evaluate the format of
* different tracks of a dasd
*/
typedef struct format_check_t {
/* Input */
struct format_data_t expect;
/* Output */
unsigned int result; /* Error indication (DASD_FMT_ERR_*) */
unsigned int unit; /* Track that is in error */
unsigned int rec; /* Record that is in error */
unsigned int num_records; /* Records in the track in error */
unsigned int blksize; /* Blocksize of first record in error */
unsigned int key_length; /* Key length of first record in error */
} format_check_t;
/* Values returned in format_check_t when a format error is detected: */
/* Too few records were found on a single track */
#define DASD_FMT_ERR_TOO_FEW_RECORDS 1
/* Too many records were found on a single track */
#define DASD_FMT_ERR_TOO_MANY_RECORDS 2
/* Blocksize/data-length of a record was wrong */
#define DASD_FMT_ERR_BLKSIZE 3
/* A record ID is defined by cylinder, head, and record number (CHR). */
/* On mismatch, this error is set */
#define DASD_FMT_ERR_RECORD_ID 4
/* If key-length was != 0 */
#define DASD_FMT_ERR_KEY_LENGTH 5
/*
* struct attrib_data_t
......@@ -288,6 +318,8 @@ struct dasd_snid_ioctl_data {
/* Get Sense Path Group ID (SNID) data */
#define BIODASDSNID _IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data)
/* Check device format according to format_check_t */
#define BIODASDCHECKFMT _IOWR(DASD_IOCTL_LETTER, 2, format_check_t)
#define BIODASDSYMMIO _IOWR(DASD_IOCTL_LETTER, 240, dasd_symmio_parms_t)
......
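The new BIODASDCHECKFMT ioctl takes a format_check_t whose 'expect' member describes the format the caller expects, and fills in the result fields when a mismatch is found. A minimal user-space sketch of how such a check might be issued follows; the device node, track range and expected block size are illustrative assumptions, not part of this merge.

/*
 * Hedged sketch, not part of this merge: verify the format of the
 * first ten tracks of a DASD with the new BIODASDCHECKFMT ioctl.
 * The device node, track range and block size are assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/dasd.h>

int main(void)
{
	format_check_t check;
	int fd = open("/dev/dasda", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&check, 0, sizeof(check));
	check.expect.start_unit = 0;	/* first track to check */
	check.expect.stop_unit = 9;	/* last track to check */
	check.expect.blksize = 4096;	/* expected block size */
	check.expect.intensity = 0;	/* expected layout flags (assumption) */

	if (ioctl(fd, BIODASDCHECKFMT, &check) < 0) {
		perror("BIODASDCHECKFMT");
	} else if (check.result) {
		printf("format error %u on track %u, record %u\n",
		       check.result, check.unit, check.rec);
	} else {
		printf("tracks 0-9 are formatted as expected\n");
	}
	close(fd);
	return 0;
}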
......@@ -72,7 +72,6 @@ void show_cacheinfo(struct seq_file *m)
if (!test_facility(34))
return;
get_online_cpus();
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
cache = this_cpu_ci->info_list + idx;
......@@ -86,7 +85,6 @@ void show_cacheinfo(struct seq_file *m)
seq_printf(m, "associativity=%d", cache->ways_of_associativity);
seq_puts(m, "\n");
}
put_online_cpus();
}
static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
......
......@@ -173,7 +173,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
/*
* Copy memory of the old, dumped system to a user space virtual address
*/
int copy_oldmem_user(void __user *dst, void *src, size_t count)
static int copy_oldmem_user(void __user *dst, void *src, size_t count)
{
unsigned long from, len;
int rc;
......
......@@ -89,6 +89,30 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
}
EXPORT_SYMBOL_GPL(dump_trace);
struct return_address_data {
unsigned long address;
int depth;
};
static int __return_address(void *data, unsigned long address)
{
struct return_address_data *rd = data;
if (rd->depth--)
return 0;
rd->address = address;
return 1;
}
unsigned long return_address(int depth)
{
struct return_address_data rd = { .depth = depth + 2 };
dump_trace(__return_address, &rd, NULL, current_stack_pointer());
return rd.address;
}
EXPORT_SYMBOL_GPL(return_address);
static int show_address(void *data, unsigned long address)
{
printk("([<%016lx>] %pSR)\n", address, (void *)address);
......
#ifndef _ENTRY_H
#define _ENTRY_H
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/signal.h>
#include <asm/ptrace.h>
......@@ -75,4 +76,7 @@ long sys_s390_personality(unsigned int personality);
long sys_s390_runtime_instr(int command, int signum);
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
DECLARE_PER_CPU(u64, mt_cycles[8]);
#endif /* _ENTRY_H */
......@@ -665,18 +665,21 @@ static struct pmu cpumf_pmu = {
static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
void *hcpu)
{
unsigned int cpu = (long) hcpu;
int flags;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
flags = PMC_INIT;
smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
case CPU_DOWN_PREPARE:
flags = PMC_RELEASE;
smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
default:
break;
......
......@@ -1510,7 +1510,6 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
static int cpumf_pmu_notifier(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (long) hcpu;
int flags;
/* Ignore the notification if no events are scheduled on the PMU.
......@@ -1523,11 +1522,15 @@ static int cpumf_pmu_notifier(struct notifier_block *self,
case CPU_ONLINE:
case CPU_DOWN_FAILED:
flags = PMC_INIT;
smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
case CPU_DOWN_PREPARE:
flags = PMC_RELEASE;
smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
default:
break;
......
......@@ -7,6 +7,7 @@
* Denis Joseph Barrow,
*/
#include <linux/elf-randomize.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
......@@ -37,9 +38,6 @@
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
/* FPU save area for the init task */
__vector128 init_task_fpu_regs[__NUM_VXRS] __init_task_data;
/*
* Return saved PC of a blocked thread. used in kernel/sched.
* resume in entry.S does not create a new stack frame, it
......@@ -85,35 +83,19 @@ void release_thread(struct task_struct *dead_task)
void arch_release_task_struct(struct task_struct *tsk)
{
/* Free either the floating-point or the vector register save area */
kfree(tsk->thread.fpu.regs);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
size_t fpu_regs_size;
*dst = *src;
/*
* If the vector extension is available, it is enabled for all tasks,
* and, thus, the FPU register save area must be allocated accordingly.
*/
fpu_regs_size = MACHINE_HAS_VX ? sizeof(__vector128) * __NUM_VXRS
: sizeof(freg_t) * __NUM_FPRS;
dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT);
if (!dst->thread.fpu.regs)
return -ENOMEM;
/*
* Save the floating-point or vector register state of the current
* task and set the CIF_FPU flag to lazy restore the FPU register
* state when returning to user space.
*/
save_fpu_regs();
dst->thread.fpu.fpc = current->thread.fpu.fpc;
memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size);
memcpy(dst, src, arch_task_struct_size);
dst->thread.fpu.regs = dst->thread.fpu.fprs;
return 0;
}
......
......@@ -6,6 +6,7 @@
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/seq_file.h>
......@@ -84,7 +85,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_puts(m, "\n");
show_cacheinfo(m);
}
get_online_cpus();
if (cpu_online(n)) {
struct cpuid *id = &per_cpu(cpu_id, n);
seq_printf(m, "processor %li: "
......@@ -93,23 +93,31 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"machine = %04X\n",
n, id->version, id->ident, id->machine);
}
put_online_cpus();
return 0;
}
static inline void *c_update(loff_t *pos)
{
if (*pos)
*pos = cpumask_next(*pos - 1, cpu_online_mask);
return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
get_online_cpus();
return c_update(pos);
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
return c_update(pos);
}
static void c_stop(struct seq_file *m, void *v)
{
put_online_cpus();
}
const struct seq_operations cpuinfo_op = {
......
......@@ -808,6 +808,22 @@ static void __init setup_randomness(void)
free_page((unsigned long) vmms);
}
/*
* Find the correct size for the task_struct. This depends on
* the size of the struct fpu at the end of the thread_struct
* which is embedded in the task_struct.
*/
static void __init setup_task_size(void)
{
int task_size = sizeof(struct task_struct);
if (!MACHINE_HAS_VX) {
task_size -= sizeof(__vector128) * __NUM_VXRS;
task_size += sizeof(freg_t) * __NUM_FPRS;
}
arch_task_struct_size = task_size;
}
/*
* Setup function called from init/main.c just after the banner
* was printed.
......@@ -846,6 +862,7 @@ void __init setup_arch(char **cmdline_p)
os_info_init();
setup_ipl();
setup_task_size();
/* Do some memory reservations *before* memory is added to memblock */
reserve_memory_end();
......
......@@ -18,6 +18,8 @@
#include <asm/cpu_mf.h>
#include <asm/smp.h>
#include "entry.h"
static void virt_timer_expire(void);
static LIST_HEAD(virt_timer_list);
......
......@@ -631,6 +631,29 @@ void pfault_fini(void)
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);
#define PF_COMPLETE 0x0080
/*
* The mechanism of our pfault code: if Linux is running as guest, runs a user
* space process and the user space process accesses a page that the host has
* paged out we get a pfault interrupt.
*
* This allows us, within the guest, to schedule a different process. Without
* this mechanism the host would have to suspend the whole virtual cpu until
* the page has been paged in.
*
* So when we get such an interrupt then we set the state of the current task
* to uninterruptible and also set the need_resched flag. Both happens within
* interrupt context(!). If we later on want to return to user space we
* recognize the need_resched flag and then call schedule(). It's not very
* obvious how this works...
*
* Of course we have a lot of additional fun with the completion interrupt (->
* host signals that a page of a process has been paged in and the process can
* continue to run). This interrupt can arrive on any cpu and, since we have
* virtual cpus, actually appear before the interrupt that signals that a page
* is missing.
*/
static void pfault_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
......@@ -639,10 +662,9 @@ static void pfault_interrupt(struct ext_code ext_code,
pid_t pid;
/*
* Get the external interruption subcode & pfault
* initial/completion signal bit. VM stores this
* in the 'cpu address' field associated with the
* external interrupt.
* Get the external interruption subcode & pfault initial/completion
* signal bit. VM stores this in the 'cpu address' field associated
* with the external interrupt.
*/
subcode = ext_code.subcode;
if ((subcode & 0xff00) != __SUBCODE_MASK)
......@@ -658,7 +680,7 @@ static void pfault_interrupt(struct ext_code ext_code,
if (!tsk)
return;
spin_lock(&pfault_lock);
if (subcode & 0x0080) {
if (subcode & PF_COMPLETE) {
/* signal bit is set -> a page has been swapped in by VM */
if (tsk->thread.pfault_wait == 1) {
/* Initial interrupt was faster than the completion
......@@ -687,8 +709,7 @@ static void pfault_interrupt(struct ext_code ext_code,
goto out;
if (tsk->thread.pfault_wait == 1) {
/* Already on the list with a reference: put to sleep */
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
goto block;
} else if (tsk->thread.pfault_wait == -1) {
/* Completion interrupt was faster than the initial
* interrupt (pfault_wait == -1). Set pfault_wait
......@@ -703,7 +724,11 @@ static void pfault_interrupt(struct ext_code ext_code,
get_task_struct(tsk);
tsk->thread.pfault_wait = 1;
list_add(&tsk->thread.list, &pfault_list);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
block:
/* Since this must be a userspace fault, there
* is no kernel task state to trample. Rely on the
* return to userspace schedule() to block. */
__set_current_state(TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
}
}
......
......@@ -22,6 +22,7 @@
* Started by Ingo Molnar <mingo@elte.hu>
*/
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
......
......@@ -56,7 +56,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
return pmd;
}
static pte_t __ref *vmem_pte_alloc(unsigned long address)
static pte_t __ref *vmem_pte_alloc(void)
{
pte_t *pte;
......@@ -121,7 +121,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
continue;
}
if (pmd_none(*pm_dir)) {
pt_dir = vmem_pte_alloc(address);
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
......@@ -233,7 +233,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pt_dir = vmem_pte_alloc(address);
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
......@@ -370,7 +370,7 @@ void __init vmem_map_init(void)
ro_end = (unsigned long)&_eshared & PAGE_MASK;
for_each_memblock(memory, reg) {
start = reg->base;
end = reg->base + reg->size - 1;
end = reg->base + reg->size;
if (start >= ro_end || end <= ro_start)
vmem_add_mem(start, end - start, 0);
else if (start >= ro_start && end <= ro_end)
......
/*
* Copyright IBM Corp. 2012
* Copyright IBM Corp. 2012,2015
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
......@@ -23,22 +23,45 @@ EXPORT_SYMBOL_GPL(pci_debug_msg_id);
debug_info_t *pci_debug_err_id;
EXPORT_SYMBOL_GPL(pci_debug_err_id);
static char *pci_perf_names[] = {
/* hardware counters */
static char *pci_common_names[] = {
"Load operations",
"Store operations",
"Store block operations",
"Refresh operations",
};
static char *pci_fmt0_names[] = {
"DMA read bytes",
"DMA write bytes",
};
static char *pci_fmt1_names[] = {
"Received bytes",
"Received packets",
"Transmitted bytes",
"Transmitted packets",
};
static char *pci_fmt2_names[] = {
"Consumed work units",
"Maximum work units",
};
static char *pci_sw_names[] = {
"Allocated pages",
"Mapped pages",
"Unmapped pages",
};
static void pci_fmb_show(struct seq_file *m, char *name[], int length,
u64 *data)
{
int i;
for (i = 0; i < length; i++, data++)
seq_printf(m, "%26s:\t%llu\n", name[i], *data);
}
static void pci_sw_counter_show(struct seq_file *m)
{
struct zpci_dev *zdev = m->private;
......@@ -53,8 +76,6 @@ static void pci_sw_counter_show(struct seq_file *m)
static int pci_perf_show(struct seq_file *m, void *v)
{
struct zpci_dev *zdev = m->private;
u64 *stat;
int i;
if (!zdev)
return 0;
......@@ -72,15 +93,27 @@ static int pci_perf_show(struct seq_file *m, void *v)
seq_printf(m, "Samples: %u\n", zdev->fmb->samples);
seq_printf(m, "Last update TOD: %Lx\n", zdev->fmb->last_update);
/* hardware counters */
stat = (u64 *) &zdev->fmb->ld_ops;
for (i = 0; i < 4; i++)
seq_printf(m, "%26s:\t%llu\n",
pci_perf_names[i], *(stat + i));
if (zdev->fmb->dma_valid)
for (i = 4; i < 6; i++)
seq_printf(m, "%26s:\t%llu\n",
pci_perf_names[i], *(stat + i));
pci_fmb_show(m, pci_common_names, ARRAY_SIZE(pci_common_names),
&zdev->fmb->ld_ops);
switch (zdev->fmb->format) {
case 0:
if (!(zdev->fmb->fmt_ind & ZPCI_FMB_DMA_COUNTER_VALID))
break;
pci_fmb_show(m, pci_fmt0_names, ARRAY_SIZE(pci_fmt0_names),
&zdev->fmb->fmt0.dma_rbytes);
break;
case 1:
pci_fmb_show(m, pci_fmt1_names, ARRAY_SIZE(pci_fmt1_names),
&zdev->fmb->fmt1.rx_bytes);
break;
case 2:
pci_fmb_show(m, pci_fmt2_names, ARRAY_SIZE(pci_fmt2_names),
&zdev->fmb->fmt2.consumed_work_units);
break;
default:
seq_puts(m, "Unknown format\n");
}
pci_sw_counter_show(m);
mutex_unlock(&zdev->lock);
......
......@@ -12,6 +12,8 @@
#include <linux/stat.h>
#include <linux/pci.h>
#include <asm/sclp.h>
#define zpci_attr(name, fmt, member) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
......@@ -77,8 +79,29 @@ static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
sizeof(zdev->util_str));
}
static BIN_ATTR_RO(util_string, CLP_UTIL_STR_LEN);
static ssize_t report_error_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct zpci_report_error_header *report = (void *) buf;
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
struct zpci_dev *zdev = to_zpci(pdev);
int ret;
if (off || (count < sizeof(*report)))
return -EINVAL;
ret = sclp_pci_report(report, zdev->fh, zdev->fid);
return ret ? ret : count;
}
static BIN_ATTR(report_error, S_IWUSR, NULL, report_error_write, PAGE_SIZE);
static struct bin_attribute *zpci_bin_attrs[] = {
&bin_attr_util_string,
&bin_attr_report_error,
NULL,
};
......
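The report_error attribute added above is a write-only binary sysfs file: user space supplies a zpci_report_error_header followed by the payload, and the kernel forwards the buffer to the service element through sclp_pci_report(). A hedged sketch of a caller follows; the header layout is reproduced here because asm/sclp.h is not a uapi header, and the PCI address, action code and payload are made-up examples.

/*
 * Hedged sketch, not part of this merge: send an informational
 * adapter-error report through the new report_error sysfs attribute.
 * The header layout mirrors struct zpci_report_error_header; the PCI
 * address, action code and payload are made-up examples.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

struct report_error_header {
	uint8_t  version;	/* interface version byte */
	uint8_t  action;	/* 1: deconfigure and repair, 2: informational */
	uint16_t length;	/* length of the data that follows (assumption) */
	uint8_t  data[];	/* payload passed verbatim to SCLP ET 24 */
} __attribute__((packed));

int main(void)
{
	unsigned char buf[64];
	struct report_error_header *report = (struct report_error_header *) buf;
	const char payload[] = "example diagnostic data";
	size_t len;
	int fd;

	fd = open("/sys/bus/pci/devices/0000:00:00.0/report_error", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	report->version = 1;
	report->action = 2;			/* informational report */
	report->length = sizeof(payload) - 1;
	memcpy(report->data, payload, report->length);
	len = sizeof(*report) + report->length;

	if (write(fd, buf, len) != (ssize_t) len)
		perror("write");
	close(fd);
	return 0;
}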
......@@ -75,6 +75,8 @@ static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);
/*
* SECTION: Operations on the device structure.
......@@ -267,6 +269,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
dasd_debugfs_setup(dev_name(&device->cdev->dev),
dasd_debugfs_root_entry);
dasd_profile_init(&device->profile, device->debugfs_dentry);
dasd_hosts_init(device->debugfs_dentry, device);
/* register 'device' debug area, used for all DBF_DEV_XXX calls */
device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
......@@ -304,6 +307,7 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
return rc;
dasd_device_clear_timer(device);
dasd_profile_exit(&device->profile);
dasd_hosts_exit(device);
debugfs_remove(device->debugfs_dentry);
DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
if (device->debug_area != NULL) {
......@@ -1150,6 +1154,58 @@ int dasd_profile_on(struct dasd_profile *profile)
#endif /* CONFIG_DASD_PROFILE */
static int dasd_hosts_show(struct seq_file *m, void *v)
{
struct dasd_device *device;
int rc = -EOPNOTSUPP;
device = m->private;
dasd_get_device(device);
if (device->discipline->hosts_print)
rc = device->discipline->hosts_print(device, m);
dasd_put_device(device);
return rc;
}
static int dasd_hosts_open(struct inode *inode, struct file *file)
{
struct dasd_device *device = inode->i_private;
return single_open(file, dasd_hosts_show, device);
}
static const struct file_operations dasd_hosts_fops = {
.owner = THIS_MODULE,
.open = dasd_hosts_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void dasd_hosts_exit(struct dasd_device *device)
{
debugfs_remove(device->hosts_dentry);
device->hosts_dentry = NULL;
}
static void dasd_hosts_init(struct dentry *base_dentry,
struct dasd_device *device)
{
struct dentry *pde;
umode_t mode;
if (!base_dentry)
return;
mode = S_IRUSR | S_IFREG;
pde = debugfs_create_file("host_access_list", mode, base_dentry,
device, &dasd_hosts_fops);
if (pde && !IS_ERR(pde))
device->hosts_dentry = pde;
}
/*
* Allocate memory for a channel program with 'cplength' channel
* command words and 'datasize' additional space. There are two
......@@ -1582,6 +1638,9 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
unsigned long long now;
int nrf_suppressed = 0;
int fp_suppressed = 0;
u8 *sense = NULL;
int expires;
if (IS_ERR(irb)) {
......@@ -1617,7 +1676,23 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
dasd_put_device(device);
return;
}
/*
* In some cases 'File Protected' or 'No Record Found' errors
* might be expected and debug log messages for the
* corresponding interrupts shouldn't be written then.
* Check whether either of the corresponding suppress bits is set.
*/
sense = dasd_get_sense(irb);
if (sense) {
fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
}
if (!(fp_suppressed || nrf_suppressed))
device->discipline->dump_sense_dbf(device, irb, "int");
if (device->features & DASD_FEATURE_ERPLOG)
device->discipline->dump_sense(device, cqr, irb);
device->discipline->check_for_device_change(device, cqr, irb);
......@@ -2256,6 +2331,7 @@ static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
struct dasd_device *device;
struct dasd_ccw_req *cqr, *n;
u8 *sense = NULL;
int rc;
retry:
......@@ -2301,6 +2377,20 @@ static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
rc = 0;
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
/*
* In some cases the 'File Protected' or 'Incorrect Length'
* error might be expected and error recovery would be
* unnecessary in these cases. Check whether the corresponding
* suppress bit is set.
*/
sense = dasd_get_sense(&cqr->irb);
if (sense && sense[1] & SNS1_FILE_PROTECTED &&
test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
continue;
if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
continue;
/*
* for alias devices simplify error recovery and
* return to upper layer
......
......@@ -1367,6 +1367,12 @@ dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
struct dasd_device *device = default_erp->startdev;
/*
* In some cases the 'No Record Found' error might be expected and
* log messages shouldn't be written then.
* Check whether the corresponding suppress bit is set.
*/
if (!test_bit(DASD_CQR_SUPPRESS_NRF, &default_erp->flags))
dev_err(&device->cdev->dev,
"The specified record was not found\n");
......@@ -1393,8 +1399,14 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
struct dasd_device *device = erp->startdev;
dev_err(&device->cdev->dev, "Accessing the DASD failed because of "
"a hardware error\n");
/*
* In some cases the 'File Protected' error might be expected and
* log messages shouldn't be written then.
* Check whether the corresponding suppress bit is set.
*/
if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
dev_err(&device->cdev->dev,
"Accessing the DASD failed because of a hardware error\n");
return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
......
......@@ -981,6 +981,32 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
static ssize_t
dasd_access_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct dasd_device *device;
int count;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return PTR_ERR(device);
if (device->discipline->host_access_count)
count = device->discipline->host_access_count(device);
else
count = -EOPNOTSUPP;
dasd_put_device(device);
if (count < 0)
return count;
return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf)
......@@ -1471,6 +1497,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
&dev_attr_safe_offline.attr,
&dev_attr_host_access_count.attr,
&dev_attr_path_masks.attr,
NULL,
};
......
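dasd_access_show() backs a read-only host_access_count sysfs attribute that reports how many operating systems currently have the volume online, and fails with EOPNOTSUPP when the discipline provides no host_access_count callback. A hypothetical user-space sketch; the ccw bus id is made up.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/ccw/devices/0.0.1234/host_access_count";
	FILE *f = fopen(path, "r");
	int count;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &count) == 1)
		printf("volume is online to %d host(s)\n", count);
	fclose(f);
	return 0;
}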
......@@ -35,6 +35,7 @@
#define DASD_ECKD_CCW_READ_MT 0x86
#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
#define DASD_ECKD_CCW_READ_KD_MT 0x8e
#define DASD_ECKD_CCW_READ_COUNT_MT 0x92
#define DASD_ECKD_CCW_RELEASE 0x94
#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
......@@ -53,6 +54,7 @@
*/
#define PSF_ORDER_PRSSD 0x18
#define PSF_ORDER_CUIR_RESPONSE 0x1A
#define PSF_SUBORDER_QHA 0x1C
#define PSF_ORDER_SSC 0x1D
/*
......@@ -81,6 +83,8 @@
#define ATTENTION_LENGTH_CUIR 0x0e
#define ATTENTION_FORMAT_CUIR 0x01
#define DASD_ECKD_PG_GROUPED 0x10
/*
* Size that is reported for large volumes in the old 16-bit no_cyl field
*/
......@@ -403,13 +407,41 @@ struct dasd_psf_cuir_response {
__u8 ssid;
} __packed;
struct dasd_ckd_path_group_entry {
__u8 status_flags;
__u8 pgid[11];
__u8 sysplex_name[8];
__u32 timestamp;
__u32 cylinder;
__u8 reserved[4];
} __packed;
struct dasd_ckd_host_information {
__u8 access_flags;
__u8 entry_size;
__u16 entry_count;
__u8 entry[16390];
} __packed;
struct dasd_psf_query_host_access {
__u8 access_flag;
__u8 version;
__u16 CKD_length;
__u16 SCSI_length;
__u8 unused[10];
__u8 host_access_information[16394];
} __packed;
/*
* Perform Subsystem Function - Prepare for Read Subsystem Data
*/
struct dasd_psf_prssd_data {
unsigned char order;
unsigned char flags;
unsigned char reserved[4];
unsigned char reserved1;
unsigned char reserved2;
unsigned char lss;
unsigned char volume;
unsigned char suborder;
unsigned char varies[5];
} __attribute__ ((packed));
......
......@@ -236,6 +236,13 @@ struct dasd_ccw_req {
* stolen. Should not be combined with
* DASD_CQR_FLAGS_USE_ERP
*/
/*
* The following flags are used to suppress output of certain errors.
* These flags should only be used for format checks!
*/
#define DASD_CQR_SUPPRESS_NRF 4 /* Suppress 'No Record Found' error */
#define DASD_CQR_SUPPRESS_FP 5 /* Suppress 'File Protected' error */
#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
/* Signature for error recovery functions. */
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
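The three suppress flags above are meant for format checks only: a request that expects 'No Record Found', 'File Protected' or 'Incorrect Length' conditions sets them so that dasd_int_handler() skips the debug-log output and the 3990 ERP code stays quiet. A minimal sketch of how such a request might be marked; the helper name is hypothetical, the snippet assumes an already built struct dasd_ccw_req from dasd_int.h and is not code from this commit.

static void dasd_mark_format_check_request(struct dasd_ccw_req *cqr)
{
	/* These conditions are expected while verifying the track format. */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
}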
......@@ -318,7 +325,8 @@ struct dasd_discipline {
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
* term_IO cancels it (e.g. in case of a timeout). format_device
* returns a ccw chain to be used to format the device.
* formats the device and check_device_format compares the format of
* a device with the expected format_data.
* handle_terminated_request allows to examine a cqr and prepare
* it for retry.
*/
......@@ -329,7 +337,9 @@ struct dasd_discipline {
int (*term_IO) (struct dasd_ccw_req *);
void (*handle_terminated_request) (struct dasd_ccw_req *);
int (*format_device) (struct dasd_device *,
struct format_data_t *, int enable_pav);
struct format_data_t *, int);
int (*check_device_format)(struct dasd_device *,
struct format_check_t *, int);
int (*free_cp) (struct dasd_ccw_req *, struct request *);
/*
......@@ -365,6 +375,8 @@ struct dasd_discipline {
int (*get_uid) (struct dasd_device *, struct dasd_uid *);
void (*kick_validate) (struct dasd_device *);
int (*check_attention)(struct dasd_device *, __u8);
int (*host_access_count)(struct dasd_device *);
int (*hosts_print)(struct dasd_device *, struct seq_file *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
......@@ -487,6 +499,7 @@ struct dasd_device {
unsigned long blk_timeout;
struct dentry *debugfs_dentry;
struct dentry *hosts_dentry;
struct dasd_profile profile;
};
......
......@@ -238,6 +238,23 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata)
return rc;
}
static int dasd_check_format(struct dasd_block *block,
struct format_check_t *cdata)
{
struct dasd_device *base;
int rc;
base = block->base;
if (!base->discipline->check_device_format)
return -ENOTTY;
rc = base->discipline->check_device_format(base, cdata, 1);
if (rc == -EAGAIN)
rc = base->discipline->check_device_format(base, cdata, 0);
return rc;
}
/*
* Format device.
*/
......@@ -272,6 +289,47 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
}
rc = dasd_format(base->block, &fdata);
dasd_put_device(base);
return rc;
}
/*
* Check device format
*/
static int dasd_ioctl_check_format(struct block_device *bdev, void __user *argp)
{
struct format_check_t cdata;
struct dasd_device *base;
int rc = 0;
if (!argp)
return -EINVAL;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
if (bdev != bdev->bd_contains) {
pr_warn("%s: The specified DASD is a partition and cannot be checked\n",
dev_name(&base->cdev->dev));
rc = -EINVAL;
goto out_err;
}
if (copy_from_user(&cdata, argp, sizeof(cdata))) {
rc = -EFAULT;
goto out_err;
}
rc = dasd_check_format(base->block, &cdata);
if (rc)
goto out_err;
if (copy_to_user(argp, &cdata, sizeof(cdata)))
rc = -EFAULT;
out_err:
dasd_put_device(base);
return rc;
}
......@@ -519,6 +577,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
case BIODASDFMT:
rc = dasd_ioctl_format(bdev, argp);
break;
case BIODASDCHECKFMT:
rc = dasd_ioctl_check_format(bdev, argp);
break;
case BIODASDINFO:
rc = dasd_ioctl_information(block, cmd, argp);
break;
......
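BIODASDCHECKFMT takes a format_check_t, verifies the track range described by the embedded format_data_t against the format found on the volume, and reports the first mismatch through the output fields. A hypothetical user-space sketch, assuming an s390 build where <asm/dasd.h> provides the structure and the ioctl number; the device node, track range and field names should be checked against that header.

#include <asm/dasd.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct format_check_t cdata;
	int fd, rc;

	/* must be the whole device, partitions are rejected with -EINVAL */
	fd = open("/dev/dasda", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cdata, 0, sizeof(cdata));
	cdata.expect.start_unit = 0;	/* first track to verify */
	cdata.expect.stop_unit = 99;	/* last track to verify */
	cdata.expect.blksize = 4096;	/* expected block size */
	/* expect.intensity left 0; adjust to how the volume was formatted */

	rc = ioctl(fd, BIODASDCHECKFMT, &cdata);
	if (rc)
		perror("BIODASDCHECKFMT");
	else if (cdata.result)
		printf("format mismatch %u at track %u\n",
		       cdata.result, cdata.unit);
	else
		printf("format matches the expected layout\n");

	close(fd);
	return 0;
}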
......@@ -18,6 +18,8 @@ obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_PCI) += sclp_pci.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o
......
......@@ -400,7 +400,7 @@ con3270_deactivate(struct raw3270_view *view)
del_timer(&cp->timer);
}
static int
static void
con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
......@@ -418,7 +418,6 @@ con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
}
return RAW3270_IO_DONE;
}
/* Console view to a 3270 device. */
......
......@@ -217,7 +217,7 @@ fs3270_deactivate(struct raw3270_view *view)
fp->init->callback(fp->init, NULL);
}
static int
static void
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Set indication and wake waiters for attention. */
......@@ -233,7 +233,6 @@ fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
/*
......
......@@ -90,6 +90,8 @@ module_param(tubxcorrect, bool, 0);
*/
DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
static void __raw3270_disconnect(struct raw3270 *rp);
/*
* Encode array for 12 bit 3270 addresses.
*/
......@@ -228,29 +230,6 @@ raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
rq->ccw.flags |= CCW_FLAG_IDA;
}
/*
* Stop running ccw.
*/
static int
__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
{
int retries;
int rc;
if (raw3270_request_final(rq))
return 0;
/* Check if interrupt has already been processed */
for (retries = 0; retries < 5; retries++) {
if (retries < 2)
rc = ccw_device_halt(rp->cdev, (long) rq);
else
rc = ccw_device_clear(rp->cdev, (long) rq);
if (rc == 0)
break; /* termination successful */
}
return rc;
}
/*
* Add the request to the request queue, try to start it if the
* 3270 device is idle. Return without waiting for end of i/o.
......@@ -342,7 +321,6 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct raw3270 *rp;
struct raw3270_view *view;
struct raw3270_request *rq;
int rc;
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
......@@ -350,57 +328,31 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
rq = (struct raw3270_request *) intparm;
view = rq ? rq->view : rp->view;
if (IS_ERR(irb))
rc = RAW3270_IO_RETRY;
else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
rq->rc = -EIO;
rc = RAW3270_IO_DONE;
} else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
DEV_STAT_UNIT_EXCEP)) {
if (!IS_ERR(irb)) {
/* Handle CE-DE-UE and subsequent UDE */
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
rc = RAW3270_IO_BUSY;
} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
/* Wait for UDE if busy flag is set. */
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END)
clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
/* Got it, now retry. */
rc = RAW3270_IO_RETRY;
} else
rc = RAW3270_IO_BUSY;
} else if (view)
rc = view->fn->intv(view, rq, irb);
else
rc = RAW3270_IO_DONE;
if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END |
DEV_STAT_DEV_END |
DEV_STAT_UNIT_EXCEP))
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
/* Handle disconnected devices */
if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->ecw[0] & SNS0_INTERVENTION_REQ)) {
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
if (rp->state > RAW3270_STATE_RESET)
__raw3270_disconnect(rp);
}
/* Call interrupt handler of the view */
if (view)
view->fn->intv(view, rq, irb);
}
switch (rc) {
case RAW3270_IO_DONE:
break;
case RAW3270_IO_BUSY:
/*
* Intervention required by the operator. We have to wait
* for unsolicited device end.
*/
if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags))
/* Device busy, do not start I/O */
return;
case RAW3270_IO_RETRY:
if (!rq)
break;
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
(unsigned long) rq, 0, 0);
if (rq->rc == 0)
return; /* Successfully restarted. */
break;
case RAW3270_IO_STOP:
if (!rq)
break;
__raw3270_halt_io(rp, rq);
rq->rc = -EIO;
break;
default:
BUG();
}
if (rq) {
BUG_ON(list_empty(&rq->list));
if (rq && !list_empty(&rq->list)) {
/* The request completed, remove from queue and do callback. */
list_del_init(&rq->list);
if (rq->callback)
......@@ -408,6 +360,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
/* Do put_device for get_device in raw3270_start. */
raw3270_put_view(view);
}
/*
* Try to start each request on the request queue until one is
* started successfully.
......@@ -685,23 +638,34 @@ raw3270_reset(struct raw3270_view *view)
return rc;
}
static int
static void
__raw3270_disconnect(struct raw3270 *rp)
{
struct raw3270_request *rq;
struct raw3270_view *view;
rp->state = RAW3270_STATE_INIT;
rp->view = &rp->init_view;
/* Cancel all queued requests */
while (!list_empty(&rp->req_queue)) {
rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
view = rq->view;
rq->rc = -EACCES;
list_del_init(&rq->list);
if (rq->callback)
rq->callback(rq, rq->callback_data);
raw3270_put_view(view);
}
/* Start from scratch */
__raw3270_reset_device(rp);
}
static void
raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
struct irb *irb)
{
struct raw3270 *rp;
/*
* Unit-Check Processing:
* Expect Command Reject or Intervention Required.
*/
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
/* Request finished abnormally. */
if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
return RAW3270_IO_BUSY;
}
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
......@@ -715,7 +679,6 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
rp = view->dev;
raw3270_read_modified(rp);
}
return RAW3270_IO_DONE;
}
static struct raw3270_fn raw3270_init_fn = {
......
......@@ -125,19 +125,13 @@ raw3270_request_final(struct raw3270_request *rq)
void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
/* Return value of *intv (see raw3270_fn below) can be one of the following: */
#define RAW3270_IO_DONE 0 /* request finished */
#define RAW3270_IO_BUSY 1 /* request still active */
#define RAW3270_IO_RETRY 2 /* retry current request */
#define RAW3270_IO_STOP 3 /* kill current request */
/*
* Functions of a 3270 view.
*/
struct raw3270_fn {
int (*activate)(struct raw3270_view *);
void (*deactivate)(struct raw3270_view *);
int (*intv)(struct raw3270_view *,
void (*intv)(struct raw3270_view *,
struct raw3270_request *, struct irb *);
void (*release)(struct raw3270_view *);
void (*free)(struct raw3270_view *);
......
......@@ -17,33 +17,35 @@
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
#define SCLP_CONSOLE_PAGES 6
#define SCLP_EVTYP_MASK(T) (1U << (32 - (T)))
#define EVTYP_OPCMD 0x01
#define EVTYP_MSG 0x02
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_DIAG_TEST 0x07
#define EVTYP_STATECHANGE 0x08
#define EVTYP_PMSGCMD 0x09
#define EVTYP_CNTLPROGOPCMD 0x20
#define EVTYP_CNTLPROGIDENT 0x0B
#define EVTYP_SIGQUIESCE 0x1D
#define EVTYP_ASYNC 0x0A
#define EVTYP_CTLPROGIDENT 0x0B
#define EVTYP_ERRNOTIFY 0x18
#define EVTYP_VT220MSG 0x1A
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_SDIAS 0x1C
#define EVTYP_ASYNC 0x0A
#define EVTYP_SIGQUIESCE 0x1D
#define EVTYP_OCF 0x1E
#define EVTYP_OPCMD_MASK 0x80000000
#define EVTYP_MSG_MASK 0x40000000
#define EVTYP_DIAG_TEST_MASK 0x02000000
#define EVTYP_STATECHANGE_MASK 0x01000000
#define EVTYP_PMSGCMD_MASK 0x00800000
#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
#define EVTYP_CTLPROGIDENT_MASK 0x00200000
#define EVTYP_SIGQUIESCE_MASK 0x00000008
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_CONFMGMDATA_MASK 0x10000000
#define EVTYP_SDIAS_MASK 0x00000010
#define EVTYP_ASYNC_MASK 0x00400000
#define EVTYP_OCF_MASK 0x00000004
#define EVTYP_OPCMD_MASK SCLP_EVTYP_MASK(EVTYP_OPCMD)
#define EVTYP_MSG_MASK SCLP_EVTYP_MASK(EVTYP_MSG)
#define EVTYP_CONFMGMDATA_MASK SCLP_EVTYP_MASK(EVTYP_CONFMGMDATA)
#define EVTYP_DIAG_TEST_MASK SCLP_EVTYP_MASK(EVTYP_DIAG_TEST)
#define EVTYP_STATECHANGE_MASK SCLP_EVTYP_MASK(EVTYP_STATECHANGE)
#define EVTYP_PMSGCMD_MASK SCLP_EVTYP_MASK(EVTYP_PMSGCMD)
#define EVTYP_ASYNC_MASK SCLP_EVTYP_MASK(EVTYP_ASYNC)
#define EVTYP_CTLPROGIDENT_MASK SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT)
#define EVTYP_ERRNOTIFY_MASK SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY)
#define EVTYP_VT220MSG_MASK SCLP_EVTYP_MASK(EVTYP_VT220MSG)
#define EVTYP_SDIAS_MASK SCLP_EVTYP_MASK(EVTYP_SDIAS)
#define EVTYP_SIGQUIESCE_MASK SCLP_EVTYP_MASK(EVTYP_SIGQUIESCE)
#define EVTYP_OCF_MASK SCLP_EVTYP_MASK(EVTYP_OCF)
#define GNRLMSGFLGS_DOM 0x8000
#define GNRLMSGFLGS_SNDALRM 0x4000
......
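The rewritten mask definitions derive each EVTYP_*_MASK from the event-type number instead of hard-coding it: type 0x01 maps to bit 31 (0x80000000) and type 0x20 to bit 0 (0x00000001). A small stand-alone sanity check against the old hard-coded constants shown above (user-space sketch, not kernel code):

#include <assert.h>
#include <stdio.h>

#define SCLP_EVTYP_MASK(T)	(1U << (32 - (T)))

int main(void)
{
	assert(SCLP_EVTYP_MASK(0x01) == 0x80000000);	/* old EVTYP_OPCMD_MASK */
	assert(SCLP_EVTYP_MASK(0x02) == 0x40000000);	/* old EVTYP_MSG_MASK */
	assert(SCLP_EVTYP_MASK(0x1D) == 0x00000008);	/* old EVTYP_SIGQUIESCE_MASK */
	assert(SCLP_EVTYP_MASK(0x18) == 0x00000100);	/* new EVTYP_ERRNOTIFY_MASK */
	puts("masks match the previous hard-coded values");
	return 0;
}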
......@@ -575,67 +575,6 @@ __initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* PCI I/O adapter configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
#define SCLP_RECONFIG_PCI_ATPYE 2
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
u8 reserved1;
u16 reserved2;
u32 aid; /* adapter identifier */
} __packed;
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
struct pci_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_PCI_RECONFIG)
return -EOPNOTSUPP;
sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
sccb->aid = fid;
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
int sclp_pci_configure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);
int sclp_pci_deconfigure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
/*
* Channel path configuration related functions.
*/
......
......@@ -93,7 +93,7 @@ static struct sclp_req *cpi_prepare_req(void)
/* setup SCCB for Control-Program Identification */
sccb->header.length = sizeof(struct cpi_sccb);
sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
sccb->cpi_evbuf.header.type = 0x0b;
sccb->cpi_evbuf.header.type = EVTYP_CTLPROGIDENT;
evb = &sccb->cpi_evbuf;
/* set system type */
......
/*
* PCI I/O adapter configuration related functions.
*
* Copyright IBM Corp. 2016
*/
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <asm/sclp.h>
#include "sclp.h"
#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
#define SCLP_ATYPE_PCI 2
#define SCLP_ERRNOTIFY_AQ_REPAIR 1
#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
static DEFINE_MUTEX(sclp_pci_mutex);
static struct sclp_register sclp_pci_event = {
.send_mask = EVTYP_ERRNOTIFY_MASK,
};
struct err_notify_evbuf {
struct evbuf_header header;
u8 action;
u8 atype;
u32 fh;
u32 fid;
u8 data[0];
} __packed;
struct err_notify_sccb {
struct sccb_header header;
struct err_notify_evbuf evbuf;
} __packed;
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
u8 reserved1;
u16 reserved2;
u32 aid; /* adapter identifier */
} __packed;
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
struct pci_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_PCI_RECONFIG)
return -EOPNOTSUPP;
sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->atype = SCLP_ATYPE_PCI;
sccb->aid = fid;
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
int sclp_pci_configure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);
int sclp_pci_deconfigure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
static void sclp_pci_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
complete(completion);
}
static int sclp_pci_check_report(struct zpci_report_error_header *report)
{
if (report->version != 1)
return -EINVAL;
if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR &&
report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG)
return -EINVAL;
if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
return -EINVAL;
return 0;
}
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct err_notify_sccb *sccb;
struct sclp_req req;
int ret;
ret = sclp_pci_check_report(report);
if (ret)
return ret;
mutex_lock(&sclp_pci_mutex);
ret = sclp_register(&sclp_pci_event);
if (ret)
goto out_unlock;
if (!(sclp_pci_event.sclp_receive_mask & EVTYP_ERRNOTIFY_MASK)) {
ret = -EOPNOTSUPP;
goto out_unregister;
}
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb) {
ret = -ENOMEM;
goto out_unregister;
}
memset(&req, 0, sizeof(req));
req.callback_data = &completion;
req.callback = sclp_pci_callback;
req.command = SCLP_CMDW_WRITE_EVENT_DATA;
req.status = SCLP_REQ_FILLED;
req.sccb = sccb;
sccb->evbuf.header.length = sizeof(sccb->evbuf) + report->length;
sccb->evbuf.header.type = EVTYP_ERRNOTIFY;
sccb->header.length = sizeof(sccb->header) + sccb->evbuf.header.length;
sccb->evbuf.action = report->action;
sccb->evbuf.atype = SCLP_ATYPE_PCI;
sccb->evbuf.fh = fh;
sccb->evbuf.fid = fid;
memcpy(sccb->evbuf.data, report->data, report->length);
ret = sclp_add_request(&req);
if (ret)
goto out_free_req;
wait_for_completion(&completion);
if (req.status != SCLP_REQ_DONE) {
pr_warn("request failed (status=0x%02x)\n",
req.status);
ret = -EIO;
goto out_free_req;
}
if (sccb->header.response_code != 0x0020) {
pr_warn("request failed with response code 0x%x\n",
sccb->header.response_code);
ret = -EIO;
}
out_free_req:
free_page((unsigned long) sccb);
out_unregister:
sclp_unregister(&sclp_pci_event);
out_unlock:
mutex_unlock(&sclp_pci_mutex);
return ret;
}
......@@ -92,6 +92,7 @@ struct tty3270 {
unsigned char inattr; /* Visible/invisible input. */
int throttle, attn; /* tty throttle/unthrottle. */
struct tasklet_struct readlet; /* Tasklet to issue read request. */
struct tasklet_struct hanglet; /* Tasklet to hang up the tty. */
struct kbd_data *kbd; /* key_maps stuff. */
/* Escape sequence parsing. */
......@@ -318,6 +319,27 @@ tty3270_blank_line(struct tty3270 *tp)
tp->nr_up++;
}
/*
* Create a blank screen and remove all lines from the history.
*/
static void
tty3270_blank_screen(struct tty3270 *tp)
{
struct string *s, *n;
int i;
for (i = 0; i < tp->view.rows - 2; i++)
tp->screen[i].len = 0;
tp->nr_up = 0;
list_for_each_entry_safe(s, n, &tp->lines, list) {
list_del(&s->list);
if (!list_empty(&s->update))
list_del(&s->update);
tp->nr_lines--;
free_string(&tp->freemem, s);
}
}
/*
* Write request completion callback.
*/
......@@ -405,7 +427,10 @@ tty3270_update(struct tty3270 *tp)
if (raw3270_request_add_data(wrq, str, len) != 0)
break;
list_del_init(&s->update);
if (s->string[s->len - 4] == TO_RA)
sba = s->string + s->len - 3;
else
sba = invalid_sba;
}
if (list_empty(&tp->update))
updated |= TTY_UPDATE_LIST;
......@@ -621,6 +646,16 @@ tty3270_issue_read(struct tty3270 *tp, int lock)
}
}
/*
* Hang up the tty
*/
static void
tty3270_hangup_tasklet(struct tty3270 *tp)
{
tty_port_tty_hangup(&tp->port, true);
raw3270_put_view(&tp->view);
}
/*
* Switch to the tty view.
*/
......@@ -642,7 +677,7 @@ tty3270_deactivate(struct raw3270_view *view)
del_timer(&tp->timer);
}
static int
static void
tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
......@@ -654,17 +689,19 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
rq->rc = -EIO;
else
raw3270_get_view(&tp->view);
tasklet_schedule(&tp->hanglet);
} else {
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
/* Interrupt without an outstanding request -> update all */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
return RAW3270_IO_DONE;
}
/*
......@@ -716,6 +753,9 @@ tty3270_alloc_view(void)
tasklet_init(&tp->readlet,
(void (*)(unsigned long)) tty3270_read_tasklet,
(unsigned long) tp->read);
tasklet_init(&tp->hanglet,
(void (*)(unsigned long)) tty3270_hangup_tasklet,
(unsigned long) tp);
INIT_WORK(&tp->resize_work, tty3270_resize_work);
return tp;
......@@ -814,6 +854,7 @@ static void tty3270_resize_work(struct work_struct *work)
return;
/* Switch to new output size */
spin_lock_bh(&tp->view.lock);
tty3270_blank_screen(tp);
oscreen = tp->screen;
orows = tp->view.rows;
tp->view.model = tp->n_model;
......@@ -824,7 +865,6 @@ static void tty3270_resize_work(struct work_struct *work)
free_string(&tp->freemem, tp->status);
tty3270_create_prompt(tp);
tty3270_create_status(tp);
tp->nr_up = 0;
while (tp->nr_lines < tp->view.rows - 2)
tty3270_blank_line(tp);
tp->update_flags = TTY_UPDATE_ALL;
......@@ -838,6 +878,7 @@ static void tty3270_resize_work(struct work_struct *work)
ws.ws_row = tp->view.rows - 2;
ws.ws_col = tp->view.cols;
tty_do_resize(tty, &ws);
tty_kref_put(tty);
}
static void
......@@ -845,6 +886,8 @@ tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
if (tp->n_model == model && tp->n_rows == rows && tp->n_cols == cols)
return;
tp->n_model = model;
tp->n_rows = rows;
tp->n_cols = cols;
......@@ -923,10 +966,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
tp->port.low_latency = 0;
/* why to reassign? */
tty_port_tty_set(&tp->port, tty);
tp->inattr = TF_INPUT;
return tty_port_install(&tp->port, driver, tty);
goto port_install;
}
if (tty3270_max_index < tty->index + 1)
tty3270_max_index = tty->index + 1;
......@@ -952,7 +993,6 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return rc;
}
tty_port_tty_set(&tp->port, tty);
tp->port.low_latency = 0;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
......@@ -974,6 +1014,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
raw3270_activate_view(&tp->view);
port_install:
rc = tty_port_install(&tp->port, driver, tty);
if (rc) {
raw3270_put_view(&tp->view);
......@@ -1010,18 +1051,18 @@ tty3270_close(struct tty_struct *tty, struct file * filp)
if (tty->count > 1)
return;
if (tp) {
tty->driver_data = NULL;
if (tp)
tty_port_tty_set(&tp->port, NULL);
}
}
static void tty3270_cleanup(struct tty_struct *tty)
{
struct tty3270 *tp = tty->driver_data;
if (tp)
if (tp) {
tty->driver_data = NULL;
raw3270_put_view(&tp->view);
}
}
/*
......@@ -1788,7 +1829,22 @@ tty3270_unthrottle(struct tty_struct * tty)
static void
tty3270_hangup(struct tty_struct *tty)
{
// FIXME: implement
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
spin_lock_bh(&tp->view.lock);
tp->cx = tp->saved_cx = 0;
tp->cy = tp->saved_cy = 0;
tp->highlight = tp->saved_highlight = TAX_RESET;
tp->f_color = tp->saved_f_color = TAC_RESET;
tty3270_blank_screen(tp);
while (tp->nr_lines < tp->view.rows - 2)
tty3270_blank_line(tp);
tp->update_flags = TTY_UPDATE_ALL;
spin_unlock_bh(&tp->view.lock);
tty3270_set_timer(tp, 1);
}
static void
......
......@@ -787,7 +787,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
/*
* AP state machine jump table
*/
ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_STATE_RESET_START] = {
[AP_EVENT_POLL] = ap_sm_reset,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
......