Commit 3f5595e3 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Martin Schwidefsky:
 "Next to the usual bug fixes (including the TASK_SIZE fix), there is
  one larger crypto item. It allows to use protected keys with the
  in-kernel crypto API

  The protected key support has two parts, the pkey user space API to
  convert key formats and the paes crypto module that uses a protected
  key instead of a standard AES key"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390: TASK_SIZE for kernel threads
  s390/crypt: Add protected key AES module
  s390/dasd: fix spelling mistake: "supportet" -> "supported"
  s390/pkey: Introduce pkey kernel module
  s390/zcrypt: export additional symbols
  s390/zcrypt: Rework CONFIG_ZCRYPT Kconfig text.
  s390/zcrypt: Cleanup leftover module code.
  s390/nmi: purge tlbs after control register validation
  s390/nmi: fix order of register validation
  s390/crypto: Add PCKMO inline function
  s390/zcrypt: Enable request count reset for cards and queues.
  s390/mm: use _SEGMENT_ENTRY_EMPTY in the code
  s390/chsc: Add exception handler for CHSC instruction
  s390: opt into HAVE_COPY_THREAD_TLS
  s390: restore address space when returning to user space
  s390: rename CIF_ASCE to CIF_ASCE_PRIMARY
parents 12dfdfed fb94a687
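
(As background for the diffs that follow: kernel code reaches the new "paes" algorithms through the generic crypto API, passing the 64-byte secure key blob rather than a clear AES key to setkey. Below is a minimal, hypothetical sketch — function and variable names are illustrative, error handling is abbreviated, and it is assumed that the synchronous skcipher front end wraps the blkcipher implementation added by this merge:)

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>

	/* Hypothetical: ECB-encrypt one 16-byte block under a secure key blob. */
	static int paes_demo(const u8 *seckey_blob, u8 *buf)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_skcipher("ecb(paes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		/* the key is the 64-byte secure key blob (SECKEYBLOBSIZE) */
		ret = crypto_skcipher_setkey(tfm, seckey_blob, 64);
		if (ret)
			goto out_tfm;
		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_tfm;
		}
		sg_init_one(&sg, buf, 16);
		skcipher_request_set_crypt(req, &sg, &sg, 16, NULL);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_free(req);
	out_tfm:
		crypto_free_skcipher(tfm);
		return ret;
	}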
@@ -134,6 +134,7 @@ config S390
 	select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
+	select HAVE_COPY_THREAD_TLS
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_CONTIGUOUS
...
@@ -678,6 +678,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
...
@@ -628,6 +628,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
...
@@ -6,7 +6,7 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
-obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
+obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o paes_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
 obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
 obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
...
/*
* Cryptographic API.
*
* s390 implementation of the AES Cipher Algorithm with protected keys.
*
* s390 Version:
* Copyright IBM Corp. 2017
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Harald Freudenberger <freude@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
*/
#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
struct s390_paes_ctx {
struct pkey_seckey sk;
struct pkey_protkey pk;
unsigned long fc;
};
struct s390_pxts_ctx {
struct pkey_seckey sk[2];
struct pkey_protkey pk[2];
unsigned long fc;
};
static inline int __paes_convert_key(struct pkey_seckey *sk,
struct pkey_protkey *pk)
{
int i, ret;
/* try three times in case of failure */
for (i = 0; i < 3; i++) {
ret = pkey_skey2pkey(sk, pk);
if (ret == 0)
break;
}
return ret;
}
static int __paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
if (key_len != SECKEYBLOBSIZE)
return -EINVAL;
memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
if (__paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return 0;
}
static int ecb_paes_crypt(struct blkcipher_desc *desc,
unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int nbytes, n, k;
int ret;
ret = blkcipher_walk_virt(desc, walk);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
if (k < n) {
if (__paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
}
}
return ret;
}
static int ecb_paes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_paes_crypt(desc, CPACF_ENCRYPT, &walk);
}
static int ecb_paes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_paes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg ecb_paes_alg = {
.cra_name = "ecb(paes)",
.cra_driver_name = "ecb-paes-s390",
.cra_priority = 400, /* combo: aes + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SECKEYBLOBSIZE,
.max_keysize = SECKEYBLOBSIZE,
.setkey = ecb_paes_set_key,
.encrypt = ecb_paes_encrypt,
.decrypt = ecb_paes_decrypt,
}
}
};
static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
if (__cbc_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return 0;
}
static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int nbytes, n, k;
int ret;
struct {
u8 iv[AES_BLOCK_SIZE];
u8 key[MAXPROTKEYSIZE];
} param;
ret = blkcipher_walk_virt(desc, walk);
memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_kmc(ctx->fc | modifier, &param,
walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
if (k < n) {
if (__cbc_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
}
}
memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
return ret;
}
static int cbc_paes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_paes_crypt(desc, 0, &walk);
}
static int cbc_paes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_paes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg cbc_paes_alg = {
.cra_name = "cbc(paes)",
.cra_driver_name = "cbc-paes-s390",
.cra_priority = 400, /* combo: aes + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SECKEYBLOBSIZE,
.max_keysize = SECKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = cbc_paes_set_key,
.encrypt = cbc_paes_encrypt,
.decrypt = cbc_paes_decrypt,
}
}
};
static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk[0], &ctx->pk[0]) ||
__paes_convert_key(&ctx->sk[1], &ctx->pk[1]))
return -EINVAL;
if (ctx->pk[0].type != ctx->pk[1].type)
return -EINVAL;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
(ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
CPACF_KM_PXTS_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
unsigned int ckey_len;
memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE);
memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE);
if (__xts_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/*
 * xts_check_key verifies the key length is not odd and makes
 * sure that the two keys are not the same. This can be done
 * on the two protected keys as well.
 */
ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
AES_KEYSIZE_128 : AES_KEYSIZE_256;
memcpy(ckey, ctx->pk[0].protkey, ckey_len);
memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
return xts_check_key(tfm, ckey, 2*ckey_len);
}
static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_pxts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int keylen, offset, nbytes, n, k;
int ret;
struct {
u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
u8 tweak[16];
u8 block[16];
u8 bit[16];
u8 xts[16];
} pcc_param;
struct {
u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
u8 init[16];
} xts_param;
ret = blkcipher_walk_virt(desc, walk);
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
retry:
memset(&pcc_param, 0, sizeof(pcc_param));
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
cpacf_pcc(ctx->fc, pcc_param.key + offset);
memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
memcpy(xts_param.init, pcc_param.xts, 16);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
if (k < n) {
if (__xts_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
goto retry;
}
}
return ret;
}
static int xts_paes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return xts_paes_crypt(desc, 0, &walk);
}
static int xts_paes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return xts_paes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg xts_paes_alg = {
.cra_name = "xts(paes)",
.cra_driver_name = "xts-paes-s390",
.cra_priority = 400, /* combo: aes + xts */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_pxts_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = 2 * SECKEYBLOBSIZE,
.max_keysize = 2 * SECKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = xts_paes_set_key,
.encrypt = xts_paes_encrypt,
.decrypt = xts_paes_decrypt,
}
}
};
static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
(ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
CPACF_KMCTR_PAES_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
memcpy(ctx->sk.seckey, in_key, key_len);
if (__ctr_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return 0;
}
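/*
 * Fill the shared ctrblk page with consecutive counter values: the
 * first block is taken from the current IV, each following block is
 * the previous one incremented by 1. Returns the number of counter
 * bytes prepared (a multiple of AES_BLOCK_SIZE, at most PAGE_SIZE).
 */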
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* only use complete blocks, max. PAGE_SIZE */
memcpy(ctrptr, iv, AES_BLOCK_SIZE);
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
ctrptr += AES_BLOCK_SIZE;
}
return n;
}
static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 buf[AES_BLOCK_SIZE], *ctrptr;
unsigned int nbytes, n, k;
int ret, locked;
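/*
 * The page-sized ctrblk buffer is shared by all users of this
 * module. If the trylock fails, fall back to processing one
 * counter block at a time directly from walk->iv.
 */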
locked = spin_trylock(&ctrblk_lock);
ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
n = __ctrblk_init(ctrblk, walk->iv, nbytes);
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
walk->dst.virt.addr, walk->src.virt.addr,
n, ctrptr);
if (k) {
if (ctrptr == ctrblk)
memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
crypto_inc(walk->iv, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (k < n) {
if (__ctr_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
}
}
if (locked)
spin_unlock(&ctrblk_lock);
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
while (1) {
if (cpacf_kmctr(ctx->fc | modifier,
ctx->pk.protkey, buf,
walk->src.virt.addr, AES_BLOCK_SIZE,
walk->iv) == AES_BLOCK_SIZE)
break;
if (__ctr_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
}
memcpy(walk->dst.virt.addr, buf, nbytes);
crypto_inc(walk->iv, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
}
return ret;
}
static int ctr_paes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_paes_crypt(desc, 0, &walk);
}
static int ctr_paes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_paes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg ctr_paes_alg = {
.cra_name = "ctr(paes)",
.cra_driver_name = "ctr-paes-s390",
.cra_priority = 400, /* combo: aes + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SECKEYBLOBSIZE,
.max_keysize = SECKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ctr_paes_set_key,
.encrypt = ctr_paes_encrypt,
.decrypt = ctr_paes_decrypt,
}
}
};
static inline void __crypto_unregister_alg(struct crypto_alg *alg)
{
if (!list_empty(&alg->cra_list))
crypto_unregister_alg(alg);
}
static void paes_s390_fini(void)
{
if (ctrblk)
free_page((unsigned long) ctrblk);
__crypto_unregister_alg(&ctr_paes_alg);
__crypto_unregister_alg(&xts_paes_alg);
__crypto_unregister_alg(&cbc_paes_alg);
__crypto_unregister_alg(&ecb_paes_alg);
}
static int __init paes_s390_init(void)
{
int ret;
/* Query available functions for KM, KMC and KMCTR */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
cpacf_query(CPACF_KMCTR, &kmctr_functions);
if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
ret = crypto_register_alg(&ecb_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
ret = crypto_register_alg(&cbc_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
ret = crypto_register_alg(&xts_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
ret = crypto_register_alg(&ctr_paes_alg);
if (ret)
goto out_err;
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
goto out_err;
}
}
return 0;
out_err:
paes_s390_fini();
return ret;
}
module_init(paes_s390_init);
module_exit(paes_s390_fini);
MODULE_ALIAS_CRYPTO("aes-all");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");
@@ -229,6 +229,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
...
@@ -28,8 +28,9 @@
 #define CPACF_PPNO		0xb93c		/* MSA5 */

 /*
- * Decryption modifier bit
+ * En/decryption modifier bits
  */
+#define CPACF_ENCRYPT		0x00
 #define CPACF_DECRYPT		0x80

 /*
@@ -42,8 +43,13 @@
 #define CPACF_KM_AES_128	0x12
 #define CPACF_KM_AES_192	0x13
 #define CPACF_KM_AES_256	0x14
+#define CPACF_KM_PAES_128	0x1a
+#define CPACF_KM_PAES_192	0x1b
+#define CPACF_KM_PAES_256	0x1c
 #define CPACF_KM_XTS_128	0x32
 #define CPACF_KM_XTS_256	0x34
+#define CPACF_KM_PXTS_128	0x3a
+#define CPACF_KM_PXTS_256	0x3c

 /*
  * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -56,6 +62,9 @@
 #define CPACF_KMC_AES_128	0x12
 #define CPACF_KMC_AES_192	0x13
 #define CPACF_KMC_AES_256	0x14
+#define CPACF_KMC_PAES_128	0x1a
+#define CPACF_KMC_PAES_192	0x1b
+#define CPACF_KMC_PAES_256	0x1c
 #define CPACF_KMC_PRNG		0x43

 /*
@@ -69,6 +78,9 @@
 #define CPACF_KMCTR_AES_128	0x12
 #define CPACF_KMCTR_AES_192	0x13
 #define CPACF_KMCTR_AES_256	0x14
+#define CPACF_KMCTR_PAES_128	0x1a
+#define CPACF_KMCTR_PAES_192	0x1b
+#define CPACF_KMCTR_PAES_256	0x1c

 /*
  * Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
@@ -98,6 +110,18 @@
 #define CPACF_KMAC_TDEA_128	0x02
 #define CPACF_KMAC_TDEA_192	0x03

+/*
+ * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
+ * instruction
+ */
+#define CPACF_PCKMO_QUERY		0x00
+#define CPACF_PCKMO_ENC_DES_KEY		0x01
+#define CPACF_PCKMO_ENC_TDES_128_KEY	0x02
+#define CPACF_PCKMO_ENC_TDES_192_KEY	0x03
+#define CPACF_PCKMO_ENC_AES_128_KEY	0x12
+#define CPACF_PCKMO_ENC_AES_192_KEY	0x13
+#define CPACF_PCKMO_ENC_AES_256_KEY	0x14
+
 /*
  * Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
  * instruction
@@ -397,4 +421,24 @@ static inline void cpacf_pcc(unsigned long func, void *param)
 		: "cc", "memory");
 }

+/**
+ * cpacf_pckmo() - executes the PCKMO (PERFORM CRYPTOGRAPHIC KEY
+ *		   MANAGEMENT) instruction
+ * @func: the function code passed to PCKMO; see CPACF_PCKMO_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Returns 0.
+ */
+static inline void cpacf_pckmo(long func, void *param)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+
+	asm volatile(
+		"       .insn   rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
+		:
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO)
+		: "cc", "memory");
+}
+
 #endif	/* _ASM_S390_CPACF_H */
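
(For orientation, this is roughly how the new PCKMO function codes are consumed: a clear key is placed in a parameter block, PCKMO wraps it in place and appends a wrapping-key verification pattern. A hedged sketch for the AES-128 case — the exact parameter block layout is an assumption here, see the Principles of Operation:)

	/* Sketch only: parameter block layout assumed for illustration. */
	struct pckmo_aes128_parm {
		u8 key[16];	/* in: clear key, out: wrapped (protected) key */
		u8 wkvp[32];	/* out: wrapping key verification pattern */
	};

	static void demo_clr2protkey(const u8 *clrkey,
				     struct pckmo_aes128_parm *parm)
	{
		memset(parm, 0, sizeof(*parm));
		memcpy(parm->key, clrkey, 16);
		cpacf_pckmo(CPACF_PCKMO_ENC_AES_128_KEY, parm);
		/* parm->key plus parm->wkvp now form the protected key material */
	}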
@@ -63,7 +63,7 @@ static inline void set_user_asce(struct mm_struct *mm)
 	S390_lowcore.user_asce = mm->context.asce;
 	if (current->thread.mm_segment.ar4)
 		__ctl_load(S390_lowcore.user_asce, 7, 7);
-	set_cpu_flag(CIF_ASCE);
+	set_cpu_flag(CIF_ASCE_PRIMARY);
 }

 static inline void clear_user_asce(void)
@@ -81,7 +81,7 @@ static inline void load_kernel_asce(void)
 	__ctl_store(asce, 1, 1);
 	if (asce != S390_lowcore.kernel_asce)
 		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
-	set_cpu_flag(CIF_ASCE);
+	set_cpu_flag(CIF_ASCE_PRIMARY);
 }

 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
...
@@ -640,12 +640,12 @@ static inline int pud_bad(pud_t pud)

 static inline int pmd_present(pmd_t pmd)
 {
-	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
+	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
 }

 static inline int pmd_none(pmd_t pmd)
 {
-	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
+	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
 }

 static inline unsigned long pmd_pfn(pmd_t pmd)
@@ -803,7 +803,7 @@ static inline void pud_clear(pud_t *pud)

 static inline void pmd_clear(pmd_t *pmdp)
 {
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }

 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -1357,7 +1357,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr, pmd_t *pmdp)
 {
-	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }

 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
@@ -1367,10 +1367,10 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
 {
 	if (full) {
 		pmd_t pmd = *pmdp;
-		*pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
+		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
 		return pmd;
 	}
-	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }

 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1384,7 +1384,7 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
 static inline void pmdp_invalidate(struct vm_area_struct *vma,
 				   unsigned long addr, pmd_t *pmdp)
 {
-	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }

 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
...
/*
* Kernelspace interface to the pkey device driver
*
* Copyright IBM Corp. 2016
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
*/
#ifndef _KAPI_PKEY_H
#define _KAPI_PKEY_H
#include <linux/ioctl.h>
#include <linux/types.h>
#include <uapi/asm/pkey.h>
/*
* Generate (AES) random secure key.
* @param cardnr may be -1 (use default card)
* @param domain may be -1 (use default domain)
* @param keytype one of the PKEY_KEYTYPE values
* @param seckey pointer to buffer receiving the secure key
* @return 0 on success, negative errno value on failure
*/
int pkey_genseckey(__u16 cardnr, __u16 domain,
__u32 keytype, struct pkey_seckey *seckey);
/*
* Generate (AES) secure key with given key value.
* @param cardnr may be -1 (use default card)
* @param domain may be -1 (use default domain)
* @param keytype one of the PKEY_KEYTYPE values
* @param clrkey pointer to buffer with clear key data
* @param seckey pointer to buffer receiving the secure key
* @return 0 on success, negative errno value on failure
*/
int pkey_clr2seckey(__u16 cardnr, __u16 domain, __u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_seckey *seckey);
/*
 * Derive (AES) protected key from the (AES) secure key blob.
* @param cardnr may be -1 (use default card)
* @param domain may be -1 (use default domain)
* @param seckey pointer to buffer with the input secure key
* @param protkey pointer to buffer receiving the protected key and
* additional info (type, length)
* @return 0 on success, negative errno value on failure
*/
int pkey_sec2protkey(__u16 cardnr, __u16 domain,
const struct pkey_seckey *seckey,
struct pkey_protkey *protkey);
/*
* Derive (AES) protected key from a given clear key value.
* @param keytype one of the PKEY_KEYTYPE values
* @param clrkey pointer to buffer with clear key data
* @param protkey pointer to buffer receiving the protected key and
* additional info (type, length)
* @return 0 on success, negative errno value on failure
*/
int pkey_clr2protkey(__u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_protkey *protkey);
/*
* Search for a matching crypto card based on the Master Key
* Verification Pattern provided inside a secure key.
* @param seckey pointer to buffer with the input secure key
* @param cardnr pointer to cardnr, receives the card number on success
* @param domain pointer to domain, receives the domain number on success
* @param verify if set, always verify by fetching verification pattern
* from card
* @return 0 on success, negative errno value on failure. If no card could be
* found, -ENODEV is returned.
*/
int pkey_findcard(const struct pkey_seckey *seckey,
__u16 *cardnr, __u16 *domain, int verify);
/*
* Find card and transform secure key to protected key.
* @param seckey pointer to buffer with the input secure key
* @param protkey pointer to buffer receiving the protected key and
* additional info (type, length)
* @return 0 on success, negative errno value on failure
*/
int pkey_skey2pkey(const struct pkey_seckey *seckey,
struct pkey_protkey *protkey);
#endif /* _KAPI_PKEY_H */
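
(A minimal sketch of how another kernel module might use the API declared above — a hypothetical consumer, error paths trimmed: generate a random secure key on any card/domain, then derive the matching protected key for CPACF use:)

	#include <asm/pkey.h>

	static int pkey_demo(void)	/* hypothetical consumer */
	{
		struct pkey_seckey seckey;
		struct pkey_protkey protkey;
		int rc;

		/* cardnr/domain -1 (0xffff): let the driver choose */
		rc = pkey_genseckey(-1, -1, PKEY_KEYTYPE_AES_256, &seckey);
		if (rc)
			return rc;
		rc = pkey_skey2pkey(&seckey, &protkey);
		if (rc)
			return rc;
		/* protkey.protkey/protkey.len can now feed e.g. paes setkey */
		return 0;
	}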
@@ -14,14 +14,16 @@
 #include <linux/const.h>

 #define CIF_MCCK_PENDING	0	/* machine check handling is pending */
-#define CIF_ASCE		1	/* user asce needs fixup / uaccess */
-#define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
-#define CIF_FPU			3	/* restore FPU registers */
-#define CIF_IGNORE_IRQ		4	/* ignore interrupt (for udelay) */
-#define CIF_ENABLED_WAIT	5	/* in enabled wait state */
+#define CIF_ASCE_PRIMARY	1	/* primary asce needs fixup / uaccess */
+#define CIF_ASCE_SECONDARY	2	/* secondary asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY		3	/* delay HZ disable for a tick */
+#define CIF_FPU			4	/* restore FPU registers */
+#define CIF_IGNORE_IRQ		5	/* ignore interrupt (for udelay) */
+#define CIF_ENABLED_WAIT	6	/* in enabled wait state */

 #define _CIF_MCCK_PENDING	_BITUL(CIF_MCCK_PENDING)
-#define _CIF_ASCE		_BITUL(CIF_ASCE)
+#define _CIF_ASCE_PRIMARY	_BITUL(CIF_ASCE_PRIMARY)
+#define _CIF_ASCE_SECONDARY	_BITUL(CIF_ASCE_SECONDARY)
 #define _CIF_NOHZ_DELAY		_BITUL(CIF_NOHZ_DELAY)
 #define _CIF_FPU		_BITUL(CIF_FPU)
 #define _CIF_IGNORE_IRQ		_BITUL(CIF_IGNORE_IRQ)
@@ -89,7 +91,8 @@ extern void execve_tail(void);
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */
-#define TASK_SIZE_OF(tsk)	((tsk)->mm->context.asce_limit)
+#define TASK_SIZE_OF(tsk)	((tsk)->mm ? \
+				 (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
 					(1UL << 30) : (1UL << 41))
 #define TASK_SIZE		TASK_SIZE_OF(current)
@@ -200,10 +203,12 @@ struct stack_frame {
 struct task_struct;
 struct mm_struct;
 struct seq_file;
+struct pt_regs;

 typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
 void dump_trace(dump_trace_func_t func, void *data,
 		struct task_struct *task, unsigned long sp);
+void show_registers(struct pt_regs *regs);

 void show_cacheinfo(struct seq_file *m);
...
@@ -14,6 +14,7 @@
  */
 #include <linux/sched.h>
 #include <linux/errno.h>
+#include <asm/processor.h>
 #include <asm/ctl_reg.h>

 #define VERIFY_READ	0
@@ -36,18 +37,20 @@
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current->thread.mm_segment)

-#define set_fs(x) \
-do { \
-	unsigned long __pto; \
-	current->thread.mm_segment = (x); \
-	__pto = current->thread.mm_segment.ar4 ? \
-		S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
-	__ctl_load(__pto, 7, 7); \
-} while (0)
-
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)

+static inline void set_fs(mm_segment_t fs)
+{
+	current->thread.mm_segment = fs;
+	if (segment_eq(fs, KERNEL_DS)) {
+		set_cpu_flag(CIF_ASCE_SECONDARY);
+		__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	} else {
+		clear_cpu_flag(CIF_ASCE_SECONDARY);
+		__ctl_load(S390_lowcore.user_asce, 7, 7);
+	}
+}
+
 static inline int __range_ok(unsigned long addr, unsigned long size)
 {
 	return 1;
...
@@ -24,6 +24,7 @@ header-y += mman.h
 header-y += monwriter.h
 header-y += msgbuf.h
 header-y += param.h
+header-y += pkey.h
 header-y += poll.h
 header-y += posix_types.h
 header-y += ptrace.h
...
/*
* Userspace interface to the pkey device driver
*
* Copyright IBM Corp. 2017
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
*/
#ifndef _UAPI_PKEY_H
#define _UAPI_PKEY_H
#include <linux/ioctl.h>
#include <linux/types.h>
/*
* Ioctl calls supported by the pkey device driver
*/
#define PKEY_IOCTL_MAGIC 'p'
#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */
#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
/* defines for the type field within the pkey_protkey struct */
#define PKEY_KEYTYPE_AES_128 1
#define PKEY_KEYTYPE_AES_192 2
#define PKEY_KEYTYPE_AES_256 3
/* Struct to hold a secure key blob */
struct pkey_seckey {
__u8 seckey[SECKEYBLOBSIZE]; /* the secure key blob */
};
/* Struct to hold protected key and length info */
struct pkey_protkey {
__u32 type; /* key type, one of the PKEY_KEYTYPE values */
__u32 len; /* bytes actually stored in protkey[] */
__u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
};
/* Struct to hold a clear key value */
struct pkey_clrkey {
__u8 clrkey[MAXCLRKEYSIZE]; /* 16, 24, or 32 byte clear key value */
};
/*
* Generate secure key
*/
struct pkey_genseck {
__u16 cardnr; /* in: card to use or FFFF for any */
__u16 domain; /* in: domain or FFFF for any */
__u32 keytype; /* in: key type to generate */
struct pkey_seckey seckey; /* out: the secure key blob */
};
#define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck)
/*
* Construct secure key from clear key value
*/
struct pkey_clr2seck {
__u16 cardnr; /* in: card to use or FFFF for any */
__u16 domain; /* in: domain or FFFF for any */
__u32 keytype; /* in: key type to generate */
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_seckey seckey; /* out: the secure key blob */
};
#define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck)
/*
* Fabricate protected key from a secure key
*/
struct pkey_sec2protk {
__u16 cardnr; /* in: card to use or FFFF for any */
__u16 domain; /* in: domain or FFFF for any */
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk)
/*
 * Fabricate protected key from a clear key value
*/
struct pkey_clr2protk {
__u32 keytype; /* in: key type to generate */
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk)
/*
* Search for matching crypto card based on the Master Key
* Verification Pattern provided inside a secure key.
*/
struct pkey_findcard {
struct pkey_seckey seckey; /* in: the secure key blob */
__u16 cardnr; /* out: card number */
__u16 domain; /* out: domain number */
};
#define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard)
/*
* Combined together: findcard + sec2prot
*/
struct pkey_skey2pkey {
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
#endif /* _UAPI_PKEY_H */
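
(For completeness, a hedged userspace sketch of the ioctl interface above. It assumes the pkey module exposes a /dev/pkey misc device node — the node name is an assumption here — and that <asm/pkey.h> provides the structs and ioctl numbers:)

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/pkey.h>	/* structs and PKEY_* ioctls above */

	int main(void)
	{
		struct pkey_genseck gs;
		struct pkey_skey2pkey sp;
		int rc, fd = open("/dev/pkey", O_RDWR);	/* node name: assumption */

		if (fd < 0)
			return 1;
		memset(&gs, 0, sizeof(gs));
		gs.cardnr = 0xffff;	/* any card */
		gs.domain = 0xffff;	/* any domain */
		gs.keytype = PKEY_KEYTYPE_AES_256;
		rc = ioctl(fd, PKEY_GENSECK, &gs);
		if (!rc) {
			/* findcard + sec2prot combined */
			memset(&sp, 0, sizeof(sp));
			sp.seckey = gs.seckey;
			rc = ioctl(fd, PKEY_SKEY2PKEY, &sp);
		}
		close(fd);
		return rc ? 1 : 0;
	}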
@@ -50,7 +50,8 @@ _TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		   _TIF_UPROBE)
 _TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		   _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
+_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
+		   _CIF_ASCE_SECONDARY | _CIF_FPU)
 _PIF_WORK	= (_PIF_PER_TRAP)

 #define BASED(name) name-cleanup_critical(%r13)
@@ -339,8 +340,8 @@ ENTRY(system_call)
 	jo	.Lsysc_notify_resume
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	jo	.Lsysc_vxrs
-	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
-	jo	.Lsysc_uaccess
+	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
+	jnz	.Lsysc_asce
 	j	.Lsysc_return		# beware of critical section cleanup

 #
@@ -358,12 +359,15 @@ ENTRY(system_call)
 	jg	s390_handle_mcck	# TIF bit will be cleared by handler

 #
-# _CIF_ASCE is set, load user space asce
+# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
 #
-.Lsysc_uaccess:
-	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
+.Lsysc_asce:
+	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-	j	.Lsysc_return
+	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
+	jz	.Lsysc_return
+	larl	%r14,.Lsysc_return
+	jg	set_fs_fixup

 #
 # CIF_FPU is set, restore floating-point controls and floating-point registers.
@@ -661,8 +665,8 @@ ENTRY(io_int_handler)
 	jo	.Lio_notify_resume
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	jo	.Lio_vxrs
-	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
-	jo	.Lio_uaccess
+	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
+	jnz	.Lio_asce
 	j	.Lio_return		# beware of critical section cleanup

 #
@@ -675,12 +679,15 @@ ENTRY(io_int_handler)
 	j	.Lio_return

 #
-# _CIF_ASCE is set, load user space asce
+# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
 #
-.Lio_uaccess:
-	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
+.Lio_asce:
+	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-	j	.Lio_return
+	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
+	jz	.Lio_return
+	larl	%r14,.Lio_return
+	jg	set_fs_fixup

 #
 # CIF_FPU is set, restore floating-point controls and floating-point registers.
...
@@ -80,5 +80,6 @@ long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
 DECLARE_PER_CPU(u64, mt_cycles[8]);

 void verify_facilities(void);
+void set_fs_fixup(void);

 #endif /* _ENTRY_H */
@@ -116,6 +116,19 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 		s390_handle_damage();
 		kill_task = 1;
 	}
+	/* Validate control registers */
+	if (!mci.cr) {
+		/*
+		 * Control registers have unknown contents.
+		 * Can't recover and therefore stopping machine.
+		 */
+		s390_handle_damage();
+	} else {
+		asm volatile(
+			"	lctlg	0,15,0(%0)\n"
+			"	ptlb\n"
+			: : "a" (&S390_lowcore.cregs_save_area) : "memory");
+	}
 	if (!mci.fp) {
 		/*
 		 * Floating point registers can't be restored. If the
@@ -208,18 +221,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 		 */
 		kill_task = 1;
 	}
-	/* Validate control registers */
-	if (!mci.cr) {
-		/*
-		 * Control registers have unknown contents.
-		 * Can't recover and therefore stopping machine.
-		 */
-		s390_handle_damage();
-	} else {
-		asm volatile(
-			"	lctlg	0,15,0(%0)"
-			: : "a" (&S390_lowcore.cregs_save_area) : "memory");
-	}
 	/*
 	 * We don't even try to validate the TOD register, since we simply
 	 * can't write something sensible into that register.
...
@@ -100,8 +100,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	return 0;
 }

-int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
-		unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
+		    unsigned long arg, struct task_struct *p, unsigned long tls)
 {
 	struct fake_frame
 	{
@@ -156,7 +156,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	/* Set a new TLS ?  */
 	if (clone_flags & CLONE_SETTLS) {
-		unsigned long tls = frame->childregs.gprs[6];
 		if (is_compat_task()) {
 			p->thread.acrs[0] = (unsigned int)tls;
 		} else {
@@ -234,3 +233,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	ret = PAGE_ALIGN(mm->brk + brk_rnd());
 	return (ret > mm->brk) ? ret : mm->brk;
 }
+
+void set_fs_fixup(void)
+{
+	struct pt_regs *regs = current_pt_regs();
+	static bool warned;
+
+	set_fs(USER_DS);
+	if (warned)
+		return;
+	WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code);
+	show_registers(regs);
+	warned = true;
+}
@@ -359,8 +359,8 @@ static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
 	spin_lock(&gmap->guest_table_lock);
 	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
 	if (entry) {
-		flush = (*entry != _SEGMENT_ENTRY_INVALID);
-		*entry = _SEGMENT_ENTRY_INVALID;
+		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
+		*entry = _SEGMENT_ENTRY_EMPTY;
 	}
 	spin_unlock(&gmap->guest_table_lock);
 	return flush;
@@ -589,7 +589,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 		return rc;
 	ptl = pmd_lock(mm, pmd);
 	spin_lock(&gmap->guest_table_lock);
-	if (*table == _SEGMENT_ENTRY_INVALID) {
+	if (*table == _SEGMENT_ENTRY_EMPTY) {
 		rc = radix_tree_insert(&gmap->host_to_guest,
 				       vmaddr >> PMD_SHIFT, table);
 		if (!rc)
...
@@ -62,7 +62,7 @@ static inline unsigned long __pte_to_rste(pte_t pte)
 		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
 				     _SEGMENT_ENTRY_NOEXEC);
 	} else
-		rste = _SEGMENT_ENTRY_INVALID;
+		rste = _SEGMENT_ENTRY_EMPTY;
 	return rste;
 }
...
@@ -62,19 +62,32 @@ config CRYPTO_DEV_GEODE
 	  will be called geode-aes.

 config ZCRYPT
-	tristate "Support for PCI-attached cryptographic adapters"
+	tristate "Support for s390 cryptographic adapters"
 	depends on S390
 	select HW_RANDOM
 	help
-	  Select this option if you want to use a PCI-attached cryptographic
-	  adapter like:
-	  + PCI Cryptographic Accelerator (PCICA)
-	  + PCI Cryptographic Coprocessor (PCICC)
+	  Select this option if you want to enable support for
+	  s390 cryptographic adapters like:
 	  + PCI-X Cryptographic Coprocessor (PCIXCC)
-	  + Crypto Express2 Coprocessor (CEX2C)
-	  + Crypto Express2 Accelerator (CEX2A)
-	  + Crypto Express3 Coprocessor (CEX3C)
-	  + Crypto Express3 Accelerator (CEX3A)
+	  + Crypto Express 2,3,4 or 5 Coprocessor (CEXxC)
+	  + Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
+	  + Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
+
+config PKEY
+	tristate "Kernel API for protected key handling"
+	depends on S390
+	depends on ZCRYPT
+	help
+	  With this option enabled the pkey kernel module provides an API
+	  for creation and handling of protected keys. Other parts of the
+	  kernel or userspace applications may use these functions.
+
+	  Select this option if you want to enable the kernel and userspace
+	  API for protected key handling.
+
+	  Please note that creation of protected keys from secure keys
+	  requires at least one CEX card in coprocessor mode available at
+	  runtime.

 config CRYPTO_SHA1_S390
 	tristate "SHA1 digest algorithm"
@@ -124,6 +137,7 @@ config CRYPTO_AES_S390
 	depends on S390
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
+	select PKEY
 	help
 	  This is the s390 hardware accelerated implementation of the
 	  AES cipher algorithms (FIPS-197).
...
@@ -4864,7 +4864,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
 		break;
 	case 3: /* tsa_intrg */
 		len += sprintf(page + len, PRINTK_HEADER
-			      " tsb->tsa.intrg.: not supportet yet\n");
+			      " tsb->tsa.intrg.: not supported yet\n");
 		break;
 	}
...
@@ -165,13 +165,15 @@ int tpi(struct tpi_info *addr)
 int chsc(void *chsc_area)
 {
 	typedef struct { char _[4096]; } addr_type;
-	int cc;
+	int cc = -EIO;

 	asm volatile(
 		"	.insn	rre,0xb25f0000,%2,0\n"
-		"	ipm	%0\n"
+		"0:	ipm	%0\n"
 		"	srl	%0,28\n"
-		: "=d" (cc), "=m" (*(addr_type *) chsc_area)
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (cc), "=m" (*(addr_type *) chsc_area)
 		: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
 		: "cc");
 	trace_s390_cio_chsc(chsc_area, cc);
...
@@ -10,3 +10,7 @@ zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
 obj-$(CONFIG_ZCRYPT) += zcrypt.o
 # adapter drivers depend on ap.o and zcrypt.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
+# pkey kernel module
+pkey-objs := pkey_api.o
+obj-$(CONFIG_PKEY) += pkey.o
@@ -1107,16 +1107,6 @@ static void ap_config_timeout(unsigned long ptr)
 	queue_work(system_long_wq, &ap_scan_work);
 }

-static void ap_reset_domain(void)
-{
-	int i;
-
-	if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
-		return;
-	for (i = 0; i < AP_DEVICES; i++)
-		ap_rapq(AP_MKQID(i, ap_domain_index));
-}
-
 static void ap_reset_all(void)
 {
 	int i, j;
...
@@ -58,9 +58,9 @@ static ssize_t ap_functions_show(struct device *dev,

 static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);

-static ssize_t ap_request_count_show(struct device *dev,
-				     struct device_attribute *attr,
-				     char *buf)
+static ssize_t ap_req_count_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
 {
 	struct ap_card *ac = to_ap_card(dev);
 	unsigned int req_cnt;
@@ -72,7 +72,23 @@ static ssize_t ap_request_count_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
 }

-static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+static ssize_t ap_req_count_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct ap_card *ac = to_ap_card(dev);
+	struct ap_queue *aq;
+
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_queue(aq, ac)
+		aq->total_request_count = 0;
+	spin_unlock_bh(&ap_list_lock);
+	atomic_set(&ac->total_request_count, 0);
+
+	return count;
+}
+
+static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);

 static ssize_t ap_requestq_count_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
...
@@ -459,9 +459,9 @@ EXPORT_SYMBOL(ap_queue_resume);
 /*
  * AP queue related attributes.
  */
-static ssize_t ap_request_count_show(struct device *dev,
-				     struct device_attribute *attr,
-				     char *buf)
+static ssize_t ap_req_count_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
 {
 	struct ap_queue *aq = to_ap_queue(dev);
 	unsigned int req_cnt;
@@ -472,7 +472,20 @@ static ssize_t ap_request_count_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
 }

-static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+static ssize_t ap_req_count_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+
+	spin_lock_bh(&aq->lock);
+	aq->total_request_count = 0;
+	spin_unlock_bh(&aq->lock);
+
+	return count;
+}
+
+static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);

 static ssize_t ap_requestq_count_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
...
/*
* pkey device driver
*
* Copyright IBM Corp. 2017
* Author(s): Harald Freudenberger
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
*/
#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <asm/zcrypt.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>
#include "zcrypt_api.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key interface");
/* Size of parameter block used for all cca requests/replies */
#define PARMBSIZE 512
/* Size of vardata block used for some of the cca requests/replies */
#define VARDATASIZE 4096
/*
* debug feature data and functions
*/
static debug_info_t *debug_info;
#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
static void __init pkey_debug_init(void)
{
debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
debug_register_view(debug_info, &debug_sprintf_view);
debug_set_level(debug_info, 3);
}
static void __exit pkey_debug_exit(void)
{
debug_unregister(debug_info);
}
/* inside view of a secure key token (only type 0x01 version 0x04) */
struct secaeskeytoken {
u8 type; /* 0x01 for internal key token */
u8 res0[3];
u8 version; /* should be 0x04 */
u8 res1[1];
u8 flag; /* key flags */
u8 res2[1];
u64 mkvp; /* master key verification pattern */
u8 key[32]; /* key value (encrypted) */
u8 cv[8]; /* control vector */
u16 bitsize; /* key bit size */
u16 keysize; /* key byte size */
u8 tvv[4]; /* token validation value */
} __packed;
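/* Note: the fields above add up to 64 bytes, matching SECKEYBLOBSIZE. */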
/*
* Simple check if the token is a valid CCA secure AES key
* token. If keybitsize is given, the bitsize of the key is
* also checked. Returns 0 on success or errno value on failure.
*/
static int check_secaeskeytoken(u8 *token, int keybitsize)
{
struct secaeskeytoken *t = (struct secaeskeytoken *) token;
if (t->type != 0x01) {
DEBUG_ERR(
"check_secaeskeytoken secure token check failed, type mismatch 0x%02x != 0x01\n",
(int) t->type);
return -EINVAL;
}
if (t->version != 0x04) {
DEBUG_ERR(
"check_secaeskeytoken secure token check failed, version mismatch 0x%02x != 0x04\n",
(int) t->version);
return -EINVAL;
}
if (keybitsize > 0 && t->bitsize != keybitsize) {
DEBUG_ERR(
"check_secaeskeytoken secure token check failed, bitsize mismatch %d != %d\n",
(int) t->bitsize, keybitsize);
return -EINVAL;
}
return 0;
}
/*
* Allocate consecutive memory for request CPRB, request param
* block, reply CPRB and reply param block and fill in values
* for the common fields. Returns 0 on success or errno value
* on failure.
*/
static int alloc_and_prep_cprbmem(size_t paramblen,
u8 **pcprbmem,
struct CPRBX **preqCPRB,
struct CPRBX **prepCPRB)
{
u8 *cprbmem;
size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
struct CPRBX *preqcblk, *prepcblk;
/*
* allocate consecutive memory for request CPRB, request param
* block, reply CPRB and reply param block
*/
cprbmem = kzalloc(2 * cprbplusparamblen, GFP_KERNEL);
if (!cprbmem)
return -ENOMEM;
preqcblk = (struct CPRBX *) cprbmem;
prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
/* fill request cprb struct */
preqcblk->cprb_len = sizeof(struct CPRBX);
preqcblk->cprb_ver_id = 0x02;
memcpy(preqcblk->func_id, "T2", 2);
preqcblk->rpl_msgbl = cprbplusparamblen;
if (paramblen) {
preqcblk->req_parmb =
((u8 *) preqcblk) + sizeof(struct CPRBX);
preqcblk->rpl_parmb =
((u8 *) prepcblk) + sizeof(struct CPRBX);
}
*pcprbmem = cprbmem;
*preqCPRB = preqcblk;
*prepCPRB = prepcblk;
return 0;
}
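/*
* Resulting layout of the single allocation (illustrative):
*
*   cprbmem: | request CPRBX | request parm block | reply CPRBX | reply parm block |
*
* Each half is sizeof(struct CPRBX) + paramblen bytes; req_parmb and
* rpl_parmb point directly behind their respective CPRBX.
*/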
/*
* Free the cprb memory allocated with the function above.
* If the scrub value is not zero, the memory is filled
* with zeros before freeing (useful if there was some
* clear key material in there).
*/
static void free_cprbmem(void *mem, size_t paramblen, int scrub)
{
if (scrub)
memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
kfree(mem);
}
/*
* Helper function to prepare the xcrb struct
*/
static inline void prep_xcrb(struct ica_xcRB *pxcrb,
u16 cardnr,
struct CPRBX *preqcblk,
struct CPRBX *prepcblk)
{
memset(pxcrb, 0, sizeof(*pxcrb));
pxcrb->agent_ID = 0x4341; /* 'CA' */
pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
pxcrb->request_control_blk_length =
preqcblk->cprb_len + preqcblk->req_parml;
pxcrb->request_control_blk_addr = (void *) preqcblk;
pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
pxcrb->reply_control_blk_addr = (void *) prepcblk;
}
/*
* Helper function which calls zcrypt_send_cprb with
* memory management segment adjusted to kernel space
* so that the copy_from_user calls within this
* function in fact copy from kernel space.
*/
static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
{
int rc;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
rc = zcrypt_send_cprb(xcrb);
set_fs(old_fs);
return rc;
}
/*
* Generate (random) AES secure key.
*/
int pkey_genseckey(u16 cardnr, u16 domain,
u32 keytype, struct pkey_seckey *seckey)
{
int i, rc, keysize;
int seckeysize;
u8 *mem;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct kgreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv1 {
u16 len;
char key_form[8];
char key_length[8];
char key_type1[8];
char key_type2[8];
} lv1;
struct lv2 {
u16 len;
struct keyid {
u16 len;
u16 attr;
u8 data[SECKEYBLOBSIZE];
} keyid[6];
} lv2;
} *preqparm;
struct kgrepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv3 {
u16 len;
u16 keyblocklen;
struct {
u16 toklen;
u16 tokattr;
u8 tok[0];
/* ... some more data ... */
} keyblock;
} lv3;
} *prepparm;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with KG request */
preqparm = (struct kgreqparm *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "KG", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
memcpy(preqparm->lv1.key_form, "OP ", 8);
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
keysize = 16;
memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
break;
case PKEY_KEYTYPE_AES_192:
keysize = 24;
memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
break;
case PKEY_KEYTYPE_AES_256:
keysize = 32;
memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
break;
default:
DEBUG_ERR(
"pkey_genseckey unknown/unsupported keytype %d\n",
keytype);
rc = -EINVAL;
goto out;
}
memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
preqparm->lv2.len = sizeof(struct lv2);
for (i = 0; i < 6; i++) {
preqparm->lv2.keyid[i].len = sizeof(struct keyid);
preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
}
preqcblk->req_parml = sizeof(struct kgreqparm);
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = _zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"pkey_genseckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
(int) cardnr, (int) domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
"pkey_genseckey secure key generate failure, card response %d/%d\n",
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
DEBUG_ERR(
"pkey_genseckey secure token size mismatch %d != %d bytes\n",
seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
/* check secure key token */
rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
if (rc) {
rc = -EIO;
goto out;
}
/* copy the generated secure key token */
memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
out:
free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
EXPORT_SYMBOL(pkey_genseckey);
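/*
* Minimal in-kernel usage sketch (hypothetical caller, error handling
* trimmed): generate a random AES-256 secure key on a caller-chosen
* card/domain pair:
*
*   struct pkey_seckey seckey;
*
*   if (!pkey_genseckey(cardnr, domain, PKEY_KEYTYPE_AES_256, &seckey))
*           ... seckey.seckey now holds the 64 byte secure key blob ...
*/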
/*
* Generate an AES secure key with given key value.
*/
int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_seckey *seckey)
{
int rc, keysize, seckeysize;
u8 *mem;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct cmreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
char rule_array[8];
struct lv1 {
u16 len;
u8 clrkey[0];
} lv1;
struct lv2 {
u16 len;
struct keyid {
u16 len;
u16 attr;
u8 data[SECKEYBLOBSIZE];
} keyid;
} lv2;
} *preqparm;
struct lv2 *plv2;
struct cmrepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv3 {
u16 len;
u16 keyblocklen;
struct {
u16 toklen;
u16 tokattr;
u8 tok[0];
/* ... some more data ... */
} keyblock;
} lv3;
} *prepparm;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with CM request */
preqparm = (struct cmreqparm *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "CM", 2);
memcpy(preqparm->rule_array, "AES ", 8);
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
keysize = 16;
break;
case PKEY_KEYTYPE_AES_192:
keysize = 24;
break;
case PKEY_KEYTYPE_AES_256:
keysize = 32;
break;
default:
DEBUG_ERR(
"pkey_clr2seckey unknown/unsupported keytype %d\n",
keytype);
rc = -EINVAL;
goto out;
}
preqparm->lv1.len = sizeof(struct lv1) + keysize;
memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize);
plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
plv2->len = sizeof(struct lv2);
plv2->keyid.len = sizeof(struct keyid);
plv2->keyid.attr = 0x30;
preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = _zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"pkey_clr2seckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
(int) cardnr, (int) domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
"pkey_clr2seckey clear key import failure, card response %d/%d\n",
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
DEBUG_ERR(
"pkey_clr2seckey secure token size mismatch %d != %d bytes\n",
seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
/* check secure key token */
rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
if (rc) {
rc = -EIO;
goto out;
}
/* copy the generated secure key token */
memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
out:
free_cprbmem(mem, PARMBSIZE, 1);
return rc;
}
EXPORT_SYMBOL(pkey_clr2seckey);
/*
* Derive a protected key from the secure key blob.
*/
int pkey_sec2protkey(u16 cardnr, u16 domain,
const struct pkey_seckey *seckey,
struct pkey_protkey *protkey)
{
int rc;
u8 *mem;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct uskreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv1 {
u16 len;
u16 attr_len;
u16 attr_flags;
} lv1;
struct lv2 {
u16 len;
u16 attr_len;
u16 attr_flags;
u8 token[0]; /* cca secure key token */
} lv2 __packed;
} *preqparm;
struct uskrepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv3 {
u16 len;
u16 attr_len;
u16 attr_flags;
struct cpacfkeyblock {
u8 version; /* version of this struct */
u8 flags[2];
u8 algo;
u8 form;
u8 pad1[3];
u16 keylen;
u8 key[64]; /* the key (keylen bytes) */
u16 keyattrlen;
u8 keyattr[32];
u8 pad2[1];
u8 vptype;
u8 vp[32]; /* verification pattern */
} keyblock;
} lv3 __packed;
} *prepparm;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with USK request */
preqparm = (struct uskreqparm *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "US", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
preqparm->lv1.attr_flags = 0x0001;
preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
preqparm->lv2.attr_len = sizeof(struct lv2)
- sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
preqparm->lv2.attr_flags = 0x0000;
memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE);
preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = _zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"pkey_sec2protkey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
(int) cardnr, (int) domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
"pkey_sec2protkey unwrap secure key failure, card response %d/%d\n",
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
/* check the returned keyblock */
if (prepparm->lv3.keyblock.version != 0x01) {
DEBUG_ERR(
"pkey_sec2protkey reply param keyblock version mismatch 0x%02x != 0x01\n",
(int) prepparm->lv3.keyblock.version);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
switch (prepparm->lv3.keyblock.keylen) {
case 16+32:
protkey->type = PKEY_KEYTYPE_AES_128;
break;
case 24+32:
protkey->type = PKEY_KEYTYPE_AES_192;
break;
case 32+32:
protkey->type = PKEY_KEYTYPE_AES_256;
break;
default:
DEBUG_ERR("pkey_sec2protkey unknown/unsupported keytype %d\n",
prepparm->lv3.keyblock.keylen);
rc = -EIO;
goto out;
}
protkey->len = prepparm->lv3.keyblock.keylen;
memcpy(protkey->protkey, prepparm->lv3.keyblock.key, protkey->len);
out:
free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
EXPORT_SYMBOL(pkey_sec2protkey);
/*
* Create a protected key from a clear key value.
*/
int pkey_clr2protkey(u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_protkey *protkey)
{
long fc;
int keysize;
u8 paramblock[64];
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
keysize = 16;
fc = CPACF_PCKMO_ENC_AES_128_KEY;
break;
case PKEY_KEYTYPE_AES_192:
keysize = 24;
fc = CPACF_PCKMO_ENC_AES_192_KEY;
break;
case PKEY_KEYTYPE_AES_256:
keysize = 32;
fc = CPACF_PCKMO_ENC_AES_256_KEY;
break;
default:
DEBUG_ERR("pkey_clr2protkey unknown/unsupported keytype %d\n",
keytype);
return -EINVAL;
}
/* prepare param block */
memset(paramblock, 0, sizeof(paramblock));
memcpy(paramblock, clrkey->clrkey, keysize);
/* call the pckmo instruction */
cpacf_pckmo(fc, paramblock);
/* copy created protected key */
protkey->type = keytype;
protkey->len = keysize + 32;
memcpy(protkey->protkey, paramblock, keysize + 32);
return 0;
}
EXPORT_SYMBOL(pkey_clr2protkey);
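/*
* Note on the parameter block used above (descriptive of this code,
* not an addition to it): the clear key occupies the first keysize
* bytes; after cpacf_pckmo() completes, those bytes hold the key
* encrypted under the CPACF wrapping key, followed by a 32 byte
* verification pattern; together these form the keysize + 32 byte
* protected key copied to the caller.
*/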
/*
* query cryptographic facility from adapter
*/
static int query_crypto_facility(u16 cardnr, u16 domain,
const char *keyword,
u8 *rarray, size_t *rarraylen,
u8 *varray, size_t *varraylen)
{
int rc;
u16 len;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct fqreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
char rule_array[8];
struct lv1 {
u16 len;
u8 data[VARDATASIZE];
} lv1;
u16 dummylen;
} *preqparm;
size_t parmbsize = sizeof(struct fqreqparm);
struct fqrepparm {
u8 subfunc_code[2];
u8 lvdata[0];
} *prepparm;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with FQ request */
preqparm = (struct fqreqparm *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "FQ", 2);
strncpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
preqparm->lv1.len = sizeof(preqparm->lv1);
preqparm->dummylen = sizeof(preqparm->dummylen);
preqcblk->req_parml = parmbsize;
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = _zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"query_crypto_facility zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
(int) cardnr, (int) domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
"query_crypto_facility unwrap secure key failure, card response %d/%d\n",
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
ptr = prepparm->lvdata;
/* check and possibly copy reply rule array */
len = *((u16 *) ptr);
if (len > sizeof(u16)) {
ptr += sizeof(u16);
len -= sizeof(u16);
if (rarray && rarraylen && *rarraylen > 0) {
*rarraylen = (len > *rarraylen ? *rarraylen : len);
memcpy(rarray, ptr, *rarraylen);
}
ptr += len;
}
/* check and possibly copy reply var array */
len = *((u16 *) ptr);
if (len > sizeof(u16)) {
ptr += sizeof(u16);
len -= sizeof(u16);
if (varray && varraylen && *varraylen > 0) {
*varraylen = (len > *varraylen ? *varraylen : len);
memcpy(varray, ptr, *varraylen);
}
ptr += len;
}
out:
free_cprbmem(mem, parmbsize, 0);
return rc;
}
/*
* Fetch just the mkvp value via query_crypto_facility from adapter.
*/
static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
{
int rc, found = 0;
size_t rlen, vlen;
u8 *rarray, *varray, *pg;
pg = (u8 *) __get_free_page(GFP_KERNEL);
if (!pg)
return -ENOMEM;
rarray = pg;
varray = pg + PAGE_SIZE/2;
rlen = vlen = PAGE_SIZE/2;
rc = query_crypto_facility(cardnr, domain, "STATICSA",
rarray, &rlen, varray, &vlen);
if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
if (rarray[64] == '2') {
/* current master key state is valid */
*mkvp = *((u64 *)(varray + 184));
found = 1;
}
}
free_page((unsigned long) pg);
return found ? 0 : -ENOENT;
}
/* struct to hold cached mkvp info for each card/domain */
struct mkvp_info {
struct list_head list;
u16 cardnr;
u16 domain;
u64 mkvp;
};
/* a list with mkvp_info entries */
static LIST_HEAD(mkvp_list);
static DEFINE_SPINLOCK(mkvp_list_lock);
static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
{
int rc = -ENOENT;
struct mkvp_info *ptr;
spin_lock_bh(&mkvp_list_lock);
list_for_each_entry(ptr, &mkvp_list, list) {
if (ptr->cardnr == cardnr &&
ptr->domain == domain) {
*mkvp = ptr->mkvp;
rc = 0;
break;
}
}
spin_unlock_bh(&mkvp_list_lock);
return rc;
}
static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
{
int found = 0;
struct mkvp_info *ptr;
spin_lock_bh(&mkvp_list_lock);
list_for_each_entry(ptr, &mkvp_list, list) {
if (ptr->cardnr == cardnr &&
ptr->domain == domain) {
ptr->mkvp = mkvp;
found = 1;
break;
}
}
if (!found) {
ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
if (!ptr) {
spin_unlock_bh(&mkvp_list_lock);
return;
}
ptr->cardnr = cardnr;
ptr->domain = domain;
ptr->mkvp = mkvp;
list_add(&ptr->list, &mkvp_list);
}
spin_unlock_bh(&mkvp_list_lock);
}
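/*
* Note (descriptive): the allocation above uses GFP_ATOMIC because it
* happens under mkvp_list_lock with bottom halves disabled, where
* sleeping is not allowed. An allocation failure is tolerated; the
* value is simply fetched again on the next cache miss.
*/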
static void mkvp_cache_scrub(u16 cardnr, u16 domain)
{
struct mkvp_info *ptr;
spin_lock_bh(&mkvp_list_lock);
list_for_each_entry(ptr, &mkvp_list, list) {
if (ptr->cardnr == cardnr &&
ptr->domain == domain) {
list_del(&ptr->list);
kfree(ptr);
break;
}
}
spin_unlock_bh(&mkvp_list_lock);
}
static void __exit mkvp_cache_free(void)
{
struct mkvp_info *ptr, *pnext;
spin_lock_bh(&mkvp_list_lock);
list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) {
list_del(&ptr->list);
kfree(ptr);
}
spin_unlock_bh(&mkvp_list_lock);
}
/*
* Search for a matching crypto card based on the Master Key
* Verification Pattern provided inside a secure key.
*/
int pkey_findcard(const struct pkey_seckey *seckey,
u16 *pcardnr, u16 *pdomain, int verify)
{
struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
struct zcrypt_device_matrix *device_matrix;
u16 card, dom;
u64 mkvp;
int i, rc;
/* mkvp must not be zero */
if (t->mkvp == 0)
return -EINVAL;
/* fetch status of all crypto cards */
device_matrix = kmalloc(sizeof(struct zcrypt_device_matrix),
GFP_KERNEL);
if (!device_matrix)
return -ENOMEM;
zcrypt_device_status_mask(device_matrix);
/* walk through all crypto cards */
for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
card = AP_QID_CARD(device_matrix->device[i].qid);
dom = AP_QID_QUEUE(device_matrix->device[i].qid);
if (device_matrix->device[i].online &&
device_matrix->device[i].functions & 0x04) {
/* an enabled CCA Coprocessor card */
/* try cached mkvp */
if (mkvp_cache_fetch(card, dom, &mkvp) == 0 &&
t->mkvp == mkvp) {
if (!verify)
break;
/* verify: fetch mkvp from adapter */
if (fetch_mkvp(card, dom, &mkvp) == 0) {
mkvp_cache_update(card, dom, mkvp);
if (t->mkvp == mkvp)
break;
}
}
} else {
/* Card is offline and/or not a CCA card. */
/* del mkvp entry from cache if it exists */
mkvp_cache_scrub(card, dom);
}
}
if (i >= MAX_ZDEV_ENTRIES) {
/* nothing found, so this time without cache */
for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
if (!(device_matrix->device[i].online &&
device_matrix->device[i].functions & 0x04))
continue;
card = AP_QID_CARD(device_matrix->device[i].qid);
dom = AP_QID_QUEUE(device_matrix->device[i].qid);
/* fetch a fresh mkvp from the adapter */
if (fetch_mkvp(card, dom, &mkvp) == 0) {
mkvp_cache_update(card, dom, mkvp);
if (t->mkvp == mkvp)
break;
}
}
}
if (i < MAX_ZDEV_ENTRIES) {
if (pcardnr)
*pcardnr = card;
if (pdomain)
*pdomain = dom;
rc = 0;
} else
rc = -ENODEV;
kfree(device_matrix);
return rc;
}
EXPORT_SYMBOL(pkey_findcard);
/*
* Find card and transform secure key into protected key.
*/
int pkey_skey2pkey(const struct pkey_seckey *seckey,
struct pkey_protkey *protkey)
{
u16 cardnr, domain;
int rc, verify;
/*
* The pkey_sec2protkey call may fail when a card has been
* addressed where the master key was changed after the last fetch
* of the mkvp into the cache. So first try without verify, then
* with verify enabled (thus refreshing the mkvp for each card).
*/
for (verify = 0; verify < 2; verify++) {
rc = pkey_findcard(seckey, &cardnr, &domain, verify);
if (rc)
continue;
rc = pkey_sec2protkey(cardnr, domain, seckey, protkey);
if (rc == 0)
break;
}
if (rc)
DEBUG_DBG("pkey_skey2pkey failed rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL(pkey_skey2pkey);
/*
* File io functions
*/
static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc;
switch (cmd) {
case PKEY_GENSECK: {
struct pkey_genseck __user *ugs = (void __user *) arg;
struct pkey_genseck kgs;
if (copy_from_user(&kgs, ugs, sizeof(kgs)))
return -EFAULT;
rc = pkey_genseckey(kgs.cardnr, kgs.domain,
kgs.keytype, &kgs.seckey);
DEBUG_DBG("pkey_ioctl pkey_genseckey()=%d\n", rc);
if (rc)
break;
if (copy_to_user(ugs, &kgs, sizeof(kgs)))
return -EFAULT;
break;
}
case PKEY_CLR2SECK: {
struct pkey_clr2seck __user *ucs = (void __user *) arg;
struct pkey_clr2seck kcs;
if (copy_from_user(&kcs, ucs, sizeof(kcs)))
return -EFAULT;
rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
&kcs.clrkey, &kcs.seckey);
DEBUG_DBG("pkey_ioctl pkey_clr2seckey()=%d\n", rc);
if (rc)
break;
if (copy_to_user(ucs, &kcs, sizeof(kcs)))
return -EFAULT;
memzero_explicit(&kcs, sizeof(kcs));
break;
}
case PKEY_SEC2PROTK: {
struct pkey_sec2protk __user *usp = (void __user *) arg;
struct pkey_sec2protk ksp;
if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT;
rc = pkey_sec2protkey(ksp.cardnr, ksp.domain,
&ksp.seckey, &ksp.protkey);
DEBUG_DBG("pkey_ioctl pkey_sec2protkey()=%d\n", rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
return -EFAULT;
break;
}
case PKEY_CLR2PROTK: {
struct pkey_clr2protk __user *ucp = (void __user *) arg;
struct pkey_clr2protk kcp;
if (copy_from_user(&kcp, ucp, sizeof(kcp)))
return -EFAULT;
rc = pkey_clr2protkey(kcp.keytype,
&kcp.clrkey, &kcp.protkey);
DEBUG_DBG("pkey_ioctl pkey_clr2protkey()=%d\n", rc);
if (rc)
break;
if (copy_to_user(ucp, &kcp, sizeof(kcp)))
return -EFAULT;
memzero_explicit(&kcp, sizeof(kcp));
break;
}
case PKEY_FINDCARD: {
struct pkey_findcard __user *ufc = (void __user *) arg;
struct pkey_findcard kfc;
if (copy_from_user(&kfc, ufc, sizeof(kfc)))
return -EFAULT;
rc = pkey_findcard(&kfc.seckey,
&kfc.cardnr, &kfc.domain, 1);
DEBUG_DBG("pkey_ioctl pkey_findcard()=%d\n", rc);
if (rc)
break;
if (copy_to_user(ufc, &kfc, sizeof(kfc)))
return -EFAULT;
break;
}
case PKEY_SKEY2PKEY: {
struct pkey_skey2pkey __user *usp = (void __user *) arg;
struct pkey_skey2pkey ksp;
if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT;
rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey);
DEBUG_DBG("pkey_ioctl pkey_skey2pkey()=%d\n", rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
return -EFAULT;
break;
}
default:
/* unknown/unsupported ioctl cmd */
return -ENOTTY;
}
return rc;
}
/*
* Sysfs and file io operations
*/
static const struct file_operations pkey_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
.llseek = no_llseek,
.unlocked_ioctl = pkey_unlocked_ioctl,
};
static struct miscdevice pkey_dev = {
.name = "pkey",
.minor = MISC_DYNAMIC_MINOR,
.mode = 0666,
.fops = &pkey_fops,
};
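/*
* Illustrative user space usage of the /dev/pkey node (a sketch, not
* part of this patch; includes and error handling omitted): generate
* a secure key and turn it into a protected key:
*
*   struct pkey_genseck gs = {
*           .cardnr = 0, .domain = 0,
*           .keytype = PKEY_KEYTYPE_AES_256,
*   };
*   struct pkey_skey2pkey sp;
*   int fd = open("/dev/pkey", O_RDWR);
*
*   ioctl(fd, PKEY_GENSECK, &gs);
*   sp.seckey = gs.seckey;
*   ioctl(fd, PKEY_SKEY2PKEY, &sp);
*   close(fd);
*   ... sp.protkey now holds the protected key and its type/length ...
*/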
/*
* Module init
*/
int __init pkey_init(void)
{
cpacf_mask_t pckmo_functions;
/* check whether the pckmo instruction is available */
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
return -EOPNOTSUPP;
if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY) ||
!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_192_KEY) ||
!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_256_KEY))
return -EOPNOTSUPP;
pkey_debug_init();
return misc_register(&pkey_dev);
}
/*
* Module exit
*/
static void __exit pkey_exit(void)
{
misc_deregister(&pkey_dev);
mkvp_cache_free();
pkey_debug_exit();
}
module_init(pkey_init);
module_exit(pkey_exit);
...@@ -374,7 +374,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
return rc;
}
-static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
...@@ -444,6 +444,7 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
static bool is_desired_ep11_card(unsigned int dev_id,
unsigned short target_num,
...@@ -619,7 +620,7 @@ static long zcrypt_rng(char *buffer)
return rc;
}
-static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
......
...@@ -190,5 +190,7 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *);
struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus);
#endif /* _ZCRYPT_API_H_ */