Commit c7d4d259 authored by Martin Schwidefsky

s390/crypto: cleanup and move the header with the cpacf definitions

The CPACF instructions are going to be used in KVM as well, so move
the defines and the inline functions from arch/s390/crypto/crypt_s390.h
to arch/s390/include/asm. Rename the header to cpacf.h and replace
the crypt_s390_xxx names with cpacf_xxx.
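
For callers the rename is mechanical; a typical before/after pair,
taken from the aes_s390.c hunks below:

	/* before */
	ret = crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
			    AES_BLOCK_SIZE);
	/* after */
	ret = cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
		       AES_BLOCK_SIZE);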

While we are at it, clean up the header as well. The encoding of the
CPACF operations is odd: there is an enum for each of the CPACF
instructions, with the hardware function code in the lower 8 bits of
each entry and a software-defined number for the CPACF instruction
in the upper 8 bits. Remove the superfluous software number and
replace the enums with simple defines.
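
Schematically the change to the header looks like this; an abbreviated
sketch rather than the full list of defines (0x12 is the hardware
function code of KM-AES-128):

	/* old: enum entries carry a software-defined instruction
	 * number (here CRYPT_S390_KM) in the upper 8 bits */
	enum crypt_s390_km_func {
		KM_QUERY	   = CRYPT_S390_KM | 0x00,
		KM_AES_128_ENCRYPT = CRYPT_S390_KM | 0x12,
	};

	/* new: simple defines with only the hardware function code */
	#define CPACF_KM_QUERY		0x00
	#define CPACF_KM_AES_128_ENC	0x12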

The crypt_s390_func_available() function tests for the presence
of a specific CPACF operation. The new name of the function is
cpacf_query, and it works slightly differently than before. It gets
passed the opcode of a CPACF instruction and a function code for
that instruction. The facility_mask parameter is gone; the opcode
is used to find the correct MSA facility bit to check whether the
CPACF instruction itself is available. If it is, the query function
of the given instruction is used to test whether the requested CPACF
operation is present.
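
Condensed, the new check looks roughly like this; a sketch of the
logic rather than a verbatim copy of cpacf.h, with __cpacf_query
standing in for the inline assembly that executes the query function:

	static inline int cpacf_query(unsigned int opcode, unsigned int func)
	{
		unsigned char mask[16];	/* 128-bit query status block */

		switch (opcode) {
		case CPACF_KM:
		case CPACF_KMC:
		case CPACF_KIMD:
			if (!test_facility(17))	/* MSA */
				return 0;
			break;
		case CPACF_PCC:
		case CPACF_KMCTR:
			if (!test_facility(77))	/* MSA extension 4 */
				return 0;
			break;
		case CPACF_PPNO:
			if (!test_facility(57))	/* MSA extension 5 */
				return 0;
			break;
		default:
			return 0;
		}
		__cpacf_query(opcode, mask);	/* function code 0 = query */
		func &= 0x7f;	/* ignore the decipher modifier bit */
		return (mask[func >> 3] & (0x80 >> (func & 7))) != 0;
	}

A caller then tests, for example, cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC),
as the module init functions in the diff below do.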
Acked-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent f9dc447e
arch/s390/crypto/aes_s390.c
@@ -28,7 +28,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
#define AES_KEYLEN_128 1
#define AES_KEYLEN_192 2
@@ -145,16 +145,16 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
switch (sctx->key_len) {
case 16:
crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
AES_BLOCK_SIZE);
cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 24:
crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
AES_BLOCK_SIZE);
cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 32:
crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
AES_BLOCK_SIZE);
cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
}
}
@@ -170,16 +170,16 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
switch (sctx->key_len) {
case 16:
crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
AES_BLOCK_SIZE);
cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 24:
crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
AES_BLOCK_SIZE);
cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
case 32:
crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
AES_BLOCK_SIZE);
cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
AES_BLOCK_SIZE);
break;
}
}
@@ -212,7 +212,7 @@ static void fallback_exit_cip(struct crypto_tfm *tfm)
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -298,16 +298,16 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 16:
sctx->enc = KM_AES_128_ENCRYPT;
sctx->dec = KM_AES_128_DECRYPT;
sctx->enc = CPACF_KM_AES_128_ENC;
sctx->dec = CPACF_KM_AES_128_DEC;
break;
case 24:
sctx->enc = KM_AES_192_ENCRYPT;
sctx->dec = KM_AES_192_DECRYPT;
sctx->enc = CPACF_KM_AES_192_ENC;
sctx->dec = CPACF_KM_AES_192_DEC;
break;
case 32:
sctx->enc = KM_AES_256_ENCRYPT;
sctx->dec = KM_AES_256_DECRYPT;
sctx->enc = CPACF_KM_AES_256_ENC;
sctx->dec = CPACF_KM_AES_256_DEC;
break;
}
@@ -326,7 +326,7 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
ret = cpacf_km(func, param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -393,7 +393,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
static struct crypto_alg ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -427,16 +427,16 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 16:
sctx->enc = KMC_AES_128_ENCRYPT;
sctx->dec = KMC_AES_128_DECRYPT;
sctx->enc = CPACF_KMC_AES_128_ENC;
sctx->dec = CPACF_KMC_AES_128_DEC;
break;
case 24:
sctx->enc = KMC_AES_192_ENCRYPT;
sctx->dec = KMC_AES_192_DECRYPT;
sctx->enc = CPACF_KMC_AES_192_ENC;
sctx->dec = CPACF_KMC_AES_192_DEC;
break;
case 32:
sctx->enc = KMC_AES_256_ENCRYPT;
sctx->dec = KMC_AES_256_DECRYPT;
sctx->enc = CPACF_KMC_AES_256_ENC;
sctx->dec = CPACF_KMC_AES_256_DEC;
break;
}
@@ -465,7 +465,7 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, &param, out, in, n);
ret = cpacf_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -509,7 +509,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -596,8 +596,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 32:
xts_ctx->enc = KM_XTS_128_ENCRYPT;
xts_ctx->dec = KM_XTS_128_DECRYPT;
xts_ctx->enc = CPACF_KM_XTS_128_ENC;
xts_ctx->dec = CPACF_KM_XTS_128_DEC;
memcpy(xts_ctx->key + 16, in_key, 16);
memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
break;
@@ -607,8 +607,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_fallback_setkey(tfm, in_key, key_len);
break;
case 64:
xts_ctx->enc = KM_XTS_256_ENCRYPT;
xts_ctx->dec = KM_XTS_256_DECRYPT;
xts_ctx->enc = CPACF_KM_XTS_256_ENC;
xts_ctx->dec = CPACF_KM_XTS_256_DEC;
memcpy(xts_ctx->key, in_key, 32);
memcpy(xts_ctx->pcc_key, in_key + 32, 32);
break;
@@ -643,7 +643,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
/* remove decipher modifier bit from 'func' and call PCC */
ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
if (ret < 0)
return -EIO;
@@ -655,7 +656,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -721,7 +722,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
static struct crypto_alg xts_aes_alg = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + xts */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -751,16 +752,16 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
switch (key_len) {
case 16:
sctx->enc = KMCTR_AES_128_ENCRYPT;
sctx->dec = KMCTR_AES_128_DECRYPT;
sctx->enc = CPACF_KMCTR_AES_128_ENC;
sctx->dec = CPACF_KMCTR_AES_128_DEC;
break;
case 24:
sctx->enc = KMCTR_AES_192_ENCRYPT;
sctx->dec = KMCTR_AES_192_DECRYPT;
sctx->enc = CPACF_KMCTR_AES_192_ENC;
sctx->dec = CPACF_KMCTR_AES_192_DEC;
break;
case 32:
sctx->enc = KMCTR_AES_256_ENCRYPT;
sctx->dec = KMCTR_AES_256_DECRYPT;
sctx->enc = CPACF_KMCTR_AES_256_ENC;
sctx->dec = CPACF_KMCTR_AES_256_DEC;
break;
}
@@ -804,8 +805,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
n = __ctrblk_init(ctrptr, nbytes);
else
n = AES_BLOCK_SIZE;
ret = crypt_s390_kmctr(func, sctx->key, out, in,
n, ctrptr);
ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
if (ret < 0 || ret != n) {
if (ctrptr == ctrblk)
spin_unlock(&ctrblk_lock);
@@ -837,8 +837,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
AES_BLOCK_SIZE, ctrbuf);
ret = cpacf_kmctr(func, sctx->key, buf, in,
AES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != AES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
@@ -875,7 +875,7 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ctr_aes_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: aes + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
@@ -899,11 +899,11 @@ static int __init aes_s390_init(void)
{
int ret;
if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
keylen_flag |= AES_KEYLEN_128;
if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
keylen_flag |= AES_KEYLEN_192;
if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
keylen_flag |= AES_KEYLEN_256;
if (!keylen_flag)
@@ -926,22 +926,17 @@ static int __init aes_s390_init(void)
if (ret)
goto cbc_aes_err;
if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KM_XTS_256_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
ret = crypto_register_alg(&xts_aes_alg);
if (ret)
goto xts_aes_err;
xts_aes_alg_reg = 1;
}
if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
This diff is collapsed (arch/s390/crypto/crypt_s390.h, removed).
arch/s390/crypto/des_s390.c
@@ -20,8 +20,7 @@
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
@@ -54,20 +53,20 @@ static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_ENCRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_DEA_ENC, ctx->key, out, in, DES_BLOCK_SIZE);
}
static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_DEA_DEC, ctx->key, out, in, DES_BLOCK_SIZE);
}
static struct crypto_alg des_alg = {
.cra_name = "des",
.cra_driver_name = "des-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -95,7 +94,7 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, key, out, in, n);
ret = cpacf_km(func, key, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -128,7 +127,7 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, &param, out, in, n);
ret = cpacf_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -149,7 +148,7 @@ static int ecb_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_DEA_ENC, ctx->key, &walk);
}
static int ecb_des_decrypt(struct blkcipher_desc *desc,
@@ -160,13 +159,13 @@ static int ecb_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_DEA_DEC, ctx->key, &walk);
}
static struct crypto_alg ecb_des_alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -190,7 +189,7 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_DEA_ENC, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
@@ -200,13 +199,13 @@ static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_DEA_DEC, &walk);
}
static struct crypto_alg cbc_des_alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -258,20 +257,20 @@ static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_ENCRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_TDEA_192_ENC, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
cpacf_km(CPACF_KM_TDEA_192_DEC, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static struct crypto_alg des3_alg = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -295,7 +294,7 @@ static int ecb_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_ENC, ctx->key, &walk);
}
static int ecb_des3_decrypt(struct blkcipher_desc *desc,
@@ -306,13 +305,13 @@ static int ecb_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk);
return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_DEC, ctx->key, &walk);
}
static struct crypto_alg ecb_des3_alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des3 + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -336,7 +335,7 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_ENC, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
@@ -346,13 +345,13 @@ static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_DEC, &walk);
}
static struct crypto_alg cbc_des3_alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des3 + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -407,8 +406,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
n = __ctrblk_init(ctrptr, nbytes);
else
n = DES_BLOCK_SIZE;
ret = crypt_s390_kmctr(func, ctx->key, out, in,
n, ctrptr);
ret = cpacf_kmctr(func, ctx->key, out, in, n, ctrptr);
if (ret < 0 || ret != n) {
if (ctrptr == ctrblk)
spin_unlock(&ctrblk_lock);
@@ -438,8 +436,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
DES_BLOCK_SIZE, ctrbuf);
ret = cpacf_kmctr(func, ctx->key, buf, in,
DES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != DES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
@@ -458,7 +456,7 @@ static int ctr_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_ENC, ctx, &walk);
}
static int ctr_des_decrypt(struct blkcipher_desc *desc,
@@ -469,13 +467,13 @@ static int ctr_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_DEC, ctx, &walk);
}
static struct crypto_alg ctr_des_alg = {
.cra_name = "ctr(des)",
.cra_driver_name = "ctr-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -501,7 +499,7 @@ static int ctr_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_ENC, ctx, &walk);
}
static int ctr_des3_decrypt(struct blkcipher_desc *desc,
@@ -512,13 +510,13 @@ static int ctr_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk);
return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_DEC, ctx, &walk);
}
static struct crypto_alg ctr_des3_alg = {
.cra_name = "ctr(des3_ede)",
.cra_driver_name = "ctr-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_priority = 400, /* combo: des3 + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_des_ctx),
@@ -540,8 +538,8 @@ static int __init des_s390_init(void)
{
int ret;
if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) ||
!crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KM, CPACF_KM_DEA_ENC) ||
!cpacf_query(CPACF_KM, CPACF_KM_TDEA_192_ENC))
return -EOPNOTSUPP;
ret = crypto_register_alg(&des_alg);
@@ -563,10 +561,8 @@ static int __init des_s390_init(void)
if (ret)
goto cbc_des3_err;
if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_DEA_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_TDEA_192_ENC)) {
ret = crypto_register_alg(&ctr_des_alg);
if (ret)
goto ctr_des_err;
arch/s390/crypto/ghash_s390.c
@@ -10,8 +10,7 @@
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
@@ -72,8 +71,8 @@ static int ghash_update(struct shash_desc *desc,
src += n;
if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
GHASH_BLOCK_SIZE);
ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
}
@@ -81,7 +80,7 @@
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
if (ret != n)
return -EIO;
src += n;
@@ -106,7 +105,7 @@ static int ghash_flush(struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
@@ -137,7 +136,7 @@ static struct shash_alg ghash_alg = {
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
@@ -147,8 +146,7 @@ static struct shash_alg ghash_alg = {
static int __init ghash_mod_init(void)
{
if (!crypt_s390_func_available(KIMD_GHASH,
CRYPT_S390_MSA | CRYPT_S390_MSA4))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
return -EOPNOTSUPP;
return crypto_register_shash(&ghash_alg);
arch/s390/crypto/prng.c
@@ -23,8 +23,7 @@
#include <asm/debug.h>
#include <asm/uaccess.h>
#include <asm/timex.h>
#include "crypt_s390.h"
#include <asm/cpacf.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
@@ -136,8 +135,8 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
else
h = ebuf;
/* generate sha256 from this page */
if (crypt_s390_kimd(KIMD_SHA_256, h,
pg, PAGE_SIZE) != PAGE_SIZE) {
if (cpacf_kimd(CPACF_KIMD_SHA_256, h,
pg, PAGE_SIZE) != PAGE_SIZE) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
ret = -EIO;
goto out;
@@ -164,9 +163,9 @@ static void prng_tdes_add_entropy(void)
int ret;
for (i = 0; i < 16; i++) {
ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
(char *)entropy, (char *)entropy,
sizeof(entropy));
ret = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
(char *)entropy, (char *)entropy,
sizeof(entropy));
BUG_ON(ret < 0 || ret != sizeof(entropy));
memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
}
@@ -311,9 +310,8 @@ static int __init prng_sha512_selftest(void)
memset(&ws, 0, sizeof(ws));
/* initial seed */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
&ws, NULL, 0,
seed, sizeof(seed));
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED, &ws, NULL, 0,
seed, sizeof(seed));
if (ret < 0) {
pr_err("The prng self test seed operation for the "
"SHA-512 mode failed with rc=%d\n", ret);
@@ -331,18 +329,16 @@ static int __init prng_sha512_selftest(void)
}
/* generate random bytes */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf),
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf), NULL, 0);
if (ret < 0) {
pr_err("The prng self test generate operation for "
"the SHA-512 mode failed with rc=%d\n", ret);
prng_errorflag = PRNG_SELFTEST_FAILED;
return -EIO;
}
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf),
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf), NULL, 0);
if (ret < 0) {
pr_err("The prng self test generate operation for "
"the SHA-512 mode failed with rc=%d\n", ret);
@@ -396,9 +392,8 @@ static int __init prng_sha512_instantiate(void)
get_tod_clock_ext(seed + 48);
/* initial seed of the ppno drng */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0,
seed, sizeof(seed));
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0, seed, sizeof(seed));
if (ret < 0) {
prng_errorflag = PRNG_SEED_FAILED;
ret = -EIO;
@@ -409,11 +404,9 @@ static int __init prng_sha512_instantiate(void)
bytes for the FIPS 140-2 Conditional Self Test */
if (fips_enabled) {
prng_data->prev = prng_data->buf + prng_chunk_size;
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows,
prng_data->prev,
prng_chunk_size,
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows,
prng_data->prev, prng_chunk_size, NULL, 0);
if (ret < 0 || ret != prng_chunk_size) {
prng_errorflag = PRNG_GEN_FAILED;
ret = -EIO;
@@ -447,9 +440,8 @@ static int prng_sha512_reseed(void)
return ret;
/* do a reseed of the ppno drng with this bytestring */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0,
seed, sizeof(seed));
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
&prng_data->ppnows, NULL, 0, seed, sizeof(seed));
if (ret) {
prng_errorflag = PRNG_RESEED_FAILED;
return -EIO;
@@ -471,9 +463,8 @@ static int prng_sha512_generate(u8 *buf, size_t nbytes)
}
/* PPNO generate */
ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows, buf, nbytes,
NULL, 0);
ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
&prng_data->ppnows, buf, nbytes, NULL, 0);
if (ret < 0 || ret != nbytes) {
prng_errorflag = PRNG_GEN_FAILED;
return -EIO;
@@ -555,8 +546,8 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
* Note: you can still get strict X9.17 conformity by setting
* prng_chunk_size to 8 bytes.
*/
tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
prng_data->buf, prng_data->buf, n);
tmp = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
prng_data->buf, prng_data->buf, n);
if (tmp < 0 || tmp != n) {
ret = -EIO;
break;
@@ -815,14 +806,13 @@ static int __init prng_init(void)
int ret;
/* check if the CPU has a PRNG */
if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KMC, CPACF_KMC_PRNG))
return -EOPNOTSUPP;
/* choose prng mode */
if (prng_mode != PRNG_MODE_TDES) {
/* check for MSA5 support for PPNO operations */
if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN,
CRYPT_S390_MSA5)) {
if (!cpacf_query(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
if (prng_mode == PRNG_MODE_SHA512) {
pr_err("The prng module cannot "
"start in SHA-512 mode\n");
arch/s390/crypto/sha1_s390.c
@@ -28,8 +28,8 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <asm/cpacf.h>
#include "crypt_s390.h"
#include "sha.h"
static int sha1_init(struct shash_desc *desc)
@@ -42,7 +42,7 @@ static int sha1_init(struct shash_desc *desc)
sctx->state[3] = SHA1_H3;
sctx->state[4] = SHA1_H4;
sctx->count = 0;
sctx->func = KIMD_SHA_1;
sctx->func = CPACF_KIMD_SHA_1;
return 0;
}
@@ -66,7 +66,7 @@ static int sha1_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
sctx->func = KIMD_SHA_1;
sctx->func = CPACF_KIMD_SHA_1;
return 0;
}
@@ -82,7 +82,7 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -91,7 +91,7 @@ static struct shash_alg alg = {
static int __init sha1_s390_init(void)
{
if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_1))
return -EOPNOTSUPP;
return crypto_register_shash(&alg);
}
arch/s390/crypto/sha256_s390.c
@@ -18,8 +18,8 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <asm/cpacf.h>
#include "crypt_s390.h"
#include "sha.h"
static int sha256_init(struct shash_desc *desc)
@@ -35,7 +35,7 @@ static int sha256_init(struct shash_desc *desc)
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
sctx->count = 0;
sctx->func = KIMD_SHA_256;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
@@ -59,7 +59,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = KIMD_SHA_256;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
@@ -75,7 +75,7 @@ static struct shash_alg sha256_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -95,7 +95,7 @@ static int sha224_init(struct shash_desc *desc)
sctx->state[6] = SHA224_H6;
sctx->state[7] = SHA224_H7;
sctx->count = 0;
sctx->func = KIMD_SHA_256;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
@@ -112,7 +112,7 @@ static struct shash_alg sha224_alg = {
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -123,7 +123,7 @@ static int __init sha256_s390_init(void)
{
int ret;
if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
return -EOPNOTSUPP;
ret = crypto_register_shash(&sha256_alg);
if (ret < 0)
arch/s390/crypto/sha512_s390.c
@@ -19,9 +19,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <asm/cpacf.h>
#include "sha.h"
#include "crypt_s390.h"
static int sha512_init(struct shash_desc *desc)
{
@@ -36,7 +36,7 @@ static int sha512_init(struct shash_desc *desc)
*(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL;
*(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL;
ctx->count = 0;
ctx->func = KIMD_SHA_512;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
}
@@ -64,7 +64,7 @@ static int sha512_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = KIMD_SHA_512;
sctx->func = CPACF_KIMD_SHA_512;
return 0;
}
@@ -80,7 +80,7 @@ static struct shash_alg sha512_alg = {
.base = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -102,7 +102,7 @@ static int sha384_init(struct shash_desc *desc)
*(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL;
*(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL;
ctx->count = 0;
ctx->func = KIMD_SHA_512;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
}
@@ -119,7 +119,7 @@ static struct shash_alg sha384_alg = {
.base = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
@@ -133,7 +133,7 @@ static int __init init(void)
{
int ret;
if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA))
if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_512))
return -EOPNOTSUPP;
if ((ret = crypto_register_shash(&sha512_alg)) < 0)
goto out;
arch/s390/crypto/sha_common.c
@@ -15,8 +15,8 @@
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <asm/cpacf.h>
#include "sha.h"
#include "crypt_s390.h"
int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
@@ -35,7 +35,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
if (ret != bsize)
return -EIO;
data += bsize - index;
@@ -45,8 +45,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process as many blocks as possible */
if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
ret = cpacf_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
if (ret != (len & ~(bsize - 1)))
return -EIO;
data += ret;
@@ -89,7 +89,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
bits = ctx->count * 8;
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
if (ret != end)
return -EIO;
This diff is collapsed (arch/s390/include/asm/cpacf.h, the new header).