Commit 131a395c authored by Jan Glauber, committed by Martin Schwidefsky

[S390] crypto: cleanup.

Cleanup code and remove obsolete documentation.
Signed-off-by: Jan Glauber <jan.glauber@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 6d4740c8
crypto-API support for z990 Message Security Assist (MSA) instructions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

AUTHOR: Thomas Spatzier (tspat@de.ibm.com)

1. Introduction to the crypto API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See Documentation/crypto/api-intro.txt for an introduction to and a
description of the kernel crypto API.

As described in api-intro.txt, support for the z990 crypto instructions has
been added to the algorithm layer of the crypto API. Several files containing
z990-optimized implementations of crypto algorithms are located in the
arch/s390/crypto directory.
2. Probing for availability of MSA
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It should be possible to use kernels containing the z990 crypto
implementations both on machines with MSA available and on those without it
(pre-z990 machines, or z990 machines without MSA). Therefore a simple probing
mechanism has been implemented: the init function of each crypto module tests
for the availability of MSA and, in particular, of the respective crypto
algorithm. If the algorithm is available, the module loads and registers it
with the crypto API. If it is not available, the init function returns
-ENOSYS; in that case a fallback to the standard software implementation of
the algorithm must be taken (the standard crypto modules are also built when
compiling the kernel). A sketch of such an init function is shown below.
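
A minimal sketch of such an init function, modeled on the sha1_z990 module
(illustration only: the 'alg' structure is the module's crypto_alg
definition, omitted here; crypt_s390_func_available() is the probing helper
these modules use):

	/* sketch: probe for MSA, else defer to the generic module */
	static int init(void)
	{
		/* is the KIMD function code for SHA1 available? */
		if (!crypt_s390_func_available(KIMD_SHA_1))
			return -ENOSYS;	/* triggers software fallback */

		/* MSA present: register the optimized algorithm as "sha1" */
		return crypto_register_alg(&alg);
	}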
3. Ensuring z990 crypto module preference
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If the z990 crypto instructions are available, the optimized modules should
be preferred over the standard modules.

3.1. compiled-in modules
~~~~~~~~~~~~~~~~~~~~~~~~
For compiled-in modules it has to be ensured that the z990 modules are linked
before the standard crypto modules. On system startup the init functions of
the z990 crypto modules are then called first and probe for the z990 crypto
instructions. If the instructions are available, the z990 module registers
its crypto algorithm implementation -> loading the standard module fails,
since the algorithm is already registered.
If the z990 crypto instructions are not available, loading the z990 module
fails -> the standard module loads and registers its algorithm.
3.2. dynamic modules
~~~~~~~~~~~~~~~~~~~~
A system administrator has to take care of giving preference to the z990
crypto modules. If MSA is available, appropriate lines have to be added to
/etc/modprobe.conf.

Example: the z990 crypto instruction for the SHA1 algorithm is available.
         Add the following line to /etc/modprobe.conf (assuming the
         z990 crypto module for SHA1 is called sha1_z990):

         alias sha1 sha1_z990

         -> when the sha1 algorithm is requested through the crypto API
         (which has a module autoloader), the z990 module will be loaded.

TBD: a userspace module probing mechanism
     something like 'probe sha1 sha1_z990 sha1' in modprobe.conf
     -> try the module sha1_z990 first; if it fails, load the standard
     module sha1. The 'probe' statement is currently not supported in
     modprobe.conf.
4. Currently implemented z990 crypto algorithms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following crypto algorithms with z990 MSA support are currently
implemented. The name under which each algorithm is registered with the
crypto API and the name of the respective module are given in square
brackets.

- SHA1 Digest Algorithm [sha1 -> sha1_z990]
- DES Encrypt/Decrypt Algorithm (64-bit key) [des -> des_z990]
- Triple DES Encrypt/Decrypt Algorithm (128-bit key) [des3_ede128 -> des_z990]
- Triple DES Encrypt/Decrypt Algorithm (192-bit key) [des3_ede -> des_z990]

In order to load, for example, the sha1_z990 module when the sha1 algorithm
is requested (see 3.2.), add 'alias sha1 sha1_z990' to /etc/modprobe.conf.
A usage sketch follows below.
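
For illustration, a hedged sketch of how a kernel-side user might request the
sha1 algorithm through the digest interface of this document's era
(crypto_digest_*; later kernels replace it with the crypto_hash API).
sg_init_one() is assumed here for brevity; older code filled the scatterlist
fields directly. With the alias above, the module autoloader resolves "sha1"
to sha1_z990 on MSA-capable machines:

	/* illustration only: digest a buffer with whichever sha1 loads */
	static int sha1_digest_example(u8 *data, unsigned int len, u8 *out)
	{
		struct scatterlist sg;
		struct crypto_tfm *tfm;

		/* "sha1" goes through the module autoloader */
		tfm = crypto_alloc_tfm("sha1", 0);
		if (tfm == NULL)
			return -ENOMEM;

		sg_init_one(&sg, data, len);

		crypto_digest_init(tfm);
		crypto_digest_update(tfm, &sg, 1);
		crypto_digest_final(tfm, out);	/* out: 20-byte digest */

		crypto_free_tfm(tfm);
		return 0;
	}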
@@ -25,99 +25,100 @@
  */
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/mm.h>
 #include <linux/crypto.h>
-#include <asm/scatterlist.h>
-#include <asm/byteorder.h>
 #include "crypt_s390.h"
 
 #define SHA1_DIGEST_SIZE	20
 #define SHA1_BLOCK_SIZE		64
 
-struct crypt_s390_sha1_ctx {
-	u64 count;
+struct s390_sha1_ctx {
+	u64 count;		/* message length */
 	u32 state[5];
-	u32 buf_len;
-	u8 buffer[2 * SHA1_BLOCK_SIZE];
+	u8 buf[2 * SHA1_BLOCK_SIZE];
 };
 
 static void sha1_init(struct crypto_tfm *tfm)
 {
-	struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	ctx->state[0] = 0x67452301;
-	ctx->state[1] = 0xEFCDAB89;
-	ctx->state[2] = 0x98BADCFE;
-	ctx->state[3] = 0x10325476;
-	ctx->state[4] = 0xC3D2E1F0;
-
-	ctx->count = 0;
-	ctx->buf_len = 0;
+	struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+
+	sctx->state[0] = 0x67452301;
+	sctx->state[1] = 0xEFCDAB89;
+	sctx->state[2] = 0x98BADCFE;
+	sctx->state[3] = 0x10325476;
+	sctx->state[4] = 0xC3D2E1F0;
+	sctx->count = 0;
 }
 
 static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
 			unsigned int len)
 {
-	struct crypt_s390_sha1_ctx *sctx;
-	long imd_len;
-
-	sctx = crypto_tfm_ctx(tfm);
-	sctx->count += len * 8; /* message bit length */
-
-	/* anything in buffer yet? -> must be completed */
-	if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
-		/* complete full block and hash */
-		memcpy(sctx->buffer + sctx->buf_len, data,
-		       SHA1_BLOCK_SIZE - sctx->buf_len);
-		crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
-				SHA1_BLOCK_SIZE);
-		data += SHA1_BLOCK_SIZE - sctx->buf_len;
-		len -= SHA1_BLOCK_SIZE - sctx->buf_len;
-		sctx->buf_len = 0;
+	struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+	unsigned int index;
+	int ret;
+
+	/* how much is already in the buffer? */
+	index = sctx->count & 0x3f;
+
+	sctx->count += len;
+
+	if (index + len < SHA1_BLOCK_SIZE)
+		goto store;
+
+	/* process one stored block */
+	if (index) {
+		memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index);
+		ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf,
+				      SHA1_BLOCK_SIZE);
+		BUG_ON(ret != SHA1_BLOCK_SIZE);
+		data += SHA1_BLOCK_SIZE - index;
+		len -= SHA1_BLOCK_SIZE - index;
 	}
 
-	/* rest of data contains full blocks? */
-	imd_len = len & ~0x3ful;
-	if (imd_len) {
-		crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
-		data += imd_len;
-		len -= imd_len;
+	/* process as many blocks as possible */
+	if (len >= SHA1_BLOCK_SIZE) {
+		ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data,
+				      len & ~(SHA1_BLOCK_SIZE - 1));
+		BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1)));
+		data += ret;
+		len -= ret;
 	}
-	/* anything left? store in buffer */
-	if (len) {
-		memcpy(sctx->buffer + sctx->buf_len , data, len);
-		sctx->buf_len += len;
-	}
-}
 
-static void pad_message(struct crypt_s390_sha1_ctx* sctx)
-{
-	int index;
+store:
+	/* anything left? */
+	if (len)
+		memcpy(sctx->buf + index , data, len);
+}
 
-	index = sctx->buf_len;
-	sctx->buf_len = (sctx->buf_len < 56) ?
-			 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
-	/* start pad with 1 */
-	sctx->buffer[index] = 0x80;
-	/* pad with zeros */
-	index++;
-	memset(sctx->buffer + index, 0x00, sctx->buf_len - index);
-	/* append length */
-	memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
-	       sizeof sctx->count);
-}
-
 /* Add padding and return the message digest. */
 static void sha1_final(struct crypto_tfm *tfm, u8 *out)
 {
-	struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+	struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+	u64 bits;
+	unsigned int index, end;
+	int ret;
+
+	/* must perform manual padding */
+	index = sctx->count & 0x3f;
+	end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE);
+
+	/* start pad with 1 */
+	sctx->buf[index] = 0x80;
 
-	/* must perform manual padding */
-	pad_message(sctx);
-	crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
+	/* pad with zeros */
+	index++;
+	memset(sctx->buf + index, 0x00, end - index - 8);
+
+	/* append message length */
+	bits = sctx->count * 8;
+	memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
+
+	ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end);
+	BUG_ON(ret != end);
+
 	/* copy digest to out */
 	memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
 	/* wipe context */
 	memset(sctx, 0, sizeof *sctx);
 }
@@ -128,7 +129,7 @@ static struct crypto_alg alg = {
 	.cra_priority = CRYPT_S390_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
 	.cra_blocksize = SHA1_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
+	.cra_ctxsize = sizeof(struct s390_sha1_ctx),
 	.cra_module = THIS_MODULE,
 	.cra_list = LIST_HEAD_INIT(alg.cra_list),
 	.cra_u = { .digest = {
@@ -26,7 +26,7 @@
 #define SHA256_BLOCK_SIZE	64
 
 struct s390_sha256_ctx {
-	u64 count;
+	u64 count;		/* message length */
 	u32 state[8];
 	u8 buf[2 * SHA256_BLOCK_SIZE];
 };
@@ -54,10 +54,9 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
 	int ret;
 
 	/* how much is already in the buffer? */
-	index = sctx->count / 8 & 0x3f;
+	index = sctx->count & 0x3f;
 
-	/* update message bit length */
-	sctx->count += len * 8;
+	sctx->count += len;
 
 	if ((index + len) < SHA256_BLOCK_SIZE)
 		goto store;
@@ -87,12 +86,17 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
 		memcpy(sctx->buf + index , data, len);
 }
 
-static void pad_message(struct s390_sha256_ctx* sctx)
+/* Add padding and return the message digest */
+static void sha256_final(struct crypto_tfm *tfm, u8 *out)
 {
-	int index, end;
+	struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
+	u64 bits;
+	unsigned int index, end;
+	int ret;
 
-	index = sctx->count / 8 & 0x3f;
-	end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE;
+	/* must perform manual padding */
+	index = sctx->count & 0x3f;
+	end = (index < 56) ? SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE);
 
 	/* start pad with 1 */
 	sctx->buf[index] = 0x80;
@@ -102,21 +106,11 @@ static void pad_message(struct s390_sha256_ctx* sctx)
 	memset(sctx->buf + index, 0x00, end - index - 8);
 
 	/* append message length */
-	memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count);
-
-	sctx->count = end * 8;
-}
-
-/* Add padding and return the message digest */
-static void sha256_final(struct crypto_tfm *tfm, u8 *out)
-{
-	struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
-
-	/* must perform manual padding */
-	pad_message(sctx);
+	bits = sctx->count * 8;
+	memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
 
-	crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
-			sctx->count / 8);
+	ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end);
+	BUG_ON(ret != end);
 
 	/* copy digest to out */
 	memcpy(out, sctx->state, SHA256_DIGEST_SIZE);
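
As a reading aid for the two final() routines above, a worked example of the
padding layout they construct (illustration only; a 3-byte message and the
64-byte SHA1 block size are assumed):

	/*
	 * count = 3, so index = count & 0x3f = 3 and end = 64
	 * (index < 56, so the padded message fits in one block):
	 *
	 *   buf[0..2]    the 3 message bytes
	 *   buf[3]       0x80                pad start bit
	 *   buf[4..55]   0x00                zero padding
	 *   buf[56..63]  0x0000000000000018  bit length (3 * 8 = 24)
	 *
	 * The length is stored by memcpy() of the u64 'bits'; on s390,
	 * which is big-endian, this directly yields the big-endian
	 * length field that SHA1/SHA-256 padding requires. The final
	 * kimd call then processes all 'end' bytes.
	 */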