Commit 4d2fa8b4 authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 5.3:

  API:
   - Test shash interface directly in testmgr
   - cra_driver_name is now mandatory

  Algorithms:
   - Replace arc4 crypto_cipher with library helper
   - Implement 5 way interleave for ECB, CBC and CTR on arm64
   - Add xxhash
   - Add continuous self-test on noise source to drbg
   - Update jitter RNG

  Drivers:
   - Add support for SHA204A random number generator
   - Add support for 7211 in iproc-rng200
   - Fix fuzz test failures in inside-secure
   - Fix fuzz test failures in talitos
   - Fix fuzz test failures in qat"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
  crypto: stm32/hash - remove interruptible condition for dma
  crypto: stm32/hash - Fix hmac issue more than 256 bytes
  crypto: stm32/crc32 - rename driver file
  crypto: amcc - remove memset after dma_alloc_coherent
  crypto: ccp - Switch to SPDX license identifiers
  crypto: ccp - Validate the error value used to index error messages
  crypto: doc - Fix formatting of new crypto engine content
  crypto: doc - Add parameter documentation
  crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
  crypto: arm64/aes-ce - add 5 way interleave routines
  crypto: talitos - drop icv_ool
  crypto: talitos - fix hash on SEC1.
  crypto: talitos - move struct talitos_edesc into talitos.h
  lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
  crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
  crypto: asymmetric_keys - select CRYPTO_HASH where needed
  crypto: serpent - mark __serpent_setkey_sbox noinline
  crypto: testmgr - dynamically allocate crypto_shash
  crypto: testmgr - dynamically allocate testvec_config
  crypto: talitos - eliminate unneeded 'done' functions at build time
  ...
parents 8b681508 f3880a23
......@@ -4,111 +4,89 @@ Code Examples
Code Example For Symmetric Key Cipher Operation
-----------------------------------------------
::
/* tie all data structures together */
struct skcipher_def {
struct scatterlist sg;
struct crypto_skcipher *tfm;
struct skcipher_request *req;
struct crypto_wait wait;
};
/* Perform cipher operation */
static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
int enc)
{
int rc;
if (enc)
rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait);
else
rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait);
if (rc)
pr_info("skcipher encrypt returned with result %d\n", rc);
This code encrypts some data with AES-256-XTS. For the sake of example,
all inputs are random bytes, the encryption is done in-place, and it's
assumed the code is running in a context where it can sleep.
return rc;
}
::
/* Initialize and trigger cipher operation */
static int test_skcipher(void)
{
struct skcipher_def sk;
struct crypto_skcipher *skcipher = NULL;
struct skcipher_request *req = NULL;
char *scratchpad = NULL;
char *ivdata = NULL;
unsigned char key[32];
int ret = -EFAULT;
skcipher = crypto_alloc_skcipher("cbc-aes-aesni", 0, 0);
if (IS_ERR(skcipher)) {
pr_info("could not allocate skcipher handle\n");
return PTR_ERR(skcipher);
}
req = skcipher_request_alloc(skcipher, GFP_KERNEL);
if (!req) {
pr_info("could not allocate skcipher request\n");
ret = -ENOMEM;
goto out;
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done,
&sk.wait);
/* AES 256 with random key */
get_random_bytes(&key, 32);
if (crypto_skcipher_setkey(skcipher, key, 32)) {
pr_info("key could not be set\n");
ret = -EAGAIN;
goto out;
}
/* IV will be random */
ivdata = kmalloc(16, GFP_KERNEL);
if (!ivdata) {
pr_info("could not allocate ivdata\n");
goto out;
}
get_random_bytes(ivdata, 16);
/* Input data will be random */
scratchpad = kmalloc(16, GFP_KERNEL);
if (!scratchpad) {
pr_info("could not allocate scratchpad\n");
goto out;
}
get_random_bytes(scratchpad, 16);
sk.tfm = skcipher;
sk.req = req;
/* We encrypt one block */
sg_init_one(&sk.sg, scratchpad, 16);
skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata);
crypto_init_wait(&sk.wait);
/* encrypt data */
ret = test_skcipher_encdec(&sk, 1);
if (ret)
goto out;
pr_info("Encryption triggered successfully\n");
struct crypto_skcipher *tfm = NULL;
struct skcipher_request *req = NULL;
u8 *data = NULL;
const size_t datasize = 512; /* data size in bytes */
struct scatterlist sg;
DECLARE_CRYPTO_WAIT(wait);
u8 iv[16]; /* AES-256-XTS takes a 16-byte IV */
u8 key[64]; /* AES-256-XTS takes a 64-byte key */
int err;
/*
* Allocate a tfm (a transformation object) and set the key.
*
* In real-world use, a tfm and key are typically used for many
* encryption/decryption operations. But in this example, we'll just do a
* single encryption operation with it (which is not very efficient).
*/
tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
if (IS_ERR(tfm)) {
pr_err("Error allocating xts(aes) handle: %ld\n", PTR_ERR(tfm));
return PTR_ERR(tfm);
}
get_random_bytes(key, sizeof(key));
err = crypto_skcipher_setkey(tfm, key, sizeof(key));
if (err) {
pr_err("Error setting key: %d\n", err);
goto out;
}
/* Allocate a request object */
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
err = -ENOMEM;
goto out;
}
/* Prepare the input data */
data = kmalloc(datasize, GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto out;
}
get_random_bytes(data, datasize);
/* Initialize the IV */
get_random_bytes(iv, sizeof(iv));
/*
* Encrypt the data in-place.
*
* For simplicity, in this example we wait for the request to complete
* before proceeding, even if the underlying implementation is asynchronous.
*
* To decrypt instead of encrypt, just change crypto_skcipher_encrypt() to
* crypto_skcipher_decrypt().
*/
sg_init_one(&sg, data, datasize);
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &wait);
skcipher_request_set_crypt(req, &sg, &sg, datasize, iv);
err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
if (err) {
pr_err("Error encrypting data: %d\n", err);
goto out;
}
pr_debug("Encryption was successful\n");
out:
if (skcipher)
crypto_free_skcipher(skcipher);
if (req)
crypto_free_skcipher(tfm);
skcipher_request_free(req);
if (ivdata)
kfree(ivdata);
if (scratchpad)
kfree(scratchpad);
return ret;
kfree(data);
return err;
}
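
For completeness, here is one hypothetical way to exercise this example: a
trivial module that runs ``test_skcipher()`` once at load time. This wrapper is
illustrative only and not part of the documentation above::

    #include <linux/module.h>

    static int __init test_skcipher_mod_init(void)
    {
            /* run the example once; a nonzero return fails the insmod */
            return test_skcipher();
    }

    static void __exit test_skcipher_mod_exit(void)
    {
    }

    module_init(test_skcipher_mod_init);
    module_exit(test_skcipher_mod_exit);
    MODULE_DESCRIPTION("skcipher example from the crypto API docs");
    MODULE_LICENSE("GPL");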
......
......@@ -5,7 +5,7 @@ Block Cipher Algorithm Definitions
:doc: Block Cipher Algorithm Definitions
.. kernel-doc:: include/linux/crypto.h
:functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg
:functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg compress_alg
Symmetric Key Cipher API
------------------------
......
......@@ -208,9 +208,7 @@ the aforementioned cipher types:
- CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as
an ECDH or DH implementation
- CRYPTO_ALG_TYPE_DIGEST Raw message digest
- CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST
- CRYPTO_ALG_TYPE_HASH Raw message digest
- CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash
......
=============
CRYPTO ENGINE
.. SPDX-License-Identifier: GPL-2.0
Crypto Engine
=============
Overview
--------
The crypto engine API (CE), is a crypto queue manager.
The crypto engine (CE) API is a crypto queue manager.
Requirement
-----------
You have to put at start of your tfm_ctx the struct crypto_engine_ctx::
You must put the structure crypto_engine at the start of your transform
context your_tfm_ctx:
::
struct your_tfm_ctx {
struct crypto_engine_ctx enginectx;
...
};
struct your_tfm_ctx {
struct crypto_engine engine;
...
};
Why: Since CE manage only crypto_async_request, it cannot know the underlying
request_type and so have access only on the TFM.
So using container_of for accessing __ctx is impossible.
Furthermore, the crypto engine cannot know the "struct your_tfm_ctx",
so it must assume that crypto_engine_ctx is at start of it.
The crypto engine only manages asynchronous requests in the form of
crypto_async_request. It cannot know the underlying request type and thus only
has access to the transform structure. It is not possible to access the context
using container_of. In addition, the engine knows nothing about your
structure "``struct your_tfm_ctx``". The engine assumes (requires) the placement
of the known member ``struct crypto_engine`` at the beginning.
Order of operations
-------------------
You have to obtain a struct crypto_engine via crypto_engine_alloc_init().
And start it via crypto_engine_start().
Before transferring any request, you have to fill the enginectx.
- prepare_request: (taking a function pointer) If you need to do some processing before doing the request
- unprepare_request: (taking a function pointer) Undoing what's done in prepare_request
- do_one_request: (taking a function pointer) Do encryption for current request
Note: that those three functions get the crypto_async_request associated with the received request.
So your need to get the original request via container_of(areq, struct yourrequesttype_request, base);
When your driver receive a crypto_request, you have to transfer it to
the cryptoengine via one of:
- crypto_transfer_ablkcipher_request_to_engine()
- crypto_transfer_aead_request_to_engine()
- crypto_transfer_akcipher_request_to_engine()
- crypto_transfer_hash_request_to_engine()
- crypto_transfer_skcipher_request_to_engine()
At the end of the request process, a call to one of the following function is needed:
- crypto_finalize_ablkcipher_request
- crypto_finalize_aead_request
- crypto_finalize_akcipher_request
- crypto_finalize_hash_request
- crypto_finalize_skcipher_request
You are required to obtain a struct crypto_engine via ``crypto_engine_alloc_init()``.
Start it via ``crypto_engine_start()``. When finished with your work, shut down the
engine using ``crypto_engine_stop()`` and destroy the engine with
``crypto_engine_exit()``.
Before transferring any request, you have to fill the context enginectx by
providing functions for the following:
* ``prepare_crypt_hardware``: Called once before any prepare functions are
called.
* ``unprepare_crypt_hardware``: Called once after all unprepare functions have
been called.
* ``prepare_cipher_request``/``prepare_hash_request``: Called before each
corresponding request is performed. If some processing or other preparatory
work is required, do it here.
* ``unprepare_cipher_request``/``unprepare_hash_request``: Called after each
request is handled. Clean up / undo what was done in the prepare function.
* ``cipher_one_request``/``hash_one_request``: Handle the current request by
performing the operation.
Note that these functions access the crypto_async_request structure
associated with the received request. You are able to retrieve the original
request by using:
::
container_of(areq, struct yourrequesttype_request, base);
When your driver receives a crypto_request, you must transfer it to
the crypto engine via one of:
* crypto_transfer_ablkcipher_request_to_engine()
* crypto_transfer_aead_request_to_engine()
* crypto_transfer_akcipher_request_to_engine()
* crypto_transfer_hash_request_to_engine()
* crypto_transfer_skcipher_request_to_engine()
At the end of the request process, a call to one of the following functions is needed:
* crypto_finalize_ablkcipher_request()
* crypto_finalize_aead_request()
* crypto_finalize_akcipher_request()
* crypto_finalize_hash_request()
* crypto_finalize_skcipher_request()
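
Putting the pieces above together, here is a condensed, hypothetical sketch of
the flow for an skcipher driver. All ``my_*`` names are invented, error
handling and the actual hardware programming are elided, and the context
layout follows the ``struct crypto_engine_ctx``/``struct crypto_engine_op``
definitions in ``<crypto/engine.h>`` as used by in-tree drivers at this
point::

    #include <crypto/engine.h>
    #include <crypto/internal/skcipher.h>

    struct my_dev {
            struct crypto_engine *engine;
            /* registers, clocks, queues, ... */
    };

    struct my_tfm_ctx {
            struct crypto_engine_ctx enginectx;     /* must be first */
            struct my_dev *dd;
    };

    /* Called by the engine to run one request on the hardware. */
    static int my_do_one_request(struct crypto_engine *engine, void *areq)
    {
            struct skcipher_request *req =
                    container_of(areq, struct skcipher_request, base);

            /* program the hardware; completion is signalled from an IRQ */
            return 0;
    }

    static int my_init_tfm(struct crypto_skcipher *tfm)
    {
            struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

            ctx->enginectx.op.do_one_request = my_do_one_request;
            return 0;
    }

    /* The algorithm's .encrypt handler only queues the request. */
    static int my_encrypt(struct skcipher_request *req)
    {
            struct my_tfm_ctx *ctx =
                    crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

            return crypto_transfer_skcipher_request_to_engine(ctx->dd->engine,
                                                              req);
    }

    /* Completion path, e.g. from the IRQ handler or a tasklet. */
    static void my_finish_req(struct my_dev *dd, struct skcipher_request *req,
                              int err)
    {
            crypto_finalize_skcipher_request(dd->engine, req, err);
    }

    /* At probe time: allocate and start the engine. */
    static int my_engine_setup(struct device *dev, struct my_dev *dd)
    {
            dd->engine = crypto_engine_alloc_init(dev, true);
            if (!dd->engine)
                    return -ENOMEM;
            return crypto_engine_start(dd->engine);
    }

Here ``my_init_tfm()`` would be wired up as the skcipher algorithm's ``.init``
hook, and the request re-enters the driver only through
``my_do_one_request()``.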
......@@ -66,16 +66,3 @@ sha@f8034000 {
dmas = <&dma1 2 17>;
dma-names = "tx";
};
* Elliptic Curve Cryptography (I2C)
Required properties:
- compatible : must be "atmel,atecc508a".
- reg: I2C bus address of the device.
- clock-frequency: must be present in the i2c controller node.
Example:
atecc508a@c0 {
compatible = "atmel,atecc508a";
reg = <0xC0>;
};
......@@ -2,6 +2,7 @@ HWRNG support for the iproc-rng200 driver
Required properties:
- compatible : Must be one of:
"brcm,bcm7211-rng200"
"brcm,bcm7278-rng200"
"brcm,iproc-rng200"
- reg : base address and size of control register block
......
......@@ -52,6 +52,10 @@ properties:
- at,24c08
# i2c trusted platform module (TPM)
- atmel,at97sc3204t
# i2c h/w symmetric crypto module
- atmel,atsha204a
# i2c h/w elliptic curve crypto module
- atmel,atecc508a
# CM32181: Ambient Light Sensor
- capella,cm32181
# CM3232: Ambient Light Sensor
......
......@@ -4257,6 +4257,7 @@ F: crypto/
F: drivers/crypto/
F: include/crypto/
F: include/linux/crypto*
F: lib/crypto/
CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
M: Neil Horman <nhorman@tuxdriver.com>
......
......@@ -100,6 +100,29 @@ ahbbridge0: bus@40000000 {
reg = <0x40000000 0x800000>;
ranges;
crypto: crypto@40240000 {
compatible = "fsl,sec-v4.0";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x40240000 0x10000>;
ranges = <0 0x40240000 0x10000>;
clocks = <&pcc2 IMX7ULP_CLK_CAAM>,
<&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>;
clock-names = "aclk", "ipg";
sec_jr0: jr0@1000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
};
sec_jr1: jr1@2000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
};
};
lpuart4: serial@402d0000 {
compatible = "fsl,imx7ulp-lpuart";
reg = <0x402d0000 0x1000>;
......
......@@ -63,7 +63,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
}
static int chacha_neon_stream_xor(struct skcipher_request *req,
struct chacha_ctx *ctx, u8 *iv)
const struct chacha_ctx *ctx, const u8 *iv)
{
struct skcipher_walk walk;
u32 state[16];
......
......@@ -34,7 +34,7 @@ int sha512_arm_update(struct shash_desc *desc, const u8 *data,
(sha512_block_fn *)sha512_block_data_order);
}
int sha512_arm_final(struct shash_desc *desc, u8 *out)
static int sha512_arm_final(struct shash_desc *desc, u8 *out)
{
sha512_base_do_finalize(desc,
(sha512_block_fn *)sha512_block_data_order);
......
......@@ -15,6 +15,8 @@
.arch armv8-a+crypto
xtsmask .req v16
cbciv .req v16
vctr .req v16
.macro xts_reload_mask, tmp
.endm
......@@ -49,7 +51,7 @@
load_round_keys \rounds, \temp
.endm
.macro do_enc_Nx, de, mc, k, i0, i1, i2, i3
.macro do_enc_Nx, de, mc, k, i0, i1, i2, i3, i4
aes\de \i0\().16b, \k\().16b
aes\mc \i0\().16b, \i0\().16b
.ifnb \i1
......@@ -60,27 +62,34 @@
aes\mc \i2\().16b, \i2\().16b
aes\de \i3\().16b, \k\().16b
aes\mc \i3\().16b, \i3\().16b
.ifnb \i4
aes\de \i4\().16b, \k\().16b
aes\mc \i4\().16b, \i4\().16b
.endif
.endif
.endif
.endm
/* up to 4 interleaved encryption rounds with the same round key */
.macro round_Nx, enc, k, i0, i1, i2, i3
/* up to 5 interleaved encryption rounds with the same round key */
.macro round_Nx, enc, k, i0, i1, i2, i3, i4
.ifc \enc, e
do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3
do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3, \i4
.else
do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3
do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3, \i4
.endif
.endm
/* up to 4 interleaved final rounds */
.macro fin_round_Nx, de, k, k2, i0, i1, i2, i3
/* up to 5 interleaved final rounds */
.macro fin_round_Nx, de, k, k2, i0, i1, i2, i3, i4
aes\de \i0\().16b, \k\().16b
.ifnb \i1
aes\de \i1\().16b, \k\().16b
.ifnb \i3
aes\de \i2\().16b, \k\().16b
aes\de \i3\().16b, \k\().16b
.ifnb \i4
aes\de \i4\().16b, \k\().16b
.endif
.endif
.endif
eor \i0\().16b, \i0\().16b, \k2\().16b
......@@ -89,47 +98,52 @@
.ifnb \i3
eor \i2\().16b, \i2\().16b, \k2\().16b
eor \i3\().16b, \i3\().16b, \k2\().16b
.ifnb \i4
eor \i4\().16b, \i4\().16b, \k2\().16b
.endif
.endif
.endif
.endm
/* up to 4 interleaved blocks */
.macro do_block_Nx, enc, rounds, i0, i1, i2, i3
/* up to 5 interleaved blocks */
.macro do_block_Nx, enc, rounds, i0, i1, i2, i3, i4
cmp \rounds, #12
blo 2222f /* 128 bits */
beq 1111f /* 192 bits */
round_Nx \enc, v17, \i0, \i1, \i2, \i3
round_Nx \enc, v18, \i0, \i1, \i2, \i3
1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3
round_Nx \enc, v20, \i0, \i1, \i2, \i3
round_Nx \enc, v17, \i0, \i1, \i2, \i3, \i4
round_Nx \enc, v18, \i0, \i1, \i2, \i3, \i4
1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3, \i4
round_Nx \enc, v20, \i0, \i1, \i2, \i3, \i4
2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
round_Nx \enc, \key, \i0, \i1, \i2, \i3
round_Nx \enc, \key, \i0, \i1, \i2, \i3, \i4
.endr
fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3
fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3, \i4
.endm
.macro encrypt_block, in, rounds, t0, t1, t2
do_block_Nx e, \rounds, \in
.endm
.macro encrypt_block2x, i0, i1, rounds, t0, t1, t2
do_block_Nx e, \rounds, \i0, \i1
.endm
.macro encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
do_block_Nx e, \rounds, \i0, \i1, \i2, \i3
.endm
.macro decrypt_block, in, rounds, t0, t1, t2
do_block_Nx d, \rounds, \in
.macro encrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2
do_block_Nx e, \rounds, \i0, \i1, \i2, \i3, \i4
.endm
.macro decrypt_block2x, i0, i1, rounds, t0, t1, t2
do_block_Nx d, \rounds, \i0, \i1
.macro decrypt_block, in, rounds, t0, t1, t2
do_block_Nx d, \rounds, \in
.endm
.macro decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
do_block_Nx d, \rounds, \i0, \i1, \i2, \i3
.endm
.macro decrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2
do_block_Nx d, \rounds, \i0, \i1, \i2, \i3, \i4
.endm
#define MAX_STRIDE 5
#include "aes-modes.S"
......@@ -10,6 +10,18 @@
.text
.align 4
#ifndef MAX_STRIDE
#define MAX_STRIDE 4
#endif
#if MAX_STRIDE == 4
#define ST4(x...) x
#define ST5(x...)
#else
#define ST4(x...)
#define ST5(x...) x
#endif
aes_encrypt_block4x:
encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
......@@ -20,6 +32,18 @@ aes_decrypt_block4x:
ret
ENDPROC(aes_decrypt_block4x)
#if MAX_STRIDE == 5
aes_encrypt_block5x:
encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
ret
ENDPROC(aes_encrypt_block5x)
aes_decrypt_block5x:
decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
ret
ENDPROC(aes_decrypt_block5x)
#endif
/*
* aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks)
......@@ -34,14 +58,17 @@ AES_ENTRY(aes_ecb_encrypt)
enc_prepare w3, x2, x5
.LecbencloopNx:
subs w4, w4, #4
subs w4, w4, #MAX_STRIDE
bmi .Lecbenc1x
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
bl aes_encrypt_block4x
ST4( bl aes_encrypt_block4x )
ST5( ld1 {v4.16b}, [x1], #16 )
ST5( bl aes_encrypt_block5x )
st1 {v0.16b-v3.16b}, [x0], #64
ST5( st1 {v4.16b}, [x0], #16 )
b .LecbencloopNx
.Lecbenc1x:
adds w4, w4, #4
adds w4, w4, #MAX_STRIDE
beq .Lecbencout
.Lecbencloop:
ld1 {v0.16b}, [x1], #16 /* get next pt block */
......@@ -62,14 +89,17 @@ AES_ENTRY(aes_ecb_decrypt)
dec_prepare w3, x2, x5
.LecbdecloopNx:
subs w4, w4, #4
subs w4, w4, #MAX_STRIDE
bmi .Lecbdec1x
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
bl aes_decrypt_block4x
ST4( bl aes_decrypt_block4x )
ST5( ld1 {v4.16b}, [x1], #16 )
ST5( bl aes_decrypt_block5x )
st1 {v0.16b-v3.16b}, [x0], #64
ST5( st1 {v4.16b}, [x0], #16 )
b .LecbdecloopNx
.Lecbdec1x:
adds w4, w4, #4
adds w4, w4, #MAX_STRIDE
beq .Lecbdecout
.Lecbdecloop:
ld1 {v0.16b}, [x1], #16 /* get next ct block */
......@@ -129,39 +159,56 @@ AES_ENTRY(aes_cbc_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
ld1 {v7.16b}, [x5] /* get iv */
ld1 {cbciv.16b}, [x5] /* get iv */
dec_prepare w3, x2, x6
.LcbcdecloopNx:
subs w4, w4, #4
subs w4, w4, #MAX_STRIDE
bmi .Lcbcdec1x
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
#if MAX_STRIDE == 5
ld1 {v4.16b}, [x1], #16 /* get 1 ct block */
mov v5.16b, v0.16b
mov v6.16b, v1.16b
mov v7.16b, v2.16b
bl aes_decrypt_block5x
sub x1, x1, #32
eor v0.16b, v0.16b, cbciv.16b
eor v1.16b, v1.16b, v5.16b
ld1 {v5.16b}, [x1], #16 /* reload 1 ct block */
ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */
eor v2.16b, v2.16b, v6.16b
eor v3.16b, v3.16b, v7.16b
eor v4.16b, v4.16b, v5.16b
#else
mov v4.16b, v0.16b
mov v5.16b, v1.16b
mov v6.16b, v2.16b
bl aes_decrypt_block4x
sub x1, x1, #16
eor v0.16b, v0.16b, v7.16b
eor v0.16b, v0.16b, cbciv.16b
eor v1.16b, v1.16b, v4.16b
ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */
ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */
eor v2.16b, v2.16b, v5.16b
eor v3.16b, v3.16b, v6.16b
#endif
st1 {v0.16b-v3.16b}, [x0], #64
ST5( st1 {v4.16b}, [x0], #16 )
b .LcbcdecloopNx
.Lcbcdec1x:
adds w4, w4, #4
adds w4, w4, #MAX_STRIDE
beq .Lcbcdecout
.Lcbcdecloop:
ld1 {v1.16b}, [x1], #16 /* get next ct block */
mov v0.16b, v1.16b /* ...and copy to v0 */
decrypt_block v0, w3, x2, x6, w7
eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
mov v7.16b, v1.16b /* ct is next iv */
eor v0.16b, v0.16b, cbciv.16b /* xor with iv => pt */
mov cbciv.16b, v1.16b /* ct is next iv */
st1 {v0.16b}, [x0], #16
subs w4, w4, #1
bne .Lcbcdecloop
.Lcbcdecout:
st1 {v7.16b}, [x5] /* return iv */
st1 {cbciv.16b}, [x5] /* return iv */
ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_cbc_decrypt)
......@@ -255,51 +302,60 @@ AES_ENTRY(aes_ctr_encrypt)
mov x29, sp
enc_prepare w3, x2, x6
ld1 {v4.16b}, [x5]
ld1 {vctr.16b}, [x5]
umov x6, v4.d[1] /* keep swabbed ctr in reg */
umov x6, vctr.d[1] /* keep swabbed ctr in reg */
rev x6, x6
cmn w6, w4 /* 32 bit overflow? */
bcs .Lctrloop
.LctrloopNx:
subs w4, w4, #4
subs w4, w4, #MAX_STRIDE
bmi .Lctr1x
add w7, w6, #1
mov v0.16b, v4.16b
mov v0.16b, vctr.16b
add w8, w6, #2
mov v1.16b, v4.16b
mov v1.16b, vctr.16b
add w9, w6, #3
mov v2.16b, vctr.16b
add w9, w6, #3
mov v2.16b, v4.16b
rev w7, w7
mov v3.16b, v4.16b
mov v3.16b, vctr.16b
rev w8, w8
ST5( mov v4.16b, vctr.16b )
mov v1.s[3], w7
rev w9, w9
ST5( add w10, w6, #4 )
mov v2.s[3], w8
ST5( rev w10, w10 )
mov v3.s[3], w9
ST5( mov v4.s[3], w10 )
ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
bl aes_encrypt_block4x
ST4( bl aes_encrypt_block4x )
ST5( bl aes_encrypt_block5x )
eor v0.16b, v5.16b, v0.16b
ld1 {v5.16b}, [x1], #16 /* get 1 input block */
ST4( ld1 {v5.16b}, [x1], #16 )
eor v1.16b, v6.16b, v1.16b
ST5( ld1 {v5.16b-v6.16b}, [x1], #32 )
eor v2.16b, v7.16b, v2.16b
eor v3.16b, v5.16b, v3.16b
ST5( eor v4.16b, v6.16b, v4.16b )
st1 {v0.16b-v3.16b}, [x0], #64
add x6, x6, #4
ST5( st1 {v4.16b}, [x0], #16 )
add x6, x6, #MAX_STRIDE
rev x7, x6
ins v4.d[1], x7
ins vctr.d[1], x7
cbz w4, .Lctrout
b .LctrloopNx
.Lctr1x:
adds w4, w4, #4
adds w4, w4, #MAX_STRIDE
beq .Lctrout
.Lctrloop:
mov v0.16b, v4.16b
mov v0.16b, vctr.16b
encrypt_block v0, w3, x2, x8, w7
adds x6, x6, #1 /* increment BE ctr */
rev x7, x6
ins v4.d[1], x7
ins vctr.d[1], x7
bcs .Lctrcarry /* overflow? */
.Lctrcarrydone:
......@@ -311,7 +367,7 @@ AES_ENTRY(aes_ctr_encrypt)
bne .Lctrloop
.Lctrout:
st1 {v4.16b}, [x5] /* return next CTR value */
st1 {vctr.16b}, [x5] /* return next CTR value */
ldp x29, x30, [sp], #16
ret
......@@ -320,11 +376,11 @@ AES_ENTRY(aes_ctr_encrypt)
b .Lctrout
.Lctrcarry:
umov x7, v4.d[0] /* load upper word of ctr */
umov x7, vctr.d[0] /* load upper word of ctr */
rev x7, x7 /* ... to handle the carry */
add x7, x7, #1
rev x7, x7
ins v4.d[0], x7
ins vctr.d[0], x7
b .Lctrcarrydone
AES_ENDPROC(aes_ctr_encrypt)
......
......@@ -12,6 +12,8 @@
#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
xtsmask .req v7
cbciv .req v7
vctr .req v4
.macro xts_reload_mask, tmp
xts_load_mask \tmp
......@@ -114,26 +116,9 @@
/*
* Interleaved versions: functionally equivalent to the
* ones above, but applied to 2 or 4 AES states in parallel.
* ones above, but applied to AES states in parallel.
*/
.macro sub_bytes_2x, in0, in1
sub v8.16b, \in0\().16b, v15.16b
tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
sub v9.16b, \in1\().16b, v15.16b
tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
sub v10.16b, v8.16b, v15.16b
tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b
sub v11.16b, v9.16b, v15.16b
tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b
sub v8.16b, v10.16b, v15.16b
tbx \in0\().16b, {v24.16b-v27.16b}, v10.16b
sub v9.16b, v11.16b, v15.16b
tbx \in1\().16b, {v24.16b-v27.16b}, v11.16b
tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b
tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b
.endm
.macro sub_bytes_4x, in0, in1, in2, in3
sub v8.16b, \in0\().16b, v15.16b
tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
......@@ -212,25 +197,6 @@
eor \in1\().16b, \in1\().16b, v11.16b
.endm
.macro do_block_2x, enc, in0, in1, rounds, rk, rkp, i
ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
mov \i, \rounds
1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
movi v15.16b, #0x40
tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
sub_bytes_2x \in0, \in1
subs \i, \i, #1
ld1 {v15.4s}, [\rkp], #16
beq 2222f
mix_columns_2x \in0, \in1, \enc
b 1111b
2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
.endm
.macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
......@@ -257,14 +223,6 @@
eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
.endm
.macro encrypt_block2x, in0, in1, rounds, rk, rkp, i
do_block_2x 1, \in0, \in1, \rounds, \rk, \rkp, \i
.endm
.macro decrypt_block2x, in0, in1, rounds, rk, rkp, i
do_block_2x 0, \in0, \in1, \rounds, \rk, \rkp, \i
.endm
.macro encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
do_block_4x 1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
.endm
......
......@@ -60,7 +60,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
}
static int chacha_neon_stream_xor(struct skcipher_request *req,
struct chacha_ctx *ctx, u8 *iv)
const struct chacha_ctx *ctx, const u8 *iv)
{
struct skcipher_walk walk;
u32 state[16];
......
......@@ -52,7 +52,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
if (!crypto_simd_usable())
return crypto_sha1_finup(desc, data, len, out);
......
......@@ -57,7 +57,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
if (!crypto_simd_usable()) {
if (len)
......
......@@ -371,20 +371,6 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
}
}
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
aesni_enc(ctx, dst, src);
}
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
aesni_dec(ctx, dst, src);
}
static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
......@@ -920,7 +906,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
}
#endif
static struct crypto_alg aesni_algs[] = { {
static struct crypto_alg aesni_cipher_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-aesni",
.cra_priority = 300,
......@@ -937,24 +923,7 @@ static struct crypto_alg aesni_algs[] = { {
.cia_decrypt = aes_decrypt
}
}
}, {
.cra_name = "__aes",
.cra_driver_name = "__aes-aesni",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = __aes_encrypt,
.cia_decrypt = __aes_decrypt
}
}
} };
};
static struct skcipher_alg aesni_skciphers[] = {
{
......@@ -1150,7 +1119,7 @@ static int __init aesni_init(void)
#endif
#endif
err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
err = crypto_register_alg(&aesni_cipher_alg);
if (err)
return err;
......@@ -1158,7 +1127,7 @@ static int __init aesni_init(void)
ARRAY_SIZE(aesni_skciphers),
aesni_simd_skciphers);
if (err)
goto unregister_algs;
goto unregister_cipher;
err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
aesni_simd_aeads);
......@@ -1170,8 +1139,8 @@ static int __init aesni_init(void)
unregister_skciphers:
simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
aesni_simd_skciphers);
unregister_algs:
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
unregister_cipher:
crypto_unregister_alg(&aesni_cipher_alg);
return err;
}
......@@ -1181,7 +1150,7 @@ static void __exit aesni_exit(void)
aesni_simd_aeads);
simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
aesni_simd_skciphers);
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
crypto_unregister_alg(&aesni_cipher_alg);
}
late_initcall(aesni_init);
......
......@@ -124,7 +124,7 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
}
static int chacha_simd_stream_xor(struct skcipher_walk *walk,
struct chacha_ctx *ctx, u8 *iv)
const struct chacha_ctx *ctx, const u8 *iv)
{
u32 *state, state_buf[16 + 2] __aligned(8);
int next_yield = 4096; /* bytes until next FPU yield */
......
......@@ -61,7 +61,6 @@ config CRYPTO_BLKCIPHER2
tristate
select CRYPTO_ALGAPI2
select CRYPTO_RNG2
select CRYPTO_WORKQUEUE
config CRYPTO_HASH
tristate
......@@ -137,10 +136,11 @@ config CRYPTO_USER
Userspace configuration for cryptographic instantiations such as
cbc(aes).
if CRYPTO_MANAGER2
config CRYPTO_MANAGER_DISABLE_TESTS
bool "Disable run-time self tests"
default y
depends on CRYPTO_MANAGER2
help
Disable run-time self tests that normally take place at
algorithm registration.
......@@ -155,14 +155,10 @@ config CRYPTO_MANAGER_EXTRA_TESTS
This is intended for developer use only, as these tests take much
longer to run than the normal self tests.
endif # if CRYPTO_MANAGER2
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions"
help
Efficient table driven implementation of multiplications in the
field GF(2^128). This is needed by some cypher modes. This
option will be selected automatically if you select such a
cipher mode. Only select this option by hand if you expect to load
an external module that requires these functions.
tristate
config CRYPTO_NULL
tristate "Null algorithms"
......@@ -186,15 +182,11 @@ config CRYPTO_PCRYPT
This converts an arbitrary crypto algorithm into a parallel
algorithm that executes in kernel threads.
config CRYPTO_WORKQUEUE
tristate
config CRYPTO_CRYPTD
tristate "Software async crypto daemon"
select CRYPTO_BLKCIPHER
select CRYPTO_HASH
select CRYPTO_MANAGER
select CRYPTO_WORKQUEUE
help
This is a generic software asynchronous crypto daemon that
converts an arbitrary synchronous software crypto algorithm
......@@ -279,6 +271,7 @@ config CRYPTO_CCM
select CRYPTO_CTR
select CRYPTO_HASH
select CRYPTO_AEAD
select CRYPTO_MANAGER
help
Support for Counter with CBC MAC. Required for IPsec.
......@@ -288,6 +281,7 @@ config CRYPTO_GCM
select CRYPTO_AEAD
select CRYPTO_GHASH
select CRYPTO_NULL
select CRYPTO_MANAGER
help
Support for Galois/Counter Mode (GCM) and Galois Message
Authentication Code (GMAC). Required for IPSec.
......@@ -297,6 +291,7 @@ config CRYPTO_CHACHA20POLY1305
select CRYPTO_CHACHA20
select CRYPTO_POLY1305
select CRYPTO_AEAD
select CRYPTO_MANAGER
help
ChaCha20-Poly1305 AEAD support, RFC7539.
......@@ -411,6 +406,7 @@ config CRYPTO_SEQIV
select CRYPTO_BLKCIPHER
select CRYPTO_NULL
select CRYPTO_RNG_DEFAULT
select CRYPTO_MANAGER
help
This IV generator generates an IV based on a sequence number by
xoring it with a salt. This algorithm is mainly useful for CTR
......@@ -420,7 +416,7 @@ config CRYPTO_ECHAINIV
select CRYPTO_AEAD
select CRYPTO_NULL
select CRYPTO_RNG_DEFAULT
default m
select CRYPTO_MANAGER
help
This IV generator generates an IV based on the encryption of
a sequence number xored with a salt. This is the default
......@@ -456,6 +452,7 @@ config CRYPTO_CTR
config CRYPTO_CTS
tristate "CTS support"
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
help
CTS: Cipher Text Stealing
This is the Cipher Text Stealing mode as described by
......@@ -521,6 +518,7 @@ config CRYPTO_XTS
config CRYPTO_KEYWRAP
tristate "Key wrapping support"
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
help
Support for key wrapping (NIST SP800-38F / RFC3394) without
padding.
......@@ -551,6 +549,7 @@ config CRYPTO_ADIANTUM
select CRYPTO_CHACHA20
select CRYPTO_POLY1305
select CRYPTO_NHPOLY1305
select CRYPTO_MANAGER
help
Adiantum is a tweakable, length-preserving encryption mode
designed for fast and secure disk encryption, especially on
......@@ -684,6 +683,14 @@ config CRYPTO_CRC32_MIPS
instructions, when available.
config CRYPTO_XXHASH
tristate "xxHash hash algorithm"
select CRYPTO_HASH
select XXHASH
help
xxHash non-cryptographic hash algorithm. Extremely fast, working at
speeds close to RAM limits.
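
As a usage sketch (illustrative, not from this patch set): xxhash registers
through the shash interface, so a kernel caller could compute a digest roughly
as follows, assuming the generic implementation's "xxhash64" algorithm name
and its convention of passing the 64-bit seed as an 8-byte key:

    #include <crypto/hash.h>

    static int xxhash64_digest(const void *data, unsigned int len, u8 out[8])
    {
            struct crypto_shash *tfm;
            u64 seed = 0;   /* assumption: seed supplied as an 8-byte key */
            int err;

            tfm = crypto_alloc_shash("xxhash64", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_shash_setkey(tfm, (const u8 *)&seed, sizeof(seed));
            if (!err) {
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    err = crypto_shash_digest(desc, data, len, out);
                    shash_desc_zero(desc);
            }

            crypto_free_shash(tfm);
            return err;
    }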
config CRYPTO_CRCT10DIF
tristate "CRCT10DIF algorithm"
select CRYPTO_HASH
......@@ -1230,9 +1237,13 @@ config CRYPTO_ANUBIS
<https://www.cosic.esat.kuleuven.be/nessie/reports/>
<http://www.larc.usp.br/~pbarreto/AnubisPage.html>
config CRYPTO_LIB_ARC4
tristate
config CRYPTO_ARC4
tristate "ARC4 cipher algorithm"
select CRYPTO_BLKCIPHER
select CRYPTO_LIB_ARC4
help
ARC4 cipher algorithm.
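
For context on the "library helper" mentioned in the merge summary: callers
that do not need the full crypto API can use the lib/crypto routines directly.
A minimal sketch, assuming the arc4_setkey()/arc4_crypt() interface from
<crypto/arc4.h> added by this series:

    #include <crypto/arc4.h>
    #include <linux/string.h>

    static void rc4_xor_inplace(const u8 *key, unsigned int keylen,
                                u8 *buf, unsigned int len)
    {
            struct arc4_ctx ctx;

            arc4_setkey(&ctx, key, keylen);
            arc4_crypt(&ctx, buf, buf, len);        /* in-place XOR */
            memzero_explicit(&ctx, sizeof(ctx));    /* wipe key state */
    }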
......