Commit c65058b7 authored by Eric Biggers, committed by Herbert Xu

crypto: skcipher - remove the "blkcipher" algorithm type

Now that all "blkcipher" algorithms have been converted to "skcipher",
remove the blkcipher algorithm type.

The skcipher (symmetric key cipher) algorithm type was introduced a few
years ago to replace both blkcipher and ablkcipher (synchronous and
asynchronous block cipher).  The advantages of skcipher include:

  - A much less confusing name, since none of these algorithm types have
    ever actually been for raw block ciphers, but rather for all
    length-preserving encryption modes including block cipher modes of
    operation, stream ciphers, and other length-preserving modes.

  - It unified blkcipher and ablkcipher into a single algorithm type
    which supports both synchronous and asynchronous implementations.
    Note, blkcipher already operated only on scatterlists, so the fact
    that skcipher does too isn't a regression in functionality.

  - Better type safety by using struct skcipher_alg, struct
    crypto_skcipher, etc. instead of crypto_alg, crypto_tfm, etc.

  - It sometimes simplifies the implementations of algorithms.

Also, the blkcipher API was no longer being tested.
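
For callers, the skcipher API that replaces the removed blkcipher calls
looks roughly as follows.  This is only an illustrative sketch, not part
of the patch; the algorithm name "cbc(aes)" and the key/iv/buffer
variables are assumptions made up for the example:

  #include <crypto/skcipher.h>
  #include <linux/scatterlist.h>

  static int example_encrypt(const u8 *key, unsigned int keylen,
  			     u8 *iv, void *buf, unsigned int len)
  {
  	struct crypto_skcipher *tfm;
  	struct skcipher_request *req;
  	struct scatterlist sg;
  	DECLARE_CRYPTO_WAIT(wait);
  	int err;

  	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
  	if (IS_ERR(tfm))
  		return PTR_ERR(tfm);

  	err = crypto_skcipher_setkey(tfm, key, keylen);
  	if (err)
  		goto out_free_tfm;

  	req = skcipher_request_alloc(tfm, GFP_KERNEL);
  	if (!req) {
  		err = -ENOMEM;
  		goto out_free_tfm;
  	}

  	/* One scatterlist entry used as both source and destination. */
  	sg_init_one(&sg, buf, len);
  	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
  				      crypto_req_done, &wait);
  	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

  	/* Works for both synchronous and asynchronous implementations. */
  	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

  	skcipher_request_free(req);
  out_free_tfm:
  	crypto_free_skcipher(tfm);
  	return err;
  }
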
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 53253064
......@@ -5,7 +5,7 @@ Block Cipher Algorithm Definitions
:doc: Block Cipher Algorithm Definitions
.. kernel-doc:: include/linux/crypto.h
:functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg compress_alg
:functions: crypto_alg ablkcipher_alg cipher_alg compress_alg
Symmetric Key Cipher API
------------------------
......@@ -51,12 +51,3 @@ Asynchronous Cipher Request Handle - Deprecated
.. kernel-doc:: include/linux/crypto.h
:functions: crypto_ablkcipher_reqsize ablkcipher_request_set_tfm ablkcipher_request_alloc ablkcipher_request_free ablkcipher_request_set_callback ablkcipher_request_set_crypt
Synchronous Block Cipher API - Deprecated
-----------------------------------------
.. kernel-doc:: include/linux/crypto.h
:doc: Synchronous Block Cipher API
.. kernel-doc:: include/linux/crypto.h
:functions: crypto_alloc_blkcipher crypto_free_blkcipher crypto_has_blkcipher crypto_blkcipher_name crypto_blkcipher_ivsize crypto_blkcipher_blocksize crypto_blkcipher_setkey crypto_blkcipher_encrypt crypto_blkcipher_encrypt_iv crypto_blkcipher_decrypt crypto_blkcipher_decrypt_iv crypto_blkcipher_set_iv crypto_blkcipher_get_iv
......@@ -201,8 +201,6 @@ the aforementioned cipher types:
- CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with Associated Data
(MAC)
- CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher
- CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher
- CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as
......
......@@ -128,25 +128,20 @@ process requests that are unaligned. This implies, however, additional
overhead as the kernel crypto API needs to perform the realignment of
the data which may imply moving of data.
Cipher Definition With struct blkcipher_alg and ablkcipher_alg
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Cipher Definition With struct skcipher_alg
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Struct blkcipher_alg defines a synchronous block cipher whereas struct
ablkcipher_alg defines an asynchronous block cipher.
Struct skcipher_alg defines a multi-block cipher, or more generally, a
length-preserving symmetric cipher algorithm.
Please refer to the single block cipher description for schematics of
the block cipher usage.
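(Illustrative sketch, not part of this patch: an implementation exposing such
an algorithm fills in struct skcipher_alg and registers it with
crypto_register_skcipher(); every "example" name below is hypothetical.)

  #include <crypto/internal/skcipher.h>

  static struct skcipher_alg example_cbc_alg = {
  	.base = {
  		.cra_name		= "cbc(example)",	/* hypothetical */
  		.cra_driver_name	= "cbc-example-driver",	/* hypothetical */
  		.cra_priority		= 300,
  		.cra_blocksize		= 16,
  		.cra_ctxsize		= sizeof(struct example_ctx),
  		.cra_module		= THIS_MODULE,
  	},
  	.min_keysize	= 16,
  	.max_keysize	= 32,
  	.ivsize		= 16,
  	.setkey		= example_setkey,	/* hypothetical callbacks */
  	.encrypt	= example_encrypt,
  	.decrypt	= example_decrypt,
  };

  /* Registered and unregistered with:
   *	crypto_register_skcipher(&example_cbc_alg);
   *	crypto_unregister_skcipher(&example_cbc_alg);
   */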
Scatterlist handling
~~~~~~~~~~~~~~~~~~~~
Specifics Of Asynchronous Multi-Block Cipher
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are a couple of specifics to the asynchronous interface.
First of all, some of the drivers will want to use the Generic
ScatterWalk in case the hardware needs to be fed separate chunks of the
scatterlist which contains the plaintext and will contain the
ciphertext. Please refer to the ScatterWalk interface offered by the
Linux kernel scatter / gather list implementation.
Some drivers will want to use the Generic ScatterWalk in case the
hardware needs to be fed separate chunks of the scatterlist which
contains the plaintext and will contain the ciphertext. Please refer
to the ScatterWalk interface offered by the Linux kernel scatter /
gather list implementation.
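(Another illustrative sketch outside this patch: purely software
implementations usually do not use the ScatterWalk directly but traverse the
scatterlists with the skcipher_walk helpers; the per-chunk processing below is
only a placeholder.)

  #include <crypto/internal/skcipher.h>

  static int example_crypt(struct skcipher_request *req)
  {
  	struct skcipher_walk walk;
  	int err;

  	err = skcipher_walk_virt(&walk, req, false);

  	while (walk.nbytes) {
  		unsigned int nbytes = walk.nbytes;

  		/* Placeholder: transform nbytes bytes from
  		 * walk.src.virt.addr into walk.dst.virt.addr,
  		 * using walk.iv as needed.
  		 */
  		memcpy(walk.dst.virt.addr, walk.src.virt.addr, nbytes);

  		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
  	}

  	return err;
  }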
Hashing [HASH]
--------------
......
......@@ -16,7 +16,6 @@ obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
crypto_blkcipher-y := ablkcipher.o
crypto_blkcipher-y += blkcipher.o
crypto_blkcipher-y += skcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
......
......@@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
*
* The returned transform is of a non-determinate type. Most people
* should use one of the more specific allocation functions such as
* crypto_alloc_blkcipher.
* crypto_alloc_skcipher().
*
* In case of error the return value is an error pointer.
*/
......
......@@ -919,7 +919,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
case CRYPTO_ALG_TYPE_SKCIPHER:
return cryptd_create_skcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_HASH:
return cryptd_create_hash(tmpl, tb, &queue);
......
......@@ -213,10 +213,6 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_BLKCIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
......
......@@ -486,7 +486,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
type = algt->type & algt->mask;
switch (type) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
case CRYPTO_ALG_TYPE_SKCIPHER:
skcipher_inst = kzalloc(sizeof(*skcipher_inst) +
sizeof(*ictx), GFP_KERNEL);
if (!skcipher_inst)
......@@ -586,7 +586,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
base->cra_alignmask = block_base->cra_alignmask;
base->cra_priority = block_base->cra_priority;
if (type == CRYPTO_ALG_TYPE_BLKCIPHER) {
if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
skcipher_inst->alg.setkey = essiv_skcipher_setkey;
skcipher_inst->alg.encrypt = essiv_skcipher_encrypt;
skcipher_inst->alg.decrypt = essiv_skcipher_decrypt;
......@@ -628,7 +628,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
out_free_hash:
crypto_mod_put(_hash_alg);
out_drop_skcipher:
if (type == CRYPTO_ALG_TYPE_BLKCIPHER)
if (type == CRYPTO_ALG_TYPE_SKCIPHER)
crypto_drop_skcipher(&ictx->u.skcipher_spawn);
else
crypto_drop_aead(&ictx->u.aead_spawn);
......
......@@ -580,9 +580,6 @@ EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
if (alg->cra_type == &crypto_blkcipher_type)
return sizeof(struct crypto_blkcipher *);
if (alg->cra_type == &crypto_ablkcipher_type)
return sizeof(struct crypto_ablkcipher *);
......@@ -595,105 +592,6 @@ static void skcipher_set_needkey(struct crypto_skcipher *tfm)
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
struct crypto_blkcipher *blkcipher = *ctx;
int err;
crypto_blkcipher_clear_flags(blkcipher, ~0);
crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
CRYPTO_TFM_RES_MASK);
if (unlikely(err)) {
skcipher_set_needkey(tfm);
return err;
}
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
int (*crypt)(struct blkcipher_desc *,
struct scatterlist *,
struct scatterlist *,
unsigned int))
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
struct blkcipher_desc desc = {
.tfm = *ctx,
.info = req->iv,
.flags = req->base.flags,
};
return crypt(&desc, req->dst, req->src, req->cryptlen);
}
static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
return skcipher_crypt_blkcipher(req, alg->encrypt);
}
static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
return skcipher_crypt_blkcipher(req, alg->decrypt);
}
static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
crypto_free_blkcipher(*ctx);
}
static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
struct crypto_alg *calg = tfm->__crt_alg;
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
struct crypto_blkcipher *blkcipher;
struct crypto_tfm *btfm;
if (!crypto_mod_get(calg))
return -EAGAIN;
btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(btfm)) {
crypto_mod_put(calg);
return PTR_ERR(btfm);
}
blkcipher = __crypto_blkcipher_cast(btfm);
*ctx = blkcipher;
tfm->exit = crypto_exit_skcipher_ops_blkcipher;
skcipher->setkey = skcipher_setkey_blkcipher;
skcipher->encrypt = skcipher_encrypt_blkcipher;
skcipher->decrypt = skcipher_decrypt_blkcipher;
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
skcipher->keysize = calg->cra_blkcipher.max_keysize;
skcipher_set_needkey(skcipher);
return 0;
}
static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
......@@ -888,9 +786,6 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
return crypto_init_skcipher_ops_blkcipher(tfm);
if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
return crypto_init_skcipher_ops_ablkcipher(tfm);
......@@ -973,7 +868,7 @@ static const struct crypto_type crypto_skcipher_type = {
#endif
.report = crypto_skcipher_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
};
......
......@@ -85,36 +85,6 @@ struct scatter_walk {
unsigned int offset;
};
struct blkcipher_walk {
union {
struct {
struct page *page;
unsigned long offset;
} phys;
struct {
u8 *page;
u8 *addr;
} virt;
} src, dst;
struct scatter_walk in;
unsigned int nbytes;
struct scatter_walk out;
unsigned int total;
void *page;
u8 *buffer;
u8 *iv;
unsigned int ivsize;
int flags;
unsigned int walk_blocksize;
unsigned int cipher_blocksize;
unsigned int alignmask;
};
struct ablkcipher_walk {
struct {
struct page *page;
......@@ -133,7 +103,6 @@ struct ablkcipher_walk {
};
extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;
void crypto_mod_put(struct crypto_alg *alg);
......@@ -233,20 +202,6 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
}
}
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_aead *tfm,
unsigned int blocksize);
int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
......@@ -286,25 +241,6 @@ static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
return crypto_tfm_ctx_aligned(&tfm->base);
}
static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
struct crypto_spawn *spawn)
{
u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
u32 mask = CRYPTO_ALG_TYPE_MASK;
return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}
static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
return crypto_tfm_ctx_aligned(&tfm->base);
}
static inline struct crypto_cipher *crypto_spawn_cipher(
struct crypto_spawn *spawn)
{
......@@ -319,16 +255,6 @@ static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}
static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
walk->in.sg = src;
walk->out.sg = dst;
walk->total = nbytes;
}
static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
struct scatterlist *dst,
struct scatterlist *src,
......
......@@ -182,10 +182,6 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req)
static inline unsigned int crypto_skcipher_alg_min_keysize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blkcipher.min_keysize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_ablkcipher.min_keysize;
......@@ -195,10 +191,6 @@ static inline unsigned int crypto_skcipher_alg_min_keysize(
static inline unsigned int crypto_skcipher_alg_max_keysize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blkcipher.max_keysize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_ablkcipher.max_keysize;
......@@ -208,10 +200,6 @@ static inline unsigned int crypto_skcipher_alg_max_keysize(
static inline unsigned int crypto_skcipher_alg_walksize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blocksize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_blocksize;
......
......@@ -241,10 +241,6 @@ static inline struct skcipher_alg *crypto_skcipher_alg(
static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blkcipher.ivsize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_ablkcipher.ivsize;
......@@ -290,10 +286,6 @@ static inline unsigned int crypto_skcipher_blocksize(
static inline unsigned int crypto_skcipher_alg_chunksize(
struct skcipher_alg *alg)
{
if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
return alg->base.cra_blocksize;
if (alg->base.cra_ablkcipher.encrypt)
return alg->base.cra_blocksize;
......
......@@ -626,8 +626,8 @@ static const struct xfrm_algo_list xfrm_aalg_list = {
static const struct xfrm_algo_list xfrm_ealg_list = {
.algs = ealg_list,
.entries = ARRAY_SIZE(ealg_list),
.type = CRYPTO_ALG_TYPE_BLKCIPHER,
.mask = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.mask = CRYPTO_ALG_TYPE_MASK,
};
static const struct xfrm_algo_list xfrm_calg_list = {
......