Commit 341476d6 authored by Herbert Xu's avatar Herbert Xu

crypto: chainiv - Offer normal cipher functionality without RNG

The RNG may not be available during early boot, e.g., the relevant
modules may not be included in the initramfs.  As the RNG is only
needed for IPsec, we should not let this prevent use of ciphers
without IV generators, e.g., for disk encryption.

This patch postpones the RNG allocation to the init function so
that one failure during early boot does not make the RNG unavailable
for all subsequent users of the same cipher.

More importantly, it lets the cipher live even if RNG allocation
fails.  Of course we no longer offer IV generation, which will
fail with an error if invoked.  But all other cipher capabilities
will function as usual.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 9aa867e4
...@@ -83,21 +83,34 @@ static int chainiv_givencrypt(struct skcipher_givcrypt_request *req) ...@@ -83,21 +83,34 @@ static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
static int chainiv_init_common(struct crypto_tfm *tfm, char iv[]) static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
{ {
struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
int err = 0;
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
return crypto_rng_get_bytes(crypto_default_rng, iv, if (iv) {
crypto_ablkcipher_ivsize(geniv)) ?: err = crypto_rng_get_bytes(crypto_default_rng, iv,
skcipher_geniv_init(tfm); crypto_ablkcipher_ivsize(geniv));
crypto_put_default_rng();
}
return err ?: skcipher_geniv_init(tfm);
} }
static int chainiv_init(struct crypto_tfm *tfm) static int chainiv_init(struct crypto_tfm *tfm)
{ {
struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
char *iv;
spin_lock_init(&ctx->lock); spin_lock_init(&ctx->lock);
return chainiv_init_common(tfm, ctx->iv); iv = NULL;
if (!crypto_get_default_rng()) {
crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
iv = ctx->iv;
}
return chainiv_init_common(tfm, iv);
} }
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
...@@ -216,14 +229,23 @@ static void async_chainiv_do_postponed(struct work_struct *work) ...@@ -216,14 +229,23 @@ static void async_chainiv_do_postponed(struct work_struct *work)
static int async_chainiv_init(struct crypto_tfm *tfm) static int async_chainiv_init(struct crypto_tfm *tfm)
{ {
struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
char *iv;
spin_lock_init(&ctx->lock); spin_lock_init(&ctx->lock);
crypto_init_queue(&ctx->queue, 100); crypto_init_queue(&ctx->queue, 100);
INIT_WORK(&ctx->postponed, async_chainiv_do_postponed); INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
return chainiv_init_common(tfm, ctx->iv); iv = NULL;
if (!crypto_get_default_rng()) {
crypto_ablkcipher_crt(geniv)->givencrypt =
async_chainiv_givencrypt;
iv = ctx->iv;
}
return chainiv_init_common(tfm, iv);
} }
static void async_chainiv_exit(struct crypto_tfm *tfm) static void async_chainiv_exit(struct crypto_tfm *tfm)
...@@ -241,21 +263,14 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb) ...@@ -241,21 +263,14 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
{ {
struct crypto_attr_type *algt; struct crypto_attr_type *algt;
struct crypto_instance *inst; struct crypto_instance *inst;
int err;
algt = crypto_get_attr_type(tb); algt = crypto_get_attr_type(tb);
if (IS_ERR(algt)) if (IS_ERR(algt))
return ERR_CAST(algt); return ERR_CAST(algt);
err = crypto_get_default_rng();
if (err)
return ERR_PTR(err);
inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0); inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
if (IS_ERR(inst)) if (IS_ERR(inst))
goto put_rng; goto out;
inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt;
inst->alg.cra_init = chainiv_init; inst->alg.cra_init = chainiv_init;
inst->alg.cra_exit = skcipher_geniv_exit; inst->alg.cra_exit = skcipher_geniv_exit;
...@@ -265,8 +280,6 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb) ...@@ -265,8 +280,6 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
if (!crypto_requires_sync(algt->type, algt->mask)) { if (!crypto_requires_sync(algt->type, algt->mask)) {
inst->alg.cra_flags |= CRYPTO_ALG_ASYNC; inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
inst->alg.cra_ablkcipher.givencrypt = async_chainiv_givencrypt;
inst->alg.cra_init = async_chainiv_init; inst->alg.cra_init = async_chainiv_init;
inst->alg.cra_exit = async_chainiv_exit; inst->alg.cra_exit = async_chainiv_exit;
...@@ -277,22 +290,12 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb) ...@@ -277,22 +290,12 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
out: out:
return inst; return inst;
put_rng:
crypto_put_default_rng();
goto out;
}
static void chainiv_free(struct crypto_instance *inst)
{
skcipher_geniv_free(inst);
crypto_put_default_rng();
} }
static struct crypto_template chainiv_tmpl = { static struct crypto_template chainiv_tmpl = {
.name = "chainiv", .name = "chainiv",
.alloc = chainiv_alloc, .alloc = chainiv_alloc,
.free = chainiv_free, .free = skcipher_geniv_free,
.module = THIS_MODULE, .module = THIS_MODULE,
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment