Commit 822be00f authored by Ard Biesheuvel's avatar Ard Biesheuvel Committed by Herbert Xu

crypto: remove direct blkcipher_walk dependency on transform

In order to allow other uses of the blkcipher walk API than the blkcipher
algos themselves, this patch copies some of the transform data members to the
walk struct so the transform is only accessed at walk init time.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent d9e79726
...@@ -70,14 +70,12 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len) ...@@ -70,14 +70,12 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page); return max(start, end_page);
} }
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm, static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
struct blkcipher_walk *walk,
unsigned int bsize) unsigned int bsize)
{ {
u8 *addr; u8 *addr;
unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
addr = blkcipher_get_spot(addr, bsize); addr = blkcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize, 1); scatterwalk_copychunks(addr, &walk->out, bsize, 1);
return bsize; return bsize;
...@@ -105,7 +103,6 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, ...@@ -105,7 +103,6 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
int blkcipher_walk_done(struct blkcipher_desc *desc, int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err) struct blkcipher_walk *walk, int err)
{ {
struct crypto_blkcipher *tfm = desc->tfm;
unsigned int nbytes = 0; unsigned int nbytes = 0;
if (likely(err >= 0)) { if (likely(err >= 0)) {
...@@ -117,7 +114,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc, ...@@ -117,7 +114,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,
err = -EINVAL; err = -EINVAL;
goto err; goto err;
} else } else
n = blkcipher_done_slow(tfm, walk, n); n = blkcipher_done_slow(walk, n);
nbytes = walk->total - n; nbytes = walk->total - n;
err = 0; err = 0;
...@@ -136,7 +133,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc, ...@@ -136,7 +133,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,
} }
if (walk->iv != desc->info) if (walk->iv != desc->info)
memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm)); memcpy(desc->info, walk->iv, walk->ivsize);
if (walk->buffer != walk->page) if (walk->buffer != walk->page)
kfree(walk->buffer); kfree(walk->buffer);
if (walk->page) if (walk->page)
...@@ -226,22 +223,20 @@ static inline int blkcipher_next_fast(struct blkcipher_desc *desc, ...@@ -226,22 +223,20 @@ static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
static int blkcipher_walk_next(struct blkcipher_desc *desc, static int blkcipher_walk_next(struct blkcipher_desc *desc,
struct blkcipher_walk *walk) struct blkcipher_walk *walk)
{ {
struct crypto_blkcipher *tfm = desc->tfm;
unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
unsigned int bsize; unsigned int bsize;
unsigned int n; unsigned int n;
int err; int err;
n = walk->total; n = walk->total;
if (unlikely(n < crypto_blkcipher_blocksize(tfm))) { if (unlikely(n < walk->cipher_blocksize)) {
desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return blkcipher_walk_done(desc, walk, -EINVAL); return blkcipher_walk_done(desc, walk, -EINVAL);
} }
walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
BLKCIPHER_WALK_DIFF); BLKCIPHER_WALK_DIFF);
if (!scatterwalk_aligned(&walk->in, alignmask) || if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
!scatterwalk_aligned(&walk->out, alignmask)) { !scatterwalk_aligned(&walk->out, walk->alignmask)) {
walk->flags |= BLKCIPHER_WALK_COPY; walk->flags |= BLKCIPHER_WALK_COPY;
if (!walk->page) { if (!walk->page) {
walk->page = (void *)__get_free_page(GFP_ATOMIC); walk->page = (void *)__get_free_page(GFP_ATOMIC);
...@@ -250,12 +245,12 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, ...@@ -250,12 +245,12 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
} }
} }
bsize = min(walk->blocksize, n); bsize = min(walk->walk_blocksize, n);
n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n); n = scatterwalk_clamp(&walk->out, n);
if (unlikely(n < bsize)) { if (unlikely(n < bsize)) {
err = blkcipher_next_slow(desc, walk, bsize, alignmask); err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
goto set_phys_lowmem; goto set_phys_lowmem;
} }
...@@ -277,28 +272,26 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, ...@@ -277,28 +272,26 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
return err; return err;
} }
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk, static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
struct crypto_blkcipher *tfm,
unsigned int alignmask)
{ {
unsigned bs = walk->blocksize; unsigned bs = walk->walk_blocksize;
unsigned int ivsize = crypto_blkcipher_ivsize(tfm); unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
unsigned aligned_bs = ALIGN(bs, alignmask + 1); unsigned int size = aligned_bs * 2 +
unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) - walk->ivsize + max(aligned_bs, walk->ivsize) -
(alignmask + 1); (walk->alignmask + 1);
u8 *iv; u8 *iv;
size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
walk->buffer = kmalloc(size, GFP_ATOMIC); walk->buffer = kmalloc(size, GFP_ATOMIC);
if (!walk->buffer) if (!walk->buffer)
return -ENOMEM; return -ENOMEM;
iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
iv = blkcipher_get_spot(iv, bs) + aligned_bs; iv = blkcipher_get_spot(iv, bs) + aligned_bs;
iv = blkcipher_get_spot(iv, bs) + aligned_bs; iv = blkcipher_get_spot(iv, bs) + aligned_bs;
iv = blkcipher_get_spot(iv, ivsize); iv = blkcipher_get_spot(iv, walk->ivsize);
walk->iv = memcpy(iv, walk->iv, ivsize); walk->iv = memcpy(iv, walk->iv, walk->ivsize);
return 0; return 0;
} }
...@@ -306,7 +299,10 @@ int blkcipher_walk_virt(struct blkcipher_desc *desc, ...@@ -306,7 +299,10 @@ int blkcipher_walk_virt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk) struct blkcipher_walk *walk)
{ {
walk->flags &= ~BLKCIPHER_WALK_PHYS; walk->flags &= ~BLKCIPHER_WALK_PHYS;
walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
walk->cipher_blocksize = walk->walk_blocksize;
walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
return blkcipher_walk_first(desc, walk); return blkcipher_walk_first(desc, walk);
} }
EXPORT_SYMBOL_GPL(blkcipher_walk_virt); EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
...@@ -315,7 +311,10 @@ int blkcipher_walk_phys(struct blkcipher_desc *desc, ...@@ -315,7 +311,10 @@ int blkcipher_walk_phys(struct blkcipher_desc *desc,
struct blkcipher_walk *walk) struct blkcipher_walk *walk)
{ {
walk->flags |= BLKCIPHER_WALK_PHYS; walk->flags |= BLKCIPHER_WALK_PHYS;
walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
walk->cipher_blocksize = walk->walk_blocksize;
walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
return blkcipher_walk_first(desc, walk); return blkcipher_walk_first(desc, walk);
} }
EXPORT_SYMBOL_GPL(blkcipher_walk_phys); EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
...@@ -323,9 +322,6 @@ EXPORT_SYMBOL_GPL(blkcipher_walk_phys); ...@@ -323,9 +322,6 @@ EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
static int blkcipher_walk_first(struct blkcipher_desc *desc, static int blkcipher_walk_first(struct blkcipher_desc *desc,
struct blkcipher_walk *walk) struct blkcipher_walk *walk)
{ {
struct crypto_blkcipher *tfm = desc->tfm;
unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
if (WARN_ON_ONCE(in_irq())) if (WARN_ON_ONCE(in_irq()))
return -EDEADLK; return -EDEADLK;
...@@ -335,8 +331,8 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc, ...@@ -335,8 +331,8 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
walk->buffer = NULL; walk->buffer = NULL;
walk->iv = desc->info; walk->iv = desc->info;
if (unlikely(((unsigned long)walk->iv & alignmask))) { if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = blkcipher_copy_iv(walk, tfm, alignmask); int err = blkcipher_copy_iv(walk);
if (err) if (err)
return err; return err;
} }
...@@ -353,7 +349,10 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc, ...@@ -353,7 +349,10 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
unsigned int blocksize) unsigned int blocksize)
{ {
walk->flags &= ~BLKCIPHER_WALK_PHYS; walk->flags &= ~BLKCIPHER_WALK_PHYS;
walk->blocksize = blocksize; walk->walk_blocksize = blocksize;
walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
return blkcipher_walk_first(desc, walk); return blkcipher_walk_first(desc, walk);
} }
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block); EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
......
...@@ -100,9 +100,12 @@ struct blkcipher_walk { ...@@ -100,9 +100,12 @@ struct blkcipher_walk {
void *page; void *page;
u8 *buffer; u8 *buffer;
u8 *iv; u8 *iv;
unsigned int ivsize;
int flags; int flags;
unsigned int blocksize; unsigned int walk_blocksize;
unsigned int cipher_blocksize;
unsigned int alignmask;
}; };
struct ablkcipher_walk { struct ablkcipher_walk {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment