Commit 3957f2b3 authored by James Morris, committed by Sridhar Samudrala

[CRYPTO]: in/out scatterlist support for ciphers.

- Merge scatterwalk patch from Adam J. Richter <adam@yggdrasil.com>
  API change: cipher methods now take in/out scatterlists and an nbytes
  param (see the call-site sketch below).
- Merge gss_krb5_crypto update from Adam J. Richter <adam@yggdrasil.com>
- Add KM_SOFTIRQn (instead of KM_CRYPTO_IN etc).
- Add asm/kmap_types.h to crypto/internal.h
- Update cipher.c credits.
- Update cipher.c documentation.
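
For orientation, a minimal sketch of the call-site change (taken from the test code further down in this diff; err, nsg and nbytes stand in for whatever the caller uses):

    /* old API: one scatterlist plus a fragment count */
    err = crypto_cipher_encrypt(tfm, sg, nsg);

    /* new API: separate dst/src scatterlists plus a byte count;
     * in-place operation simply passes the same list twice */
    err = crypto_cipher_encrypt(tfm, sg, sg, nbytes);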
parent 8f5e762b
......@@ -4,6 +4,7 @@
* Cipher operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
......@@ -16,12 +17,22 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"
typedef void (cryptfn_t)(void *, u8 *, const u8 *);
typedef void (procfn_t)(struct crypto_tfm *, u8 *, cryptfn_t, int enc);
typedef void (procfn_t)(struct crypto_tfm *, u8 *, u8*, cryptfn_t, int enc);
struct scatter_walk {
struct scatterlist *sg;
struct page *page;
void *data;
unsigned int len_this_page;
unsigned int len_this_segment;
unsigned int offset;
};
static inline void xor_64(u8 *a, const u8 *b)
{
......@@ -37,165 +48,191 @@ static inline void xor_128(u8 *a, const u8 *b)
((u32 *)a)[3] ^= ((u32 *)b)[3];
}
static inline unsigned int sglen(struct scatterlist *sg, unsigned int nsg)
/* Define sg_next as an inline routine now, in case we want to change
scatterlist to a linked list later. */
static inline struct scatterlist *sg_next(struct scatterlist *sg)
{
unsigned int i, n;
for (i = 0, n = 0; i < nsg; i++)
n += sg[i].length;
return n;
return sg + 1;
}
/*
* Do not call this unless the total length of all of the fragments
* has been verified as a multiple of the block size.
*/
static unsigned int copy_chunks(struct crypto_tfm *tfm, u8 *buf,
struct scatterlist *sg, unsigned int sgidx,
unsigned int rlen, unsigned int *last, int in)
void *which_buf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
{
unsigned int i, copied, coff, j, aligned;
unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
for (i = sgidx, j = copied = 0, aligned = 0 ; copied < bsize; i++) {
unsigned int len = sg[i].length;
unsigned int clen;
char *p;
if (copied) {
coff = 0;
clen = min(len, bsize - copied);
if (len == bsize - copied)
aligned = 1; /* last + right aligned */
} else {
coff = len - rlen;
clen = rlen;
}
if (nbytes <= walk->len_this_page &&
(((unsigned long)walk->data) & (PAGE_CACHE_SIZE - 1)) + nbytes <=
PAGE_CACHE_SIZE)
return walk->data;
else
return scratch;
}
p = crypto_kmap(sg[i].page) + sg[i].offset + coff;
if (in)
memcpy(&buf[copied], p, clen);
static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
{
if (out)
memcpy(sgdata, buf, nbytes);
else
memcpy(buf, sgdata, nbytes);
}
static void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
{
unsigned int rest_of_page;
walk->sg = sg;
walk->page = sg->page;
walk->len_this_segment = sg->length;
rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
walk->len_this_page = min(sg->length, rest_of_page);
walk->offset = sg->offset;
}
static void scatterwalk_map(struct scatter_walk *walk, int out)
{
walk->data = crypto_kmap(walk->page, out) + walk->offset;
}
static void scatter_page_done(struct scatter_walk *walk, int out,
unsigned int more)
{
/* walk->data may be pointing to the first byte of the next page;
however, we know we transferred at least one byte. So,
walk->data - 1 will be a virtual address in the mapped page. */
if (out)
flush_dcache_page(walk->page);
if (more) {
walk->len_this_segment -= walk->len_this_page;
if (walk->len_this_segment) {
walk->page++;
walk->len_this_page = min(walk->len_this_segment,
(unsigned)PAGE_CACHE_SIZE);
walk->offset = 0;
}
else
memcpy(p, &buf[copied], clen);
crypto_kunmap(p);
*last = aligned ? 0 : clen;
copied += clen;
scatterwalk_start(walk, sg_next(walk->sg));
}
return i - sgidx - 2 + aligned;
}
static inline unsigned int gather_chunks(struct crypto_tfm *tfm, u8 *buf,
struct scatterlist *sg,
unsigned int sgidx, unsigned int rlen,
unsigned int *last)
static void scatter_done(struct scatter_walk *walk, int out, int more)
{
return copy_chunks(tfm, buf, sg, sgidx, rlen, last, 1);
crypto_kunmap(walk->data, out);
if (walk->len_this_page == 0 || !more)
scatter_page_done(walk, out, more);
}
static inline unsigned int scatter_chunks(struct crypto_tfm *tfm, u8 *buf,
struct scatterlist *sg,
unsigned int sgidx, unsigned int rlen,
unsigned int *last)
/*
* Do not call this unless the total length of all of the fragments
* has been verified as a multiple of the block size.
*/
static int copy_chunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out)
{
return copy_chunks(tfm, buf, sg, sgidx, rlen, last, 0);
if (buf != walk->data) {
while (nbytes > walk->len_this_page) {
memcpy_dir(buf, walk->data, walk->len_this_page, out);
buf += walk->len_this_page;
nbytes -= walk->len_this_page;
crypto_kunmap(walk->data, out);
scatter_page_done(walk, out, 1);
scatterwalk_map(walk, out);
}
memcpy_dir(buf, walk->data, nbytes, out);
}
walk->offset += nbytes;
walk->len_this_page -= nbytes;
walk->len_this_segment -= nbytes;
return 0;
}
/*
* Generic encrypt/decrypt wrapper for ciphers.
*
* If we find a remnant at the end of a frag, we have to encrypt or
* decrypt across possibly multiple page boundaries via a temporary
* block, then continue processing with a chunk offset until the end
* of a frag is block aligned.
*
* The code is further complicated by having to remap a page after
* processing a block then yielding. The data will be offset from the
* start of page at the scatterlist offset, the chunking offset (coff)
* and the block offset (boff).
* Generic encrypt/decrypt wrapper for ciphers, handles operations across
* multiple page boundaries by using temporary blocks. In user context,
* the kernel is given a chance to schedule us once per block.
*/
static int crypt(struct crypto_tfm *tfm, struct scatterlist *sg,
unsigned int nsg, cryptfn_t crfn, procfn_t prfn, int enc)
static int crypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, cryptfn_t crfn, procfn_t prfn, int enc)
{
unsigned int i, coff;
unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
u8 tmp[bsize];
struct scatter_walk walk_in, walk_out;
const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
u8 tmp_src[nbytes > src->length ? bsize : 0];
u8 tmp_dst[nbytes > dst->length ? bsize : 0];
if (sglen(sg, nsg) % bsize) {
if (!nbytes)
return 0;
if (nbytes % bsize) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return -EINVAL;
}
for (i = 0, coff = 0; i < nsg; i++) {
unsigned int n = 0, boff = 0;
unsigned int len = sg[i].length - coff;
char *p = crypto_kmap(sg[i].page) + sg[i].offset + coff;
while (len) {
if (len < bsize) {
crypto_kunmap(p);
n = gather_chunks(tfm, tmp, sg, i, len, &coff);
prfn(tfm, tmp, crfn, enc);
scatter_chunks(tfm, tmp, sg, i, len, &coff);
crypto_yield(tfm);
goto unmapped;
} else {
prfn(tfm, p, crfn, enc);
crypto_kunmap(p);
crypto_yield(tfm);
/* remap and point to recalculated offset */
boff += bsize;
p = crypto_kmap(sg[i].page)
+ sg[i].offset + coff + boff;
len -= bsize;
/* End of frag with no remnant? */
if (coff && len == 0)
coff = 0;
}
}
crypto_kunmap(p);
unmapped:
i += n;
scatterwalk_start(&walk_in, src);
scatterwalk_start(&walk_out, dst);
for(;;) {
u8 *src_p, *dst_p;
scatterwalk_map(&walk_in, 0);
scatterwalk_map(&walk_out, 1);
src_p = which_buf(&walk_in, bsize, tmp_src);
dst_p = which_buf(&walk_out, bsize, tmp_dst);
nbytes -= bsize;
copy_chunks(src_p, &walk_in, bsize, 0);
prfn(tfm, dst_p, src_p, crfn, enc);
scatter_done(&walk_in, 0, nbytes);
copy_chunks(dst_p, &walk_out, bsize, 1);
scatter_done(&walk_out, 1, nbytes);
if (!nbytes)
return 0;
crypto_yield(tfm);
}
return 0;
}
static void cbc_process(struct crypto_tfm *tfm,
u8 *block, cryptfn_t fn, int enc)
u8 *dst, u8 *src, cryptfn_t fn, int enc)
{
/* Null encryption */
if (!tfm->crt_cipher.cit_iv)
return;
if (enc) {
tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, block);
fn(tfm->crt_ctx, block, tfm->crt_cipher.cit_iv);
memcpy(tfm->crt_cipher.cit_iv, block,
tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, src);
fn(tfm->crt_ctx, dst, tfm->crt_cipher.cit_iv);
memcpy(tfm->crt_cipher.cit_iv, dst,
crypto_tfm_alg_blocksize(tfm));
} else {
u8 buf[crypto_tfm_alg_blocksize(tfm)];
const int need_stack = (src == dst);
u8 stack[need_stack ? crypto_tfm_alg_blocksize(tfm) : 0];
u8 *buf = need_stack ? stack : dst;
fn(tfm->crt_ctx, buf, block);
fn(tfm->crt_ctx, buf, src);
tfm->crt_u.cipher.cit_xor_block(buf, tfm->crt_cipher.cit_iv);
memcpy(tfm->crt_cipher.cit_iv, block,
memcpy(tfm->crt_cipher.cit_iv, src,
crypto_tfm_alg_blocksize(tfm));
memcpy(block, buf, crypto_tfm_alg_blocksize(tfm));
if (buf != dst)
memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
}
}
static void ecb_process(struct crypto_tfm *tfm, u8 *block,
static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
cryptfn_t fn, int enc)
{
fn(tfm->crt_ctx, block, block);
fn(tfm->crt_ctx, dst, src);
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
......@@ -211,35 +248,44 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
}
static int ecb_encrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return crypt(tfm, sg, nsg,
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_encrypt, ecb_process, 1);
}
static int ecb_decrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return crypt(tfm, sg, nsg,
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_decrypt, ecb_process, 1);
}
static int cbc_encrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return crypt(tfm, sg, nsg,
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_encrypt, cbc_process, 1);
}
static int cbc_decrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return crypt(tfm, sg, nsg,
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_decrypt, cbc_process, 0);
}
static int nocrypt(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return -ENOSYS;
}
......
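
For clarity, a small userspace analogue of the which_buf() decision above; pick_buf and PAGE_SIZE_EXAMPLE are illustrative names, not part of the diff:

    #include <stdint.h>

    #define PAGE_SIZE_EXAMPLE 4096u

    /* Return the in-place mapping when the whole block both fits in the
     * bytes remaining on this page and does not straddle a page boundary;
     * otherwise fall back to the caller's scratch block, as which_buf()
     * does in cipher.c above. */
    static void *pick_buf(void *data, unsigned int len_this_page,
                          unsigned int nbytes, void *scratch)
    {
            if (nbytes <= len_this_page &&
                (((uintptr_t)data) & (PAGE_SIZE_EXAMPLE - 1)) + nbytes
                    <= PAGE_SIZE_EXAMPLE)
                    return data;
            return scratch;
    }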
......@@ -28,10 +28,10 @@ static void update(struct crypto_tfm *tfm,
unsigned int i;
for (i = 0; i < nsg; i++) {
char *p = crypto_kmap(sg[i].page) + sg[i].offset;
char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
p, sg[i].length);
crypto_kunmap(p);
crypto_kunmap(p, 0);
crypto_yield(tfm);
}
}
......@@ -49,10 +49,10 @@ static void digest(struct crypto_tfm *tfm,
tfm->crt_digest.dit_init(tfm);
for (i = 0; i < nsg; i++) {
char *p = crypto_kmap(sg[i].page) + sg[i].offset;
char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
p, sg[i].length);
crypto_kunmap(p);
crypto_kunmap(p, 0);
crypto_yield(tfm);
}
crypto_digest_final(tfm, out);
......
......@@ -16,17 +16,29 @@
#include <linux/init.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/kmap_types.h>
static inline void *crypto_kmap(struct page *page)
static enum km_type km_types[] = {
KM_USER0,
KM_USER1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
};
static inline enum km_type crypto_kmap_type(int out)
{
return km_types[(in_softirq() ? 2 : 0) + out];
}
static inline void *crypto_kmap(struct page *page, int out)
{
return kmap_atomic(page, in_softirq() ?
KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
return kmap_atomic(page, crypto_kmap_type(out));
}
static inline void crypto_kunmap(void *vaddr)
static inline void crypto_kunmap(void *vaddr, int out)
{
kunmap_atomic(vaddr, in_softirq() ?
KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
kunmap_atomic(vaddr, crypto_kmap_type(out));
}
static inline void crypto_yield(struct crypto_tfm *tfm)
......
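
As a quick check of the new mapping-slot selection, a standalone sketch of the same indexing (the enum and example_kmap_type are illustrative; the kernel version uses in_softirq() and the real km_type values):

    /* Index by context (user vs. softirq) and direction (in vs. out):
     *   user,    out=0 -> slot 0 (KM_USER0)
     *   user,    out=1 -> slot 1 (KM_USER1)
     *   softirq, out=0 -> slot 2 (KM_SOFTIRQ0)
     *   softirq, out=1 -> slot 3 (KM_SOFTIRQ1)
     */
    enum km_example { EX_USER0, EX_USER1, EX_SOFTIRQ0, EX_SOFTIRQ1 };

    static enum km_example example_kmap_type(int softirq, int out)
    {
            static const enum km_example types[] =
                    { EX_USER0, EX_USER1, EX_SOFTIRQ0, EX_SOFTIRQ1 };

            return types[(softirq ? 2 : 0) + out];
    }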
......@@ -703,7 +703,7 @@ test_des(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = len;
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, len);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -746,7 +746,7 @@ test_des(void)
sg[1].offset = ((long) p & ~PAGE_MASK);
sg[1].length = 8;
ret = crypto_cipher_encrypt(tfm, sg, 2);
ret = crypto_cipher_encrypt(tfm, sg, sg, 16);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -814,7 +814,7 @@ test_des(void)
sg[2].offset = ((long) p & ~PAGE_MASK);
sg[2].length = 8;
ret = crypto_cipher_encrypt(tfm, sg, 3);
ret = crypto_cipher_encrypt(tfm, sg, sg, 32);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
......@@ -890,7 +890,7 @@ test_des(void)
sg[3].offset = ((long) p & ~PAGE_MASK);
sg[3].length = 18;
ret = crypto_cipher_encrypt(tfm, sg, 4);
ret = crypto_cipher_encrypt(tfm, sg, sg, 24);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
......@@ -979,7 +979,7 @@ test_des(void)
sg[4].offset = ((long) p & ~PAGE_MASK);
sg[4].length = 8;
ret = crypto_cipher_encrypt(tfm, sg, 5);
ret = crypto_cipher_encrypt(tfm, sg, sg, 16);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
......@@ -1078,7 +1078,7 @@ test_des(void)
sg[7].offset = ((long) p & ~PAGE_MASK);
sg[7].length = 1;
ret = crypto_cipher_encrypt(tfm, sg, 8);
ret = crypto_cipher_encrypt(tfm, sg, sg, 8);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1120,7 +1120,7 @@ test_des(void)
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = len;
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("des_decrypt() failed flags=%x\n",
tfm->crt_flags);
......@@ -1163,7 +1163,7 @@ test_des(void)
sg[1].offset = ((long) p & ~PAGE_MASK);
sg[1].length = 8;
ret = crypto_cipher_decrypt(tfm, sg, 2);
ret = crypto_cipher_decrypt(tfm, sg, sg, 16);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1220,7 +1220,7 @@ test_des(void)
sg[2].offset = ((long) p & ~PAGE_MASK);
sg[2].length = 1;
ret = crypto_cipher_decrypt(tfm, sg, 3);
ret = crypto_cipher_decrypt(tfm, sg, sg, 16);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
......@@ -1290,7 +1290,7 @@ test_des(void)
crypto_cipher_set_iv(tfm, des_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, len);
if (ret) {
printk("des_cbc_encrypt() failed flags=%x\n",
tfm->crt_flags);
......@@ -1349,7 +1349,7 @@ test_des(void)
crypto_cipher_set_iv(tfm, des_tv[i].iv, crypto_tfm_alg_ivsize(tfm));
ret = crypto_cipher_encrypt(tfm, sg, 2);
ret = crypto_cipher_encrypt(tfm, sg, sg, 24);
if (ret) {
printk("des_cbc_decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1398,7 +1398,7 @@ test_des(void)
crypto_cipher_set_iv(tfm, des_tv[i].iv,
crypto_tfm_alg_blocksize(tfm));
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, len);
if (ret) {
printk("des_cbc_decrypt() failed flags=%x\n",
tfm->crt_flags);
......@@ -1450,7 +1450,7 @@ test_des(void)
crypto_cipher_set_iv(tfm, des_tv[i].iv, crypto_tfm_alg_ivsize(tfm));
ret = crypto_cipher_decrypt(tfm, sg, 2);
ret = crypto_cipher_decrypt(tfm, sg, sg, 8);
if (ret) {
printk("des_cbc_decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1518,7 +1518,7 @@ test_des3_ede(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = len;
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, len);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1561,7 +1561,7 @@ test_des3_ede(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = len;
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, len);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1624,7 +1624,7 @@ test_blowfish(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = bf_tv[i].plen;
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1666,7 +1666,7 @@ test_blowfish(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = bf_tv[i].plen;
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1719,7 +1719,7 @@ test_blowfish(void)
crypto_cipher_set_iv(tfm, bf_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("blowfish_cbc_encrypt() failed flags=%x\n",
tfm->crt_flags);
......@@ -1764,7 +1764,7 @@ test_blowfish(void)
crypto_cipher_set_iv(tfm, bf_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("blowfish_cbc_decrypt() failed flags=%x\n",
tfm->crt_flags);
......@@ -1829,7 +1829,7 @@ test_twofish(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = tf_tv[i].plen;
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1871,7 +1871,7 @@ test_twofish(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = tf_tv[i].plen;
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -1924,7 +1924,7 @@ test_twofish(void)
crypto_cipher_set_iv(tfm, tf_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("blowfish_cbc_encrypt() failed flags=%x\n",
tfm->crt_flags);
......@@ -1970,7 +1970,7 @@ test_twofish(void)
crypto_cipher_set_iv(tfm, tf_tv[i].iv,
crypto_tfm_alg_ivsize(tfm));
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("blowfish_cbc_decrypt() failed flags=%x\n",
tfm->crt_flags);
......@@ -2030,7 +2030,7 @@ test_serpent(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = sizeof(serp_tv[i].plaintext);
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -2070,7 +2070,7 @@ test_serpent(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = sizeof(serp_tv[i].plaintext);
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -2133,7 +2133,7 @@ test_aes(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = aes_tv[i].plen;
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......@@ -2175,7 +2175,7 @@ test_aes(void)
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = aes_tv[i].plen;
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
......
......@@ -23,8 +23,8 @@ D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
D(11) KM_CRYPTO_USER,
D(12) KM_CRYPTO_SOFTIRQ,
D(11) KM_SOFTIRQ0,
D(12) KM_SOFTIRQ1,
D(13) KM_TYPE_NR
};
......
......@@ -22,8 +22,8 @@ D(8) KM_PTE1,
D(9) KM_PTE2,
D(10) KM_IRQ0,
D(11) KM_IRQ1,
D(12) KM_CRYPTO_USER,
D(13) KM_CRYPTO_SOFTIRQ,
D(12) KM_SOFTIRQ0,
D(13) KM_SOFTIRQ1,
D(14) KM_TYPE_NR
};
......
......@@ -21,8 +21,8 @@ D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
D(11) KM_CRYPTO_USER,
D(12) KM_CRYPTO_SOFTIRQ,
D(11) KM_SOFTIRQ0,
D(12) KM_SOFTIRQ1,
D(13) KM_TYPE_NR
};
......
......@@ -14,8 +14,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_CRYPTO_USER,
KM_CRYPTO_SOFTIRQ,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
......
......@@ -14,8 +14,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_CRYPTO_USER,
KM_CRYPTO_SOFTIRQ,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
......
......@@ -14,8 +14,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_CRYPTO_USER,
KM_CRYPTO_SOFTIRQ,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
......
......@@ -13,8 +13,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_CRYPTO_USER,
KM_CRYPTO_SOFTIRQ,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
......
......@@ -17,8 +17,8 @@ enum km_type {
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_CRYPTO_USER,
KM_CRYPTO_SOFTIRQ,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
......
......@@ -11,8 +11,8 @@ enum km_type {
KM_BIO_DST_IRQ,
KM_IRQ0,
KM_IRQ1,
KM_CRYPTO_USER,
KM_CRYPTO_SOFTIRQ,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
......
......@@ -131,9 +131,13 @@ struct cipher_tfm {
int (*cit_setkey)(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen);
int (*cit_encrypt)(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg);
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes);
int (*cit_decrypt)(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg);
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes);
void (*cit_xor_block)(u8 *dst, const u8 *src);
};
......@@ -274,19 +278,21 @@ static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
}
static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
struct scatterlist *sg,
unsigned int nsg)
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->crt_cipher.cit_encrypt(tfm, sg, nsg);
return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
}
static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
struct scatterlist *sg,
unsigned int nsg)
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->crt_cipher.cit_decrypt(tfm, sg, nsg);
return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
}
static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
......
......@@ -423,7 +423,7 @@ int esp_output(struct sk_buff *skb)
goto error;
}
skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
crypto_cipher_encrypt(tfm, sg, nfrags);
crypto_cipher_encrypt(tfm, sg, sg, clen);
if (unlikely(sg != sgbuf))
kfree(sg);
} while (0);
......@@ -520,7 +520,7 @@ int esp_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
skb_to_sgvec(skb, sg, 8+esp->conf.ivlen, elen);
crypto_cipher_decrypt(esp->conf.tfm, sg, nfrags);
crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
if (unlikely(sg != sgbuf))
kfree(sg);
......
......@@ -78,7 +78,7 @@ krb5_encrypt(
sg[0].offset = ((long)out & ~PAGE_MASK);
sg[0].length = length;
ret = crypto_cipher_encrypt(tfm, sg, 1);
ret = crypto_cipher_encrypt(tfm, sg, sg, length);
out:
dprintk("gss_k5encrypt returns %d\n",ret);
......@@ -117,7 +117,7 @@ krb5_decrypt(
sg[0].offset = ((long)out & ~PAGE_MASK);
sg[0].length = length;
ret = crypto_cipher_decrypt(tfm, sg, 1);
ret = crypto_cipher_decrypt(tfm, sg, sg, length);
out:
dprintk("gss_k5decrypt returns %d\n",ret);
......