Commit 9290777f authored by David S. Miller

Merge davem@nuts.ninka.net:/home/davem/src/BK/net-2.5

into kernel.bkbits.net:/home/davem/net-2.5
parents c98a2447 de148876
@@ -4,6 +4,7 @@
  * Cipher operations.
  *
  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -16,12 +17,22 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/pagemap.h>
 #include <linux/highmem.h>
 #include <asm/scatterlist.h>
 #include "internal.h"

 typedef void (cryptfn_t)(void *, u8 *, const u8 *);
-typedef void (procfn_t)(struct crypto_tfm *, u8 *, cryptfn_t, int enc);
+typedef void (procfn_t)(struct crypto_tfm *, u8 *, u8*, cryptfn_t, int enc);
+
+struct scatter_walk {
+	struct scatterlist	*sg;
+	struct page		*page;
+	void			*data;
+	unsigned int		len_this_page;
+	unsigned int		len_this_segment;
+	unsigned int		offset;
+};
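The two length fields track how far the walker may go before it must remap: len_this_page counts bytes left before the current page boundary, len_this_segment bytes left in the current scatterlist entry. A minimal stand-alone sketch of how they relate for one sg entry; the 4096-byte PAGE_CACHE_SIZE and the offsets are illustrative, not taken from the patch:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096

int main(void)
{
	unsigned int offset = 4000;   /* hypothetical sg->offset */
	unsigned int length = 200;    /* hypothetical sg->length */
	unsigned int rest_of_page =
		PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE - 1));
	unsigned int len_this_page =
		length < rest_of_page ? length : rest_of_page;

	/* 96 bytes are mappable now; the remaining 104 live on the next page. */
	printf("len_this_page=%u len_this_segment=%u\n", len_this_page, length);
	return 0;
}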
 static inline void xor_64(u8 *a, const u8 *b)
 {
@@ -37,165 +48,191 @@ static inline void xor_128(u8 *a, const u8 *b)
 	((u32 *)a)[3] ^= ((u32 *)b)[3];
 }
-static inline unsigned int sglen(struct scatterlist *sg, unsigned int nsg)
-{
-	unsigned int i, n;
-
-	for (i = 0, n = 0; i < nsg; i++)
-		n += sg[i].length;
-
-	return n;
-}
+/* sg_next is defined as an inline routine now in case we want to change
+   scatterlist to a linked list later. */
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+	return sg + 1;
+}
-/*
- * Do not call this unless the total length of all of the fragments
- * has been verified as multiple of the block size.
- */
-static unsigned int copy_chunks(struct crypto_tfm *tfm, u8 *buf,
-                                struct scatterlist *sg, unsigned int sgidx,
-                                unsigned int rlen, unsigned int *last, int in)
-{
-	unsigned int i, copied, coff, j, aligned;
-	unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
-
-	for (i = sgidx, j = copied = 0, aligned = 0 ; copied < bsize; i++) {
-		unsigned int len = sg[i].length;
-		unsigned int clen;
-		char *p;
-
-		if (copied) {
-			coff = 0;
-			clen = min(len, bsize - copied);
-
-			if (len == bsize - copied)
-				aligned = 1;	/* last + right aligned */
-
-		} else {
-			coff = len - rlen;
-			clen = rlen;
-		}
-
-		p = crypto_kmap(sg[i].page) + sg[i].offset + coff;
-
-		if (in)
-			memcpy(&buf[copied], p, clen);
-		else
-			memcpy(p, &buf[copied], clen);
-
-		crypto_kunmap(p);
-		*last = aligned ? 0 : clen;
-		copied += clen;
-	}
-
-	return i - sgidx - 2 + aligned;
-}
+void *which_buf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
+{
+	if (nbytes <= walk->len_this_page &&
+	    (((unsigned long)walk->data) & (PAGE_CACHE_SIZE - 1)) + nbytes <=
+	    PAGE_CACHE_SIZE)
+		return walk->data;
+	else
+		return scratch;
+}
+
+static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
+{
+	if (out)
+		memcpy(sgdata, buf, nbytes);
+	else
+		memcpy(buf, sgdata, nbytes);
+}
+
+static void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
+{
+	unsigned int rest_of_page;
+
+	walk->sg = sg;
+	walk->page = sg->page;
+	walk->len_this_segment = sg->length;
+	rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
+	walk->len_this_page = min(sg->length, rest_of_page);
+	walk->offset = sg->offset;
+}
+
+static void scatterwalk_map(struct scatter_walk *walk, int out)
+{
+	walk->data = crypto_kmap(walk->page, out) + walk->offset;
+}
+
+static void scatter_page_done(struct scatter_walk *walk, int out,
+			      unsigned int more)
+{
+	/* walk->data may be pointing at the first byte of the next page;
+	   however, we know we transferred at least one byte.  So,
+	   walk->data - 1 will be a virtual address in the mapped page. */
+
+	if (out)
+		flush_dcache_page(walk->page);
+
+	if (more) {
+		walk->len_this_segment -= walk->len_this_page;
+
+		if (walk->len_this_segment) {
+			walk->page++;
+			walk->len_this_page = min(walk->len_this_segment,
+						  (unsigned)PAGE_CACHE_SIZE);
+			walk->offset = 0;
+		}
+		else
+			scatterwalk_start(walk, sg_next(walk->sg));
+	}
+}
-static inline unsigned int gather_chunks(struct crypto_tfm *tfm, u8 *buf,
-                                         struct scatterlist *sg,
-                                         unsigned int sgidx, unsigned int rlen,
-                                         unsigned int *last)
-{
-	return copy_chunks(tfm, buf, sg, sgidx, rlen, last, 1);
-}
-
-static inline unsigned int scatter_chunks(struct crypto_tfm *tfm, u8 *buf,
-                                          struct scatterlist *sg,
-                                          unsigned int sgidx, unsigned int rlen,
-                                          unsigned int *last)
-{
-	return copy_chunks(tfm, buf, sg, sgidx, rlen, last, 0);
-}
+static void scatter_done(struct scatter_walk *walk, int out, int more)
+{
+	crypto_kunmap(walk->data, out);
+	if (walk->len_this_page == 0 || !more)
+		scatter_page_done(walk, out, more);
+}
+
+/*
+ * Do not call this unless the total length of all of the fragments
+ * has been verified as multiple of the block size.
+ */
+static int copy_chunks(void *buf, struct scatter_walk *walk,
+		       size_t nbytes, int out)
+{
+	if (buf != walk->data) {
+		while (nbytes > walk->len_this_page) {
+			memcpy_dir(buf, walk->data, walk->len_this_page, out);
+			buf += walk->len_this_page;
+			nbytes -= walk->len_this_page;
+
+			crypto_kunmap(walk->data, out);
+			scatter_page_done(walk, out, 1);
+			scatterwalk_map(walk, out);
+		}
+
+		memcpy_dir(buf, walk->data, nbytes, out);
+	}
+
+	walk->offset += nbytes;
+	walk->len_this_page -= nbytes;
+	walk->len_this_segment -= nbytes;
+	return 0;
+}
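The scratch decision that which_buf() makes deserves a second look: a block is processed in place only when it lies entirely within the currently mapped page; otherwise copy_chunks() gathers it into the on-stack temporary. A hedged stand-alone restatement of that predicate, with hypothetical names:

#include <stdbool.h>

#define PAGE_CACHE_SIZE 4096UL

static bool block_is_contiguous(unsigned long data, unsigned int left_on_page,
				unsigned int bsize)
{
	/* In place only if the whole block fits in what is left of the
	   current sg entry's page AND does not wrap past the page end. */
	return bsize <= left_on_page &&
	       (data & (PAGE_CACHE_SIZE - 1)) + bsize <= PAGE_CACHE_SIZE;
}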
-/*
- * Generic encrypt/decrypt wrapper for ciphers.
- *
- * If we find a remnant at the end of a frag, we have to encrypt or
- * decrypt across possibly multiple page boundaries via a temporary
- * block, then continue processing with a chunk offset until the end
- * of a frag is block aligned.
- *
- * The code is further complicated by having to remap a page after
- * processing a block then yielding.  The data will be offset from the
- * start of page at the scatterlist offset, the chunking offset (coff)
- * and the block offset (boff).
- */
-static int crypt(struct crypto_tfm *tfm, struct scatterlist *sg,
-                 unsigned int nsg, cryptfn_t crfn, procfn_t prfn, int enc)
-{
-	unsigned int i, coff;
-	unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
-	u8 tmp[bsize];
-
-	if (sglen(sg, nsg) % bsize) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
-		return -EINVAL;
-	}
-
-	for (i = 0, coff = 0; i < nsg; i++) {
-		unsigned int n = 0, boff = 0;
-		unsigned int len = sg[i].length - coff;
-		char *p = crypto_kmap(sg[i].page) + sg[i].offset + coff;
-
-		while (len) {
-			if (len < bsize) {
-				crypto_kunmap(p);
-				n = gather_chunks(tfm, tmp, sg, i, len, &coff);
-				prfn(tfm, tmp, crfn, enc);
-				scatter_chunks(tfm, tmp, sg, i, len, &coff);
-				crypto_yield(tfm);
-				goto unmapped;
-			} else {
-				prfn(tfm, p, crfn, enc);
-				crypto_kunmap(p);
-				crypto_yield(tfm);
-
-				/* remap and point to recalculated offset */
-				boff += bsize;
-				p = crypto_kmap(sg[i].page)
-					+ sg[i].offset + coff + boff;
-
-				len -= bsize;
-
-				/* End of frag with no remnant? */
-				if (coff && len == 0)
-					coff = 0;
-			}
-		}
-unmapped:
-		i += n;
-	}
-
-	return 0;
-}
+/*
+ * Generic encrypt/decrypt wrapper for ciphers, handles operations across
+ * multiple page boundaries by using temporary blocks.  In user context,
+ * the kernel is given a chance to schedule us once per block.
+ */
+static int crypt(struct crypto_tfm *tfm,
+		 struct scatterlist *dst,
+		 struct scatterlist *src,
+		 unsigned int nbytes, cryptfn_t crfn, procfn_t prfn, int enc)
+{
+	struct scatter_walk walk_in, walk_out;
+	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
+	u8 tmp_src[nbytes > src->length ? bsize : 0];
+	u8 tmp_dst[nbytes > dst->length ? bsize : 0];
+
+	if (!nbytes)
+		return 0;
+
+	if (nbytes % bsize) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+		return -EINVAL;
+	}
+
+	scatterwalk_start(&walk_in, src);
+	scatterwalk_start(&walk_out, dst);
+
+	for(;;) {
+		u8 *src_p, *dst_p;
+
+		scatterwalk_map(&walk_in, 0);
+		scatterwalk_map(&walk_out, 1);
+		src_p = which_buf(&walk_in, bsize, tmp_src);
+		dst_p = which_buf(&walk_out, bsize, tmp_dst);
+
+		nbytes -= bsize;
+
+		copy_chunks(src_p, &walk_in, bsize, 0);
+
+		prfn(tfm, dst_p, src_p, crfn, enc);
+
+		scatter_done(&walk_in, 0, nbytes);
+
+		copy_chunks(dst_p, &walk_out, bsize, 1);
+
+		scatter_done(&walk_out, 1, nbytes);
+
+		if (!nbytes)
+			return 0;
+
+		crypto_yield(tfm);
+	}
+}
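A minimal usage sketch of the reworked byte-count interface, mirroring the tcrypt updates further down. 'tfm' is an allocated and keyed cipher transform and 'p' a page-backed buffer of at least 16 bytes; both are assumed here, not defined:

struct scatterlist sg[1];
int ret;

sg[0].page   = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = 16;

/* dst == src: 16 bytes are encrypted in place, one block at a time. */
ret = crypto_cipher_encrypt(tfm, sg, sg, 16);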
 static void cbc_process(struct crypto_tfm *tfm,
-			u8 *block, cryptfn_t fn, int enc)
+			u8 *dst, u8 *src, cryptfn_t fn, int enc)
 {
 	/* Null encryption */
 	if (!tfm->crt_cipher.cit_iv)
 		return;

 	if (enc) {
-		tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, block);
-		fn(tfm->crt_ctx, block, tfm->crt_cipher.cit_iv);
-		memcpy(tfm->crt_cipher.cit_iv, block,
+		tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, src);
+		fn(tfm->crt_ctx, dst, tfm->crt_cipher.cit_iv);
+		memcpy(tfm->crt_cipher.cit_iv, dst,
 		       crypto_tfm_alg_blocksize(tfm));
 	} else {
-		u8 buf[crypto_tfm_alg_blocksize(tfm)];
+		const int need_stack = (src == dst);
+		u8 stack[need_stack ? crypto_tfm_alg_blocksize(tfm) : 0];
+		u8 *buf = need_stack ? stack : dst;

-		fn(tfm->crt_ctx, buf, block);
+		fn(tfm->crt_ctx, buf, src);
 		tfm->crt_u.cipher.cit_xor_block(buf, tfm->crt_cipher.cit_iv);
-		memcpy(tfm->crt_cipher.cit_iv, block,
+		memcpy(tfm->crt_cipher.cit_iv, src,
 		       crypto_tfm_alg_blocksize(tfm));
-		memcpy(block, buf, crypto_tfm_alg_blocksize(tfm));
+		if (buf != dst)
+			memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
 	}
 }

-static void ecb_process(struct crypto_tfm *tfm, u8 *block,
+static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
 			cryptfn_t fn, int enc)
 {
-	fn(tfm->crt_ctx, block, block);
+	fn(tfm->crt_ctx, dst, src);
 }
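Why the decrypt path needs a stack buffer only when src == dst is worth spelling out; a hedged sketch of the data flow, where C[i] is a ciphertext block and bsize the block size:

/* CBC decryption:  P[i] = D(C[i]) XOR IV;   next IV = C[i]
 *
 * The ciphertext block itself becomes the next IV, so it must still be
 * readable after the plaintext is produced.  If dst != src, C[i] survives
 * in src and D() can write straight into dst.  If dst == src, writing
 * P[i] would clobber C[i], so the plaintext is staged in 'stack' and
 * copied over only after the IV has been saved. */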
 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -211,35 +248,44 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
 }

 static int ecb_encrypt(struct crypto_tfm *tfm,
-		       struct scatterlist *sg, unsigned int nsg)
+		       struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
 {
-	return crypt(tfm, sg, nsg,
+	return crypt(tfm, dst, src, nbytes,
 		     tfm->__crt_alg->cra_cipher.cia_encrypt, ecb_process, 1);
 }

 static int ecb_decrypt(struct crypto_tfm *tfm,
-                       struct scatterlist *sg, unsigned int nsg)
+                       struct scatterlist *dst,
+                       struct scatterlist *src,
+                       unsigned int nbytes)
 {
-	return crypt(tfm, sg, nsg,
+	return crypt(tfm, dst, src, nbytes,
 		     tfm->__crt_alg->cra_cipher.cia_decrypt, ecb_process, 1);
 }

 static int cbc_encrypt(struct crypto_tfm *tfm,
-		       struct scatterlist *sg, unsigned int nsg)
+		       struct scatterlist *dst,
+		       struct scatterlist *src,
+		       unsigned int nbytes)
 {
-	return crypt(tfm, sg, nsg,
+	return crypt(tfm, dst, src, nbytes,
 		     tfm->__crt_alg->cra_cipher.cia_encrypt, cbc_process, 1);
 }

 static int cbc_decrypt(struct crypto_tfm *tfm,
-		       struct scatterlist *sg, unsigned int nsg)
+		       struct scatterlist *dst,
+		       struct scatterlist *src,
+		       unsigned int nbytes)
 {
-	return crypt(tfm, sg, nsg,
+	return crypt(tfm, dst, src, nbytes,
 		     tfm->__crt_alg->cra_cipher.cia_decrypt, cbc_process, 0);
 }

 static int nocrypt(struct crypto_tfm *tfm,
-		   struct scatterlist *sg, unsigned int nsg)
+		   struct scatterlist *dst,
+		   struct scatterlist *src,
+		   unsigned int nbytes)
 {
 	return -ENOSYS;
 }
@@ -28,10 +28,10 @@ static void update(struct crypto_tfm *tfm,
 	unsigned int i;

 	for (i = 0; i < nsg; i++) {
-		char *p = crypto_kmap(sg[i].page) + sg[i].offset;
+		char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
 		tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
 						      p, sg[i].length);
-		crypto_kunmap(p);
+		crypto_kunmap(p, 0);
 		crypto_yield(tfm);
 	}
 }
@@ -49,10 +49,10 @@ static void digest(struct crypto_tfm *tfm,
 	tfm->crt_digest.dit_init(tfm);

 	for (i = 0; i < nsg; i++) {
-		char *p = crypto_kmap(sg[i].page) + sg[i].offset;
+		char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
 		tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx,
 						      p, sg[i].length);
-		crypto_kunmap(p);
+		crypto_kunmap(p, 0);
 		crypto_yield(tfm);
 	}
 	crypto_digest_final(tfm, out);
@@ -16,17 +16,29 @@
 #include <linux/init.h>
 #include <asm/hardirq.h>
 #include <asm/softirq.h>
+#include <asm/kmap_types.h>

-static inline void *crypto_kmap(struct page *page)
+static enum km_type km_types[] = {
+	KM_USER0,
+	KM_USER1,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
+};
+
+static inline enum km_type crypto_kmap_type(int out)
+{
+	return km_types[(in_softirq() ? 2 : 0) + out];
+}
+
+static inline void *crypto_kmap(struct page *page, int out)
 {
-	return kmap_atomic(page, in_softirq() ?
-			   KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
+	return kmap_atomic(page, crypto_kmap_type(out));
 }

-static inline void crypto_kunmap(void *vaddr)
+static inline void crypto_kunmap(void *vaddr, int out)
 {
-	kunmap_atomic(vaddr, in_softirq() ?
-		      KM_CRYPTO_SOFTIRQ : KM_CRYPTO_USER);
+	kunmap_atomic(vaddr, crypto_kmap_type(out));
 }

 static inline void crypto_yield(struct crypto_tfm *tfm)
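The slot selection above replaces the dedicated KM_CRYPTO_* types with shared kmap slots; as a sketch, the table crypto_kmap_type(out) indexes looks like this:

/*                     out=0 (input page)   out=1 (output page)
 *   user context      KM_USER0             KM_USER1
 *   softirq context   KM_SOFTIRQ0          KM_SOFTIRQ1
 *
 * Two slots per context are needed because crypt() keeps the source
 * and destination pages atomically mapped at the same time. */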
@@ -703,7 +703,7 @@ test_des(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = len;
-		ret = crypto_cipher_encrypt(tfm, sg, 1);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, len);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -746,7 +746,7 @@ test_des(void)
 		sg[1].offset = ((long) p & ~PAGE_MASK);
 		sg[1].length = 8;
-		ret = crypto_cipher_encrypt(tfm, sg, 2);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, 16);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -814,7 +814,7 @@ test_des(void)
 		sg[2].offset = ((long) p & ~PAGE_MASK);
 		sg[2].length = 8;
-		ret = crypto_cipher_encrypt(tfm, sg, 3);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, 32);
 		if (ret) {
 			printk("decrypt() failed flags=%x\n", tfm->crt_flags);
@@ -890,7 +890,7 @@ test_des(void)
 		sg[3].offset = ((long) p & ~PAGE_MASK);
 		sg[3].length = 18;
-		ret = crypto_cipher_encrypt(tfm, sg, 4);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, 24);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
@@ -979,7 +979,7 @@ test_des(void)
 		sg[4].offset = ((long) p & ~PAGE_MASK);
 		sg[4].length = 8;
-		ret = crypto_cipher_encrypt(tfm, sg, 5);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, 16);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
@@ -1078,7 +1078,7 @@ test_des(void)
 		sg[7].offset = ((long) p & ~PAGE_MASK);
 		sg[7].length = 1;
-		ret = crypto_cipher_encrypt(tfm, sg, 8);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, 8);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1120,7 +1120,7 @@ test_des(void)
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = len;
-		ret = crypto_cipher_decrypt(tfm, sg, 1);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("des_decrypt() failed flags=%x\n",
 			       tfm->crt_flags);
@@ -1163,7 +1163,7 @@ test_des(void)
 		sg[1].offset = ((long) p & ~PAGE_MASK);
 		sg[1].length = 8;
-		ret = crypto_cipher_decrypt(tfm, sg, 2);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, 16);
 		if (ret) {
 			printk("decrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1220,7 +1220,7 @@ test_des(void)
 		sg[2].offset = ((long) p & ~PAGE_MASK);
 		sg[2].length = 1;
-		ret = crypto_cipher_decrypt(tfm, sg, 3);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, 16);
 		if (ret) {
 			printk("decrypt() failed flags=%x\n", tfm->crt_flags);
@@ -1290,7 +1290,7 @@ test_des(void)
 		crypto_cipher_set_iv(tfm, des_tv[i].iv,
 				     crypto_tfm_alg_ivsize(tfm));
-		ret = crypto_cipher_encrypt(tfm, sg, 1);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, len);
 		if (ret) {
 			printk("des_cbc_encrypt() failed flags=%x\n",
 			       tfm->crt_flags);
@@ -1349,7 +1349,7 @@ test_des(void)
 		crypto_cipher_set_iv(tfm, des_tv[i].iv, crypto_tfm_alg_ivsize(tfm));
-		ret = crypto_cipher_encrypt(tfm, sg, 2);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, 24);
 		if (ret) {
 			printk("des_cbc_decrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1398,7 +1398,7 @@ test_des(void)
 		crypto_cipher_set_iv(tfm, des_tv[i].iv,
 				     crypto_tfm_alg_blocksize(tfm));
-		ret = crypto_cipher_decrypt(tfm, sg, 1);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, len);
 		if (ret) {
 			printk("des_cbc_decrypt() failed flags=%x\n",
 			       tfm->crt_flags);
@@ -1450,7 +1450,7 @@ test_des(void)
 		crypto_cipher_set_iv(tfm, des_tv[i].iv, crypto_tfm_alg_ivsize(tfm));
-		ret = crypto_cipher_decrypt(tfm, sg, 2);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, 8);
 		if (ret) {
 			printk("des_cbc_decrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1518,7 +1518,7 @@ test_des3_ede(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = len;
-		ret = crypto_cipher_encrypt(tfm, sg, 1);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, len);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1561,7 +1561,7 @@ test_des3_ede(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = len;
-		ret = crypto_cipher_decrypt(tfm, sg, 1);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, len);
 		if (ret) {
 			printk("decrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1624,7 +1624,7 @@ test_blowfish(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = bf_tv[i].plen;
-		ret = crypto_cipher_encrypt(tfm, sg, 1);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1666,7 +1666,7 @@ test_blowfish(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = bf_tv[i].plen;
-		ret = crypto_cipher_decrypt(tfm, sg, 1);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("decrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1719,7 +1719,7 @@ test_blowfish(void)
 		crypto_cipher_set_iv(tfm, bf_tv[i].iv,
 				     crypto_tfm_alg_ivsize(tfm));
-		ret = crypto_cipher_encrypt(tfm, sg, 1);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("blowfish_cbc_encrypt() failed flags=%x\n",
 			       tfm->crt_flags);
@@ -1764,7 +1764,7 @@ test_blowfish(void)
 		crypto_cipher_set_iv(tfm, bf_tv[i].iv,
 				     crypto_tfm_alg_ivsize(tfm));
-		ret = crypto_cipher_decrypt(tfm, sg, 1);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("blowfish_cbc_decrypt() failed flags=%x\n",
 			       tfm->crt_flags);
@@ -1829,7 +1829,7 @@ test_twofish(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = tf_tv[i].plen;
-		ret = crypto_cipher_encrypt(tfm, sg, 1);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1871,7 +1871,7 @@ test_twofish(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = tf_tv[i].plen;
-		ret = crypto_cipher_decrypt(tfm, sg, 1);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("decrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -1924,7 +1924,7 @@ test_twofish(void)
 		crypto_cipher_set_iv(tfm, tf_tv[i].iv,
 				     crypto_tfm_alg_ivsize(tfm));
-		ret = crypto_cipher_encrypt(tfm, sg, 1);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("blowfish_cbc_encrypt() failed flags=%x\n",
 			       tfm->crt_flags);
@@ -1970,7 +1970,7 @@ test_twofish(void)
 		crypto_cipher_set_iv(tfm, tf_tv[i].iv,
 				     crypto_tfm_alg_ivsize(tfm));
-		ret = crypto_cipher_decrypt(tfm, sg, 1);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("blowfish_cbc_decrypt() failed flags=%x\n",
 			       tfm->crt_flags);
@@ -2030,7 +2030,7 @@ test_serpent(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = sizeof(serp_tv[i].plaintext);
-		ret = crypto_cipher_encrypt(tfm, sg, 1);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -2070,7 +2070,7 @@ test_serpent(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = sizeof(serp_tv[i].plaintext);
-		ret = crypto_cipher_decrypt(tfm, sg, 1);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("decrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -2133,7 +2133,7 @@ test_aes(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = aes_tv[i].plen;
-		ret = crypto_cipher_encrypt(tfm, sg, 1);
+		ret = crypto_cipher_encrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("encrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -2175,7 +2175,7 @@ test_aes(void)
 		sg[0].page = virt_to_page(p);
 		sg[0].offset = ((long) p & ~PAGE_MASK);
 		sg[0].length = aes_tv[i].plen;
-		ret = crypto_cipher_decrypt(tfm, sg, 1);
+		ret = crypto_cipher_decrypt(tfm, sg, sg, sg[0].length);
 		if (ret) {
 			printk("decrypt() failed flags=%x\n", tfm->crt_flags);
 			goto out;
@@ -23,8 +23,8 @@ D(7)	KM_PTE0,
 D(8)	KM_PTE1,
 D(9)	KM_IRQ0,
 D(10)	KM_IRQ1,
-D(11)	KM_CRYPTO_USER,
-D(12)	KM_CRYPTO_SOFTIRQ,
+D(11)	KM_SOFTIRQ0,
+D(12)	KM_SOFTIRQ1,
 D(13)	KM_TYPE_NR
 };

@@ -22,8 +22,8 @@ D(8) KM_PTE1,
 D(9)	KM_PTE2,
 D(10)	KM_IRQ0,
 D(11)	KM_IRQ1,
-D(12)	KM_CRYPTO_USER,
-D(13)	KM_CRYPTO_SOFTIRQ,
+D(12)	KM_SOFTIRQ0,
+D(13)	KM_SOFTIRQ1,
 D(14)	KM_TYPE_NR
 };

@@ -21,8 +21,8 @@ D(7) KM_PTE0,
 D(8)	KM_PTE1,
 D(9)	KM_IRQ0,
 D(10)	KM_IRQ1,
-D(11)	KM_CRYPTO_USER,
-D(12)	KM_CRYPTO_SOFTIRQ,
+D(11)	KM_SOFTIRQ0,
+D(12)	KM_SOFTIRQ1,
 D(13)	KM_TYPE_NR
 };

@@ -14,8 +14,8 @@ enum km_type {
 	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
-	KM_CRYPTO_USER,
-	KM_CRYPTO_SOFTIRQ,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
 	KM_TYPE_NR
 };

@@ -14,8 +14,8 @@ enum km_type {
 	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
-	KM_CRYPTO_USER,
-	KM_CRYPTO_SOFTIRQ,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
 	KM_TYPE_NR
 };

@@ -14,8 +14,8 @@ enum km_type {
 	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
-	KM_CRYPTO_USER,
-	KM_CRYPTO_SOFTIRQ,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
 	KM_TYPE_NR
 };

@@ -13,8 +13,8 @@ enum km_type {
 	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
-	KM_CRYPTO_USER,
-	KM_CRYPTO_SOFTIRQ,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
 	KM_TYPE_NR
 };

@@ -17,8 +17,8 @@ enum km_type {
 	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
-	KM_CRYPTO_USER,
-	KM_CRYPTO_SOFTIRQ,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
 	KM_TYPE_NR
 };

@@ -11,8 +11,8 @@ enum km_type {
 	KM_BIO_DST_IRQ,
 	KM_IRQ0,
 	KM_IRQ1,
-	KM_CRYPTO_USER,
-	KM_CRYPTO_SOFTIRQ,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
 	KM_TYPE_NR
 };
@@ -131,9 +131,13 @@ struct cipher_tfm {
 	int (*cit_setkey)(struct crypto_tfm *tfm,
 	                  const u8 *key, unsigned int keylen);
 	int (*cit_encrypt)(struct crypto_tfm *tfm,
-	                   struct scatterlist *sg, unsigned int nsg);
+	                   struct scatterlist *dst,
+	                   struct scatterlist *src,
+	                   unsigned int nbytes);
 	int (*cit_decrypt)(struct crypto_tfm *tfm,
-	                   struct scatterlist *sg, unsigned int nsg);
+	                   struct scatterlist *dst,
+	                   struct scatterlist *src,
+	                   unsigned int nbytes);
 	void (*cit_xor_block)(u8 *dst, const u8 *src);
 };
@@ -274,19 +278,21 @@ static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
 }

 static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
-                                        struct scatterlist *sg,
-                                        unsigned int nsg)
+                                        struct scatterlist *dst,
+                                        struct scatterlist *src,
+                                        unsigned int nbytes)
 {
 	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
-	return tfm->crt_cipher.cit_encrypt(tfm, sg, nsg);
+	return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
 }

 static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
-                                        struct scatterlist *sg,
-                                        unsigned int nsg)
+                                        struct scatterlist *dst,
+                                        struct scatterlist *src,
+                                        unsigned int nbytes)
 {
 	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
-	return tfm->crt_cipher.cit_decrypt(tfm, sg, nsg);
+	return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
 }

 static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
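Separate dst and src arguments mean the reworked interface also permits out-of-place operation, not just the in-place dst == src pattern used by tcrypt and ESP. A hedged sketch, assuming hypothetical page-backed buffers in_buf and out_buf of nbytes each and a keyed tfm:

struct scatterlist sg_src[1], sg_dst[1];
int ret;

sg_src[0].page   = virt_to_page(in_buf);
sg_src[0].offset = ((long) in_buf & ~PAGE_MASK);
sg_src[0].length = nbytes;

sg_dst[0].page   = virt_to_page(out_buf);
sg_dst[0].offset = ((long) out_buf & ~PAGE_MASK);
sg_dst[0].length = nbytes;

ret = crypto_cipher_encrypt(tfm, sg_dst, sg_src, nbytes);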
@@ -163,6 +163,7 @@ struct xfrm_usersa_info {
 struct xfrm_usersa_id {
 	xfrm_address_t			saddr;
 	__u32				spi;
+	__u16				family;
 	__u8				proto;
 };
@@ -167,7 +167,7 @@ static inline int ip_route_connect(struct rtable **rp, u32 dst,
 		ip_rt_put(*rp);
 		*rp = NULL;
 	}
-	return ip_route_output_flow(rp, &fl, sk, 1);
+	return ip_route_output_flow(rp, &fl, sk, 0);
 }

 static inline int ip_route_newports(struct rtable **rp, u16 sport, u16 dport,
@@ -105,7 +105,6 @@ struct xfrm_state
 		u16		family;
 		xfrm_address_t	saddr;
 		int		header_len;
-		int		trailer_len;
 	} props;

 	struct xfrm_lifetime_cfg lft;
@@ -361,7 +361,7 @@ static int ah_init_state(struct xfrm_state *x, void *args)
 	ahp->icv = ah_hmac_digest;

 	/*
-	 * Lookup the algorithm description maintained by pfkey,
+	 * Lookup the algorithm description maintained by xfrm_algo,
 	 * verify crypto transform properties, and store information
 	 * we need for AH processing.  This lookup cannot fail here
 	 * after a successful crypto_alloc_tfm().
@@ -823,6 +823,34 @@ int unregister_inetaddr_notifier(struct notifier_block *nb)
 	return notifier_chain_unregister(&inetaddr_chain, nb);
 }

+/* Rename ifa_labels for a device name change.  Make some effort to preserve
+ * existing alias numbering and to create unique labels if possible.
+ */
+static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
+{
+	struct in_ifaddr *ifa;
+	int named = 0;
+
+	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+		char old[IFNAMSIZ], *dot;
+
+		memcpy(old, ifa->ifa_label, IFNAMSIZ);
+		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
+		if (named++ == 0)
+			continue;
+		dot = strchr(ifa->ifa_label, ':');
+		if (dot == NULL) {
+			sprintf(old, ":%d", named);
+			dot = old;
+		}
+		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) {
+			strcat(ifa->ifa_label, dot);
+		} else {
+			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
+		}
+	}
+}
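A worked example of the relabeling above, for a hypothetical rename of eth0 to wan0; names are illustrative:

/*   "eth0"    -> "wan0"      first address keeps the bare device name
 *   "eth0:1"  -> "wan0:2"    later addresses get a fresh ":<n>" suffix,
 *                            since the just-copied label holds no ':'
 *
 * If the combined label would exceed IFNAMSIZ, the suffix overwrites the
 * tail of the name instead, keeping the result unique and bounded. */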
 /* Called only under RTNL semaphore */

 static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -873,14 +901,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
 		inetdev_destroy(in_dev);
 		break;
 	case NETDEV_CHANGENAME:
-		if (in_dev->ifa_list) {
-			struct in_ifaddr *ifa;
-			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
-				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
-			/* Do not notify about label change, this event is
-			   not interesting to applications using netlink.
-			 */
-		}
+		/* Do not notify about label change, this event is
+		 * not interesting to applications using netlink.
+		 */
+		inetdev_changename(dev, in_dev);
 		break;
 	}
 out:
@@ -10,6 +10,9 @@

 #define MAX_SG_ONSTACK 4

+typedef void (icv_update_fn_t)(struct crypto_tfm *,
+			       struct scatterlist *, unsigned int);
+
 /* BUGS:
 * - we assume replay seqno is always present.
 */
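The icv_update_fn_t typedef above matches the signature of crypto_hmac_update(), so the generic walker defined below can be pointed at any scatterlist-based digest update routine. The intended call shape, as a one-line sketch:

skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update);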
@@ -30,37 +33,40 @@ struct esp_data
 		struct crypto_tfm	*tfm;		/* crypto handle */
 	} conf;

-	/* Integrity. It is active when authlen != 0 */
+	/* Integrity. It is active when icv_full_len != 0 */
 	struct {
 		u8			*key;		/* Key */
 		int			key_len;	/* Length of the key */
-		u8			*work_digest;
-		/* authlen is length of trailer containing auth token.
-		 * If it is not zero it is assumed to be
-		 * >= crypto_tfm_alg_digestsize(atfm) */
-		int			authlen;
-		void			(*digest)(struct esp_data*,
-						  struct sk_buff *skb,
-						  int offset,
-						  int len,
-						  u8 *digest);
+		u8			*work_icv;
+		int			icv_full_len;
+		int			icv_trunc_len;
+		void			(*icv)(struct esp_data*,
+					       struct sk_buff *skb,
+					       int offset, int len, u8 *icv);
 		struct crypto_tfm	*tfm;
 	} auth;
 };
 /* Move to common area: it is shared with AH. */

-void skb_digest_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
-		     int offset, int len)
+void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
+		  int offset, int len, icv_update_fn_t icv_update)
 {
 	int start = skb->len - skb->data_len;
 	int i, copy = start - offset;
+	struct scatterlist sg;

 	/* Checksum header. */
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx, skb->data+offset, copy);
+
+		sg.page = virt_to_page(skb->data + offset);
+		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
+		sg.length = copy;
+
+		icv_update(tfm, &sg, 1);
+
 		if ((len -= copy) == 0)
 			return;
 		offset += copy;
@@ -73,14 +79,17 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
-			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
-			tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx, vaddr+frag->page_offset+offset-start, copy);
-			kunmap_skb_frag(vaddr);
+
+			sg.page = frag->page;
+			sg.offset = frag->page_offset + offset-start;
+			sg.length = copy;
+
+			icv_update(tfm, &sg, 1);
+
 			if (!(len -= copy))
 				return;
 			offset += copy;
@@ -100,7 +109,7 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
 		if ((copy = end - offset) > 0) {
 			if (copy > len)
 				copy = len;
-			skb_digest_walk(list, tfm, offset-start, copy);
+			skb_icv_walk(list, tfm, offset-start, copy, icv_update);
 			if ((len -= copy) == 0)
 				return;
 			offset += copy;
@@ -188,12 +197,13 @@ esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset,
 	       int len, u8 *auth_data)
 {
 	struct crypto_tfm *tfm = esp->auth.tfm;
-	char *digest = esp->auth.work_digest;
+	char *icv = esp->auth.work_icv;

+	memset(auth_data, 0, esp->auth.icv_trunc_len);
 	crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len);
-	skb_digest_walk(skb, tfm, offset, len);
-	crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, digest);
-	memcpy(auth_data, digest, esp->auth.authlen);
+	skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update);
+	crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv);
+	memcpy(auth_data, icv, esp->auth.icv_trunc_len);
 }

 /* Check that skb data bits are writable. If they are not, copy data
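The shape of the truncated-ICV computation esp_hmac_digest() now performs, as a sketch; the 20/12-byte figures are for HMAC-SHA1 per RFC 2404 and are illustrative:

/*   crypto_hmac_init(tfm, key, &key_len);
 *   skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update);
 *   crypto_hmac_final(tfm, key, &key_len, icv);        full 20-byte digest
 *   memcpy(auth_data, icv, icv_trunc_len);             keep first 12 bytes
 */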
@@ -317,6 +327,7 @@ int esp_output(struct sk_buff *skb)
 	struct sk_buff *trailer;
 	int	blksize;
 	int	clen;
+	int	alen;
 	int	nfrags;
 	union {
 		struct iphdr	iph;
@@ -347,13 +358,14 @@ int esp_output(struct sk_buff *skb)
 	clen = skb->len;

 	esp = x->data;
+	alen = esp->auth.icv_trunc_len;
 	tfm = esp->conf.tfm;
 	blksize = crypto_tfm_alg_blocksize(tfm);
 	clen = (clen + 2 + blksize-1)&~(blksize-1);
 	if (esp->conf.padlen)
 		clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);

-	if ((nfrags = skb_cow_data(skb, clen-skb->len+esp->auth.authlen, &trailer)) < 0)
+	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
 		goto error;

 	/* Fill padding... */
@@ -373,7 +385,7 @@ int esp_output(struct sk_buff *skb)
 		top_iph->ihl = 5;
 		top_iph->version = 4;
 		top_iph->tos = iph->tos;	/* DS disclosed */
-		top_iph->tot_len = htons(skb->len + esp->auth.authlen);
+		top_iph->tot_len = htons(skb->len + alen);
 		top_iph->frag_off = iph->frag_off&htons(IP_DF);
 		if (!(top_iph->frag_off))
 			ip_select_ident(top_iph, dst, 0);
@@ -388,7 +400,7 @@ int esp_output(struct sk_buff *skb)
 		top_iph = (struct iphdr*)skb_push(skb, iph->ihl*4);
 		memcpy(top_iph, &tmp_iph, iph->ihl*4);
 		iph = &tmp_iph.iph;
-		top_iph->tot_len = htons(skb->len + esp->auth.authlen);
+		top_iph->tot_len = htons(skb->len + alen);
 		top_iph->protocol = IPPROTO_ESP;
 		top_iph->check = 0;
 		top_iph->frag_off = iph->frag_off;
@@ -411,7 +423,7 @@ int esp_output(struct sk_buff *skb)
 			goto error;
 		}
 		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
-		crypto_cipher_encrypt(tfm, sg, nfrags);
+		crypto_cipher_encrypt(tfm, sg, sg, clen);
 		if (unlikely(sg != sgbuf))
 			kfree(sg);
 	} while (0);
@@ -421,10 +433,10 @@ int esp_output(struct sk_buff *skb)
 		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
 	}

-	if (esp->auth.authlen) {
-		esp->auth.digest(esp, skb, (u8*)esph-skb->data,
-				 8+esp->conf.ivlen+clen, trailer->tail);
-		pskb_put(skb, trailer, esp->auth.authlen);
+	if (esp->auth.icv_full_len) {
+		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
+			      8+esp->conf.ivlen+clen, trailer->tail);
+		pskb_put(skb, trailer, alen);
 	}

 	ip_send_check(top_iph);
@@ -445,6 +457,11 @@ int esp_output(struct sk_buff *skb)
 	return err;
 }

+/*
+ * Note: detecting truncated vs. non-truncated authentication data is very
+ * expensive, so we only support truncated data, which is the recommended
+ * and common case.
+ */
 int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct iphdr *iph;
@@ -452,7 +469,8 @@ int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	struct esp_data *esp = x->data;
 	struct sk_buff *trailer;
 	int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm);
-	int elen = skb->len - 8 - esp->conf.ivlen - esp->auth.authlen;
+	int alen = esp->auth.icv_trunc_len;
+	int elen = skb->len - 8 - esp->conf.ivlen - alen;
 	int nfrags;

 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
@@ -462,17 +480,16 @@ int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 		goto out;

 	/* If integrity check is required, do this. */
-	if (esp->auth.authlen) {
-		u8 sum[esp->auth.authlen];
-		u8 sum1[esp->auth.authlen];
+	if (esp->auth.icv_full_len) {
+		u8 sum[esp->auth.icv_full_len];
+		u8 sum1[alen];

-		esp->auth.digest(esp, skb, 0, skb->len-esp->auth.authlen, sum);
+		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

-		if (skb_copy_bits(skb, skb->len-esp->auth.authlen, sum1,
-				  esp->auth.authlen))
+		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
 			BUG();

-		if (unlikely(memcmp(sum, sum1, esp->auth.authlen))) {
+		if (unlikely(memcmp(sum, sum1, alen))) {
 			x->stats.integrity_failed++;
 			goto out;
 		}
@@ -503,12 +520,11 @@ int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 			goto out;
 		}
 		skb_to_sgvec(skb, sg, 8+esp->conf.ivlen, elen);
-		crypto_cipher_decrypt(esp->conf.tfm, sg, nfrags);
+		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
 		if (unlikely(sg != sgbuf))
 			kfree(sg);

-		if (skb_copy_bits(skb, skb->len-esp->auth.authlen-2,
-				  nexthdr, 2))
+		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
 			BUG();

 		padlen = nexthdr[0];
@@ -518,7 +534,7 @@ int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	/* ... check padding bits here. Silly. :-) */

 	iph->protocol = nexthdr[1];
-	pskb_trim(skb, skb->len - esp->auth.authlen - padlen - 2);
+	pskb_trim(skb, skb->len - alen - padlen - 2);
 	memcpy(workbuf, skb->nh.raw, iph->ihl*4);
 	skb->h.raw = skb_pull(skb, 8 + esp->conf.ivlen);
 	skb->nh.raw += 8 + esp->conf.ivlen;
@@ -546,7 +562,7 @@ static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
 	if (esp->conf.padlen)
 		mtu = (mtu + esp->conf.padlen-1)&~(esp->conf.padlen-1);

-	return mtu + x->props.header_len + esp->auth.authlen;
+	return mtu + x->props.header_len + esp->auth.icv_full_len;
 }

 void esp4_err(struct sk_buff *skb, u32 info)
@@ -583,9 +599,9 @@ void esp_destroy(struct xfrm_state *x)
 		crypto_free_tfm(esp->auth.tfm);
 		esp->auth.tfm = NULL;
 	}
-	if (esp->auth.work_digest) {
-		kfree(esp->auth.work_digest);
-		esp->auth.work_digest = NULL;
+	if (esp->auth.work_icv) {
+		kfree(esp->auth.work_icv);
+		esp->auth.work_icv = NULL;
 	}
 }
@@ -593,11 +609,12 @@ int esp_init_state(struct xfrm_state *x, void *args)
 {
 	struct esp_data *esp = NULL;

+	/* null auth and encryption can have zero length keys */
 	if (x->aalg) {
-		if (x->aalg->alg_key_len == 0 || x->aalg->alg_key_len > 512)
+		if (x->aalg->alg_key_len > 512)
 			goto error;
 	}
-	if (x->ealg == NULL || x->ealg->alg_key_len == 0)
+	if (x->ealg == NULL)
 		goto error;

 	esp = kmalloc(sizeof(*esp), GFP_KERNEL);
@@ -607,21 +624,32 @@ int esp_init_state(struct xfrm_state *x, void *args)
 	memset(esp, 0, sizeof(*esp));

 	if (x->aalg) {
-		int digestsize;
+		struct xfrm_algo_desc *aalg_desc;

 		esp->auth.key = x->aalg->alg_key;
 		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
 		esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
 		if (esp->auth.tfm == NULL)
 			goto error;
-		esp->auth.digest = esp_hmac_digest;
-		digestsize = crypto_tfm_alg_digestsize(esp->auth.tfm);
+		esp->auth.icv = esp_hmac_digest;

-		/* XXX RFC2403 and RFC 2404 truncate auth to 96 bit */
-		esp->auth.authlen = 12;
-		if (esp->auth.authlen > digestsize) /* XXX */
-			BUG();
+		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name);
+		BUG_ON(!aalg_desc);

-		esp->auth.work_digest = kmalloc(digestsize, GFP_KERNEL);
-		if (!esp->auth.work_digest)
+		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
+		    crypto_tfm_alg_digestsize(esp->auth.tfm)) {
+			printk(KERN_INFO "ESP: %s digestsize %u != %hu\n",
+			       x->aalg->alg_name,
+			       crypto_tfm_alg_digestsize(esp->auth.tfm),
+			       aalg_desc->uinfo.auth.icv_fullbits/8);
+			goto error;
+		}
+
+		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
+		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
+
+		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
+		if (!esp->auth.work_icv)
 			goto error;
 	}
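An example of the values now taken from the xfrm algorithm description rather than being hard-coded; these are the standard HMAC-SHA1 figures from RFC 2404, shown for illustration:

/*   uinfo.auth.icv_fullbits  = 160  ->  icv_full_len  = 20 bytes
 *   uinfo.auth.icv_truncbits =  96  ->  icv_trunc_len = 12 bytes
 *
 * The old code assumed authlen == 12 for every algorithm. */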
 	esp->conf.key = x->ealg->alg_key;
@@ -639,7 +667,6 @@ int esp_init_state(struct xfrm_state *x, void *args)
 	x->props.header_len = 8 + esp->conf.ivlen;
 	if (x->props.mode)
 		x->props.header_len += 20;
-	x->props.trailer_len = esp->auth.authlen + crypto_tfm_alg_blocksize(esp->conf.tfm);
 	x->data = esp;
 	return 0;
@@ -647,8 +674,8 @@ int esp_init_state(struct xfrm_state *x, void *args)
 	if (esp) {
 		if (esp->auth.tfm)
 			crypto_free_tfm(esp->auth.tfm);
-		if (esp->auth.work_digest)
-			kfree(esp->auth.work_digest);
+		if (esp->auth.work_icv)
+			kfree(esp->auth.work_icv);
 		if (esp->conf.tfm)
 			crypto_free_tfm(esp->conf.tfm);
 		kfree(esp);
@@ -60,7 +60,7 @@ static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
 				  struct sk_buff *skb);

 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
-static int	tcp_v6_xmit(struct sk_buff *skb);
+static int	tcp_v6_xmit(struct sk_buff *skb, int ipfragok);

 static struct tcp_func ipv6_mapped;
 static struct tcp_func ipv6_specific;
@@ -19,7 +19,7 @@
 *             created test case so that I was able to fix nasty bug
 *             and many others. thanks.
 *
- * $Id: sch_htb.c,v 1.14 2002/09/28 12:55:22 devik Exp devik $
+ * $Id: sch_htb.c,v 1.17 2003/01/29 09:22:18 devik Exp devik $
 */
 #include <linux/config.h>
 #include <linux/module.h>
@@ -71,16 +71,12 @@
 #define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
 #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
 #define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x30007	/* major must be matched with number supplied by TC as version */
+#define HTB_VER 0x3000a	/* major must be matched with number supplied by TC as version */

 #if HTB_VER >> 16 != TC_HTB_PROTOVER
 #error "Mismatched sch_htb.c and pkt_sch.h"
 #endif

-/* temporary debug defines to be removed after beta stage */
-#define DEVIK_MEND(N)
-#define DEVIK_MSTART(N)
-
 /* debugging support; S is subsystem, these are defined:
  0 - netlink messages
  1 - enqueue
@@ -421,7 +417,6 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
 	if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
 		printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
 #endif
-	DEVIK_MSTART(9);
 	cl->pq_key = jiffies + PSCHED_US2JIFFIE(delay);
 	if (cl->pq_key == jiffies)
 		cl->pq_key++;
@@ -440,7 +435,6 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
 	}
 	rb_link_node(&cl->pq_node, parent, p);
 	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
-	DEVIK_MEND(9);
 }

 /**
/** /**
...@@ -678,7 +672,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -678,7 +672,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct htb_sched *q = (struct htb_sched *)sch->data; struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_classify(skb,sch); struct htb_class *cl = htb_classify(skb,sch);
DEVIK_MSTART(0);
if (cl == HTB_DIRECT || !cl) { if (cl == HTB_DIRECT || !cl) {
/* enqueue to helper queue */ /* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen && cl) { if (q->direct_queue.qlen < q->direct_qlen && cl) {
...@@ -687,25 +680,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -687,25 +680,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} else { } else {
kfree_skb (skb); kfree_skb (skb);
sch->stats.drops++; sch->stats.drops++;
DEVIK_MEND(0);
return NET_XMIT_DROP; return NET_XMIT_DROP;
} }
} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) { } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
sch->stats.drops++; sch->stats.drops++;
cl->stats.drops++; cl->stats.drops++;
DEVIK_MEND(0);
return NET_XMIT_DROP; return NET_XMIT_DROP;
} else { } else {
cl->stats.packets++; cl->stats.bytes += skb->len; cl->stats.packets++; cl->stats.bytes += skb->len;
DEVIK_MSTART(1);
htb_activate (q,cl); htb_activate (q,cl);
DEVIK_MEND(1);
} }
sch->q.qlen++; sch->q.qlen++;
sch->stats.packets++; sch->stats.bytes += skb->len; sch->stats.packets++; sch->stats.bytes += skb->len;
HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb); HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
DEVIK_MEND(0);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
@@ -941,7 +929,6 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
 	//struct htb_sched *q = (struct htb_sched *)sch->data;
 	struct htb_class *cl,*start;
 	/* look initial class up in the row */
-	DEVIK_MSTART(6);
 	start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
 	do {
@@ -960,8 +947,6 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
 		cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
 	} while (cl != start);
-	DEVIK_MEND(6);
-	DEVIK_MSTART(7);
 	if (likely(skb != NULL)) {
 		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
 			HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
@@ -973,11 +958,8 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
 		   gives us slightly better performance */
 		if (!cl->un.leaf.q->q.qlen)
 			htb_deactivate (q,cl);
-		DEVIK_MSTART(8);
 		htb_charge_class (q,cl,level,skb->len);
-		DEVIK_MEND(8);
 	}
-	DEVIK_MEND(7);
 	return skb;
 }
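
The deficit arithmetic in this hunk is deficit round robin over the leaves of one (level, prio) row: every dequeued packet is paid for from the class's per-level deficit, and a negative balance recharges the class with one quantum and advances the round-robin pointer to the next leaf. In outline (the pointer-advance helper is inferred from the htb_next_cl debug message above and is an assumption):

/* DRR step after a successful leaf dequeue (sketch) */
if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
	cl->un.leaf.deficit[level] += cl->un.leaf.quantum;	/* recharge */
	htb_next_rb_node(q->ptr[level] + prio);			/* assumed helper */
}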
@@ -1005,6 +987,9 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	struct htb_sched *q = (struct htb_sched *)sch->data;
 	int level;
 	long min_delay;
+#ifdef HTB_DEBUG
+	int evs_used = 0;
+#endif
 	HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
 		sch->q.qlen);
@@ -1016,27 +1001,26 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 		return skb;
 	}
-	DEVIK_MSTART(2);
 	if (!sch->q.qlen) goto fin;
 	PSCHED_GET_TIME(q->now);
-	min_delay = HZ*5;
+	min_delay = LONG_MAX;
 	q->nwc_hit = 0;
 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 		/* common case optimization - skip event handler quickly */
 		int m;
 		long delay;
-		DEVIK_MSTART(3);
 		if (jiffies - q->near_ev_cache[level] < 0x80000000 || 0) {
 			delay = htb_do_events(q,level);
 			q->near_ev_cache[level] += delay ? delay : HZ;
+#ifdef HTB_DEBUG
+			evs_used++;
+#endif
 		} else
 			delay = q->near_ev_cache[level] - jiffies;
 		if (delay && min_delay > delay)
 			min_delay = delay;
-		DEVIK_MEND(3);
-		DEVIK_MSTART(5);
 		m = ~q->row_mask[level];
 		while (m != (int)(-1)) {
 			int prio = ffz (m);
@@ -1045,24 +1029,24 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 			if (likely(skb != NULL)) {
 				sch->q.qlen--;
 				sch->flags &= ~TCQ_F_THROTTLED;
-				DEVIK_MEND(5);
 				goto fin;
 			}
 		}
-		DEVIK_MEND(5);
 	}
-	DEVIK_MSTART(4);
 #ifdef HTB_DEBUG
-	if (!q->nwc_hit && min_delay >= 5*HZ && net_ratelimit()) {
-		printk(KERN_ERR "HTB: mindelay=%ld, report it please !\n",min_delay);
-		htb_debug_dump(q);
+	if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
+		if (min_delay == LONG_MAX) {
+			printk(KERN_ERR "HTB: dequeue bug (%d), report it please !\n",
+					evs_used);
+			htb_debug_dump(q);
+		} else
+			printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
+					"too small rate\n",min_delay);
 	}
 #endif
-	htb_delay_by (sch,min_delay);
-	DEVIK_MEND(4);
+	htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
 fin:
 	HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,jiffies,skb);
-	DEVIK_MEND(2);
 	return skb;
 }
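
The min_delay rework in this function is the substantive fix: seeding with HZ*5 meant a longer genuine event delay could never surface, so the debug path could not tell "nothing pending anywhere" from "event far away". Seeding with LONG_MAX makes the untouched value a reliable no-event signal (hence the new dequeue-bug report that prints evs_used), while the htb_delay_by() argument is clamped so the watchdog never sleeps longer than 5 seconds. The open-coded 0x80000000 comparison is the wraparound-safe "jiffies has reached the cached event time" test; a sketch of the same loop using the kernel's time_after_eq(), which I am assuming is equivalent:

for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
	long delay;
	/* run the event handler only once the cached next-event
	   time has been reached (wraparound-safe comparison) */
	if (time_after_eq(jiffies, q->near_ev_cache[level])) {
		delay = htb_do_events(q, level);
		q->near_ev_cache[level] += delay ? delay : HZ;
	} else
		delay = q->near_ev_cache[level] - jiffies;
	if (delay && min_delay > delay)
		min_delay = delay;
}
/* whatever min_delay says, never stall the qdisc for more than 5 s */
htb_delay_by(sch, min_delay > 5*HZ ? 5*HZ : min_delay);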
@@ -1433,6 +1417,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	if (!rtab || !ctab) goto failure;
 	if (!cl) { /* new class */
+		struct Qdisc *new_q;
 		/* check for valid classid */
 		if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
 			goto failure;
@@ -1456,6 +1441,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		cl->magic = HTB_CMAGIC;
 #endif
+		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
+		   so that can't be used inside of sch_tree_lock
+		   -- thanks to Karlis Peisenieks */
+		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
 			/* turn parent into inner node */
@@ -1474,8 +1463,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			memset (&parent->un.inner,0,sizeof(parent->un.inner));
 		}
 		/* leaf (we) needs elementary qdisc */
-		if (!(cl->un.leaf.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
-			cl->un.leaf.q = &noop_qdisc;
+		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
 		cl->classid = classid; cl->parent = parent;
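
The new_q dance above is an allocate-before-lock fix: qdisc_create_dflt() allocates with GFP_KERNEL and may sleep, which the old code did inside the sch_tree_lock() critical section. The pattern, using only identifiers from this diff:

/* allocate outside the lock: qdisc_create_dflt() may sleep */
new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);

sch_tree_lock(sch);
/* ... parent conversion and grafting under the qdisc tree lock ... */
cl->un.leaf.q = new_q ? new_q : &noop_qdisc;	/* fall back if alloc failed */
sch_tree_unlock(sch);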
@@ -1503,11 +1491,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	if (!cl->level) {
 		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
 		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
-			printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.", cl->classid);
+			printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
 			cl->un.leaf.quantum = 1000;
 		}
 		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
-			printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.", cl->classid);
+			printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
 			cl->un.leaf.quantum = 200000;
 		}
 		if (hopt->quantum)
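
Besides the missing newlines, this hunk documents the r2q sizing rule: a leaf's DRR quantum defaults to rate/r2q and is clamped to [1000, 200000] bytes unless the user set one explicitly. A worked example, assuming rtab->rate.rate is in bytes per second and the default r2q of 10:

/* 100 Mbit/s class:  rate.rate = 12500000 B/s
 * quantum = 12500000 / 10 = 1250000  ->  > 200000, "is big" warning,
 *                                        clamped to 200000
 * with r2q = 100:   12500000 / 100 = 125000, inside [1000, 200000]   */
cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;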
...
@@ -78,7 +78,7 @@ krb5_encrypt(
 	sg[0].offset = ((long)out & ~PAGE_MASK);
 	sg[0].length = length;
-	ret = crypto_cipher_encrypt(tfm, sg, 1);
+	ret = crypto_cipher_encrypt(tfm, sg, sg, length);
 out:
 	dprintk("gss_k5encrypt returns %d\n",ret);
@@ -117,7 +117,7 @@ krb5_decrypt(
 	sg[0].offset = ((long)out & ~PAGE_MASK);
 	sg[0].length = length;
-	ret = crypto_cipher_decrypt(tfm, sg, 1);
+	ret = crypto_cipher_decrypt(tfm, sg, sg, length);
 out:
 	dprintk("gss_k5decrypt returns %d\n",ret);
...
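
Both krb5 call sites track the cipher API change from this merge: crypto_cipher_encrypt()/crypto_cipher_decrypt() now take separate destination and source scatterlists plus a byte count, where they previously took one scatterlist and an entry count, so in-place operation passes the same list twice. A sketch of the updated in-place usage (offset_in_page() assumed equivalent to the open-coded mask above):

/* in-place encryption of the length-byte buffer at `out` (sketch) */
struct scatterlist sg[1];

sg[0].page   = virt_to_page(out);
sg[0].offset = offset_in_page(out);	/* same as ((long)out & ~PAGE_MASK) */
sg[0].length = length;

ret = crypto_cipher_encrypt(tfm, sg, sg, length);	/* dst sg, src sg, bytes */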