Commit f049274b authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (79 commits)
  [IPX]: Fix NULL pointer dereference on ipx unload
  [ATM]: atmarp.h needs to always include linux/types.h
  [NET]: Fix net/socket.c warnings.
  [NET]: cleanup sock_from_file()
  [NET]: change layout of ehash table
  [S390]: Add AF_IUCV socket support
  [S390]: Adapt special message interface to new IUCV API
  [S390]: Adapt netiucv driver to new IUCV API
  [S390]: Adapt vmlogrdr driver to new IUCV API
  [S390]: Adapt monreader driver to new IUCV API
  [S390]: Rewrite of the IUCV base code, part 2
  [S390]: Rewrite of the IUCV base code, part 1
  [X.25]: Adds /proc/net/x25/forward to view active forwarded calls.
  [X.25]: Adds /proc/sys/net/x25/x25_forward to control forwarding.
  [X.25]: Add call forwarding
  [XFRM]: xfrm_migrate() needs exporting to modules.
  [PFKEYV2]: CONFIG_NET_KEY_MIGRATE option
  [PFKEYV2]: Extension for dynamic update of endpoint address(es)
  [XFRM]: CONFIG_XFRM_MIGRATE option
  [XFRM]: User interface for handling XFRM_MSG_MIGRATE
  ...
parents b37df859 1539b98b
......@@ -193,6 +193,7 @@ Original developers of the crypto algorithms:
Kartikey Mahendra Bhatt (CAST6)
Jon Oberheide (ARC4)
Jouni Malinen (Michael MIC)
NTT(Nippon Telegraph and Telephone Corporation) (Camellia)
SHA1 algorithm contributors:
Jean-Francois Dive
......@@ -246,6 +247,9 @@ Tiger algorithm contributors:
VIA PadLock contributors:
Michal Ludvig
Camellia algorithm contributors:
NTT(Nippon Telegraph and Telephone Corporation) (Camellia)
Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>
Please send any credits updates or corrections to:
......
......@@ -179,6 +179,8 @@ CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
# CONFIG_XFRM_SUB_POLICY is not set
CONFIG_NET_KEY=y
CONFIG_IUCV=m
CONFIG_AFIUCV=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_IP_ADVANCED_ROUTER is not set
......@@ -508,7 +510,6 @@ CONFIG_NET_ETHERNET=y
#
CONFIG_LCS=m
CONFIG_CTC=m
CONFIG_IUCV=m
# CONFIG_NETIUCV is not set
# CONFIG_SMSGIUCV is not set
# CONFIG_CLAW is not set
......
......@@ -149,6 +149,15 @@ config CRYPTO_CBC
CBC: Cipher Block Chaining mode
This block cipher algorithm is required for IPSec.
config CRYPTO_PCBC
tristate "PCBC support"
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
default m
help
PCBC: Propagating Cipher Block Chaining mode
This block cipher algorithm is required for RxRPC.
config CRYPTO_LRW
tristate "LRW support (EXPERIMENTAL)"
depends on EXPERIMENTAL
......@@ -168,6 +177,13 @@ config CRYPTO_DES
help
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
config CRYPTO_FCRYPT
tristate "FCrypt cipher algorithm"
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
help
FCrypt algorithm used by RxRPC.
config CRYPTO_BLOWFISH
tristate "Blowfish cipher algorithm"
select CRYPTO_ALGAPI
......@@ -409,6 +425,21 @@ config CRYPTO_CRC32C
See Castagnoli93. This implementation uses lib/libcrc32c.
Module will be crc32c.
config CRYPTO_CAMELLIA
tristate "Camellia cipher algorithms"
depends on CRYPTO
select CRYPTO_ALGAPI
help
Camellia cipher algorithms module.
Camellia is a symmetric key block cipher developed jointly
at NTT and Mitsubishi Electric Corporation.
The Camellia specifies three key sizes: 128, 192 and 256 bits.
See also:
<https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
config CRYPTO_TEST
tristate "Testing module"
depends on m
......
......@@ -27,13 +27,16 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
obj-$(CONFIG_CRYPTO_DES) += des.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
obj-$(CONFIG_CRYPTO_AES) += aes.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia.o
obj-$(CONFIG_CRYPTO_CAST5) += cast5.o
obj-$(CONFIG_CRYPTO_CAST6) += cast6.o
obj-$(CONFIG_CRYPTO_ARC4) += arc4.o
......
......@@ -377,7 +377,8 @@ void crypto_drop_spawn(struct crypto_spawn *spawn)
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn)
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask)
{
struct crypto_alg *alg;
struct crypto_alg *alg2;
......@@ -396,11 +397,19 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn)
return ERR_PTR(-EAGAIN);
}
tfm = __crypto_alloc_tfm(alg, 0);
tfm = ERR_PTR(-EINVAL);
if (unlikely((alg->cra_flags ^ type) & mask))
goto out_put_alg;
tfm = __crypto_alloc_tfm(alg, type, mask);
if (IS_ERR(tfm))
crypto_mod_put(alg);
goto out_put_alg;
return tfm;
out_put_alg:
crypto_mod_put(alg);
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
......
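The widened crypto_spawn_tfm() signature lets a template insist on a particular transform type: the spawned algorithm's cra_flags are compared against the requested type under the given mask, and a mismatch now fails before a tfm is ever allocated. A minimal standalone sketch of that check (the helper name is illustrative; only the expression is taken from the hunk above):

	/* The bits selected by @mask must agree between the algorithm's
	 * cra_flags and the requested @type; otherwise the spawn is refused. */
	static inline bool spawn_type_matches(u32 cra_flags, u32 type, u32 mask)
	{
		return ((cra_flags ^ type) & mask) == 0;
	}

With type = CRYPTO_ALG_TYPE_CIPHER and mask = CRYPTO_ALG_TYPE_MASK, for example, a blkcipher or hash algorithm fails the test, so crypto_spawn_tfm() returns ERR_PTR(-EINVAL) instead of handing back a tfm of the wrong kind.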
......@@ -212,31 +212,12 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
tfm->crt_flags = flags & CRYPTO_TFM_REQ_MASK;
flags &= ~CRYPTO_TFM_REQ_MASK;
const struct crypto_type *type_obj = tfm->__crt_alg->cra_type;
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
return crypto_init_cipher_flags(tfm, flags);
case CRYPTO_ALG_TYPE_DIGEST:
return crypto_init_digest_flags(tfm, flags);
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_flags(tfm, flags);
}
return 0;
}
static int crypto_init_ops(struct crypto_tfm *tfm)
{
const struct crypto_type *type = tfm->__crt_alg->cra_type;
if (type)
return type->init(tfm);
if (type_obj)
return type_obj->init(tfm, type, mask);
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
......@@ -285,29 +266,29 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
}
}
static unsigned int crypto_ctxsize(struct crypto_alg *alg, int flags)
static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
const struct crypto_type *type = alg->cra_type;
const struct crypto_type *type_obj = alg->cra_type;
unsigned int len;
len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
if (type)
return len + type->ctxsize(alg);
if (type_obj)
return len + type_obj->ctxsize(alg, type, mask);
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
default:
BUG();
case CRYPTO_ALG_TYPE_CIPHER:
len += crypto_cipher_ctxsize(alg, flags);
len += crypto_cipher_ctxsize(alg);
break;
case CRYPTO_ALG_TYPE_DIGEST:
len += crypto_digest_ctxsize(alg, flags);
len += crypto_digest_ctxsize(alg);
break;
case CRYPTO_ALG_TYPE_COMPRESS:
len += crypto_compress_ctxsize(alg, flags);
len += crypto_compress_ctxsize(alg);
break;
}
......@@ -322,24 +303,21 @@ void crypto_shoot_alg(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags)
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask)
{
struct crypto_tfm *tfm = NULL;
unsigned int tfm_size;
int err = -ENOMEM;
tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags);
tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
tfm = kzalloc(tfm_size, GFP_KERNEL);
if (tfm == NULL)
goto out_err;
tfm->__crt_alg = alg;
err = crypto_init_flags(tfm, flags);
if (err)
goto out_free_tfm;
err = crypto_init_ops(tfm);
err = crypto_init_ops(tfm, type, mask);
if (err)
goto out_free_tfm;
......@@ -362,31 +340,6 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags)
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
{
struct crypto_tfm *tfm = NULL;
int err;
do {
struct crypto_alg *alg;
alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC);
err = PTR_ERR(alg);
if (IS_ERR(alg))
continue;
tfm = __crypto_alloc_tfm(alg, flags);
err = 0;
if (IS_ERR(tfm)) {
crypto_mod_put(alg);
err = PTR_ERR(tfm);
tfm = NULL;
}
} while (err == -EAGAIN && !signal_pending(current));
return tfm;
}
/*
* crypto_alloc_base - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
......@@ -420,7 +373,7 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
goto err;
}
tfm = __crypto_alloc_tfm(alg, 0);
tfm = __crypto_alloc_tfm(alg, type, mask);
if (!IS_ERR(tfm))
return tfm;
......@@ -466,7 +419,6 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
kfree(tfm);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
EXPORT_SYMBOL_GPL(crypto_free_tfm);
int crypto_has_alg(const char *name, u32 type, u32 mask)
......
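With crypto_init_flags() and crypto_alloc_tfm() removed above, callers obtain transforms through crypto_alloc_base() or the type-specific wrappers, passing a (type, mask) pair rather than the old CRYPTO_TFM_* flags. The tcrypt hunk later in this merge switches its deflate test to exactly this style; a hedged sketch of the same pattern from caller code, with error handling abbreviated:

	struct crypto_comp *tfm;

	tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... compress / decompress with tfm ... */
	crypto_free_comp(tfm);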
......@@ -16,6 +16,7 @@
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
......@@ -313,6 +314,9 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
struct crypto_blkcipher *tfm = desc->tfm;
unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
......@@ -345,7 +349,8 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key,
return cipher->setkey(tfm, key, keylen);
}
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
struct blkcipher_alg *cipher = &alg->cra_blkcipher;
unsigned int len = alg->cra_ctxsize;
......@@ -358,7 +363,7 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
return len;
}
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm)
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
......
......@@ -243,6 +243,7 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
switch (crypto_tfm_alg_blocksize(tfm)) {
case 8:
......@@ -260,11 +261,11 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
ctx->xor = xor_quad;
}
tfm = crypto_spawn_tfm(spawn);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = crypto_cipher_cast(tfm);
ctx->child = cipher;
return 0;
}
......
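cbc.c above (and ecb.c, lrw.c, xcbc.c and the new pcbc.c below) now obtains its inner cipher through crypto_spawn_cipher() instead of crypto_spawn_tfm() plus crypto_cipher_cast(). The helper itself is defined in a header not shown in this excerpt, but it is presumably a thin wrapper along these lines, requesting a plain cipher via the new type/mask arguments:

	/* Presumed shape of the helper; not shown in this excerpt. */
	static inline struct crypto_cipher *crypto_spawn_cipher(
		struct crypto_spawn *spawn)
	{
		u32 type = CRYPTO_ALG_TYPE_CIPHER;
		u32 mask = CRYPTO_ALG_TYPE_MASK;

		return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
	}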
......@@ -34,11 +34,6 @@ static int crypto_decompress(struct crypto_tfm *tfm,
dlen);
}
int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags)
{
return flags ? -EINVAL : 0;
}
int crypto_init_compress_ops(struct crypto_tfm *tfm)
{
struct compress_tfm *ops = &tfm->crt_compress;
......
......@@ -14,7 +14,9 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
......@@ -29,7 +31,7 @@ static int init(struct hash_desc *desc)
return 0;
}
static int update(struct hash_desc *desc,
static int update2(struct hash_desc *desc,
struct scatterlist *sg, unsigned int nbytes)
{
struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
......@@ -81,6 +83,14 @@ static int update(struct hash_desc *desc,
return 0;
}
static int update(struct hash_desc *desc,
struct scatterlist *sg, unsigned int nbytes)
{
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
return update2(desc, sg, nbytes);
}
static int final(struct hash_desc *desc, u8 *out)
{
struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
......@@ -118,16 +128,14 @@ static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
static int digest(struct hash_desc *desc,
struct scatterlist *sg, unsigned int nbytes, u8 *out)
{
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
init(desc);
update(desc, sg, nbytes);
update2(desc, sg, nbytes);
return final(desc, out);
}
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
{
return flags ? -EINVAL : 0;
}
int crypto_init_digest_ops(struct crypto_tfm *tfm)
{
struct hash_tfm *ops = &tfm->crt_hash;
......
......@@ -99,12 +99,13 @@ static int crypto_ecb_init_tfm(struct crypto_tfm *tfm)
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
tfm = crypto_spawn_tfm(spawn);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = crypto_cipher_cast(tfm);
ctx->child = cipher;
return 0;
}
......
......@@ -16,12 +16,13 @@
#include "internal.h"
static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg)
static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
return alg->cra_ctxsize;
}
static int crypto_init_hash_ops(struct crypto_tfm *tfm)
static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct hash_tfm *crt = &tfm->crt_hash;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
......
......@@ -172,15 +172,16 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
static int hmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_hash *hash;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
tfm = crypto_spawn_tfm(spawn);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
hash = crypto_spawn_hash(spawn);
if (IS_ERR(hash))
return PTR_ERR(hash);
ctx->child = crypto_hash_cast(tfm);
ctx->child = hash;
return 0;
}
......
......@@ -83,8 +83,7 @@ static inline void crypto_exit_proc(void)
{ }
#endif
static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg,
int flags)
static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg)
{
unsigned int len = alg->cra_ctxsize;
......@@ -96,23 +95,12 @@ static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg,
return len;
}
static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg,
int flags)
static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg)
{
unsigned int len = alg->cra_ctxsize;
switch (flags & CRYPTO_TFM_MODE_MASK) {
case CRYPTO_TFM_MODE_CBC:
len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
len += alg->cra_blocksize;
break;
}
return len;
return alg->cra_ctxsize;
}
static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg,
int flags)
static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
......@@ -121,10 +109,6 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_digest_ops(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
......@@ -136,7 +120,8 @@ void crypto_exit_compress_ops(struct crypto_tfm *tfm);
void crypto_larval_error(const char *name, u32 type, u32 mask);
void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
......
......@@ -201,21 +201,22 @@ static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
static int init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct priv *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
tfm = crypto_spawn_tfm(spawn);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
if (crypto_tfm_alg_blocksize(tfm) != 16) {
if (crypto_cipher_blocksize(cipher) != 16) {
*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return -EINVAL;
}
ctx->child = crypto_cipher_cast(tfm);
ctx->child = cipher;
return 0;
}
......
/*
* PCBC: Propagating Cipher Block Chaining mode
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* Derived from cbc.c
* - Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_pcbc_ctx {
struct crypto_cipher *child;
void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
};
static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
void (*xor)(u8 *, const u8 *,
unsigned int))
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
do {
xor(iv, src, bsize);
fn(crypto_cipher_tfm(tfm), dst, iv);
memcpy(iv, dst, bsize);
xor(iv, src, bsize);
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
void (*xor)(u8 *, const u8 *,
unsigned int))
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 tmpbuf[bsize];
do {
memcpy(tmpbuf, src, bsize);
xor(iv, tmpbuf, bsize);
fn(crypto_cipher_tfm(tfm), src, iv);
memcpy(iv, src, bsize);
xor(iv, tmpbuf, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child,
xor);
else
nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child,
xor);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
void (*xor)(u8 *, const u8 *,
unsigned int))
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
do {
fn(crypto_cipher_tfm(tfm), dst, src);
xor(dst, iv, bsize);
memcpy(iv, src, bsize);
xor(iv, dst, bsize);
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
void (*xor)(u8 *, const u8 *,
unsigned int))
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 tmpbuf[bsize];
do {
memcpy(tmpbuf, src, bsize);
fn(crypto_cipher_tfm(tfm), src, src);
xor(src, iv, bsize);
memcpy(iv, tmpbuf, bsize);
xor(iv, src, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child,
xor);
else
nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child,
xor);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
{
do {
*a++ ^= *b++;
} while (--bs);
}
static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
{
u32 *a = (u32 *)dst;
u32 *b = (u32 *)src;
do {
*a++ ^= *b++;
} while ((bs -= 4));
}
static void xor_64(u8 *a, const u8 *b, unsigned int bs)
{
((u32 *)a)[0] ^= ((u32 *)b)[0];
((u32 *)a)[1] ^= ((u32 *)b)[1];
}
static void xor_128(u8 *a, const u8 *b, unsigned int bs)
{
((u32 *)a)[0] ^= ((u32 *)b)[0];
((u32 *)a)[1] ^= ((u32 *)b)[1];
((u32 *)a)[2] ^= ((u32 *)b)[2];
((u32 *)a)[3] ^= ((u32 *)b)[3];
}
static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
switch (crypto_tfm_alg_blocksize(tfm)) {
case 8:
ctx->xor = xor_64;
break;
case 16:
ctx->xor = xor_128;
break;
default:
if (crypto_tfm_alg_blocksize(tfm) % 4)
ctx->xor = xor_byte;
else
ctx->xor = xor_quad;
}
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_pcbc_alloc(void *param, unsigned int len)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));
inst = crypto_alloc_instance("pcbc", alg);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_blkcipher_type;
if (!(alg->cra_blocksize % 4))
inst->alg.cra_alignmask |= 3;
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
inst->alg.cra_init = crypto_pcbc_init_tfm;
inst->alg.cra_exit = crypto_pcbc_exit_tfm;
inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static void crypto_pcbc_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_template crypto_pcbc_tmpl = {
.name = "pcbc",
.alloc = crypto_pcbc_alloc,
.free = crypto_pcbc_free,
.module = THIS_MODULE,
};
static int __init crypto_pcbc_module_init(void)
{
return crypto_register_template(&crypto_pcbc_tmpl);
}
static void __exit crypto_pcbc_module_exit(void)
{
crypto_unregister_template(&crypto_pcbc_tmpl);
}
module_init(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCBC block cipher algorithm");
......@@ -12,6 +12,7 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
* 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
* 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
*
......@@ -71,7 +72,8 @@ static char *check[] = {
"des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish",
"twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6",
"arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", NULL
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", NULL
};
static void hexdump(unsigned char *buf, unsigned int len)
......@@ -765,7 +767,7 @@ static void test_deflate(void)
memcpy(tvmem, deflate_comp_tv_template, tsize);
tv = (void *)tvmem;
tfm = crypto_alloc_tfm("deflate", 0);
tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
if (tfm == NULL) {
printk("failed to load transform for deflate\n");
return;
......@@ -964,6 +966,26 @@ static void do_test(void)
test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template,
XETA_DEC_TEST_VECTORS);
//FCrypt
test_cipher("pcbc(fcrypt)", ENCRYPT, fcrypt_pcbc_enc_tv_template,
FCRYPT_ENC_TEST_VECTORS);
test_cipher("pcbc(fcrypt)", DECRYPT, fcrypt_pcbc_dec_tv_template,
FCRYPT_DEC_TEST_VECTORS);
//CAMELLIA
test_cipher("ecb(camellia)", ENCRYPT,
camellia_enc_tv_template,
CAMELLIA_ENC_TEST_VECTORS);
test_cipher("ecb(camellia)", DECRYPT,
camellia_dec_tv_template,
CAMELLIA_DEC_TEST_VECTORS);
test_cipher("cbc(camellia)", ENCRYPT,
camellia_cbc_enc_tv_template,
CAMELLIA_CBC_ENC_TEST_VECTORS);
test_cipher("cbc(camellia)", DECRYPT,
camellia_cbc_dec_tv_template,
CAMELLIA_CBC_DEC_TEST_VECTORS);
test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS);
test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS);
test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS);
......@@ -980,6 +1002,10 @@ static void do_test(void)
HMAC_SHA1_TEST_VECTORS);
test_hash("hmac(sha256)", hmac_sha256_tv_template,
HMAC_SHA256_TEST_VECTORS);
test_hash("hmac(sha384)", hmac_sha384_tv_template,
HMAC_SHA384_TEST_VECTORS);
test_hash("hmac(sha512)", hmac_sha512_tv_template,
HMAC_SHA512_TEST_VECTORS);
test_hash("xcbc(aes)", aes_xcbc128_tv_template,
XCBC_AES_TEST_VECTORS);
......@@ -1177,6 +1203,28 @@ static void do_test(void)
XETA_DEC_TEST_VECTORS);
break;
case 31:
test_cipher("pcbc(fcrypt)", ENCRYPT, fcrypt_pcbc_enc_tv_template,
FCRYPT_ENC_TEST_VECTORS);
test_cipher("pcbc(fcrypt)", DECRYPT, fcrypt_pcbc_dec_tv_template,
FCRYPT_DEC_TEST_VECTORS);
break;
case 32:
test_cipher("ecb(camellia)", ENCRYPT,
camellia_enc_tv_template,
CAMELLIA_ENC_TEST_VECTORS);
test_cipher("ecb(camellia)", DECRYPT,
camellia_dec_tv_template,
CAMELLIA_DEC_TEST_VECTORS);
test_cipher("cbc(camellia)", ENCRYPT,
camellia_cbc_enc_tv_template,
CAMELLIA_CBC_ENC_TEST_VECTORS);
test_cipher("cbc(camellia)", DECRYPT,
camellia_cbc_dec_tv_template,
CAMELLIA_CBC_DEC_TEST_VECTORS);
break;
case 100:
test_hash("hmac(md5)", hmac_md5_tv_template,
HMAC_MD5_TEST_VECTORS);
......@@ -1192,6 +1240,16 @@ static void do_test(void)
HMAC_SHA256_TEST_VECTORS);
break;
case 103:
test_hash("hmac(sha384)", hmac_sha384_tv_template,
HMAC_SHA384_TEST_VECTORS);
break;
case 104:
test_hash("hmac(sha512)", hmac_sha512_tv_template,
HMAC_SHA512_TEST_VECTORS);
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
......@@ -1260,6 +1318,17 @@ static void do_test(void)
des_speed_template);
break;
case 205:
test_cipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
camellia_speed_template);
test_cipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
camellia_speed_template);
test_cipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
camellia_speed_template);
test_cipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
camellia_speed_template);
break;
case 300:
/* fall through */
......
......@@ -21,6 +21,7 @@
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/rtnetlink.h>
......@@ -47,7 +48,7 @@ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
* +------------------------
*/
struct crypto_xcbc_ctx {
struct crypto_tfm *child;
struct crypto_cipher *child;
u8 *odds;
u8 *prev;
u8 *key;
......@@ -75,8 +76,7 @@ static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
return err;
ctx->child->__crt_alg->cra_cipher.cia_encrypt(ctx->child, key1,
ctx->consts);
crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts);
return crypto_cipher_setkey(ctx->child, key1, bs);
}
......@@ -86,7 +86,7 @@ static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
{
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
if (keylen != crypto_tfm_alg_blocksize(ctx->child))
if (keylen != crypto_cipher_blocksize(ctx->child))
return -EINVAL;
ctx->keylen = keylen;
......@@ -108,13 +108,13 @@ static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
return 0;
}
static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
struct scatterlist *sg,
unsigned int nbytes)
{
struct crypto_hash *parent = pdesc->tfm;
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
struct crypto_tfm *tfm = ctx->child;
struct crypto_cipher *tfm = ctx->child;
int bs = crypto_hash_blocksize(parent);
unsigned int i = 0;
......@@ -142,7 +142,7 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
offset += len;
crypto_kunmap(p, 0);
crypto_yield(tfm->crt_flags);
crypto_yield(pdesc->flags);
continue;
}
......@@ -152,7 +152,7 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
p += bs - ctx->len;
ctx->xor(ctx->prev, ctx->odds, bs);
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev);
/* clearing the length */
ctx->len = 0;
......@@ -160,7 +160,8 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
/* encrypting the rest of data */
while (len > bs) {
ctx->xor(ctx->prev, p, bs);
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
crypto_cipher_encrypt_one(tfm, ctx->prev,
ctx->prev);
p += bs;
len -= bs;
}
......@@ -171,7 +172,7 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
ctx->len = len;
}
crypto_kunmap(p, 0);
crypto_yield(tfm->crt_flags);
crypto_yield(pdesc->flags);
slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
offset = 0;
pg++;
......@@ -183,11 +184,20 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
return 0;
}
static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
struct scatterlist *sg,
unsigned int nbytes)
{
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
return crypto_xcbc_digest_update2(pdesc, sg, nbytes);
}
static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
{
struct crypto_hash *parent = pdesc->tfm;
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
struct crypto_tfm *tfm = ctx->child;
struct crypto_cipher *tfm = ctx->child;
int bs = crypto_hash_blocksize(parent);
int err = 0;
......@@ -197,13 +207,14 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
return err;
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key2, (const u8*)(ctx->consts+bs));
crypto_cipher_encrypt_one(tfm, key2,
(u8 *)(ctx->consts + bs));
ctx->xor(ctx->prev, ctx->odds, bs);
ctx->xor(ctx->prev, key2, bs);
_crypto_xcbc_digest_setkey(parent, ctx);
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
crypto_cipher_encrypt_one(tfm, out, ctx->prev);
} else {
u8 key3[bs];
unsigned int rlen;
......@@ -218,14 +229,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
return err;
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key3, (const u8*)(ctx->consts+bs*2));
crypto_cipher_encrypt_one(tfm, key3,
(u8 *)(ctx->consts + bs * 2));
ctx->xor(ctx->prev, ctx->odds, bs);
ctx->xor(ctx->prev, key3, bs);
_crypto_xcbc_digest_setkey(parent, ctx);
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
crypto_cipher_encrypt_one(tfm, out, ctx->prev);
}
return 0;
......@@ -234,21 +246,25 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
static int crypto_xcbc_digest(struct hash_desc *pdesc,
struct scatterlist *sg, unsigned int nbytes, u8 *out)
{
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
crypto_xcbc_digest_init(pdesc);
crypto_xcbc_digest_update(pdesc, sg, nbytes);
crypto_xcbc_digest_update2(pdesc, sg, nbytes);
return crypto_xcbc_digest_final(pdesc, out);
}
static int xcbc_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
tfm = crypto_spawn_tfm(spawn);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
switch(bs) {
case 16:
......@@ -258,7 +274,7 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm)
return -EINVAL;
}
ctx->child = crypto_cipher_cast(tfm);
ctx->child = cipher;
ctx->odds = (u8*)(ctx+1);
ctx->prev = ctx->odds + bs;
ctx->key = ctx->prev + bs;
......
......@@ -457,7 +457,7 @@ static struct pci_driver geode_aes_driver = {
static int __init
geode_aes_init(void)
{
return pci_module_init(&geode_aes_driver);
return pci_register_driver(&geode_aes_driver);
}
static void __exit
......
......@@ -184,7 +184,7 @@ static int tlb_initialize(struct bonding *bond)
spin_lock_init(&(bond_info->tx_hashtbl_lock));
new_hashtbl = kmalloc(size, GFP_KERNEL);
new_hashtbl = kzalloc(size, GFP_KERNEL);
if (!new_hashtbl) {
printk(KERN_ERR DRV_NAME
": %s: Error: Failed to allocate TLB hash table\n",
......@@ -195,8 +195,6 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
memset(bond_info->tx_hashtbl, 0, size);
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
}
......
......@@ -1343,14 +1343,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
"inaccurate.\n", bond_dev->name, slave_dev->name);
}
new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
if (!new_slave) {
res = -ENOMEM;
goto err_undo_flags;
}
memset(new_slave, 0, sizeof(struct slave));
/* save slave's original flags before calling
* netdev_set_master and dev_open
*/
......
......@@ -1343,15 +1343,12 @@ static int __init slip_init(void)
printk(KERN_INFO "SLIP linefill/keepalive option.\n");
#endif
slip_devs = kmalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL);
slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL);
if (!slip_devs) {
printk(KERN_ERR "SLIP: Can't allocate slip devices array! Uaargh! (-> No SLIP available)\n");
return -ENOMEM;
}
/* Clear the pointer array, we allocate devices when we need them */
memset(slip_devs, 0, sizeof(struct net_device *)*slip_maxdev);
/* Fill in our line protocol discipline, and register it */
if ((status = tty_register_ldisc(N_SLIP, &sl_ldisc)) != 0) {
printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
......
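The bonding and SLIP hunks above are plain kzalloc() conversions: kzalloc(size, flags) returns memory that is already zeroed, so the follow-up memset(..., 0, size) calls become redundant and can be dropped. The two forms below are equivalent; the second is just shorter:

	/* Before: allocate, then clear by hand. */
	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	memset(p, 0, size);

	/* After: one call allocates and zeroes. */
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;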
......@@ -3380,7 +3380,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
}
next_pkt_nopost:
sw_idx++;
sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
/* Refresh hw_idx to see if there is new work */
if (sw_idx == hw_idx) {
......
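The tg3 hunk replaces a modulo with a bitwise AND when wrapping the RX return-ring index. The substitution is only valid because TG3_RX_RCB_RING_SIZE() evaluates to a power of two, in which case masking with (size - 1) wraps exactly like the modulo while avoiding an integer division in the receive hot path. A tiny standalone illustration:

	/* Wrap an index into a power-of-two sized ring without a division. */
	static inline unsigned int ring_wrap(unsigned int idx, unsigned int size)
	{
		return idx & (size - 1);	/* equals idx % size when size is 2^n */
	}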
......@@ -22,13 +22,6 @@ config CTC
available. This option is also available as a module which will be
called ctc.ko. If you do not know what it is, it's safe to say "Y".
config IUCV
tristate "IUCV support (VM only)"
help
Select this option if you want to use inter-user communication
under VM or VIF. If unsure, say "Y" to enable a fast communication
link between VM guests.
config NETIUCV
tristate "IUCV network device support (VM only)"
depends on IUCV && NETDEVICES
......
......@@ -4,7 +4,6 @@
ctc-objs := ctcmain.o ctcdbug.o
obj-$(CONFIG_IUCV) += iucv.o
obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
......
......@@ -828,9 +828,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
mutex_unlock(&crypt_stat->cs_tfm_mutex);
goto out;
}
crypto_blkcipher_set_flags(crypt_stat->tfm,
(ECRYPTFS_DEFAULT_CHAINING_MODE
| CRYPTO_TFM_REQ_WEAK_KEY));
crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
mutex_unlock(&crypt_stat->cs_tfm_mutex);
rc = 0;
out:
......
......@@ -176,7 +176,6 @@ ecryptfs_get_key_payload_data(struct key *key)
#define ECRYPTFS_FILE_SIZE_BYTES 8
#define ECRYPTFS_DEFAULT_CIPHER "aes"
#define ECRYPTFS_DEFAULT_KEY_BYTES 16
#define ECRYPTFS_DEFAULT_CHAINING_MODE CRYPTO_TFM_MODE_CBC
#define ECRYPTFS_DEFAULT_HASH "md5"
#define ECRYPTFS_TAG_3_PACKET_TYPE 0x8C
#define ECRYPTFS_TAG_11_PACKET_TYPE 0xED
......
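The eCryptfs change drops CRYPTO_TFM_MODE_CBC (via ECRYPTFS_DEFAULT_CHAINING_MODE) from the flag set because the per-mode TFM flags disappear with this API rework; the chaining mode is instead selected by name through a template such as "cbc(aes)" when the blkcipher is allocated, leaving CRYPTO_TFM_REQ_WEAK_KEY as the only flag that still needs to be set here. A hedged sketch of the name-based selection (eCryptfs builds the real name from its configured cipher, ECRYPTFS_DEFAULT_CIPHER being "aes", in code not shown in this excerpt):

	/* Illustration only: mode chosen by template name at allocation time
	 * rather than by CRYPTO_TFM_MODE_* flags. */
	char name[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;

	snprintf(name, sizeof(name), "cbc(%s)", "aes");
	tfm = crypto_alloc_blkcipher(name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_blkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);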