Commit 48565b09 authored by David S. Miller's avatar David S. Miller

Merge davem@nuts.ninka.net:/home/davem/src/BK/net-2.5

into kernel.bkbits.net:/home/davem/net-2.5
parents 27d23d3f e74356b9
......@@ -186,6 +186,7 @@ Original developers of the crypto algorithms:
Kazunori Miyazawa / USAGI (HMAC)
Matthew Skala (Twofish)
Dag Arne Osvik (Serpent)
Brian Gladman (AES)
DES algorithm contributors:
Raimar Falke
......@@ -203,6 +204,10 @@ Twofish algorithm contributors:
SHA256 algorithm contributors:
Andrew McDonald
AES algorithm contributors:
Alexander Kjeldaas
Adam J. Richter
Please send any credits updates or corrections to:
James Morris <jmorris@intercode.com.au>
......@@ -1462,10 +1462,14 @@ routing issues.
error_burst and error_cost
--------------------------
These parameters are used to limit the warning messages written to the kernel
log from the routing code. The higher the error_cost factor is, the fewer
messages will be written. Error_burst controls when messages will be dropped.
The default settings limit warning messages to one every five seconds.
These parameters are used to limit how many ICMP destination unreachable
messages are sent from the host in question. ICMP destination unreachable
messages are sent when we cannot reach the next hop while trying to transmit
a packet. It will also print some error messages to the kernel log if someone
is ignoring our ICMP redirects. The higher the error_cost factor is, the fewer
destination unreachable and error messages will be let through. Error_burst
controls when destination unreachable messages and error messages will be
dropped. The default settings limit warning messages to five every second.
flush
-----
......
......@@ -94,6 +94,26 @@ config CRYPTO_SERPENT
See also:
http://www.cl.cam.ac.uk/~rja14/serpent.html
config CRYPTO_AES
tristate "AES cipher algorithms"
depends on CRYPTO
help
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
Rijndael appears to be consistently a very good performer in
both hardware and software across a wide range of computing
environments regardless of its use in feedback or non-feedback
modes. Its key setup time is excellent, and its key agility is
good. Rijndael's very low memory requirements make it very well
suited for restricted-space environments, in which it also
demonstrates excellent performance. Rijndael's operations are
among the easiest to defend against power and timing attacks.
AES specifies three key sizes: 128, 192 and 256 bits.
See http://csrc.nist.gov/encryption/aes/ for more information.
config CRYPTO_TEST
tristate "Testing module"
depends on CRYPTO
......
......@@ -20,5 +20,6 @@ obj-$(CONFIG_CRYPTO_DES) += des.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
obj-$(CONFIG_CRYPTO_AES) += aes.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
This diff is collapsed.
......@@ -1983,6 +1983,111 @@ test_serpent(void)
crypto_free_tfm(tfm);
}
void
test_aes(void)
{
unsigned int ret, i;
unsigned int tsize;
char *p, *q;
struct crypto_tfm *tfm;
char *key;
struct aes_tv *aes_tv;
struct scatterlist sg[1];
printk("\ntesting aes encryption\n");
tsize = sizeof (aes_enc_tv_template);
if (tsize > TVMEMSIZE) {
printk("template (%u) too big for tvmem (%u)\n", tsize,
TVMEMSIZE);
return;
}
memcpy(tvmem, aes_enc_tv_template, tsize);
aes_tv = (void *) tvmem;
tfm = crypto_alloc_tfm("aes", 0);
if (tfm == NULL) {
printk("failed to load transform for aes (default ecb)\n");
return;
}
for (i = 0; i < AES_ENC_TEST_VECTORS; i++) {
printk("test %u (%d bit key):\n",
i + 1, aes_tv[i].keylen * 8);
key = aes_tv[i].key;
ret = crypto_cipher_setkey(tfm, key, aes_tv[i].keylen);
if (ret) {
printk("setkey() failed flags=%x\n", tfm->crt_flags);
if (!aes_tv[i].fail)
goto out;
}
p = aes_tv[i].plaintext;
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = aes_tv[i].plen;
ret = crypto_cipher_encrypt(tfm, sg, 1);
if (ret) {
printk("encrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
}
q = kmap(sg[0].page) + sg[0].offset;
hexdump(q, aes_tv[i].rlen);
printk("%s\n", memcmp(q, aes_tv[i].result, aes_tv[i].rlen) ?
"fail" : "pass");
}
printk("\ntesting aes decryption\n");
tsize = sizeof (aes_dec_tv_template);
if (tsize > TVMEMSIZE) {
printk("template (%u) too big for tvmem (%u)\n", tsize,
TVMEMSIZE);
return;
}
memcpy(tvmem, aes_dec_tv_template, tsize);
aes_tv = (void *) tvmem;
for (i = 0; i < AES_DEC_TEST_VECTORS; i++) {
printk("test %u (%d bit key):\n",
i + 1, aes_tv[i].keylen * 8);
key = aes_tv[i].key;
ret = crypto_cipher_setkey(tfm, key, aes_tv[i].keylen);
if (ret) {
printk("setkey() failed flags=%x\n", tfm->crt_flags);
if (!aes_tv[i].fail)
goto out;
}
p = aes_tv[i].plaintext;
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = aes_tv[i].plen;
ret = crypto_cipher_decrypt(tfm, sg, 1);
if (ret) {
printk("decrypt() failed flags=%x\n", tfm->crt_flags);
goto out;
}
q = kmap(sg[0].page) + sg[0].offset;
hexdump(q, aes_tv[i].rlen);
printk("%s\n", memcmp(q, aes_tv[i].result, aes_tv[i].rlen) ?
"fail" : "pass");
}
out:
crypto_free_tfm(tfm);
}
static void
test_available(void)
{
......@@ -2011,6 +2116,7 @@ do_test(void)
test_blowfish();
test_twofish();
test_serpent();
test_aes();
#ifdef CONFIG_CRYPTO_HMAC
test_hmac_md5();
test_hmac_sha1();
......@@ -2054,6 +2160,10 @@ do_test(void)
test_serpent();
break;
case 10:
test_aes();
break;
#ifdef CONFIG_CRYPTO_HMAC
case 100:
test_hmac_md5();
......
......@@ -1480,4 +1480,97 @@ struct serpent_tv serpent_dec_tv_template[] =
}
};
/*
 * AES test vectors.
 */
#define AES_ENC_TEST_VECTORS 3
#define AES_DEC_TEST_VECTORS 3

/* One AES known-answer test vector (single 16-byte ECB block). */
struct aes_tv {
	unsigned int keylen;	/* key length in bytes: 16, 24 or 32 */
	unsigned int plen;	/* input length in bytes */
	unsigned int rlen;	/* expected result length in bytes */
	int fail;		/* nonzero if setkey() is expected to fail */
	char key[32];		/* key material, first keylen bytes used */
	char iv[8];		/* unused by the ECB vectors below */
	char plaintext[16];	/* input (holds ciphertext for dec vectors) */
	char result[16];	/* expected output */
};
/* Encryption known-answer vectors; key/plaintext/ciphertext triples. */
struct aes_tv aes_enc_tv_template[] = {
	/* From FIPS-197 */
	{
		/* AES-128 */
		16, 16, 16, 0,
		{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
		{ 0 },
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		  0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
		{ 0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
		  0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a },
	},
	{
		/* AES-192 */
		24, 16, 16, 0,
		{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		  0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 },
		{ 0 },
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		  0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
		{ 0xdd, 0xa9, 0x7c, 0xa4, 0x86, 0x4c, 0xdf, 0xe0,
		  0x6e, 0xaf, 0x70, 0xa0, 0xec, 0x0d, 0x71, 0x91 },
	},
	{
		/* AES-256 */
		32, 16, 16, 0,
		{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		  0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
		  0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
		{ 0 },
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		  0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
		{ 0x8e, 0xa2, 0xb7, 0xca, 0x51, 0x67, 0x45, 0xbf,
		  0xea, 0xfc, 0x49, 0x90, 0x4b, 0x49, 0x60, 0x89 },
	},
};
/* Decryption vectors: ciphertext is placed in the plaintext field and
 * the expected recovered plaintext in the result field. */
struct aes_tv aes_dec_tv_template[] = {
	/* From FIPS-197 */
	{
		/* AES-128 */
		16, 16, 16, 0,
		{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
		{ 0 },
		{ 0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
		  0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a },
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		  0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
	},
	{
		/* AES-192 */
		24, 16, 16, 0,
		{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		  0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 },
		{ 0 },
		{ 0xdd, 0xa9, 0x7c, 0xa4, 0x86, 0x4c, 0xdf, 0xe0,
		  0x6e, 0xaf, 0x70, 0xa0, 0xec, 0x0d, 0x71, 0x91 },
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		  0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
	},
	{
		/* AES-256 */
		32, 16, 16, 0,
		{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		  0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
		  0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
		{ 0 },
		{ 0x8e, 0xa2, 0xb7, 0xca, 0x51, 0x67, 0x45, 0xbf,
		  0xea, 0xfc, 0x49, 0x90, 0x4b, 0x49, 0x60, 0x89 },
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		  0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
	},
};
#endif /* _CRYPTO_TCRYPT_H */
......@@ -242,17 +242,25 @@ struct sadb_x_ipsecrequest {
#define SADB_SATYPE_MAX 9
/* Authentication algorithms */
#define SADB_AALG_NONE 0
#define SADB_AALG_MD5HMAC 2
#define SADB_AALG_SHA1HMAC 3
#define SADB_AALG_MAX 3
#define SADB_AALG_NONE 0
#define SADB_AALG_MD5HMAC 2
#define SADB_AALG_SHA1HMAC 3
#define SADB_X_AALG_SHA2_256HMAC 5
#define SADB_X_AALG_SHA2_384HMAC 6
#define SADB_X_AALG_SHA2_512HMAC 7
#define SADB_X_AALG_RIPEMD160HMAC 8
#define SADB_X_AALG_NULL 251 /* kame */
#define SADB_AALG_MAX 251
/* Encryption algorithms */
#define SADB_EALG_NONE 0
#define SADB_EALG_DESCBC 1
#define SADB_EALG_3DESCBC 2
#define SADB_EALG_NULL 11
#define SADB_EALG_MAX 11
#define SADB_EALG_NONE 0
#define SADB_EALG_DESCBC 1
#define SADB_EALG_3DESCBC 2
#define SADB_X_EALG_CASTCBC 6
#define SADB_X_EALG_BLOWFISHCBC 7
#define SADB_EALG_NULL 11
#define SADB_X_EALG_AESCBC 12
#define SADB_EALG_MAX 12
/* Extension Header values */
#define SADB_EXT_RESERVED 0
......
......@@ -637,8 +637,7 @@ DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
#define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field)
#define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field)
extern void tcp_put_port(struct sock *sk);
extern void __tcp_put_port(struct sock *sk);
extern __inline__ void tcp_put_port(struct sock *sk);
extern void tcp_inherit_port(struct sock *sk, struct sock *child);
extern void tcp_v4_err(struct sk_buff *skb, u32);
......
#ifndef _NET_XFRM_H
#define _NET_XFRM_H
#include <linux/xfrm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <net/dst.h>
#include <net/route.h>
#define XFRM_ALIGN8(len) (((len) + 7) & ~7)
extern struct semaphore xfrm_cfg_sem;
/* Organization of SPD aka "XFRM rules"
......@@ -347,6 +353,29 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
}
}
/*
* xfrm algorithm information
*/
struct xfrm_algo_auth_info {
u16 icv_truncbits;
u16 icv_fullbits;
};
struct xfrm_algo_encr_info {
u16 blockbits;
u16 defkeybits;
};
struct xfrm_algo_desc {
char *name;
u8 available:1;
union {
struct xfrm_algo_auth_info auth;
struct xfrm_algo_encr_info encr;
} uinfo;
struct sadb_alg desc;
};
extern void xfrm_state_init(void);
extern void xfrm_input_init(void);
extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
......@@ -385,3 +414,15 @@ extern wait_queue_head_t km_waitq;
extern void km_warn_expired(struct xfrm_state *x);
extern void km_expired(struct xfrm_state *x);
extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *pol);
extern void xfrm_probe_algs(void);
extern int xfrm_count_auth_supported(void);
extern int xfrm_count_enc_supported(void);
extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name);
extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name);
#endif /* _NET_XFRM_H */
......@@ -225,8 +225,8 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
/*
* Slab constructor for a skb head.
*/
static inline void skb_headerinit(void *p, kmem_cache_t *cache,
unsigned long flags)
static void skb_headerinit(void *p, kmem_cache_t *cache,
unsigned long flags)
{
struct sk_buff *skb = p;
......
......@@ -22,4 +22,4 @@ obj-$(CONFIG_IP_PNP) += ipconfig.o
obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
obj-y += xfrm_policy.o xfrm_state.o xfrm_input.o
obj-y += xfrm_policy.o xfrm_state.o xfrm_input.o xfrm_algo.o
......@@ -7,26 +7,31 @@
#include <net/icmp.h>
#include <asm/scatterlist.h>
#define AH_HLEN_NOICV 12
typedef void (icv_update_fn_t)(struct crypto_tfm *,
struct scatterlist *, unsigned int);
struct ah_data
{
u8 *key;
int key_len;
u8 *work_digest;
int digest_len;
u8 *work_icv;
int icv_full_len;
int icv_trunc_len;
void (*digest)(struct ah_data*,
struct sk_buff *skb,
u8 *digest);
void (*icv)(struct ah_data*,
struct sk_buff *skb, u8 *icv);
struct crypto_tfm *tfm;
};
/* Clear mutable options and find final destination to substitute
* into IP header for digest calculation. Options are already checked
* into IP header for icv calculation. Options are already checked
* for validity, so paranoia is not required. */
int ip_clear_mutable_options(struct iphdr *iph, u32 *daddr)
static int ip_clear_mutable_options(struct iphdr *iph, u32 *daddr)
{
unsigned char * optptr = (unsigned char*)(iph+1);
int l = iph->ihl*4 - 20;
......@@ -66,7 +71,8 @@ int ip_clear_mutable_options(struct iphdr *iph, u32 *daddr)
return 0;
}
void skb_ah_walk(const struct sk_buff *skb, struct crypto_tfm *tfm)
static void skb_ah_walk(const struct sk_buff *skb,
struct crypto_tfm *tfm, icv_update_fn_t icv_update)
{
int offset = 0;
int len = skb->len;
......@@ -83,7 +89,7 @@ void skb_ah_walk(const struct sk_buff *skb, struct crypto_tfm *tfm)
sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg.length = copy;
crypto_hmac_update(tfm, &sg, 1);
icv_update(tfm, &sg, 1);
if ((len -= copy) == 0)
return;
......@@ -106,7 +112,7 @@ void skb_ah_walk(const struct sk_buff *skb, struct crypto_tfm *tfm)
sg.offset = frag->page_offset + offset-start;
sg.length = copy;
crypto_hmac_update(tfm, &sg, 1);
icv_update(tfm, &sg, 1);
if (!(len -= copy))
return;
......@@ -127,7 +133,7 @@ void skb_ah_walk(const struct sk_buff *skb, struct crypto_tfm *tfm)
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
skb_ah_walk(list, tfm);
skb_ah_walk(list, tfm, icv_update);
if ((len -= copy) == 0)
return;
offset += copy;
......@@ -144,14 +150,14 @@ ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data)
{
struct crypto_tfm *tfm = ahp->tfm;
memset(auth_data, 0, ahp->digest_len);
memset(auth_data, 0, ahp->icv_trunc_len);
crypto_hmac_init(tfm, ahp->key, &ahp->key_len);
skb_ah_walk(skb, tfm);
crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_digest);
memcpy(auth_data, ahp->work_digest, ahp->digest_len);
skb_ah_walk(skb, tfm, crypto_hmac_update);
crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv);
memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len);
}
int ah_output(struct sk_buff *skb)
static int ah_output(struct sk_buff *skb)
{
int err;
struct dst_entry *dst = skb->dst;
......@@ -210,11 +216,13 @@ int ah_output(struct sk_buff *skb)
ah->nexthdr = iph->protocol;
}
ahp = x->data;
ah->hdrlen = (((ahp->digest_len + 12 + 7)&~7)>>2)-2;
ah->hdrlen = (XFRM_ALIGN8(ahp->icv_trunc_len +
AH_HLEN_NOICV) >> 2) - 2;
ah->reserved = 0;
ah->spi = x->id.spi;
ah->seq_no = htonl(++x->replay.oseq);
ahp->digest(ahp, skb, ah->auth_data);
ahp->icv(ahp, skb, ah->auth_data);
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
if (x->props.mode) {
......@@ -246,6 +254,7 @@ int ah_output(struct sk_buff *skb)
int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ah_hlen;
struct iphdr *iph;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
......@@ -255,13 +264,14 @@ int ah_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
ah = (struct ip_auth_hdr*)skb->data;
ahp = x->data;
if (((ah->hdrlen+2)<<2) != ((ahp->digest_len + 12 + 7)&~7))
ah_hlen = (ah->hdrlen + 2) << 2;
if (ah_hlen != XFRM_ALIGN8(ahp->icv_full_len + AH_HLEN_NOICV) &&
ah_hlen != XFRM_ALIGN8(ahp->icv_trunc_len + AH_HLEN_NOICV))
goto out;
if (!pskb_may_pull(skb, (ah->hdrlen+2)<<2))
if (!pskb_may_pull(skb, ah_hlen))
goto out;
/* We are going to _remove_ AH header to keep sockets happy,
......@@ -285,17 +295,18 @@ int ah_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
{
u8 auth_data[ahp->digest_len];
memcpy(auth_data, ah->auth_data, ahp->digest_len);
u8 auth_data[ahp->icv_trunc_len];
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
skb_push(skb, skb->data - skb->nh.raw);
ahp->digest(ahp, skb, ah->auth_data);
if (memcmp(ah->auth_data, auth_data, ahp->digest_len)) {
ahp->icv(ahp, skb, ah->auth_data);
if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
x->stats.integrity_failed++;
goto out;
}
}
((struct iphdr*)work_buf)->protocol = ah->nexthdr;
skb->nh.raw = skb_pull(skb, (ah->hdrlen+2)<<2);
skb->nh.raw = skb_pull(skb, ah_hlen);
memcpy(skb->nh.raw, work_buf, iph->ihl*4);
skb->nh.iph->tot_len = htons(skb->len);
skb_pull(skb, skb->nh.iph->ihl*4);
......@@ -325,12 +336,13 @@ void ah4_err(struct sk_buff *skb, u32 info)
xfrm_state_put(x);
}
int ah_init_state(struct xfrm_state *x, void *args)
static int ah_init_state(struct xfrm_state *x, void *args)
{
struct ah_data *ahp = NULL;
struct xfrm_algo_desc *aalg_desc;
if (x->aalg == NULL || x->aalg->alg_key_len == 0 ||
x->aalg->alg_key_len > 512)
/* null auth can use a zero length key */
if (x->aalg->alg_key_len > 512)
goto error;
ahp = kmalloc(sizeof(*ahp), GFP_KERNEL);
......@@ -344,13 +356,33 @@ int ah_init_state(struct xfrm_state *x, void *args)
ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
if (!ahp->tfm)
goto error;
ahp->digest = ah_hmac_digest;
ahp->digest_len = 12;
ahp->work_digest = kmalloc(crypto_tfm_alg_digestsize(ahp->tfm),
GFP_KERNEL);
if (!ahp->work_digest)
ahp->icv = ah_hmac_digest;
/*
* Lookup the algorithm description maintained by pfkey,
* verify crypto transform properties, and store information
* we need for AH processing. This lookup cannot fail here
* after a successful crypto_alloc_tfm().
*/
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name);
BUG_ON(!aalg_desc);
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_tfm_alg_digestsize(ahp->tfm)) {
printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm),
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
if (!ahp->work_icv)
goto error;
x->props.header_len = (12 + ahp->digest_len + 7)&~7;
x->props.header_len = XFRM_ALIGN8(ahp->icv_trunc_len + AH_HLEN_NOICV);
if (x->props.mode)
x->props.header_len += 20;
x->data = ahp;
......@@ -359,8 +391,8 @@ int ah_init_state(struct xfrm_state *x, void *args)
error:
if (ahp) {
if (ahp->work_digest)
kfree(ahp->work_digest);
if (ahp->work_icv)
kfree(ahp->work_icv);
if (ahp->tfm)
crypto_free_tfm(ahp->tfm);
kfree(ahp);
......@@ -368,13 +400,13 @@ int ah_init_state(struct xfrm_state *x, void *args)
return -EINVAL;
}
void ah_destroy(struct xfrm_state *x)
static void ah_destroy(struct xfrm_state *x)
{
struct ah_data *ahp = x->data;
if (ahp->work_digest) {
kfree(ahp->work_digest);
ahp->work_digest = NULL;
if (ahp->work_icv) {
kfree(ahp->work_icv);
ahp->work_icv = NULL;
}
if (ahp->tfm) {
crypto_free_tfm(ahp->tfm);
......@@ -399,7 +431,7 @@ static struct inet_protocol ah4_protocol = {
.no_policy = 1,
};
int __init ah4_init(void)
static int __init ah4_init(void)
{
SET_MODULE_OWNER(&ah_type);
if (xfrm_register_type(&ah_type) < 0) {
......
......@@ -137,12 +137,12 @@ static __inline__ struct fib_node * fz_chain(fn_key_t key, struct fn_zone *fz)
return fz->fz_hash[fn_hash(key, fz).datum];
}
extern __inline__ int fn_key_eq(fn_key_t a, fn_key_t b)
static __inline__ int fn_key_eq(fn_key_t a, fn_key_t b)
{
return a.datum == b.datum;
}
extern __inline__ int fn_key_leq(fn_key_t a, fn_key_t b)
static __inline__ int fn_key_leq(fn_key_t a, fn_key_t b)
{
return a.datum <= b.datum;
}
......@@ -703,7 +703,7 @@ FTprint("tb(%d)_delete: %d %08x/%d %d\n", tb->tb_id, r->rtm_type, rta->rta_dst ?
return -ESRCH;
}
extern __inline__ int
static __inline__ int
fn_flush_list(struct fib_node ** fp, int z, struct fn_hash *table)
{
int found = 0;
......
......@@ -185,7 +185,7 @@ static __inline__ void ipq_put(struct ipq *ipq)
/* Kill ipq entry. It is not destroyed immediately,
* because caller (and someone more) holds reference count.
*/
static __inline__ void ipq_kill(struct ipq *ipq)
static void ipq_kill(struct ipq *ipq)
{
if (del_timer(&ipq->timer))
atomic_dec(&ipq->refcnt);
......
......@@ -198,7 +198,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
return -EINVAL;
}
__inline__ int ip_finish_output(struct sk_buff *skb)
int ip_finish_output(struct sk_buff *skb)
{
struct net_device *dev = skb->dst->dev;
......@@ -685,7 +685,7 @@ skb_can_coalesce(struct sk_buff *skb, int i, struct page *page, int off)
return 0;
}
static inline void
static void
skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
......
......@@ -201,7 +201,7 @@ struct rt_cache_stat *rt_cache_stat;
static int rt_intern_hash(unsigned hash, struct rtable *rth,
struct rtable **res);
static __inline__ unsigned rt_hash_code(u32 daddr, u32 saddr, u8 tos)
static unsigned rt_hash_code(u32 daddr, u32 saddr, u8 tos)
{
unsigned hash = ((daddr & 0xF0F0F0F0) >> 4) |
((daddr & 0x0F0F0F0F) << 4);
......@@ -430,7 +430,7 @@ static __inline__ int rt_valuable(struct rtable *rth)
rth->u.dst.expires;
}
static __inline__ int rt_may_expire(struct rtable *rth, int tmo1, int tmo2)
static int rt_may_expire(struct rtable *rth, int tmo1, int tmo2)
{
int age;
int ret = 0;
......
......@@ -412,7 +412,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_opt *tp, struct sk_b
* To save cycles in the RFC 1323 implementation it was better to break
* it up into three procedures. -- erics
*/
static __inline__ void tcp_rtt_estimator(struct tcp_opt *tp, __u32 mrtt)
static void tcp_rtt_estimator(struct tcp_opt *tp, __u32 mrtt)
{
long m = mrtt; /* RTT */
......@@ -2243,14 +2243,14 @@ static __inline__ int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr
return 1;
}
extern __inline__ void
static __inline__ void
tcp_store_ts_recent(struct tcp_opt *tp)
{
tp->ts_recent = tp->rcv_tsval;
tp->ts_recent_stamp = xtime.tv_sec;
}
extern __inline__ void
static __inline__ void
tcp_replace_ts_recent(struct tcp_opt *tp, u32 seq)
{
if (tp->saw_tstamp && !after(seq, tp->rcv_wup)) {
......@@ -2309,7 +2309,7 @@ static int tcp_disordered_ack(struct tcp_opt *tp, struct sk_buff *skb)
(s32)(tp->ts_recent - tp->rcv_tsval) <= (tp->rto*1024)/HZ);
}
extern __inline__ int tcp_paws_discard(struct tcp_opt *tp, struct sk_buff *skb)
static __inline__ int tcp_paws_discard(struct tcp_opt *tp, struct sk_buff *skb)
{
return ((s32)(tp->ts_recent - tp->rcv_tsval) > TCP_PAWS_WINDOW &&
xtime.tv_sec < tp->ts_recent_stamp + TCP_PAWS_24DAYS &&
......@@ -3155,7 +3155,7 @@ static __inline__ void tcp_data_snd_check(struct sock *sk)
/*
* Check if sending an ack is needed.
*/
static __inline__ void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
{
struct tcp_opt *tp = tcp_sk(sk);
......@@ -3265,7 +3265,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
}
/* This is the 'fast' part of urgent handling. */
static inline void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
{
struct tcp_opt *tp = tcp_sk(sk);
......
......@@ -286,7 +286,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
/* Get rid of any references to a local port held by the
* given sock.
*/
__inline__ void __tcp_put_port(struct sock *sk)
static void __tcp_put_port(struct sock *sk)
{
struct inet_opt *inet = inet_sk(sk);
struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
......@@ -308,7 +308,7 @@ __inline__ void __tcp_put_port(struct sock *sk)
spin_unlock(&head->lock);
}
void tcp_put_port(struct sock *sk)
__inline__ void tcp_put_port(struct sock *sk)
{
local_bh_disable();
__tcp_put_port(sk);
......
/*
* xfrm algorithm interface
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <net/xfrm.h>
/*
* Algorithms supported by IPsec. These entries contain properties which
* are used in key negotiation and xfrm processing, and are used to verify
* that instantiated crypto transforms have correct parameters for IPsec
* purposes.
*/
/*
 * Authentication (AH/ICV) algorithm table.  icv_truncbits is the number
 * of digest bits actually carried on the wire; icv_fullbits is the full
 * digest size of the underlying transform.  The .desc member holds the
 * PF_KEYv2 sadb_alg parameters used during key negotiation.
 */
static struct xfrm_algo_desc aalg_list[] = {
	{
	.name = "digest_null",
	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
	},
	{
	.name = "md5",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
	},
	{
	.name = "sha1",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
	},
	{
	.name = "sha256",
	.uinfo = {
		.auth = {
			.icv_truncbits = 128,
			.icv_fullbits = 256,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
	},
	{
	.name = "ripemd160",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
	},
};
/*
 * Encryption (ESP) algorithm table.  blockbits is the cipher block size
 * and defkeybits the default key length, both in bits.  The .desc member
 * holds the PF_KEYv2 sadb_alg negotiation parameters.
 */
static struct xfrm_algo_desc ealg_list[] = {
	{
	.name = "cipher_null",
	.uinfo = {
		.encr = {
			.blockbits = 8,
			.defkeybits = 0,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
	},
	{
	.name = "des",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 64,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
	},
	{
	.name = "des3_ede",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 192,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
	},
	{
	.name = "cast128",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
	},
	{
	.name = "blowfish",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
	},
	{
	.name = "aes",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		/* NOTE(review): ivlen is 8 like the 64-bit block ciphers,
		 * although the AES block (and CBC IV) is 16 bytes — confirm
		 * the intended unit/value for sadb_alg_ivlen here. */
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
	},
};
static inline int aalg_entries(void)
{
return sizeof(aalg_list) / sizeof(aalg_list[0]);
}
static inline int ealg_entries(void)
{
return sizeof(ealg_list) / sizeof(ealg_list[0]);
}
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
int i;
for (i = 0; i < aalg_entries(); i++) {
if (aalg_list[i].desc.sadb_alg_id == alg_id) {
if (aalg_list[i].available)
return &aalg_list[i];
else
break;
}
}
return NULL;
}
struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
int i;
for (i = 0; i < ealg_entries(); i++) {
if (ealg_list[i].desc.sadb_alg_id == alg_id) {
if (ealg_list[i].available)
return &ealg_list[i];
else
break;
}
}
return NULL;
}
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name)
{
int i;
if (!name)
return NULL;
for (i=0; i < aalg_entries(); i++) {
if (strcmp(name, aalg_list[i].name) == 0) {
if (aalg_list[i].available)
return &aalg_list[i];
else
break;
}
}
return NULL;
}
struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name)
{
int i;
if (!name)
return NULL;
for (i=0; i < ealg_entries(); i++) {
if (strcmp(name, ealg_list[i].name) == 0) {
if (ealg_list[i].available)
return &ealg_list[i];
else
break;
}
}
return NULL;
}
/* Return the idx'th authentication algorithm descriptor, or NULL when
 * idx is out of range.  Availability is NOT checked here. */
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	return (idx < aalg_entries()) ? &aalg_list[idx] : NULL;
}
/* Return the idx'th encryption algorithm descriptor, or NULL when idx
 * is out of range.  Availability is NOT checked here. */
struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	return (idx < ealg_entries()) ? &ealg_list[idx] : NULL;
}
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called
 * by pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
	int i;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		int found = crypto_alg_available(aalg_list[i].name, 0);

		if (aalg_list[i].available != found)
			aalg_list[i].available = found;
	}

	for (i = 0; i < ealg_entries(); i++) {
		int found = crypto_alg_available(ealg_list[i].name, 0);

		if (ealg_list[i].available != found)
			ealg_list[i].available = found;
	}
}
/* Count the authentication algorithms currently marked available. */
int xfrm_count_auth_supported(void)
{
	int i;
	int count = 0;

	for (i = 0; i < aalg_entries(); i++)
		count += aalg_list[i].available;	/* 1-bit flag: 0 or 1 */
	return count;
}
/* Count the encryption algorithms currently marked available. */
int xfrm_count_enc_supported(void)
{
	int i;
	int count = 0;

	for (i = 0; i < ealg_entries(); i++)
		count += ealg_list[i].available;	/* 1-bit flag: 0 or 1 */
	return count;
}
......@@ -917,7 +917,7 @@ xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int idx)
return -1;
}
static inline void
static void
_decode_session(struct sk_buff *skb, struct flowi *fl)
{
struct iphdr *iph = skb->nh.iph;
......
This diff is collapsed.
......@@ -323,6 +323,15 @@ EXPORT_SYMBOL(xfrm_policy_flush);
EXPORT_SYMBOL(xfrm_policy_byid);
EXPORT_SYMBOL(xfrm_policy_list);
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
#if defined (CONFIG_IPV6_MODULE) || defined (CONFIG_IP_SCTP_MODULE)
/* inet functions common to v4 and v6 */
......@@ -388,7 +397,6 @@ EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_bucket_create);
EXPORT_SYMBOL(__tcp_put_port);
EXPORT_SYMBOL(tcp_put_port);
EXPORT_SYMBOL(tcp_inherit_port);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
......
......@@ -153,7 +153,7 @@ static inline int unix_may_send(unix_socket *sk, unix_socket *osk)
return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
}
static inline unix_socket * unix_peer_get(unix_socket *s)
static unix_socket *unix_peer_get(unix_socket *s)
{
unix_socket *peer;
......
......@@ -92,7 +92,7 @@ static unix_socket *gc_current=GC_HEAD; /* stack of objects to mark */
atomic_t unix_tot_inflight = ATOMIC_INIT(0);
extern inline unix_socket *unix_get_socket(struct file *filp)
static unix_socket *unix_get_socket(struct file *filp)
{
unix_socket * u_sock = NULL;
struct inode *inode = filp->f_dentry->d_inode;
......@@ -141,19 +141,19 @@ void unix_notinflight(struct file *fp)
* Garbage Collector Support Functions
*/
extern inline unix_socket *pop_stack(void)
static inline unix_socket *pop_stack(void)
{
unix_socket *p=gc_current;
gc_current = unix_sk(p)->gc_tree;
return p;
}
extern inline int empty_stack(void)
static inline int empty_stack(void)
{
return gc_current == GC_HEAD;
}
extern inline void maybe_unmark_and_push(unix_socket *x)
static void maybe_unmark_and_push(unix_socket *x)
{
struct unix_sock *u = unix_sk(x);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment