net/core/skbuff.c

include/linux/skbuff.h
  - remove spurious spaces and tabs at end of lines
  - make sure if, while, for, switch have a space before the opening '('
  - make sure no line has more than 80 chars
  - move initializations to the declaration line where possible
  - bitwise, logical and arithmetic operators have spaces before and after,
    improving readability of complex expressions
  - remove unneeded () in returns
  - use kdoc comments
  - other minor cleanups (a short before/after sketch follows this list)
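To make the rules concrete, a hypothetical before/after fragment (skb_example is an invented function, not part of the patch):

	/* Before: crowded operators, redundant parens, split initialization */
	static inline int skb_example(struct sk_buff *skb)
	{
		int len;
		len = skb->len-skb->data_len;
		return (len);
	}

	/* After: spaces around operators, initialization on the declaration
	 * line, no parentheses around the return value */
	static inline int skb_example(struct sk_buff *skb)
	{
		int len = skb->len - skb->data_len;

		return len;
	}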

Sizes:
Before:
   text    data     bss     dec     hex filename
   7088       8    2080    9176    23d8 net/core/skbuff.o
After:
   text    data     bss     dec     hex filename
   7056       4    2080    9140    23b4 net/core/skbuff.o
parent d0f0cde1
@@ -10,7 +10,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
 #ifndef _LINUX_SKBUFF_H
 #define _LINUX_SKBUFF_H
@@ -35,10 +35,13 @@
 #define CHECKSUM_HW 1
 #define CHECKSUM_UNNECESSARY 2
-#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_ORDER(X,ORDER) (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0))
-#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2))
+#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
+			   ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_ORDER(X, ORDER) (((PAGE_SIZE << (ORDER)) - (X) - \
+				  sizeof(struct skb_shared_info)) & \
+				  ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
+#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
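To see what the macros compute, a small worked example (values are illustrative only; SMP_CACHE_BYTES is assumed to be 32 and PAGE_SIZE 4096, common on i386):

	SKB_DATA_ALIGN(100)   = (100 + 31) & ~31 = 128
		/* payload size rounded up to a whole cache line */
	SKB_MAX_ORDER(128, 0) = (4096 - 128 - sizeof(struct skb_shared_info))
				& ~31
		/* largest cache-aligned payload that fits in one page
		   together with the shared info block */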
 /* A. Checksumming of received packets by device.
  *
@@ -79,7 +82,7 @@
  */
 #ifdef __i386__
-#define NET_CALLER(arg) (*(((void**)&arg)-1))
+#define NET_CALLER(arg) (*(((void **)&arg) - 1))
 #else
 #define NET_CALLER(arg) __builtin_return_address(0)
 #endif
@@ -97,8 +100,8 @@ struct nf_ct_info {
 struct sk_buff_head {
 	/* These two members must be first. */
-	struct sk_buff	* next;
-	struct sk_buff	* prev;
+	struct sk_buff	*next;
+	struct sk_buff	*prev;
 	__u32		qlen;
 	spinlock_t	lock;
@@ -110,8 +113,7 @@ struct sk_buff;
 typedef struct skb_frag_struct skb_frag_t;
-struct skb_frag_struct
-{
+struct skb_frag_struct {
 	struct page *page;
 	__u16 page_offset;
 	__u16 size;
@@ -127,19 +129,54 @@ struct skb_shared_info {
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
+/**
+ * struct sk_buff - socket buffer
+ * @next: Next buffer in list
+ * @prev: Previous buffer in list
+ * @list: List we are on
+ * @sk: Socket we are owned by
+ * @stamp: Time we arrived
+ * @dev: Device we arrived on/are leaving by
+ * @h: Transport layer header
+ * @nh: Network layer header
+ * @mac: Link layer header
+ * @dst: FIXME: Describe this field
+ * @cb: Control buffer. Free for use by every layer. Put private vars here
+ * @len: Length of actual data
+ * @data_len: Data length
+ * @csum: Checksum
+ * @__unused: Dead field, may be reused
+ * @cloned: Head may be cloned (check refcnt to be sure)
+ * @pkt_type: Packet class
+ * @ip_summed: Driver fed us an IP checksum
+ * @priority: Packet queueing priority
+ * @users: User count - see {datagram,tcp}.c
+ * @protocol: Packet protocol from driver
+ * @security: Security level of packet
+ * @truesize: Buffer size
+ * @head: Head of buffer
+ * @data: Data head pointer
+ * @tail: Tail pointer
+ * @end: End pointer
+ * @destructor: Destruct function
+ * @nfmark: Can be used for communication between hooks
+ * @nfcache: Cache info
+ * @nfct: Associated connection, if any
+ * @nf_debug: Netfilter debugging
+ * @tc_index: Traffic control index
+ */
 struct sk_buff {
 	/* These two members must be first. */
-	struct sk_buff	* next;			/* Next buffer in list */
-	struct sk_buff	* prev;			/* Previous buffer in list */
-	struct sk_buff_head * list;		/* List we are on */
-	struct sock	*sk;			/* Socket we are owned by */
-	struct timeval	stamp;			/* Time we arrived */
-	struct net_device *dev;			/* Device we arrived on/are leaving by */
+	struct sk_buff		*next;
+	struct sk_buff		*prev;
+	struct sk_buff_head	*list;
+	struct sock		*sk;
+	struct timeval		stamp;
+	struct net_device	*dev;
-	/* Transport layer header */
-	union
-	{
+	union {
 		struct tcphdr	*th;
 		struct udphdr	*uh;
 		struct icmphdr	*icmph;
@@ -149,72 +186,63 @@ struct sk_buff {
 		unsigned char	*raw;
 	} h;
-	/* Network layer header */
-	union
-	{
+	union {
 		struct iphdr	*iph;
 		struct ipv6hdr	*ipv6h;
 		struct arphdr	*arph;
 		struct ipxhdr	*ipxh;
 		unsigned char	*raw;
 	} nh;
-	/* Link layer header */
-	union
-	{
+	union {
 		struct ethhdr	*ethernet;
 		unsigned char	*raw;
 	} mac;
 	struct dst_entry *dst;
 	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
 	 */
 	char		cb[48];
-	unsigned int	len;			/* Length of actual data */
-	unsigned int	data_len;
-	unsigned int	csum;			/* Checksum */
-	unsigned char	__unused,		/* Dead field, may be reused */
-			cloned,			/* head may be cloned (check refcnt to be sure). */
-			pkt_type,		/* Packet class */
-			ip_summed;		/* Driver fed us an IP checksum */
-	__u32		priority;		/* Packet queueing priority */
-	atomic_t	users;			/* User count - see datagram.c,tcp.c */
-	unsigned short	protocol;		/* Packet protocol from driver. */
-	unsigned short	security;		/* Security level of packet */
-	unsigned int	truesize;		/* Buffer size */
-	unsigned char	*head;			/* Head of buffer */
-	unsigned char	*data;			/* Data head pointer */
-	unsigned char	*tail;			/* Tail pointer */
-	unsigned char	*end;			/* End pointer */
-	void		(*destructor)(struct sk_buff *);	/* Destruct function */
+	unsigned int	len,
+			data_len,
+			csum;
+	unsigned char	__unused,
+			cloned,
+			pkt_type,
+			ip_summed;
+	__u32		priority;
+	atomic_t	users;
+	unsigned short	protocol,
+			security;
+	unsigned int	truesize;
+	unsigned char	*head,
+			*data,
+			*tail,
+			*end;
+	void		(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
-	/* Can be used for communication between hooks. */
-	unsigned long	nfmark;
-	/* Cache info */
-	__u32		nfcache;
-	/* Associated connection, if any */
-	struct nf_ct_info *nfct;
+	unsigned long	nfmark;
+	__u32		nfcache;
+	struct nf_ct_info *nfct;
 #ifdef CONFIG_NETFILTER_DEBUG
 	unsigned int nf_debug;
 #endif
-#endif /*CONFIG_NETFILTER*/
+#endif /* CONFIG_NETFILTER */
 #if defined(CONFIG_HIPPI)
-	union{
+	union {
 		__u32	ifield;
 	} private;
 #endif
 #ifdef CONFIG_NET_SCHED
 	__u32		tc_index;		/* traffic control index */
 #endif
 };
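The four buffer pointers always satisfy head <= data <= tail <= end; a hedged sketch of the linear-buffer layout (illustrative, not part of the patch):

	head .... data ......... tail ........ end
	|<-headroom->|<-- data -->|<-tailroom->|

	skb_headroom(skb) == skb->data - skb->head
	skb_tailroom(skb) == skb->end - skb->tail	/* 0 if nonlinear */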
@@ -229,21 +257,24 @@ struct sk_buff {
 #include <asm/system.h>
 extern void	__kfree_skb(struct sk_buff *skb);
-extern struct sk_buff *	alloc_skb(unsigned int size, int priority);
+extern struct sk_buff *alloc_skb(unsigned int size, int priority);
 extern void	kfree_skbmem(struct sk_buff *skb);
-extern struct sk_buff *	skb_clone(struct sk_buff *skb, int priority);
-extern struct sk_buff *	skb_copy(const struct sk_buff *skb, int priority);
-extern struct sk_buff *	pskb_copy(struct sk_buff *skb, int gfp_mask);
-extern int		pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
-extern struct sk_buff *	skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
-extern struct sk_buff *	skb_copy_expand(const struct sk_buff *skb,
-					int newheadroom,
-					int newtailroom,
-					int priority);
+extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
+extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
+extern int	       pskb_expand_head(struct sk_buff *skb,
+					int nhead, int ntail, int gfp_mask);
+extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+					    unsigned int headroom);
+extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+				       int newheadroom, int newtailroom,
+				       int priority);
 #define dev_kfree_skb(a)	kfree_skb(a)
-extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
-extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
+extern void skb_over_panic(struct sk_buff *skb, int len,
+			   void *here);
+extern void skb_under_panic(struct sk_buff *skb, int len,
+			    void *here);
 /* Internal */
 #define skb_shinfo(SKB)	((struct skb_shared_info *)((SKB)->end))
@@ -254,10 +285,9 @@ extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
  *
  *	Returns true if the queue is empty, false otherwise.
  */
 static inline int skb_queue_empty(struct sk_buff_head *list)
 {
-	return (list->next == (struct sk_buff *) list);
+	return list->next == (struct sk_buff *)list;
 }
 /**
@@ -267,7 +297,6 @@ static inline int skb_queue_empty(struct sk_buff_head *list)
  *	Makes another reference to a socket buffer and returns a pointer
  *	to the buffer.
  */
 static inline struct sk_buff *skb_get(struct sk_buff *skb)
 {
 	atomic_inc(&skb->users);
@@ -275,10 +304,10 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
 }
 /*
- *	If users==1, we are the only owner and are can avoid redundant
+ *	If users == 1, we are the only owner and are can avoid redundant
  *	atomic change.
  */
 /**
  *	kfree_skb - free an sk_buff
  *	@skb: buffer to free
@@ -286,7 +315,6 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
  *	Drop a reference to the buffer and free it if the usage count has
  *	hit zero.
  */
 static inline void kfree_skb(struct sk_buff *skb)
 {
 	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
@@ -297,7 +325,7 @@ static inline void kfree_skb(struct sk_buff *skb)
 static inline void kfree_skb_fast(struct sk_buff *skb)
 {
 	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
 		kfree_skbmem(skb);
 }
 /**
@@ -308,7 +336,6 @@ static inline void kfree_skb_fast(struct sk_buff *skb)
  *	one of multiple shared copies of the buffer. Cloned buffers are
  *	shared data so must not be written to under normal circumstances.
  */
 static inline int skb_cloned(struct sk_buff *skb)
 {
 	return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
@@ -321,17 +348,16 @@ static inline int skb_cloned(struct sk_buff *skb)
  *	Returns true if more than one person has a reference to this
  *	buffer.
  */
 static inline int skb_shared(struct sk_buff *skb)
 {
-	return (atomic_read(&skb->users) != 1);
+	return atomic_read(&skb->users) != 1;
 }
 /**
  *	skb_share_check - check if buffer is shared and if so clone it
  *	@skb: buffer to check
  *	@pri: priority for memory allocation
  *
  *	If the buffer is shared the buffer is cloned and the old copy
  *	drops a reference. A new clone with a single reference is returned.
  *	If the buffer is not shared the original buffer is returned. When
@@ -340,26 +366,23 @@ static inline int skb_shared(struct sk_buff *skb)
  *
  *	NULL is returned on a memory allocation failure.
  */
 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
 {
 	if (skb_shared(skb)) {
-		struct sk_buff *nskb;
-		nskb = skb_clone(skb, pri);
+		struct sk_buff *nskb = skb_clone(skb, pri);
 		kfree_skb(skb);
-		return nskb;
+		skb = nskb;
 	}
 	return skb;
 }
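In a receive path this is typically called before writing to a buffer that may be shared; a hedged usage sketch (error handling simplified):

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return;		/* clone failed; original was already freed */
	/* from here on we hold the only reference and may modify skb */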
 /*
  *	Copy shared buffers into a new sk_buff. We effectively do COW on
  *	packets to handle cases where we have a local reader and forward
  *	and a couple of other messy ones. The normal one is tcpdumping
  *	a packet thats being forwarded.
  */
 /**
  *	skb_unshare - make a copy of a shared buffer
  *	@skb: buffer to check
@@ -373,15 +396,14 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
  *
  *	%NULL is returned on a memory allocation failure.
  */
 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
 {
-	struct sk_buff *nskb;
-	if(!skb_cloned(skb))
-		return skb;
-	nskb=skb_copy(skb, pri);
-	kfree_skb(skb);		/* Free our shared copy */
-	return nskb;
+	if (skb_cloned(skb)) {
+		struct sk_buff *nskb = skb_copy(skb, pri);
+		kfree_skb(skb);	/* Free our shared copy */
+		skb = nskb;
+	}
+	return skb;
 }
 /**
@@ -397,7 +419,6 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
  *	The reference count is not incremented and the reference is therefore
  *	volatile. Use with caution.
  */
 static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
 {
 	struct sk_buff *list = ((struct sk_buff *)list_)->next;
@@ -419,7 +440,6 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
  *	The reference count is not incremented and the reference is therefore
  *	volatile. Use with caution.
  */
 static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
 {
 	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
@@ -432,19 +452,17 @@ static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
  *	skb_queue_len	- get queue length
  *	@list_: list to measure
  *
  *	Return the length of an &sk_buff queue.
  */
 static inline __u32 skb_queue_len(struct sk_buff_head *list_)
 {
-	return(list_->qlen);
+	return list_->qlen;
 }
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
-	list->prev = (struct sk_buff *)list;
-	list->next = (struct sk_buff *)list;
+	list->prev = list->next = (struct sk_buff *)list;
 	list->qlen = 0;
 }
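After initialization the head node points to itself in both directions, which is exactly what skb_queue_empty() tests for; a small hedged illustration:

	struct sk_buff_head q;

	skb_queue_head_init(&q);
	/* now q.next == q.prev == (struct sk_buff *)&q, hence: */
	if (!skb_queue_empty(&q) || skb_queue_len(&q) != 0)
		BUG();	/* cannot happen */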
@@ -464,9 +482,9 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
  *	and you must therefore hold required locks before calling it.
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+static inline void __skb_queue_head(struct sk_buff_head *list,
+				    struct sk_buff *newsk)
 {
 	struct sk_buff *prev, *next;
@@ -476,8 +494,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *n
 	next = prev->next;
 	newsk->next = next;
 	newsk->prev = prev;
-	next->prev = newsk;
-	prev->next = newsk;
+	next->prev = prev->next = newsk;
 }
@@ -491,9 +508,9 @@ static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *n
  *	safely.
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+static inline void skb_queue_head(struct sk_buff_head *list,
+				  struct sk_buff *newsk)
 {
 	unsigned long flags;
@@ -511,10 +528,9 @@ static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *new
  *	and you must therefore hold required locks before calling it.
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+static inline void __skb_queue_tail(struct sk_buff_head *list,
+				   struct sk_buff *newsk)
 {
 	struct sk_buff *prev, *next;
@@ -524,8 +540,7 @@ static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *n
 	prev = next->prev;
 	newsk->next = next;
 	newsk->prev = prev;
-	next->prev = newsk;
-	prev->next = newsk;
+	next->prev = prev->next = newsk;
 }
 /**
@@ -538,9 +553,9 @@ static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *n
  *	safely.
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+static inline void skb_queue_tail(struct sk_buff_head *list,
+				  struct sk_buff *newsk)
 {
 	unsigned long flags;
@@ -557,7 +572,6 @@ static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *new
  *	so must be used with appropriate locks held only. The head item is
  *	returned or %NULL if the list is empty.
  */
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev, *result;
@@ -566,13 +580,12 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 	next = prev->next;
 	result = NULL;
 	if (next != prev) {
 		result	 = next;
 		next	 = next->next;
 		list->qlen--;
 		next->prev = prev;
 		prev->next = next;
-		result->next = NULL;
-		result->prev = NULL;
+		result->next = result->prev = NULL;
 		result->list = NULL;
 	}
 	return result;
@@ -603,13 +616,12 @@ static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
  */
 static inline void __skb_insert(struct sk_buff *newsk,
-				struct sk_buff * prev, struct sk_buff *next,
-				struct sk_buff_head * list)
+				struct sk_buff *prev, struct sk_buff *next,
+				struct sk_buff_head *list)
 {
 	newsk->next = next;
 	newsk->prev = prev;
-	next->prev = newsk;
-	prev->next = newsk;
+	next->prev = prev->next = newsk;
 	newsk->list = list;
 	list->qlen++;
 }
@@ -666,17 +678,15 @@ static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff * next, * prev;
+	struct sk_buff *next, *prev;
 	list->qlen--;
 	next = skb->next;
 	prev = skb->prev;
-	skb->next = NULL;
-	skb->prev = NULL;
-	skb->list = NULL;
+	skb->next = skb->prev = NULL;
+	skb->list = NULL;
 	next->prev = prev;
 	prev->next = next;
 }
@@ -687,22 +697,21 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
  *
  *	Place a packet after a given packet in a list. The list locks are taken
  *	and this function is atomic with respect to other list locked calls
  *
  *	Works even without knowing the list it is sitting on, which can be
  *	handy at times. It also means that THE LIST MUST EXIST when you
  *	unlink. Thus a list must have its contents unlinked before it is
  *	destroyed.
  */
 static inline void skb_unlink(struct sk_buff *skb)
 {
 	struct sk_buff_head *list = skb->list;
-	if(list) {
+	if (list) {
 		unsigned long flags;
 		spin_lock_irqsave(&list->lock, flags);
-		if(skb->list == list)
+		if (skb->list == list)
 			__skb_unlink(skb, skb->list);
 		spin_unlock_irqrestore(&list->lock, flags);
 	}
@@ -718,10 +727,9 @@ static inline void skb_unlink(struct sk_buff *skb)
  *	so must be used with appropriate locks held only. The tail item is
  *	returned or %NULL if the list is empty.
  */
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek_tail(list);
 	if (skb)
 		__skb_unlink(skb, list);
 	return skb;
@@ -735,7 +743,6 @@ static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
  *	may be used safely with other locking list functions. The tail item is
  *	returned or %NULL if the list is empty.
  */
 static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
 {
 	unsigned long flags;
@@ -757,83 +764,81 @@ static inline int skb_headlen(const struct sk_buff *skb)
 	return skb->len - skb->data_len;
 }
-#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) BUG(); } while (0)
-#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) BUG(); } while (0)
-#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) BUG(); } while (0)
+#define SKB_PAGE_ASSERT(skb)	do { if (skb_shinfo(skb)->nr_frags) \
+					     BUG(); } while (0)
+#define SKB_FRAG_ASSERT(skb)	do { if (skb_shinfo(skb)->frag_list) \
+					     BUG(); } while (0)
+#define SKB_LINEAR_ASSERT(skb)	do { if (skb_is_nonlinear(skb)) \
+					     BUG(); } while (0)
 /*
  *	Add data to an sk_buff
  */
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
-	unsigned char *tmp=skb->tail;
+	unsigned char *tmp = skb->tail;
 	SKB_LINEAR_ASSERT(skb);
-	skb->tail+=len;
-	skb->len+=len;
+	skb->tail += len;
+	skb->len  += len;
 	return tmp;
 }
 /**
  *	skb_put - add data to a buffer
  *	@skb: buffer to use
  *	@len: amount of data to add
  *
  *	This function extends the used data area of the buffer. If this would
  *	exceed the total buffer size the kernel will panic. A pointer to the
  *	first byte of the extra data is returned.
  */
 static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
 {
-	unsigned char *tmp=skb->tail;
+	unsigned char *tmp = skb->tail;
 	SKB_LINEAR_ASSERT(skb);
-	skb->tail+=len;
-	skb->len+=len;
-	if(skb->tail>skb->end) {
+	skb->tail += len;
+	skb->len  += len;
+	if (skb->tail>skb->end)
 		skb_over_panic(skb, len, current_text_addr());
-	}
 	return tmp;
 }
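A typical driver pattern built on these helpers, as a hedged sketch (buf and len are hypothetical; dev_alloc_skb() and skb_reserve() are defined further down in this header):

	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (skb) {
		skb_reserve(skb, 2);	/* align the IP header that follows
					   a 14-byte Ethernet header */
		memcpy(skb_put(skb, len), buf, len);	/* append payload */
	}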
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
-	skb->data-=len;
-	skb->len+=len;
+	skb->data -= len;
+	skb->len  += len;
 	return skb->data;
 }
 /**
  *	skb_push - add data to the start of a buffer
  *	@skb: buffer to use
  *	@len: amount of data to add
  *
  *	This function extends the used data area of the buffer at the buffer
  *	start. If this would exceed the total buffer headroom the kernel will
  *	panic. A pointer to the first byte of the extra data is returned.
  */
 static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
 {
-	skb->data-=len;
-	skb->len+=len;
-	if(skb->data<skb->head) {
+	skb->data -= len;
+	skb->len  += len;
+	if (skb->data<skb->head)
 		skb_under_panic(skb, len, current_text_addr());
-	}
 	return skb->data;
 }
 static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
-	skb->len-=len;
+	skb->len -= len;
 	if (skb->len < skb->data_len)
 		BUG();
-	return skb->data+=len;
+	return skb->data += len;
 }
 /**
  *	skb_pull - remove data from the start of a buffer
  *	@skb: buffer to use
  *	@len: amount of data to remove
  *
  *	This function removes data from the start of a buffer, returning
@@ -841,30 +846,25 @@ static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
  *	is returned. Once the data has been pulled future pushes will overwrite
  *	the old data.
  */
-static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
+static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
 {
-	if (len > skb->len)
-		return NULL;
-	return __skb_pull(skb,len);
+	return (len > skb->len) ? NULL : __skb_pull(skb, len);
 }
-extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
+extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (len > skb_headlen(skb) &&
-	    __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
+	    !__pskb_pull_tail(skb, len-skb_headlen(skb)))
 		return NULL;
 	skb->len -= len;
 	return skb->data += len;
 }
-static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
+static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
 {
-	if (len > skb->len)
-		return NULL;
-	return __pskb_pull(skb,len);
+	return (len > skb->len) ? NULL : __pskb_pull(skb, len);
 }
 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
@@ -873,7 +873,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
 		return 1;
 	if (len > skb->len)
 		return 0;
-	return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
+	return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
 }
 /**
@@ -882,10 +882,9 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
  *
  *	Return the number of bytes of free space at the head of an &sk_buff.
  */
 static inline int skb_headroom(const struct sk_buff *skb)
 {
-	return skb->data-skb->head;
+	return skb->data - skb->head;
 }
 /**
@@ -894,10 +893,9 @@ static inline int skb_headroom(const struct sk_buff *skb)
  *
  *	Return the number of bytes of free space at the tail of an sk_buff
  */
 static inline int skb_tailroom(const struct sk_buff *skb)
 {
-	return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
+	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
 }
 /**
@@ -908,11 +906,10 @@ static inline int skb_tailroom(const struct sk_buff *skb)
  *	Increase the headroom of an empty &sk_buff by reducing the tail
  *	room. This is only allowed for an empty buffer.
  */
 static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
 {
-	skb->data+=len;
-	skb->tail+=len;
+	skb->data += len;
+	skb->tail += len;
 }
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
@@ -920,11 +917,10 @@ extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
 	if (!skb->data_len) {
 		skb->len  = len;
-		skb->tail = skb->data+len;
-	} else {
+		skb->tail = skb->data + len;
+	} else
 		___pskb_trim(skb, len, 0);
-	}
 }
 /**
@@ -935,31 +931,26 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
  *	Cut the length of a buffer down by removing data from the tail. If
  *	the buffer is already under the length specified it is not modified.
  */
 static inline void skb_trim(struct sk_buff *skb, unsigned int len)
 {
-	if (skb->len > len) {
+	if (skb->len > len)
 		__skb_trim(skb, len);
-	}
 }
 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
 	if (!skb->data_len) {
 		skb->len  = len;
 		skb->tail = skb->data+len;
 		return 0;
-	} else {
-		return ___pskb_trim(skb, len, 1);
 	}
+	return ___pskb_trim(skb, len, 1);
 }
 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
 {
-	if (len < skb->len)
-		return __pskb_trim(skb, len);
-	return 0;
+	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
 }
 /**
@@ -970,47 +961,41 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
  *	destructor function and make the @skb unowned. The buffer continues
  *	to exist but is no longer charged to its former owner.
  */
 static inline void skb_orphan(struct sk_buff *skb)
 {
 	if (skb->destructor)
 		skb->destructor(skb);
 	skb->destructor = NULL;
 	skb->sk		= NULL;
 }
 /**
- *	skb_purge - empty a list
+ *	skb_queue_purge - empty a list
  *	@list: list to empty
  *
  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
  *	the list and one reference dropped. This function takes the list
  *	lock and is atomic with respect to other list locking functions.
  */
 static inline void skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb=skb_dequeue(list))!=NULL)
+	while ((skb = skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
 }
 /**
- *	__skb_purge - empty a list
+ *	__skb_queue_purge - empty a list
  *	@list: list to empty
  *
  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
  *	the list and one reference dropped. This function does not take the
  *	list lock and the caller must hold the relevant locks to use it.
  */
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb=__skb_dequeue(list))!=NULL)
+	while ((skb = __skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
 }
@@ -1026,15 +1011,12 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *
  *	%NULL is returned in there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      int gfp_mask)
 {
-	struct sk_buff *skb;
-	skb = alloc_skb(length+16, gfp_mask);
+	struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
 	if (skb)
-		skb_reserve(skb,16);
+		skb_reserve(skb, 16);
 	return skb;
 }
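The 16 bytes reserved here give drivers headroom in front of the packet, so a later skb_push() of a link-level header does not trigger skb_under_panic(); a hedged sketch (payload and payload_len are hypothetical):

	struct sk_buff *skb = dev_alloc_skb(payload_len);

	if (skb) {
		memcpy(skb_put(skb, payload_len), payload, payload_len);
		skb_push(skb, ETH_HLEN);	/* 14 bytes, fits in the
						   16 reserved up front */
	}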
@@ -1050,7 +1032,6 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
  *	%NULL is returned in there is no free memory. Although this function
  *	allocates memory it can be called from an interrupt.
  */
 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
 {
 	return __dev_alloc_skb(length, GFP_ATOMIC);
@@ -1068,9 +1049,7 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
  *	The result is skb with writable area skb->head...skb->tail
  *	and at least @headroom of space at head.
  */
-static inline int
-skb_cow(struct sk_buff *skb, unsigned int headroom)
+static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
 {
 	int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
@@ -1078,7 +1057,7 @@ skb_cow(struct sk_buff *skb, unsigned int headroom)
 		delta = 0;
 	if (delta || skb_cloned(skb))
-		return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
+		return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
 	return 0;
 }
@@ -1088,7 +1067,8 @@ skb_cow(struct sk_buff *skb, unsigned int headroom)
  *	@gfp: allocation mode
  *
  *	If there is no free memory -ENOMEM is returned, otherwise zero
- *	is returned and the old skb data released.  */
+ *	is returned and the old skb data released.
+ */
 int skb_linearize(struct sk_buff *skb, int gfp);
 static inline void *kmap_skb_frag(const skb_frag_t *frag)
@@ -1113,34 +1093,45 @@ static inline void kunmap_skb_frag(void *vaddr)
 #define skb_queue_walk(queue, skb) \
 		for (skb = (queue)->next;			\
 		     (skb != (struct sk_buff *)(queue));	\
-		     skb=skb->next)
+		     skb = skb->next)
-extern struct sk_buff *	skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
-extern unsigned int	datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
-extern int		skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
-extern int		skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
-extern int		skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
-extern int		skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
-extern void		skb_free_datagram(struct sock * sk, struct sk_buff *skb);
-extern unsigned int	skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
-extern int		skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
-extern unsigned int	skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
-extern void		skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+					 int noblock, int *err);
+extern unsigned int datagram_poll(struct file *file, struct socket *sock,
+				  struct poll_table_struct *wait);
+extern int skb_copy_datagram(const struct sk_buff *from,
+			     int offset, char *to, int size);
+extern int skb_copy_datagram_iovec(const struct sk_buff *from,
+				   int offset, struct iovec *to,
+				   int size);
+extern int skb_copy_and_csum_datagram(const struct sk_buff *skb,
+				      int offset, u8 *to, int len,
+				      unsigned int *csump);
+extern int skb_copy_and_csum_datagram_iovec(const
+					    struct sk_buff *skb,
+					    int hlen,
+					    struct iovec *iov);
+extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+extern unsigned int skb_checksum(const struct sk_buff *skb, int offset,
+				 int len, unsigned int csum);
+extern int skb_copy_bits(const struct sk_buff *skb, int offset,
+			 void *to, int len);
+extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb,
+					   int offset, u8 *to, int len,
+					   unsigned int csum);
+extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 extern void skb_init(void);
 extern void skb_add_mtu(int mtu);
 #ifdef CONFIG_NETFILTER
-static inline void
-nf_conntrack_put(struct nf_ct_info *nfct)
+static inline void nf_conntrack_put(struct nf_ct_info *nfct)
 {
 	if (nfct && atomic_dec_and_test(&nfct->master->use))
 		nfct->master->destroy(nfct->master);
 }
-static inline void
-nf_conntrack_get(struct nf_ct_info *nfct)
+static inline void nf_conntrack_get(struct nf_ct_info *nfct)
 {
 	if (nfct)
 		atomic_inc(&nfct->master->use);
...

net/core/skbuff.c:

@@ -6,8 +6,9 @@
  *
  *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
  *
  *	Fixes:
- *		Alan Cox	:	Fixed the worst of the load balancer bugs.
+ *		Alan Cox	:	Fixed the worst of the load
+ *					balancer bugs.
  *		Dave Platt	:	Interrupt stacking fix.
  *	Richard Kooijman	:	Timestamp fixes.
  *		Alan Cox	:	Changed buffer format.
@@ -21,8 +22,8 @@
  *		Andi Kleen	:	slabified it.
  *
  *	NOTE:
  *		The __skb_ routines should be called with interrupts
  *	disabled, or you better be *real* sure that the operation is atomic
  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
  *	or via disabling bottom half handlers, etc).
  *
@@ -73,7 +74,7 @@ static union {
 /*
  *	Keep out-of-line to prevent kernel bloat.
  *	__builtin_return_address is not used because it is not always
  *	reliable.
  */
 /**
@@ -84,10 +85,9 @@ static union {
  *
  *	Out of line support code for skb_put(). Not user callable.
  */
 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
-	printk("skput:over: %p:%d put:%d dev:%s",
+	printk(KERN_INFO "skput:over: %p:%d put:%d dev:%s",
 		here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
 	BUG();
 }
@@ -100,29 +100,27 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
  *
  *	Out of line support code for skb_push(). Not user callable.
  */
 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
-	printk("skput:under: %p:%d put:%d dev:%s",
+	printk(KERN_INFO "skput:under: %p:%d put:%d dev:%s",
 		here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
 	BUG();
 }
 static __inline__ struct sk_buff *skb_head_from_pool(void)
 {
 	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+	struct sk_buff *skb = NULL;
 	if (skb_queue_len(list)) {
-		struct sk_buff *skb;
 		unsigned long flags;
 		local_irq_save(flags);
 		skb = __skb_dequeue(list);
 		local_irq_restore(flags);
-		return skb;
 	}
-	return NULL;
+	return skb;
 }
 static __inline__ void skb_head_to_pool(struct sk_buff *skb)
@@ -135,17 +133,15 @@ static __inline__ void skb_head_to_pool(struct sk_buff *skb)
 		local_irq_save(flags);
 		__skb_queue_head(list, skb);
 		local_irq_restore(flags);
-		return;
-	}
-	kmem_cache_free(skbuff_head_cache, skb);
+	} else
+		kmem_cache_free(skbuff_head_cache, skb);
 }
 /*	Allocate a new skbuff. We do this ourselves so we can fill in a few
  *	'private' fields and also do memory statistics to find all the
  *	[BEEP] leaks.
  *
  */
 /**
@@ -160,14 +156,13 @@ static __inline__ void skb_head_to_pool(struct sk_buff *skb)
  *	Buffers may only be allocated from interrupts using a @gfp_mask of
  *	%GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
+struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
 	if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
-		static int count = 0;
+		static int count;
 		if (++count < 5) {
 			printk(KERN_ERR "alloc_skb called nonatomically "
 			       "from interrupt %p\n", NET_CALLER(size));
@@ -178,76 +173,74 @@ struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
 	/* Get the HEAD */
 	skb = skb_head_from_pool();
-	if (skb == NULL) {
-		skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
-		if (skb == NULL)
-			goto nohead;
+	if (!skb) {
+		skb = kmem_cache_alloc(skbuff_head_cache,
+				       gfp_mask & ~__GFP_DMA);
+		if (!skb)
+			goto out;
 	}
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-	if (data == NULL)
+	if (!data)
 		goto nodata;
 	/* XXX: does not include slab overhead */
 	skb->truesize = size + sizeof(struct sk_buff);
 	/* Load the data pointers. */
-	skb->head = data;
-	skb->data = data;
-	skb->tail = data;
-	skb->end = data + size;
+	skb->head = skb->data = skb->tail = data;
+	skb->end  = data + size;
 	/* Set up other state */
 	skb->len = 0;
 	skb->cloned = 0;
 	skb->data_len = 0;
 	atomic_set(&skb->users, 1);
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
 	skb_shinfo(skb)->frag_list = NULL;
+out:
 	return skb;
 nodata:
 	skb_head_to_pool(skb);
-nohead:
-	return NULL;
+	skb = NULL;
+	goto out;
}
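The rewrite funnels both the success and failure paths of alloc_skb() through a single out: label, a common kernel error-handling idiom; a generic hedged sketch of the pattern (struct foo, foo_cache and FOO_BUF_LEN are all hypothetical):

	static struct foo *foo_alloc(int gfp_mask)
	{
		struct foo *f = kmem_cache_alloc(foo_cache, gfp_mask);

		if (!f)
			goto out;		/* single exit point */
		f->buf = kmalloc(FOO_BUF_LEN, gfp_mask);
		if (!f->buf)
			goto nobuf;
	out:
		return f;
	nobuf:
		kmem_cache_free(foo_cache, f);	/* undo the partial work */
		f = NULL;
		goto out;
	}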
/* /*
* Slab constructor for a skb head. * Slab constructor for a skb head.
*/ */
static inline void skb_headerinit(void *p, kmem_cache_t *cache, static inline void skb_headerinit(void *p, kmem_cache_t *cache,
unsigned long flags) unsigned long flags)
{ {
struct sk_buff *skb = p; struct sk_buff *skb = p;
skb->next = NULL; skb->next = skb->prev = NULL;
skb->prev = NULL; skb->list = NULL;
skb->list = NULL; skb->sk = NULL;
skb->sk = NULL; skb->stamp.tv_sec = 0; /* No idea about time */
skb->stamp.tv_sec=0; /* No idea about time */ skb->dev = NULL;
skb->dev = NULL; skb->dst = NULL;
skb->dst = NULL;
memset(skb->cb, 0, sizeof(skb->cb)); memset(skb->cb, 0, sizeof(skb->cb));
skb->pkt_type = PACKET_HOST; /* Default type */ skb->pkt_type = PACKET_HOST; /* Default type */
skb->ip_summed = 0; skb->ip_summed = 0;
skb->priority = 0; skb->priority = 0;
skb->security = 0; /* By default packets are insecure */ skb->security = 0; /* By default packets are insecure */
skb->destructor = NULL; skb->destructor = NULL;
#ifdef CONFIG_NETFILTER #ifdef CONFIG_NETFILTER
skb->nfmark = skb->nfcache = 0; skb->nfmark = skb->nfcache = 0;
skb->nfct = NULL; skb->nfct = NULL;
#ifdef CONFIG_NETFILTER_DEBUG #ifdef CONFIG_NETFILTER_DEBUG
skb->nf_debug = 0; skb->nf_debug = 0;
#endif #endif
#endif #endif
#ifdef CONFIG_NET_SCHED #ifdef CONFIG_NET_SCHED
skb->tc_index = 0; skb->tc_index = 0;
#endif #endif
} }
...@@ -268,7 +261,7 @@ static void skb_clone_fraglist(struct sk_buff *skb) ...@@ -268,7 +261,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
{ {
struct sk_buff *list; struct sk_buff *list;
for (list = skb_shinfo(skb)->frag_list; list; list=list->next) for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
skb_get(list); skb_get(list);
} }
...@@ -290,7 +283,7 @@ static void skb_release_data(struct sk_buff *skb) ...@@ -290,7 +283,7 @@ static void skb_release_data(struct sk_buff *skb)
} }
/* /*
* Free an skbuff by memory without cleaning the state. * Free an skbuff by memory without cleaning the state.
*/ */
void kfree_skbmem(struct sk_buff *skb) void kfree_skbmem(struct sk_buff *skb)
{ {
...@@ -299,10 +292,10 @@ void kfree_skbmem(struct sk_buff *skb) ...@@ -299,10 +292,10 @@ void kfree_skbmem(struct sk_buff *skb)
} }
/** /**
* __kfree_skb - private function * __kfree_skb - private function
* @skb: buffer * @skb: buffer
* *
* Free an sk_buff. Release anything attached to the buffer. * Free an sk_buff. Release anything attached to the buffer.
* Clean the state. This is an internal helper function. Users should * Clean the state. This is an internal helper function. Users should
* always call kfree_skb * always call kfree_skb
*/ */
...@@ -317,10 +310,9 @@ void __kfree_skb(struct sk_buff *skb) ...@@ -317,10 +310,9 @@ void __kfree_skb(struct sk_buff *skb)
dst_release(skb->dst); dst_release(skb->dst);
if(skb->destructor) { if(skb->destructor) {
if (in_irq()) { if (in_irq())
printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n", printk(KERN_WARNING "Warning: kfree_skb on "
NET_CALLER(skb)); "hard IRQ %p\n", NET_CALLER(skb));
}
skb->destructor(skb); skb->destructor(skb);
} }
#ifdef CONFIG_NETFILTER #ifdef CONFIG_NETFILTER
@@ -337,18 +329,17 @@ void __kfree_skb(struct sk_buff *skb)
  *
  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
  *	copies share the same packet data but not structure. The new
  *	buffer has a reference count of 1. If the allocation fails the
  *	function returns %NULL otherwise the new buffer is returned.
  *
  *	If this function is called from an interrupt gfp_mask() must be
  *	%GFP_ATOMIC.
  */
 
 struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
 {
-	struct sk_buff *n;
+	struct sk_buff *n = skb_head_from_pool();
 
-	n = skb_head_from_pool();
 	if (!n) {
 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 		if (!n)
@@ -414,32 +405,32 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	 */
 	unsigned long offset = new->data - old->data;
 
-	new->list=NULL;
-	new->sk=NULL;
-	new->dev=old->dev;
-	new->priority=old->priority;
-	new->protocol=old->protocol;
-	new->dst=dst_clone(old->dst);
-	new->h.raw=old->h.raw+offset;
-	new->nh.raw=old->nh.raw+offset;
-	new->mac.raw=old->mac.raw+offset;
+	new->list = NULL;
+	new->sk = NULL;
+	new->dev = old->dev;
+	new->priority = old->priority;
+	new->protocol = old->protocol;
+	new->dst = dst_clone(old->dst);
+	new->h.raw = old->h.raw + offset;
+	new->nh.raw = old->nh.raw + offset;
+	new->mac.raw = old->mac.raw + offset;
 	memcpy(new->cb, old->cb, sizeof(old->cb));
 	atomic_set(&new->users, 1);
-	new->pkt_type=old->pkt_type;
-	new->stamp=old->stamp;
+	new->pkt_type = old->pkt_type;
+	new->stamp = old->stamp;
 	new->destructor = NULL;
-	new->security=old->security;
+	new->security = old->security;
 #ifdef CONFIG_NETFILTER
-	new->nfmark=old->nfmark;
-	new->nfcache=old->nfcache;
-	new->nfct=old->nfct;
+	new->nfmark = old->nfmark;
+	new->nfcache = old->nfcache;
+	new->nfct = old->nfct;
 	nf_conntrack_get(new->nfct);
 #ifdef CONFIG_NETFILTER_DEBUG
-	new->nf_debug=old->nf_debug;
+	new->nf_debug = old->nf_debug;
 #endif
 #endif
 #ifdef CONFIG_NET_SCHED
 	new->tc_index = old->tc_index;
 #endif
 }
@@ -449,7 +440,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  *	@gfp_mask: allocation priority
  *
  *	Make a copy of both an &sk_buff and its data. This is used when the
  *	caller wishes to modify the data and needs a private copy of the
  *	data to alter. Returns %NULL on failure or the pointer to the buffer
  *	on success. The returned buffer has a reference count of 1.
  *
@@ -459,31 +450,29 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  *	function is not recommended for use in circumstances when only
  *	header is going to be modified. Use pskb_copy() instead.
  */
 
 struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
 {
-	struct sk_buff *n;
-	int headerlen = skb->data-skb->head;
-
+	int headerlen = skb->data - skb->head;
 	/*
 	 *	Allocate the copy buffer
 	 */
-	n=alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
-	if(n==NULL)
+	struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
+				      gfp_mask);
+
+	if (!n)
 		return NULL;
 
 	/* Set the data pointer */
-	skb_reserve(n,headerlen);
+	skb_reserve(n, headerlen);
 	/* Set the tail pointer and length */
-	skb_put(n,skb->len);
+	skb_put(n, skb->len);
 	n->csum = skb->csum;
 	n->ip_summed = skb->ip_summed;
 
-	if (skb_copy_bits(skb, -headerlen, n->head, headerlen+skb->len))
+	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
 		BUG();
 
 	copy_skb_header(n, skb);
 	return n;
 }
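The kdoc's advice matters in practice: skb_clone() shares the packet bytes, skb_copy() duplicates them. A hedged sketch of the difference; example_touch_payload() is hypothetical and assumes the buffer holds at least one byte.

static struct sk_buff *example_touch_payload(struct sk_buff *skb)
{
	/* A clone would alias skb's bytes; writing through it could
	 * corrupt data other users still see. Take a private copy.
	 */
	struct sk_buff *n = skb_copy(skb, GFP_ATOMIC);

	if (n)
		n->data[0] ^= 0xff;	/* safe: modifies our copy only */
	return n;
}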
@@ -494,7 +483,7 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
 	u8 *data;
 	long offset;
 	int headerlen = skb->data - skb->head;
-	int expand = (skb->tail+skb->data_len) - skb->end;
+	int expand = (skb->tail + skb->data_len) - skb->end;
 
 	if (skb_shared(skb))
 		BUG();
@@ -502,14 +491,14 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
 	if (expand <= 0)
 		expand = 0;
 
-	size = (skb->end - skb->head + expand);
+	size = skb->end - skb->head + expand;
 	size = SKB_DATA_ALIGN(size);
 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-	if (data == NULL)
+	if (!data)
 		return -ENOMEM;
 
 	/* Copy entire thing */
-	if (skb_copy_bits(skb, -headerlen, data, headerlen+skb->len))
+	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
 		BUG();
 
 	/* Offset between the two in bytes */
@@ -522,22 +511,22 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
 	skb->end = data + size;
 
 	/* Set up new pointers */
 	skb->h.raw += offset;
 	skb->nh.raw += offset;
 	skb->mac.raw += offset;
 	skb->tail += offset;
 	skb->data += offset;
 
 	/* Set up shinfo */
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
 	skb_shinfo(skb)->frag_list = NULL;
 
 	/* We are no longer a clone, even if we were. */
 	skb->cloned = 0;
 
 	skb->tail += skb->data_len;
 	skb->data_len = 0;
 	return 0;
 }
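A typical (hypothetical) caller flattens a paged buffer before handing it to code that assumes skb->data is contiguous; a minimal sketch:

static int example_flatten(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC))
		return -ENOMEM;		/* could not pull frags together */
	/* all skb->len bytes are now reachable through skb->data */
	return 0;
}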
@@ -557,26 +546,25 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
 
 struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
 {
-	struct sk_buff *n;
-
 	/*
 	 *	Allocate the copy buffer
 	 */
-	n=alloc_skb(skb->end - skb->head, gfp_mask);
-	if(n==NULL)
-		return NULL;
+	struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
+
+	if (!n)
+		goto out;
 
 	/* Set the data pointer */
-	skb_reserve(n,skb->data-skb->head);
+	skb_reserve(n, skb->data - skb->head);
 	/* Set the tail pointer and length */
-	skb_put(n,skb_headlen(skb));
+	skb_put(n, skb_headlen(skb));
 	/* Copy the bytes */
 	memcpy(n->data, skb->data, n->len);
 	n->csum = skb->csum;
 	n->ip_summed = skb->ip_summed;
 
 	n->data_len = skb->data_len;
 	n->len = skb->len;
 
 	if (skb_shinfo(skb)->nr_frags) {
 		int i;
@@ -594,7 +582,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
 	}
 
 	copy_skb_header(n, skb);
-
+out:
 	return n;
 }
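pskb_copy() yields a private linear part while still sharing the frag pages, which is what header-rewriting paths want. An illustrative sketch; example_private_headers() and the assumption that an Ethernet header is present are not from this patch.

static struct sk_buff *example_private_headers(struct sk_buff *skb)
{
	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);

	if (n)
		memset(n->data, 0, ETH_HLEN);	/* linear part is private */
	return n;				/* frag pages stay shared */
}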
@@ -627,15 +615,15 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
 	size = SKB_DATA_ALIGN(size);
 
 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-	if (data == NULL)
+	if (!data)
 		goto nodata;
 
 	/* Copy only real data... and, alas, header. This should be
 	 * optimized for the cases when header is void. */
-	memcpy(data+nhead, skb->head, skb->tail-skb->head);
-	memcpy(data+size, skb->end, sizeof(struct skb_shared_info));
+	memcpy(data + nhead, skb->head, skb->tail - skb->head);
+	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
 
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
 
 	if (skb_shinfo(skb)->frag_list)
@@ -643,17 +631,16 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
 	skb_release_data(skb);
 
-	off = (data+nhead) - skb->head;
+	off = (data + nhead) - skb->head;
 
 	skb->head = data;
-	skb->end = data+size;
-
+	skb->end = data + size;
 	skb->data += off;
 	skb->tail += off;
 	skb->mac.raw += off;
 	skb->h.raw += off;
 	skb->nh.raw += off;
 	skb->cloned = 0;
 	atomic_set(&skb_shinfo(skb)->dataref, 1);
 	return 0;
@@ -663,22 +650,22 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
 /* Make private copy of skb with writable head and some headroom */
-struct sk_buff *
-skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
 {
 	struct sk_buff *skb2;
 	int delta = headroom - skb_headroom(skb);
 
 	if (delta <= 0)
-		return pskb_copy(skb, GFP_ATOMIC);
-
-	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (skb2 == NULL ||
-	    !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
-		return skb2;
-
-	kfree_skb(skb2);
-	return NULL;
+		skb2 = pskb_copy(skb, GFP_ATOMIC);
+	else {
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
+					     GFP_ATOMIC)) {
+			kfree_skb(skb2);
+			skb2 = NULL;
+		}
+	}
+	return skb2;
 }
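Callers typically use this to guarantee room for one more header before skb_push(); a hypothetical encapsulation path might read:

static struct sk_buff *example_encap(struct sk_buff *skb, int hlen)
{
	if (skb_headroom(skb) < hlen || skb_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, hlen);

		kfree_skb(skb);		/* old buffer no longer needed */
		if (!nskb)
			return NULL;
		skb = nskb;
	}
	skb_push(skb, hlen);		/* guaranteed to fit now */
	return skb;
}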
@@ -689,10 +676,10 @@ skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  *	@newtailroom: new free bytes at tail
  *	@gfp_mask: allocation priority
  *
  *	Make a copy of both an &sk_buff and its data and while doing so
  *	allocate additional space.
  *
  *	This is used when the caller wishes to modify the data and needs a
  *	private copy of the data to alter as well as more space for new fields.
  *	Returns %NULL on failure or the pointer to the buffer
  *	on success. The returned buffer has a reference count of 1.
@@ -700,34 +687,28 @@ skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  *	You must pass %GFP_ATOMIC as the allocation priority if this function
  *	is called from an interrupt.
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-				int newheadroom,
-				int newtailroom,
-				int gfp_mask)
+				int newheadroom, int newtailroom, int gfp_mask)
 {
-	struct sk_buff *n;
-
 	/*
 	 *	Allocate the copy buffer
 	 */
-
-	n=alloc_skb(newheadroom + skb->len + newtailroom,
-		    gfp_mask);
-	if(n==NULL)
+	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
+				      gfp_mask);
+
+	if (!n)
 		return NULL;
 
-	skb_reserve(n,newheadroom);
+	skb_reserve(n, newheadroom);
 
 	/* Set the tail pointer and length */
-	skb_put(n,skb->len);
+	skb_put(n, skb->len);
 
 	/* Copy the data only. */
 	if (skb_copy_bits(skb, 0, n->data, skb->len))
 		BUG();
 
 	copy_skb_header(n, skb);
 	return n;
 }
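A hedged usage sketch: copy a buffer while reserving extra tailroom for a trailer to be appended later (names are illustrative):

static struct sk_buff *example_copy_for_trailer(const struct sk_buff *skb,
						int trailer_len)
{
	return skb_copy_expand(skb, skb_headroom(skb),
			       skb_tailroom(skb) + trailer_len, GFP_ATOMIC);
}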
@@ -742,7 +723,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	int i;
 
-	for (i=0; i<nfrags; i++) {
+	for (i = 0; i < nfrags; i++) {
 		int end = offset + skb_shinfo(skb)->frags[i].size;
 		if (end > len) {
 			if (skb_cloned(skb)) {
@@ -755,7 +736,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
 				put_page(skb_shinfo(skb)->frags[i].page);
 				skb_shinfo(skb)->nr_frags--;
 			} else {
-				skb_shinfo(skb)->frags[i].size = len-offset;
+				skb_shinfo(skb)->frags[i].size = len - offset;
 			}
 		}
 		offset = end;
@@ -763,17 +744,17 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
 	if (offset < len) {
 		skb->data_len -= skb->len - len;
 		skb->len = len;
 	} else {
 		if (len <= skb_headlen(skb)) {
 			skb->len = len;
 			skb->data_len = 0;
 			skb->tail = skb->data + len;
 			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
 				skb_drop_fraglist(skb);
 		} else {
 			skb->data_len -= skb->len - len;
 			skb->len = len;
 		}
 	}
@@ -781,7 +762,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
 }
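___pskb_trim() is normally reached through pskb_trim(); a hypothetical caller chopping trailing link-layer padding:

static int example_strip_pad(struct sk_buff *skb, unsigned int real_len)
{
	if (skb->len > real_len)
		return pskb_trim(skb, real_len);	/* may reallocate */
	return 0;
}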
 /**
  *	__pskb_pull_tail - advance tail of skb header
  *	@skb: buffer to reallocate
  *	@delta: number of bytes to advance tail
  *
@@ -805,18 +786,17 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
  *
  * It is pretty complicated. Luckily, it is called only in exceptional cases.
  */
-unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 {
-	int i, k, eat;
-
 	/* If skb has not enough free space at tail, get new one
 	 * plus 128 bytes for future expansions. If we have enough
 	 * room at tail, reallocate without expansion only if skb is cloned.
 	 */
-	eat = (skb->tail+delta) - skb->end;
+	int i, k, eat = (skb->tail + delta) - skb->end;
 
 	if (eat > 0 || skb_cloned(skb)) {
-		if (pskb_expand_head(skb, 0, eat>0 ? eat+128 : 0, GFP_ATOMIC))
+		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
+				     GFP_ATOMIC))
 			return NULL;
 	}
@@ -826,12 +806,12 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 	/* Optimization: no fragments, no reasons to preestimate
 	 * size of pulled pages. Superb.
 	 */
-	if (skb_shinfo(skb)->frag_list == NULL)
+	if (!skb_shinfo(skb)->frag_list)
 		goto pull_pages;
 
 	/* Estimate size of pulled pages. */
 	eat = delta;
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		if (skb_shinfo(skb)->frags[i].size >= eat)
 			goto pull_pages;
 		eat -= skb_shinfo(skb)->frags[i].size;
@@ -850,7 +830,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 		struct sk_buff *insp = NULL;
 
 		do {
-			if (list == NULL)
+			if (!list)
 				BUG();
 
 			if (list->len <= eat) {
@@ -864,7 +844,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 				if (skb_shared(list)) {
 					/* Sucks! We need to fork list. :-( */
 					clone = skb_clone(list, GFP_ATOMIC);
-					if (clone == NULL)
+					if (!clone)
 						return NULL;
 					insp = list->next;
 					list = clone;
@@ -873,7 +853,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 				 * problems. */
 					insp = list;
 				}
-				if (pskb_pull(list, eat) == NULL) {
+				if (!pskb_pull(list, eat)) {
 					if (clone)
 						kfree_skb(clone);
 					return NULL;
@@ -898,7 +878,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 pull_pages:
 	eat = delta;
 	k = 0;
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		if (skb_shinfo(skb)->frags[i].size <= eat) {
 			put_page(skb_shinfo(skb)->frags[i].page);
 			eat -= skb_shinfo(skb)->frags[i].size;
@@ -914,7 +894,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 	}
 	skb_shinfo(skb)->nr_frags = k;
 
 	skb->tail += delta;
 	skb->data_len -= delta;
 
 	return skb->tail;
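In practice this function is reached through pskb_may_pull(), as in this hypothetical parser (the struct iphdr use is illustrative, not from the patch):

static int example_parse(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;		/* shorter than an IP header */
	/* the header is now in the linear area behind skb->data */
	return 0;
}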
@@ -927,68 +907,70 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	int i, copy;
 	int start = skb->len - skb->data_len;
 
-	if (offset > (int)skb->len-len)
+	if (offset > (int)skb->len - len)
 		goto fault;
 
 	/* Copy header. */
-	if ((copy = start-offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		memcpy(to, skb->data + offset, copy);
 		if ((len -= copy) == 0)
 			return 0;
 		offset += copy;
 		to += copy;
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset+len);
+		BUG_TRAP(start <= offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
-		if ((copy = end-offset) > 0) {
+		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
 			if (copy > len)
 				copy = len;
 
 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
-			memcpy(to, vaddr+skb_shinfo(skb)->frags[i].page_offset+
-			       offset-start, copy);
+			memcpy(to,
+			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
+			       offset - start, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
 			offset += copy;
 			to += copy;
 		}
 		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list;
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
-		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+		for (; list; list = list->next) {
 			int end;
 
-			BUG_TRAP(start <= offset+len);
+			BUG_TRAP(start <= offset + len);
 
 			end = start + list->len;
-			if ((copy = end-offset) > 0) {
+			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_bits(list, offset-start, to, copy))
+				if (skb_copy_bits(list, offset - start,
						  to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				to += copy;
 			}
 			start = end;
 		}
 	}
-	if (len == 0)
+	if (!len)
 		return 0;
 
 fault:
@@ -997,30 +979,31 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 /* Checksum skb data. */
 
-unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
+unsigned int skb_checksum(const struct sk_buff *skb, int offset,
+			  int len, unsigned int csum)
 {
-	int i, copy;
 	int start = skb->len - skb->data_len;
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Checksum header. */
-	if ((copy = start-offset) > 0) {
+	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		csum = csum_partial(skb->data+offset, copy, csum);
+		csum = csum_partial(skb->data + offset, copy, csum);
 		if ((len -= copy) == 0)
 			return csum;
 		offset += copy;
 		pos = copy;
 	}
 
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset+len);
+		BUG_TRAP(start <= offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
-		if ((copy = end-offset) > 0) {
+		if ((copy = end - offset) > 0) {
 			unsigned int csum2;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1029,74 +1012,76 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsign
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
 			csum2 = csum_partial(vaddr + frag->page_offset +
-					     offset-start, copy, 0);
+					     offset - start, copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
 			offset += copy;
 			pos += copy;
 		}
 		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list;
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
-		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+		for (; list; list = list->next) {
 			int end;
 
-			BUG_TRAP(start <= offset+len);
+			BUG_TRAP(start <= offset + len);
 
 			end = start + list->len;
-			if ((copy = end-offset) > 0) {
+			if ((copy = end - offset) > 0) {
 				unsigned int csum2;
 				if (copy > len)
 					copy = len;
-				csum2 = skb_checksum(list, offset-start, copy, 0);
+				csum2 = skb_checksum(list, offset - start,
+						     copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
 					return csum;
 				offset += copy;
 				pos += copy;
 			}
 			start = end;
 		}
 	}
-	if (len == 0)
-		return csum;
+	if (len)
+		BUG();
 
-	BUG();
 	return csum;
 }
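A hedged sketch of a caller, folding a full-packet checksum the way receive-side verification paths do (example_csum() is illustrative):

static unsigned short example_csum(const struct sk_buff *skb)
{
	/* walks the header, frags and frag_list exactly as above */
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}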
 /* Both of above in one bottle. */
 
-unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum)
+unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
+				    u8 *to, int len, unsigned int csum)
 {
-	int i, copy;
 	int start = skb->len - skb->data_len;
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Copy header. */
-	if ((copy = start-offset) > 0) {
+	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		csum = csum_partial_copy_nocheck(skb->data+offset, to, copy, csum);
+		csum = csum_partial_copy_nocheck(skb->data + offset, to,
+						 copy, csum);
 		if ((len -= copy) == 0)
 			return csum;
 		offset += copy;
 		to += copy;
 		pos = copy;
 	}
 
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset+len);
+		BUG_TRAP(start <= offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
-		if ((copy = end-offset) > 0) {
+		if ((copy = end - offset) > 0) {
 			unsigned int csum2;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1104,47 +1089,49 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *t
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset +
-							  offset-start, to, copy, 0);
+			csum2 = csum_partial_copy_nocheck(vaddr +
+							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
 			offset += copy;
 			to += copy;
 			pos += copy;
 		}
 		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list;
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
-		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+		for (; list; list = list->next) {
 			unsigned int csum2;
 			int end;
 
-			BUG_TRAP(start <= offset+len);
+			BUG_TRAP(start <= offset + len);
 
 			end = start + list->len;
-			if ((copy = end-offset) > 0) {
+			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				csum2 = skb_copy_and_csum_bits(list, offset-start, to, copy, 0);
+				csum2 = skb_copy_and_csum_bits(list,
+							       offset - start,
+							       to, copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
 					return csum;
 				offset += copy;
 				to += copy;
 				pos += copy;
 			}
 			start = end;
 		}
 	}
-	if (len == 0)
-		return csum;
+	if (len)
+		BUG();
 
-	BUG();
 	return csum;
 }
@@ -1165,8 +1152,8 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 	csum = 0;
 	if (csstart != skb->len)
-		csum = skb_copy_and_csum_bits(skb, csstart, to+csstart,
-					      skb->len-csstart, 0);
+		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
+					      skb->len - csstart, 0);
 
 	if (skb->ip_summed == CHECKSUM_HW) {
 		long csstuff = csstart + skb->csum;
@@ -1176,7 +1163,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 }
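A hypothetical driver transmit routine showing the intended use (the dev_buf handling is illustrative, not from the patch):

static void example_xmit(struct sk_buff *skb, u8 *dev_buf)
{
	/* copies skb->len bytes and fills in the checksum in one pass */
	skb_copy_and_csum_dev(skb, dev_buf);
	dev_kfree_skb(skb);
}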
 #if 0
 /*
  *	Tune the memory allocator for a new MTU size.
  */
 void skb_add_mtu(int mtu)
@@ -1200,6 +1187,6 @@ void __init skb_init(void)
 	if (!skbuff_head_cache)
 		panic("cannot create skbuff cache");
 
-	for (i=0; i<NR_CPUS; i++)
+	for (i = 0; i < NR_CPUS; i++)
 		skb_queue_head_init(&skb_head_pool[i].list);
 }