Commit 86a76caf authored by Victor Fusco, committed by David S. Miller

[NET]: Fix sparse warnings

From: Victor Fusco <victor@cetuc.puc-rio.br>

Fix the sparse warning "implicit cast to nocast type"
Signed-off-by: Victor Fusco <victor@cetuc.puc-rio.br>
Signed-off-by: Domen Puncer <domen@coderock.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b03efcfb
...@@ -300,20 +300,26 @@ struct sk_buff { ...@@ -300,20 +300,26 @@ struct sk_buff {
#include <asm/system.h> #include <asm/system.h>
extern void __kfree_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *alloc_skb(unsigned int size, int priority); extern struct sk_buff *alloc_skb(unsigned int size,
unsigned int __nocast priority);
extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
unsigned int size, int priority); unsigned int size,
unsigned int __nocast priority);
extern void kfree_skbmem(struct sk_buff *skb); extern void kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority); extern struct sk_buff *skb_clone(struct sk_buff *skb,
extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority); unsigned int __nocast priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask); extern struct sk_buff *skb_copy(const struct sk_buff *skb,
unsigned int __nocast priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
unsigned int __nocast gfp_mask);
extern int pskb_expand_head(struct sk_buff *skb, extern int pskb_expand_head(struct sk_buff *skb,
int nhead, int ntail, int gfp_mask); int nhead, int ntail,
unsigned int __nocast gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom); unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom, int newheadroom, int newtailroom,
int priority); unsigned int __nocast priority);
extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) kfree_skb(a) #define dev_kfree_skb(a) kfree_skb(a)
extern void skb_over_panic(struct sk_buff *skb, int len, extern void skb_over_panic(struct sk_buff *skb, int len,
...@@ -464,7 +470,8 @@ static inline int skb_shared(const struct sk_buff *skb) ...@@ -464,7 +470,8 @@ static inline int skb_shared(const struct sk_buff *skb)
* *
* NULL is returned on a memory allocation failure. * NULL is returned on a memory allocation failure.
*/ */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri) static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
unsigned int __nocast pri)
{ {
might_sleep_if(pri & __GFP_WAIT); might_sleep_if(pri & __GFP_WAIT);
if (skb_shared(skb)) { if (skb_shared(skb)) {
...@@ -1001,7 +1008,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) ...@@ -1001,7 +1008,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
* %NULL is returned in there is no free memory. * %NULL is returned in there is no free memory.
*/ */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length, static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
int gfp_mask) unsigned int __nocast gfp_mask)
{ {
struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
if (likely(skb)) if (likely(skb))
...@@ -1114,8 +1121,8 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i, ...@@ -1114,8 +1121,8 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
* If there is no free memory -ENOMEM is returned, otherwise zero * If there is no free memory -ENOMEM is returned, otherwise zero
* is returned and the old skb data released. * is returned and the old skb data released.
*/ */
extern int __skb_linearize(struct sk_buff *skb, int gfp); extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp);
static inline int skb_linearize(struct sk_buff *skb, int gfp) static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp)
{ {
return __skb_linearize(skb, gfp); return __skb_linearize(skb, gfp);
} }
......
...@@ -684,16 +684,17 @@ extern void FASTCALL(release_sock(struct sock *sk)); ...@@ -684,16 +684,17 @@ extern void FASTCALL(release_sock(struct sock *sk));
#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
extern struct sock *sk_alloc(int family, int priority, extern struct sock *sk_alloc(int family,
unsigned int __nocast priority,
struct proto *prot, int zero_it); struct proto *prot, int zero_it);
extern void sk_free(struct sock *sk); extern void sk_free(struct sock *sk);
extern struct sk_buff *sock_wmalloc(struct sock *sk, extern struct sk_buff *sock_wmalloc(struct sock *sk,
unsigned long size, int force, unsigned long size, int force,
int priority); unsigned int __nocast priority);
extern struct sk_buff *sock_rmalloc(struct sock *sk, extern struct sk_buff *sock_rmalloc(struct sock *sk,
unsigned long size, int force, unsigned long size, int force,
int priority); unsigned int __nocast priority);
extern void sock_wfree(struct sk_buff *skb); extern void sock_wfree(struct sk_buff *skb);
extern void sock_rfree(struct sk_buff *skb); extern void sock_rfree(struct sk_buff *skb);
...@@ -708,7 +709,8 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, ...@@ -708,7 +709,8 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
unsigned long size, unsigned long size,
int noblock, int noblock,
int *errcode); int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size, int priority); extern void *sock_kmalloc(struct sock *sk, int size,
unsigned int __nocast priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size); extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk); extern void sk_send_sigurg(struct sock *sk);
...@@ -1132,7 +1134,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) ...@@ -1132,7 +1134,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
} }
static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
int size, int mem, int gfp) int size, int mem,
unsigned int __nocast gfp)
{ {
struct sk_buff *skb; struct sk_buff *skb;
int hdr_len; int hdr_len;
...@@ -1155,7 +1158,8 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, ...@@ -1155,7 +1158,8 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
} }
static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
int size, int gfp) int size,
unsigned int __nocast gfp)
{ {
return sk_stream_alloc_pskb(sk, size, 0, gfp); return sk_stream_alloc_pskb(sk, size, 0, gfp);
} }
...@@ -1188,7 +1192,7 @@ static inline int sock_writeable(const struct sock *sk) ...@@ -1188,7 +1192,7 @@ static inline int sock_writeable(const struct sock *sk)
return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
} }
static inline int gfp_any(void) static inline unsigned int __nocast gfp_any(void)
{ {
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
} }
......
...@@ -860,7 +860,8 @@ extern void tcp_send_probe0(struct sock *); ...@@ -860,7 +860,8 @@ extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *); extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *); extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk); extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, int priority); extern void tcp_send_active_reset(struct sock *sk,
unsigned int __nocast priority);
extern int tcp_send_synack(struct sock *); extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now); extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk); extern void tcp_send_ack(struct sock *sk);
......
...@@ -1127,7 +1127,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) ...@@ -1127,7 +1127,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
extern void skb_release_data(struct sk_buff *); extern void skb_release_data(struct sk_buff *);
/* Keep head the same: replace data */ /* Keep head the same: replace data */
int __skb_linearize(struct sk_buff *skb, int gfp_mask) int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
{ {
unsigned int size; unsigned int size;
u8 *data; u8 *data;
......
...@@ -129,7 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) ...@@ -129,7 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
* Buffers may only be allocated from interrupts using a @gfp_mask of * Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC. * %GFP_ATOMIC.
*/ */
struct sk_buff *alloc_skb(unsigned int size, int gfp_mask) struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
{ {
struct sk_buff *skb; struct sk_buff *skb;
u8 *data; u8 *data;
...@@ -182,7 +182,8 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask) ...@@ -182,7 +182,8 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
* %GFP_ATOMIC. * %GFP_ATOMIC.
*/ */
struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
unsigned int size, int gfp_mask) unsigned int size,
unsigned int __nocast gfp_mask)
{ {
struct sk_buff *skb; struct sk_buff *skb;
u8 *data; u8 *data;
...@@ -322,7 +323,7 @@ void __kfree_skb(struct sk_buff *skb) ...@@ -322,7 +323,7 @@ void __kfree_skb(struct sk_buff *skb)
* %GFP_ATOMIC. * %GFP_ATOMIC.
*/ */
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask) struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
{ {
struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
...@@ -460,7 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) ...@@ -460,7 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
* header is going to be modified. Use pskb_copy() instead. * header is going to be modified. Use pskb_copy() instead.
*/ */
struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask) struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
{ {
int headerlen = skb->data - skb->head; int headerlen = skb->data - skb->head;
/* /*
...@@ -499,7 +500,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask) ...@@ -499,7 +500,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
* The returned buffer has a reference count of 1. * The returned buffer has a reference count of 1.
*/ */
struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask) struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
{ {
/* /*
* Allocate the copy buffer * Allocate the copy buffer
...@@ -557,7 +558,8 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask) ...@@ -557,7 +558,8 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
* reloaded after call to this function. * reloaded after call to this function.
*/ */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask) int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
unsigned int __nocast gfp_mask)
{ {
int i; int i;
u8 *data; u8 *data;
...@@ -647,7 +649,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) ...@@ -647,7 +649,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
* only by netfilter in the cases when checksum is recalculated? --ANK * only by netfilter in the cases when checksum is recalculated? --ANK
*/ */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom, int gfp_mask) int newheadroom, int newtailroom,
unsigned int __nocast gfp_mask)
{ {
/* /*
* Allocate the copy buffer * Allocate the copy buffer
......
...@@ -622,7 +622,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname, ...@@ -622,7 +622,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
* @prot: struct proto associated with this new sock instance * @prot: struct proto associated with this new sock instance
* @zero_it: if we should zero the newly allocated sock * @zero_it: if we should zero the newly allocated sock
*/ */
struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it) struct sock *sk_alloc(int family, unsigned int __nocast priority,
struct proto *prot, int zero_it)
{ {
struct sock *sk = NULL; struct sock *sk = NULL;
kmem_cache_t *slab = prot->slab; kmem_cache_t *slab = prot->slab;
...@@ -750,7 +751,8 @@ unsigned long sock_i_ino(struct sock *sk) ...@@ -750,7 +751,8 @@ unsigned long sock_i_ino(struct sock *sk)
/* /*
* Allocate a skb from the socket's send buffer. * Allocate a skb from the socket's send buffer.
*/ */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority) struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
unsigned int __nocast priority)
{ {
if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
struct sk_buff * skb = alloc_skb(size, priority); struct sk_buff * skb = alloc_skb(size, priority);
...@@ -765,7 +767,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int ...@@ -765,7 +767,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int
/* /*
* Allocate a skb from the socket's receive buffer. * Allocate a skb from the socket's receive buffer.
*/ */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority) struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
unsigned int __nocast priority)
{ {
if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
struct sk_buff *skb = alloc_skb(size, priority); struct sk_buff *skb = alloc_skb(size, priority);
...@@ -780,7 +783,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int ...@@ -780,7 +783,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int
/* /*
* Allocate a memory block from the socket's option memory buffer. * Allocate a memory block from the socket's option memory buffer.
*/ */
void *sock_kmalloc(struct sock *sk, int size, int priority) void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
{ {
if ((unsigned)size <= sysctl_optmem_max && if ((unsigned)size <= sysctl_optmem_max &&
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
......
...@@ -1613,7 +1613,7 @@ void tcp_send_fin(struct sock *sk) ...@@ -1613,7 +1613,7 @@ void tcp_send_fin(struct sock *sk)
* was unread data in the receive queue. This behavior is recommended * was unread data in the receive queue. This behavior is recommended
* by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM
*/ */
void tcp_send_active_reset(struct sock *sk, int priority) void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb; struct sk_buff *skb;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment