Commit d9f2d50e authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/net-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 06029544 04c1e5a1
......@@ -635,7 +635,7 @@ int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
sk->state = PPPOX_CONNECTED;
}
sk->num = sp->sa_addr.pppoe.sid;
po->num = sp->sa_addr.pppoe.sid;
end:
release_sock(sk);
......@@ -788,7 +788,7 @@ int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
hdr.ver = 1;
hdr.type = 1;
hdr.code = 0;
hdr.sid = sk->num;
hdr.sid = po->num;
lock_sock(sk);
......@@ -862,7 +862,7 @@ int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
hdr.ver = 1;
hdr.type = 1;
hdr.code = 0;
hdr.sid = sk->num;
hdr.sid = po->num;
hdr.length = htons(skb->len);
if (!dev)
......
......@@ -53,6 +53,7 @@ struct econet_opt
unsigned char port;
unsigned char station;
unsigned char net;
unsigned short num;
};
#define ec_sk(__sk) ((struct econet_opt *)(__sk)->protinfo)
......
......@@ -127,6 +127,7 @@ struct pppox_opt {
union {
struct pppoe_opt pppoe;
} proto;
unsigned short num;
};
#define pppoe_dev proto.pppoe.dev
#define pppoe_pa proto.pppoe.pa
......
......@@ -52,70 +52,16 @@ struct vlan_hdr {
unsigned short h_vlan_encapsulated_proto; /* packet type ID field (or len) */
};
/* Find a VLAN device by the MAC address of its Ethernet device, and
 * its VLAN ID. The default configuration is to have VLAN's scope
 * to be box-wide, so the MAC will be ignored. The mac will only be
 * looked at if we are configured to have a separate set of VLANs per
* each MAC addressable interface. Note that this latter option does
* NOT follow the spec for VLANs, but may be useful for doing very
* large quantities of VLAN MUX/DEMUX onto FrameRelay or ATM PVCs.
*/
struct net_device *find_802_1Q_vlan_dev(struct net_device* real_dev,
unsigned short VID); /* vlan.c */
#define VLAN_VID_MASK 0xfff
/* found in af_inet.c */
extern int (*vlan_ioctl_hook)(unsigned long arg);
/* found in vlan_dev.c */
struct net_device_stats* vlan_dev_get_stats(struct net_device* dev);
int vlan_dev_rebuild_header(struct sk_buff *skb);
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type* ptype);
int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, void *daddr, void *saddr,
unsigned len);
int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
int vlan_dev_set_mac_address(struct net_device *dev, void* addr);
int vlan_dev_open(struct net_device* dev);
int vlan_dev_stop(struct net_device* dev);
int vlan_dev_init(struct net_device* dev);
void vlan_dev_destruct(struct net_device* dev);
void vlan_dev_copy_and_sum(struct sk_buff *dest, unsigned char *src,
int length, int base);
int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val);
/* VLAN multicast stuff */
/* Delete all of the MC list entries from this vlan device. Also deals
* with the underlying device...
*/
void vlan_flush_mc_list(struct net_device* dev);
/* copy the mc_list into the vlan_info structure. */
void vlan_copy_mc_list(struct dev_mc_list* mc_list, struct vlan_dev_info* vlan_info);
/** dmi is a single entry into a dev_mc_list, a single node. mc_list is
* an entire list, and we'll iterate through it.
*/
int vlan_should_add_mc(struct dev_mc_list *dmi, struct dev_mc_list *mc_list);
/** Taken from Gleb + Lennert's VLAN code, and modified... */
void vlan_dev_set_multicast_list(struct net_device *vlan_dev);
int vlan_collection_add_vlan(struct vlan_collection* vc, unsigned short vlan_id,
unsigned short flags);
int vlan_collection_remove_vlan(struct vlan_collection* vc,
struct net_device* vlan_dev);
int vlan_collection_remove_vlan_id(struct vlan_collection* vc, unsigned short vlan_id);
/* found in vlan.c */
/* Our listing of VLAN group(s) */
extern struct vlan_group* p802_1Q_vlan_list;
#define VLAN_NAME "vlan"
/* if this changes, algorithm will have to be reworked because this
* depends on completely exhausting the VLAN identifier space. Thus
* it gives constant time look-up, but it many cases it wastes memory.
* it gives constant time look-up, but in many cases it wastes memory.
*/
#define VLAN_GROUP_ARRAY_LEN 4096
......@@ -170,56 +116,73 @@ struct vlan_dev_info {
/* inline functions */
/* Used in vlan_skb_recv */
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
static inline struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
{
if (VLAN_DEV_INFO(skb->dev)->flags & 1) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
/* Lifted from Gleb's VLAN code... */
memmove(skb->data - ETH_HLEN,
skb->data - VLAN_ETH_HLEN, 12);
skb->mac.raw += VLAN_HLEN;
}
}
return skb;
return &(VLAN_DEV_INFO(dev)->dev_stats);
}
static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device* dev,
struct sk_buff* skb)
static inline __u32 vlan_get_ingress_priority(struct net_device *dev,
unsigned short vlan_tag)
{
struct vlan_priority_tci_mapping *mp =
VLAN_DEV_INFO(dev)->egress_priority_map[(skb->priority & 0xF)];
while (mp) {
if (mp->priority == skb->priority) {
return mp->vlan_qos; /* This should already be shifted to mask
* correctly with the VLAN's TCI
*/
}
mp = mp->next;
}
return 0;
}
struct vlan_dev_info *vip = VLAN_DEV_INFO(dev);
static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
struct dev_mc_list *dmi2)
{
return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
(memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
return vip->ingress_priority_map[(vlan_tag >> 13) & 0x7];
}
static inline void vlan_destroy_mc_list(struct dev_mc_list *mc_list)
/* VLAN tx hw acceleration helpers. */
struct vlan_skb_tx_cookie {
u32 magic;
u32 vlan_tag;
};
#define VLAN_TX_COOKIE_MAGIC 0x564c414e /* "VLAN" in ascii. */
#define VLAN_TX_SKB_CB(__skb) ((struct vlan_skb_tx_cookie *)&((__skb)->cb[0]))
#define vlan_tx_tag_present(__skb) \
(VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC)
#define vlan_tx_tag_get(__skb) (VLAN_TX_SKB_CB(__skb)->vlan_tag)
/* VLAN rx hw acceleration helper. This acts like netif_rx(). */
static inline int vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
unsigned short vlan_tag)
{
struct dev_mc_list *dmi = mc_list;
struct dev_mc_list *next;
struct net_device_stats *stats;
while(dmi) {
next = dmi->next;
kfree(dmi);
dmi = next;
skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
if (skb->dev == NULL) {
kfree_skb(skb);
/* Not NET_RX_DROP, this is not being dropped
* due to congestion.
*/
return 0;
}
skb->dev->last_rx = jiffies;
stats = vlan_dev_get_stats(skb->dev);
stats->rx_packets++;
stats->rx_bytes += skb->len;
skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag);
switch (skb->pkt_type) {
case PACKET_BROADCAST:
break;
case PACKET_MULTICAST:
stats->multicast++;
break;
case PACKET_OTHERHOST:
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the underlying
* device, and still route correctly.
*/
if (!memcmp(skb->mac.ethernet->h_dest, skb->dev->dev_addr, ETH_ALEN))
skb->pkt_type = PACKET_HOST;
break;
};
return netif_rx(skb);
}
#endif /* __KERNEL__ */
......
......@@ -116,17 +116,24 @@ struct ip_options {
#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
struct inet_opt {
/* Socket demultiplex comparisons on incoming packets. */
__u32 daddr; /* Foreign IPv4 addr */
__u32 rcv_saddr; /* Bound local IPv4 addr */
__u16 dport; /* Destination port */
__u16 num; /* Local port */
__u32 saddr; /* Sending source */
int ttl; /* TTL setting */
int tos; /* TOS */
unsigned cmsg_flags;
struct ip_options *opt;
__u16 sport; /* Source port */
unsigned char hdrincl; /* Include headers ? */
__u8 mc_ttl; /* Multicasting TTL */
__u8 mc_loop; /* Loopback */
__u8 pmtudisc;
__u16 id; /* ID counter for DF pkts */
unsigned recverr : 1,
freebind : 1;
__u16 id; /* ID counter for DF pkts */
__u8 pmtudisc;
int mc_index; /* Multicast device index */
__u32 mc_addr;
struct ip_mc_socklist *mc_list; /* Group array */
......
......@@ -40,6 +40,7 @@
#endif
struct divert_blk;
struct vlan_group;
#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
functions are available. */
......@@ -357,6 +358,10 @@ struct net_device
#define NETIF_F_DYNALLOC 16 /* Self-destructible device. */
#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
/* Called after device is detached from network. */
void (*uninit)(struct net_device *dev);
......@@ -398,6 +403,13 @@ struct net_device
#define HAVE_TX_TIMEOUT
void (*tx_timeout) (struct net_device *dev);
void (*vlan_rx_register)(struct net_device *dev,
struct vlan_group *grp);
void (*vlan_rx_add_vid)(struct net_device *dev,
unsigned short vid);
void (*vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid);
int (*hard_header_parse)(struct sk_buff *skb,
unsigned char *haddr);
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
......
......@@ -82,10 +82,7 @@ struct ip_conntrack_expect
#endif
#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
#if defined(CONFIG_IP_NF_IRC) || defined(CONFIG_IP_NF_IRC_MODULE)
#include <linux/netfilter_ipv4/ip_conntrack_irc.h>
#endif
struct ip_conntrack
{
......@@ -125,9 +122,7 @@ struct ip_conntrack
union {
struct ip_ct_ftp ct_ftp_info;
#if defined(CONFIG_IP_NF_IRC) || defined(CONFIG_IP_NF_IRC_MODULE)
struct ip_ct_irc ct_irc_info;
#endif
} help;
#ifdef CONFIG_IP_NF_NAT_NEEDED
......
......@@ -197,7 +197,8 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str
* does not change, they drop every other packet in
* a TCP stream using header compression.
*/
iph->id = (sk && sk->daddr) ? htons(inet_sk(sk)->id++) : 0;
iph->id = (sk && inet_sk(sk)->daddr) ?
htons(inet_sk(sk)->id++) : 0;
} else
__ip_select_ident(iph, dst);
}
......
......@@ -83,28 +83,22 @@ do { spin_lock_init(&((__sk)->lock.slock)); \
} while(0);
struct sock {
/* Socket demultiplex comparisons on incoming packets. */
__u32 daddr; /* Foreign IPv4 addr */
__u32 rcv_saddr; /* Bound local IPv4 addr */
__u16 dport; /* Destination port */
unsigned short num; /* Local port */
int bound_dev_if; /* Bound device index if != 0 */
/* Begin of struct sock/struct tcp_tw_bucket shared layout */
volatile unsigned char state, /* Connection state */
zapped; /* ax25 & ipx means !linked */
unsigned char reuse; /* SO_REUSEADDR setting */
unsigned char shutdown;
int bound_dev_if; /* Bound device index if != 0 */
/* Main hash linkage for various protocol lookup tables. */
struct sock *next;
struct sock **pprev;
struct sock *bind_next;
struct sock **bind_pprev;
volatile unsigned char state, /* Connection state */
zapped; /* In ax25 & ipx means not linked */
__u16 sport; /* Source port */
unsigned short family; /* Address family */
unsigned char reuse; /* SO_REUSEADDR setting */
unsigned char shutdown;
atomic_t refcnt; /* Reference count */
unsigned short family; /* Address family */
/* End of struct sock/struct tcp_tw_bucket shared layout */
unsigned char use_write_queue;
unsigned char userlocks;
socket_lock_t lock; /* Synchronizer... */
int rcvbuf; /* Size of receive buffer in bytes */
......@@ -118,7 +112,6 @@ struct sock {
atomic_t omem_alloc; /* "o" is "option" or "other" */
int wmem_queued; /* Persistent queue size */
int forward_alloc; /* Space allocated forward. */
__u32 saddr; /* Sending source */
unsigned int allocation; /* Allocation mode */
int sndbuf; /* Size of send buffer in bytes */
struct sock *prev;
......@@ -137,9 +130,7 @@ struct sock {
bsdism;
unsigned char debug;
unsigned char rcvtstamp;
unsigned char use_write_queue;
unsigned char userlocks;
/* Hole of 3 bytes. Try to pack. */
/* Hole of 1 byte. Try to pack. */
int route_caps;
int proc;
unsigned long lingertime;
......@@ -759,16 +750,13 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256
/* Must be less or equal SOCK_MIN_SNDBUF */
#define SOCK_MIN_WRITE_SPACE SOCK_MIN_SNDBUF
/*
* Default write policy as shown to user space via poll/select/SIGIO
* Kernel internally doesn't use the MIN_WRITE_SPACE threshold.
*/
static inline int sock_writeable(struct sock *sk)
{
return sock_wspace(sk) >= SOCK_MIN_WRITE_SPACE;
return atomic_read(&sk->wmem_alloc) < (sk->sndbuf / 2);
}
static inline int gfp_any(void)
......
......@@ -53,7 +53,7 @@ struct tcp_ehash_bucket {
* 2) If all sockets have sk->reuse set, and none of them are in
* TCP_LISTEN state, the port may be shared.
* Failing that, goto test 3.
* 3) If all sockets are bound to a specific sk->rcv_saddr local
* 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
* address, and none of them are the same, the port may be
* shared.
* Failing this, the port cannot be shared.
......@@ -162,23 +162,26 @@ struct tcp_tw_bucket {
* XXX Yes I know this is gross, but I'd have to edit every single
* XXX networking file if I created a "struct sock_header". -DaveM
*/
__u32 daddr;
__u32 rcv_saddr;
__u16 dport;
unsigned short num;
volatile unsigned char state, /* Connection state */
substate; /* "zapped" -> "substate" */
unsigned char reuse; /* SO_REUSEADDR setting */
unsigned char rcv_wscale; /* also TW bucket specific */
int bound_dev_if;
/* Main hash linkage for various protocol lookup tables. */
struct sock *next;
struct sock **pprev;
struct sock *bind_next;
struct sock **bind_pprev;
unsigned char state,
substate; /* "zapped" is replaced with "substate" */
__u16 sport;
unsigned short family;
unsigned char reuse,
rcv_wscale; /* It is also TW bucket specific */
atomic_t refcnt;
unsigned short family;
/* End of struct sock/struct tcp_tw_bucket shared layout */
__u16 sport;
/* Socket demultiplex comparisons on incoming packets. */
/* these five are in inet_opt */
__u32 daddr;
__u32 rcv_saddr;
__u16 dport;
__u16 num;
/* And these are ours. */
int hashent;
int timeout;
......@@ -236,20 +239,20 @@ extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
(((*((__u64 *)&((__sk)->daddr)))== (__cookie)) && \
((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
(((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
(!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
(((__sk)->daddr == (__saddr)) && \
((__sk)->rcv_saddr == (__daddr)) && \
((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
((inet_sk(__sk)->daddr == (__saddr)) && \
(inet_sk(__sk)->rcv_saddr == (__daddr)) && \
((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
(!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#endif /* 64-bit arch */
#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
(((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
(((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
((__sk)->family == AF_INET6) && \
!ipv6_addr_cmp(&inet6_sk(__sk)->daddr, (__saddr)) && \
!ipv6_addr_cmp(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
......@@ -263,7 +266,7 @@ static __inline__ int tcp_lhashfn(unsigned short num)
static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
{
return tcp_lhashfn(sk->num);
return tcp_lhashfn(inet_sk(sk)->num);
}
#define MAX_TCP_HEADER (128 + MAX_HEADER)
......
......@@ -23,6 +23,7 @@
#define _UDP_H
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#define UDP_HTABLE_SIZE 128
......@@ -41,7 +42,7 @@ static inline int udp_lport_inuse(u16 num)
struct sock *sk = udp_hash[num & (UDP_HTABLE_SIZE - 1)];
for(; sk != NULL; sk = sk->next) {
if(sk->num == num)
if (inet_sk(sk)->num == num)
return 1;
}
return 0;
......
This diff is collapsed.
......@@ -30,14 +30,48 @@ I'll bet they might prove useful again... --Ben
extern unsigned short vlan_name_type;
/* Counter for how many NON-VLAN protos we've received on a VLAN. */
extern unsigned long vlan_bad_proto_recvd;
int vlan_ioctl_handler(unsigned long arg);
/* Add some headers for the public VLAN methods. */
int unregister_802_1Q_vlan_device(const char* vlan_IF_name);
struct net_device *register_802_1Q_vlan_device(const char* eth_IF_name,
unsigned short VID);
#define VLAN_GRP_HASH_SHIFT 5
#define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT)
#define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1)
extern struct vlan_group *vlan_group_hash[VLAN_GRP_HASH_SIZE];
extern spinlock_t vlan_group_lock;
/* Find a VLAN device by the MAC address of its Ethernet device, and
 * its VLAN ID. The default configuration is to have VLAN's scope
 * to be box-wide, so the MAC will be ignored. The mac will only be
 * looked at if we are configured to have a separate set of VLANs per
* each MAC addressable interface. Note that this latter option does
* NOT follow the spec for VLANs, but may be useful for doing very
* large quantities of VLAN MUX/DEMUX onto FrameRelay or ATM PVCs.
*
* Must be invoked with vlan_group_lock held and that lock MUST NOT
* be dropped until a reference is obtained on the returned device.
* You may drop the lock earlier if you are running under the RTNL
* semaphore, however.
*/
struct net_device *__find_vlan_dev(struct net_device* real_dev,
unsigned short VID); /* vlan.c */
/* found in vlan_dev.c */
int vlan_dev_rebuild_header(struct sk_buff *skb);
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type* ptype);
int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, void *daddr, void *saddr,
unsigned len);
int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
int vlan_dev_set_mac_address(struct net_device *dev, void* addr);
int vlan_dev_open(struct net_device* dev);
int vlan_dev_stop(struct net_device* dev);
int vlan_dev_init(struct net_device* dev);
void vlan_dev_destruct(struct net_device* dev);
int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val);
void vlan_dev_set_multicast_list(struct net_device *vlan_dev);
#endif /* !(__BEN_VLAN_802_1Q_INC__) */
This diff is collapsed.
......@@ -272,7 +272,7 @@ static int vlan_proc_get_vlan_info(char* buf, unsigned int cnt)
{
struct net_device *vlandev = NULL;
struct vlan_group *grp = NULL;
int i = 0;
int h, i;
char *nm_type = NULL;
struct vlan_dev_info *dev_info = NULL;
......@@ -292,46 +292,34 @@ static int vlan_proc_get_vlan_info(char* buf, unsigned int cnt)
nm_type = "UNKNOWN";
}
cnt += sprintf(buf + cnt, "Name-Type: %s bad_proto_recvd: %lu\n",
nm_type, vlan_bad_proto_recvd);
cnt += sprintf(buf + cnt, "Name-Type: %s\n", nm_type);
for (grp = p802_1Q_vlan_list; grp != NULL; grp = grp->next) {
/* loop through all devices for this device */
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": found a group, addr: %p\n",grp);
#endif
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = grp->vlan_devices[i];
if (!vlandev)
continue;
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__
": found a vlan_dev, addr: %p\n", vlandev);
#endif
if ((cnt + 100) > VLAN_PROC_BUFSZ) {
if ((cnt+strlen(term_msg)) < VLAN_PROC_BUFSZ)
cnt += sprintf(buf+cnt, "%s", term_msg);
spin_lock_bh(&vlan_group_lock);
for (h = 0; h < VLAN_GRP_HASH_SIZE; h++) {
for (grp = vlan_group_hash[h]; grp != NULL; grp = grp->next) {
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = grp->vlan_devices[i];
if (!vlandev)
continue;
return cnt;
}
if (!vlandev->priv) {
printk(KERN_ERR __FUNCTION__
": ERROR: vlandev->priv is NULL\n");
continue;
}
if ((cnt + 100) > VLAN_PROC_BUFSZ) {
if ((cnt+strlen(term_msg)) < VLAN_PROC_BUFSZ)
cnt += sprintf(buf+cnt, "%s", term_msg);
dev_info = VLAN_DEV_INFO(vlandev);
goto out;
}
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__
": got a good vlandev, addr: %p\n",
VLAN_DEV_INFO(vlandev));
#endif
cnt += sprintf(buf + cnt, "%-15s| %d | %s\n",
vlandev->name, dev_info->vlan_id,
dev_info->real_dev->name);
dev_info = VLAN_DEV_INFO(vlandev);
cnt += sprintf(buf + cnt, "%-15s| %d | %s\n",
vlandev->name,
dev_info->vlan_id,
dev_info->real_dev->name);
}
}
}
out:
spin_unlock_bh(&vlan_group_lock);
return cnt;
}
......@@ -365,11 +353,7 @@ static int vlandev_get_info(char *buf, char **start,
int cnt = 0;
int i;
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": vlandev: %p\n", vlandev);
#endif
if ((vlandev == NULL) || (!vlandev->priv_flags & IFF_802_1Q_VLAN))
if ((vlandev == NULL) || (!(vlandev->priv_flags & IFF_802_1Q_VLAN)))
return 0;
dev_info = VLAN_DEV_INFO(vlandev);
......@@ -426,7 +410,7 @@ static int vlandev_get_info(char *buf, char **start,
cnt += sprintf(buf + cnt, "EGRESSS priority Mappings: ");
for (i = 0; i<16; i++) {
for (i = 0; i < 16; i++) {
mp = dev_info->egress_priority_map[i];
while (mp) {
cnt += sprintf(buf + cnt, "%lu:%hu ",
......
......@@ -44,10 +44,8 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate ' Multi-Protocol Over ATM (MPOA) support' CONFIG_ATM_MPOA
fi
fi
dep_tristate '802.1Q VLAN Support (EXPERIMENTAL)' CONFIG_VLAN_8021Q $CONFIG_EXPERIMENTAL
fi
tristate '802.1Q VLAN Support' CONFIG_VLAN_8021Q
comment ' '
tristate 'The IPX protocol' CONFIG_IPX
......
......@@ -554,7 +554,7 @@ static int econet_create(struct socket *sock, int protocol)
memset(eo, 0, sizeof(*eo));
sk->zapped=0;
sk->family = PF_ECONET;
sk->num = protocol;
eo->num = protocol;
sklist_insert_socket(&econet_sklist, sk);
return(0);
......
......@@ -270,14 +270,15 @@ int inet_getsockopt(struct socket *sock, int level, int optname,
static int inet_autobind(struct sock *sk)
{
struct inet_opt *inet = inet_sk(sk);
/* We may need to bind the socket. */
lock_sock(sk);
if (sk->num == 0) {
if (!inet->num) {
if (sk->prot->get_port(sk, 0) != 0) {
release_sock(sk);
return -EAGAIN;
}
sk->sport = htons(sk->num);
inet->sport = htons(inet->num);
}
release_sock(sk);
return 0;
......@@ -397,7 +398,7 @@ static int inet_create(struct socket *sock, int protocol)
inet = inet_sk(sk);
if (SOCK_RAW == sock->type) {
sk->num = protocol;
inet->num = protocol;
if (IPPROTO_RAW == protocol)
inet->hdrincl = 1;
}
......@@ -430,13 +431,13 @@ static int inet_create(struct socket *sock, int protocol)
atomic_inc(&inet_sock_nr);
#endif
if (sk->num) {
if (inet->num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
* creation time automatically
* shares.
*/
sk->sport = htons(sk->num);
inet->sport = htons(inet->num);
/* Add to protocol hash chains. */
sk->prot->hash(sk);
......@@ -551,28 +552,27 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Check these errors (active socket, double bind). */
err = -EINVAL;
if ((sk->state != TCP_CLOSE) ||
(sk->num != 0))
if (sk->state != TCP_CLOSE || inet->num)
goto out;
sk->rcv_saddr = sk->saddr = addr->sin_addr.s_addr;
inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
sk->saddr = 0; /* Use device */
inet->saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */
if (sk->prot->get_port(sk, snum) != 0) {
sk->saddr = sk->rcv_saddr = 0;
inet->saddr = inet->rcv_saddr = 0;
err = -EADDRINUSE;
goto out;
}
if (sk->rcv_saddr)
if (inet->rcv_saddr)
sk->userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
sk->userlocks |= SOCK_BINDPORT_LOCK;
sk->sport = htons(sk->num);
sk->daddr = 0;
sk->dport = 0;
inet->sport = htons(inet->num);
inet->daddr = 0;
inet->dport = 0;
sk_dst_reset(sk);
err = 0;
out:
......@@ -588,7 +588,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
if (uaddr->sa_family == AF_UNSPEC)
return sk->prot->disconnect(sk, flags);
if (sk->num==0 && inet_autobind(sk) != 0)
if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
return sk->prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
}
......@@ -627,6 +627,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
int addr_len, int flags)
{
struct sock *sk=sock->sk;
struct inet_opt *inet = inet_sk(sk);
int err;
long timeo;
......@@ -655,10 +656,10 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
goto out;
err = -EAGAIN;
if (sk->num == 0) {
if (!inet->num) {
if (sk->prot->get_port(sk, 0) != 0)
goto out;
sk->sport = htons(sk->num);
inet->sport = htons(inet->num);
}
err = sk->prot->connect(sk, uaddr, addr_len);
......@@ -748,21 +749,22 @@ static int inet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
struct inet_opt *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
sin->sin_family = AF_INET;
if (peer) {
if (!sk->dport)
if (!inet->dport)
return -ENOTCONN;
if (((1<<sk->state)&(TCPF_CLOSE|TCPF_SYN_SENT)) && peer == 1)
return -ENOTCONN;
sin->sin_port = sk->dport;
sin->sin_addr.s_addr = sk->daddr;
sin->sin_port = inet->dport;
sin->sin_addr.s_addr = inet->daddr;
} else {
__u32 addr = sk->rcv_saddr;
__u32 addr = inet->rcv_saddr;
if (!addr)
addr = sk->saddr;
sin->sin_port = sk->sport;
addr = inet->saddr;
sin->sin_port = inet->sport;
sin->sin_addr.s_addr = addr;
}
*uaddr_len = sizeof(*sin);
......@@ -792,7 +794,7 @@ int inet_sendmsg(struct socket *sock, struct msghdr *msg, int size,
struct sock *sk = sock->sk;
/* We may need to bind the socket. */
if (sk->num==0 && inet_autobind(sk) != 0)
if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
return sk->prot->sendmsg(sk, msg, size);
......
......@@ -166,7 +166,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
/* If socket is bound to an interface, only report
* the packet if it came from that interface.
*/
if (sk && sk->num == protocol
if (sk && inet_sk(sk)->num == protocol
&& ((sk->bound_dev_if == 0)
|| (sk->bound_dev_if == skb->dev->ifindex))) {
if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
......
......@@ -135,9 +135,10 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->version = 4;
iph->ihl = 5;
iph->tos = inet->tos;
iph->frag_off = 0;
if (ip_dont_fragment(sk, &rt->u.dst))
iph->frag_off |= htons(IP_DF);
iph->frag_off = __constant_htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = inet->ttl;
iph->daddr = rt->rt_dst;
iph->saddr = rt->rt_src;
......@@ -308,9 +309,6 @@ static inline int ip_queue_xmit2(struct sk_buff *skb)
if (skb->len > rt->u.dst.pmtu)
goto fragment;
if (ip_dont_fragment(sk, &rt->u.dst))
iph->frag_off |= __constant_htons(IP_DF);
ip_select_ident(iph, &rt->u.dst, sk);
/* Add an IP checksum. */
......@@ -324,7 +322,6 @@ static inline int ip_queue_xmit2(struct sk_buff *skb)
/* Reject packet ONLY if TCP might fragment
* it itself, if were careful enough.
*/
iph->frag_off |= __constant_htons(IP_DF);
NETDEBUG(printk(KERN_DEBUG "sending pkt_too_big to self\n"));
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
......@@ -360,7 +357,7 @@ int ip_queue_xmit(struct sk_buff *skb)
u32 daddr;
/* Use correct destination address if we have options. */
daddr = sk->daddr;
daddr = inet->daddr;
if(opt && opt->srr)
daddr = opt->faddr;
......@@ -368,7 +365,7 @@ int ip_queue_xmit(struct sk_buff *skb)
* keep trying until route appears or the connection times itself
* out.
*/
if (ip_route_output(&rt, daddr, sk->saddr,
if (ip_route_output(&rt, daddr, inet->saddr,
RT_CONN_FLAGS(sk),
sk->bound_dev_if))
goto no_route;
......@@ -385,7 +382,10 @@ int ip_queue_xmit(struct sk_buff *skb)
iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
iph->tot_len = htons(skb->len);
iph->frag_off = 0;
if (ip_dont_fragment(sk, &rt->u.dst))
iph->frag_off = __constant_htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = inet->ttl;
iph->protocol = sk->protocol;
iph->saddr = rt->rt_src;
......@@ -395,7 +395,7 @@ int ip_queue_xmit(struct sk_buff *skb)
if(opt && opt->optlen) {
iph->ihl += opt->optlen >> 2;
ip_options_build(skb, opt, sk->daddr, rt, 0);
ip_options_build(skb, opt, inet->daddr, rt, 0);
}
return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
......@@ -452,7 +452,7 @@ static int ip_build_xmit_slow(struct sock *sk,
mtu = rt->u.dst.pmtu;
if (ip_dont_fragment(sk, &rt->u.dst))
df = htons(IP_DF);
df = __constant_htons(IP_DF);
length -= sizeof(struct iphdr);
......@@ -471,7 +471,7 @@ static int ip_build_xmit_slow(struct sock *sk,
}
if (length + fragheaderlen > 0xFFFF) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
return -EMSGSIZE;
}
......@@ -503,7 +503,7 @@ static int ip_build_xmit_slow(struct sock *sk,
*/
if (offset > 0 && inet->pmtudisc == IP_PMTUDISC_DO) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
......@@ -573,7 +573,7 @@ static int ip_build_xmit_slow(struct sock *sk,
/*
* Any further fragments will have MF set.
*/
mf = htons(IP_MF);
mf = __constant_htons(IP_MF);
}
if (rt->rt_type == RTN_MULTICAST)
iph->ttl = inet->mc_ttl;
......@@ -659,7 +659,8 @@ int ip_build_xmit(struct sock *sk,
return ip_build_xmit_slow(sk,getfrag,frag,length,ipc,rt,flags);
} else {
if (length > rt->u.dst.dev->mtu) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, rt->u.dst.dev->mtu);
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
rt->u.dst.dev->mtu);
return -EMSGSIZE;
}
}
......@@ -671,7 +672,7 @@ int ip_build_xmit(struct sock *sk,
*/
df = 0;
if (ip_dont_fragment(sk, &rt->u.dst))
df = htons(IP_DF);
df = __constant_htons(IP_DF);
/*
* Fast path for unfragmented frames without options.
......@@ -775,7 +776,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
*/
offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
not_last_frag = iph->frag_off & htons(IP_MF);
not_last_frag = iph->frag_off & __constant_htons(IP_MF);
/*
* Keep copying data until we run out.
......@@ -860,7 +861,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
* last fragment then keep MF on each bit
*/
if (left > 0 || not_last_frag)
iph->frag_off |= htons(IP_MF);
iph->frag_off |= __constant_htons(IP_MF);
ptr += len;
offset += len;
......
......@@ -193,7 +193,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s
{
struct ip_ra_chain *ra, *new_ra, **rap;
if (sk->type != SOCK_RAW || sk->num == IPPROTO_RAW)
if (sk->type != SOCK_RAW || inet_sk(sk)->num == IPPROTO_RAW)
return -EINVAL;
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
......@@ -435,7 +435,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->family == PF_INET ||
(!((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
&& sk->daddr != LOOPBACK4_IPV6)) {
&& inet->daddr != LOOPBACK4_IPV6)) {
#endif
if (opt)
tp->ext_header_len = opt->optlen;
......@@ -771,8 +771,8 @@ int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *op
if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
struct in_pktinfo info;
info.ipi_addr.s_addr = sk->rcv_saddr;
info.ipi_spec_dst.s_addr = sk->rcv_saddr;
info.ipi_addr.s_addr = inet->rcv_saddr;
info.ipi_spec_dst.s_addr = inet->rcv_saddr;
info.ipi_ifindex = inet->mc_index;
put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
......
......@@ -75,9 +75,7 @@ if [ "$CONFIG_IP_NF_IPTABLES" != "n" ]; then
dep_tristate ' MARK target support' CONFIG_IP_NF_TARGET_MARK $CONFIG_IP_NF_MANGLE
fi
dep_tristate ' LOG target support' CONFIG_IP_NF_TARGET_LOG $CONFIG_IP_NF_IPTABLES
if [ "$CONFIG_NETLINK" != "n" ]; then
dep_tristate ' ULOG target support' CONFIG_IP_NF_TARGET_ULOG $CONFIG_NETLINK $CONFIG_IP_NF_IPTABLES
fi
dep_tristate ' ULOG target support' CONFIG_IP_NF_TARGET_ULOG $CONFIG_IP_NF_IPTABLES
dep_tristate ' TCPMSS target support' CONFIG_IP_NF_TARGET_TCPMSS $CONFIG_IP_NF_IPTABLES
fi
......
......@@ -31,15 +31,13 @@ ipchains-objs := $(ip_nf_compat-objs) ipchains_core.o
# connection tracking
obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o
# IRC support
obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o
obj-$(CONFIG_IP_NF_NAT_IRC) += ip_nat_irc.o
# connection tracking helpers
obj-$(CONFIG_IP_NF_FTP) += ip_conntrack_ftp.o
obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o
# NAT helpers
obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o
obj-$(CONFIG_IP_NF_NAT_IRC) += ip_nat_irc.o
# generic IP tables
obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
......
......@@ -969,9 +969,12 @@ ip_ct_selective_cleanup(int (*kill)(const struct ip_conntrack *i, void *data),
static int
getorigdst(struct sock *sk, int optval, void *user, int *len)
{
struct inet_opt *inet = inet_sk(sk);
struct ip_conntrack_tuple_hash *h;
struct ip_conntrack_tuple tuple = { { sk->rcv_saddr, { sk->sport } },
{ sk->daddr, { sk->dport },
struct ip_conntrack_tuple tuple = { { inet->rcv_saddr,
{ inet->sport } },
{ inet->daddr,
{ inet->dport },
IPPROTO_TCP } };
/* We only do TCP at the moment: is there a better way? */
......
......@@ -308,6 +308,8 @@ module_init(init);
module_exit(fini);
EXPORT_SYMBOL(ip_nat_setup_info);
EXPORT_SYMBOL(ip_nat_protocol_register);
EXPORT_SYMBOL(ip_nat_protocol_unregister);
EXPORT_SYMBOL(ip_nat_helper_register);
EXPORT_SYMBOL(ip_nat_helper_unregister);
EXPORT_SYMBOL(ip_nat_expect_register);
......@@ -316,4 +318,5 @@ EXPORT_SYMBOL(ip_nat_cheat_check);
EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
EXPORT_SYMBOL(ip_nat_seq_adjust);
EXPORT_SYMBOL(ip_nat_delete_sack);
EXPORT_SYMBOL(ip_nat_used_tuple);
MODULE_LICENSE("GPL");
......@@ -234,11 +234,8 @@ static void send_unreach(struct sk_buff *skb_in, int code)
iph->tos=tos;
iph->tot_len = htons(length);
/* This abbreviates icmp->send->ip_build_xmit->ip_dont_fragment */
if (!ipv4_config.no_pmtu_disc
&& !(rt->u.dst.mxlock&(1<<RTAX_MTU)))
iph->frag_off = htons(IP_DF);
else iph->frag_off = 0;
/* PMTU discovery never applies to ICMP packets. */
iph->frag_off = 0;
iph->ttl = MAXTTL;
ip_select_ident(iph, &rt->u.dst, NULL);
......
......@@ -70,7 +70,8 @@ rwlock_t raw_v4_lock = RW_LOCK_UNLOCKED;
static void raw_v4_hash(struct sock *sk)
{
struct sock **skp = &raw_v4_htable[sk->num & (RAWV4_HTABLE_SIZE - 1)];
struct sock **skp = &raw_v4_htable[inet_sk(sk)->num &
(RAWV4_HTABLE_SIZE - 1)];
write_lock_bh(&raw_v4_lock);
if ((sk->next = *skp) != NULL)
......@@ -103,9 +104,11 @@ struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
struct sock *s = sk;
for (s = sk; s; s = s->next) {
if (s->num == num &&
!(s->daddr && s->daddr != raddr) &&
!(s->rcv_saddr && s->rcv_saddr != laddr) &&
struct inet_opt *inet = inet_sk(s);
if (inet->num == num &&
!(inet->daddr && inet->daddr != raddr) &&
!(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
!(s->bound_dev_if && s->bound_dev_if != dif))
break; /* gotcha */
}
......@@ -364,10 +367,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, int len)
err = -EINVAL;
if (sk->state != TCP_ESTABLISHED)
goto out;
daddr = sk->daddr;
daddr = inet->daddr;
}
ipc.addr = sk->saddr;
ipc.addr = inet->saddr;
ipc.opt = NULL;
ipc.oif = sk->bound_dev_if;
......@@ -458,6 +461,7 @@ static void raw_close(struct sock *sk, long timeout)
/* This gets rid of all the nasties in af_inet. -DaveM */
static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_opt *inet = inet_sk(sk);
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
int ret = -EINVAL;
int chk_addr_ret;
......@@ -469,9 +473,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
sk->rcv_saddr = sk->saddr = addr->sin_addr.s_addr;
inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
sk->saddr = 0; /* Use device */
inet->saddr = 0; /* Use device */
sk_dst_reset(sk);
ret = 0;
out: return ret;
......@@ -534,7 +538,7 @@ out: return err ? : copied;
static int raw_init(struct sock *sk)
{
struct raw_opt *tp = raw4_sk(sk);
if (sk->num == IPPROTO_ICMP)
if (inet_sk(sk)->num == IPPROTO_ICMP)
memset(&tp->filter, 0, sizeof(tp->filter));
return 0;
}
......@@ -574,7 +578,7 @@ static int raw_setsockopt(struct sock *sk, int level, int optname,
return ip_setsockopt(sk, level, optname, optval, optlen);
if (optname == ICMP_FILTER) {
if (sk->num != IPPROTO_ICMP)
if (inet_sk(sk)->num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_seticmpfilter(sk, optval, optlen);
......@@ -589,7 +593,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
return ip_getsockopt(sk, level, optname, optval, optlen);
if (optname == ICMP_FILTER) {
if (sk->num != IPPROTO_ICMP)
if (inet_sk(sk)->num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_geticmpfilter(sk, optval, optlen);
......@@ -627,13 +631,14 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
static void get_raw_sock(struct sock *sp, char *tmpbuf, int i)
{
unsigned int dest = sp->daddr,
src = sp->rcv_saddr;
struct inet_opt *inet = inet_sk(sp);
unsigned int dest = inet->daddr,
src = inet->rcv_saddr;
__u16 destp = 0,
srcp = sp->num;
srcp = inet->num;
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
i, src, srcp, dest, destp, sp->state,
atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
0, 0L, 0,
......
......@@ -524,6 +524,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
int tcp_listen_start(struct sock *sk)
{
struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt;
......@@ -552,8 +553,8 @@ int tcp_listen_start(struct sock *sk)
* after validation is complete.
*/
sk->state = TCP_LISTEN;
if (sk->prot->get_port(sk, sk->num) == 0) {
sk->sport = htons(sk->num);
if (!sk->prot->get_port(sk, inet->num)) {
inet->sport = htons(inet->num);
sk_dst_reset(sk);
sk->prot->hash(sk);
......@@ -1786,8 +1787,8 @@ void tcp_destroy_sock(struct sock *sk)
/* It cannot be in hash table! */
BUG_TRAP(sk->pprev==NULL);
/* If it has not 0 sk->num, it must be bound */
BUG_TRAP(!sk->num || sk->prev!=NULL);
/* If it has not 0 inet_sk(sk)->num, it must be bound */
BUG_TRAP(!inet_sk(sk)->num || sk->prev);
#ifdef TCP_DEBUG
if (sk->zapped) {
......@@ -1988,6 +1989,7 @@ extern __inline__ int tcp_need_reset(int state)
int tcp_disconnect(struct sock *sk, int flags)
{
struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
int old_state;
int err = 0;
......@@ -2015,11 +2017,10 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_writequeue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
sk->dport = 0;
inet->dport = 0;
if (!(sk->userlocks&SOCK_BINDADDR_LOCK)) {
sk->rcv_saddr = 0;
sk->saddr = 0;
inet->rcv_saddr = inet->saddr = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
......@@ -2049,7 +2050,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_sack_reset(tp);
__sk_dst_reset(sk);
BUG_TRAP(!sk->num || sk->prev);
BUG_TRAP(!inet->num || sk->prev);
sk->error_report(sk);
return err;
......
......@@ -44,6 +44,7 @@ static struct sock *tcpnl;
static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
int ext, u32 pid, u32 seq)
{
struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
struct tcpdiagmsg *r;
struct nlmsghdr *nlh;
......@@ -64,10 +65,6 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
r->tcpdiag_timer = 0;
r->tcpdiag_retrans = 0;
r->id.tcpdiag_sport = sk->sport;
r->id.tcpdiag_dport = sk->dport;
r->id.tcpdiag_src[0] = sk->rcv_saddr;
r->id.tcpdiag_dst[0] = sk->daddr;
r->id.tcpdiag_if = sk->bound_dev_if;
*((struct sock **)&r->id.tcpdiag_cookie) = sk;
......@@ -77,6 +74,10 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
if (tmo < 0)
tmo = 0;
r->id.tcpdiag_sport = tw->sport;
r->id.tcpdiag_dport = tw->dport;
r->id.tcpdiag_src[0] = tw->rcv_saddr;
r->id.tcpdiag_dst[0] = tw->daddr;
r->tcpdiag_state = tw->substate;
r->tcpdiag_timer = 3;
r->tcpdiag_expires = (tmo*1000+HZ-1)/HZ;
......@@ -94,6 +95,11 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
return skb->len;
}
r->id.tcpdiag_sport = inet->sport;
r->id.tcpdiag_dport = inet->dport;
r->id.tcpdiag_src[0] = inet->rcv_saddr;
r->id.tcpdiag_dst[0] = inet->daddr;
#ifdef CONFIG_IPV6
if (r->tcpdiag_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
......@@ -291,6 +297,7 @@ int tcpdiag_bc_run(char *bc, int len, struct sock *sk)
{
while (len > 0) {
int yes = 1;
struct inet_opt *inet = inet_sk(sk);
struct tcpdiag_bc_op *op = (struct tcpdiag_bc_op*)bc;
switch (op->code) {
......@@ -300,16 +307,16 @@ int tcpdiag_bc_run(char *bc, int len, struct sock *sk)
yes = 0;
break;
case TCPDIAG_BC_S_GE:
yes = (sk->num >= op[1].no);
yes = inet->num >= op[1].no;
break;
case TCPDIAG_BC_S_LE:
yes = (sk->num <= op[1].no);
yes = inet->num <= op[1].no;
break;
case TCPDIAG_BC_D_GE:
yes = (ntohs(sk->dport) >= op[1].no);
yes = ntohs(inet->dport) >= op[1].no;
break;
case TCPDIAG_BC_D_LE:
yes = (ntohs(sk->dport) <= op[1].no);
yes = ntohs(inet->dport) <= op[1].no;
break;
case TCPDIAG_BC_AUTO:
yes = !(sk->userlocks&SOCK_BINDPORT_LOCK);
......@@ -321,7 +328,8 @@ int tcpdiag_bc_run(char *bc, int len, struct sock *sk)
u32 *addr;
if (cond->port != -1 &&
cond->port != (op->code == TCPDIAG_BC_S_COND ? sk->num : ntohs(sk->dport))) {
cond->port != (op->code == TCPDIAG_BC_S_COND ?
inet->num : ntohs(inet->dport))) {
yes = 0;
break;
}
......@@ -341,9 +349,9 @@ int tcpdiag_bc_run(char *bc, int len, struct sock *sk)
#endif
{
if (op->code == TCPDIAG_BC_S_COND)
addr = &sk->rcv_saddr;
addr = &inet->rcv_saddr;
else
addr = &sk->daddr;
addr = &inet->daddr;
}
if (bitstring_match(addr, cond->addr, cond->prefix_len))
......@@ -453,12 +461,14 @@ int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
for (sk = tcp_listening_hash[i], num = 0;
sk != NULL;
sk = sk->next, num++) {
struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
if (!(r->tcpdiag_states&TCPF_LISTEN) ||
r->id.tcpdiag_dport)
continue;
if (r->id.tcpdiag_sport != sk->sport && r->id.tcpdiag_sport)
if (r->id.tcpdiag_sport != inet->sport &&
r->id.tcpdiag_sport)
continue;
if (bc && !tcpdiag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), sk))
continue;
......@@ -491,13 +501,16 @@ int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
for (sk = head->chain, num = 0;
sk != NULL;
sk = sk->next, num++) {
struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
if (!(r->tcpdiag_states&(1<<sk->state)))
continue;
if (r->id.tcpdiag_sport != sk->sport && r->id.tcpdiag_sport)
if (r->id.tcpdiag_sport != inet->sport &&
r->id.tcpdiag_sport)
continue;
if (r->id.tcpdiag_dport != sk->dport && r->id.tcpdiag_dport)
if (r->id.tcpdiag_dport != inet->dport && r->id.tcpdiag_dport)
continue;
if (bc && !tcpdiag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), sk))
continue;
......@@ -513,13 +526,17 @@ int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
for (sk = tcp_ehash[i+tcp_ehash_size].chain;
sk != NULL;
sk = sk->next, num++) {
struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
if (!(r->tcpdiag_states&(1<<sk->zapped)))
continue;
if (r->id.tcpdiag_sport != sk->sport && r->id.tcpdiag_sport)
if (r->id.tcpdiag_sport != inet->sport &&
r->id.tcpdiag_sport)
continue;
if (r->id.tcpdiag_dport != sk->dport && r->id.tcpdiag_dport)
if (r->id.tcpdiag_dport != inet->dport &&
r->id.tcpdiag_dport)
continue;
if (bc && !tcpdiag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), sk))
continue;
......
......@@ -1329,9 +1329,10 @@ static __inline__ int tcp_packet_delayed(struct tcp_opt *tp)
#if FASTRETRANS_DEBUG > 1
static void DBGUNDO(struct sock *sk, struct tcp_opt *tp, const char *msg)
{
struct inet_opt *inet = inet_sk(sk);
printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
msg,
NIPQUAD(sk->daddr), ntohs(sk->dport),
NIPQUAD(inet->daddr), ntohs(inet->dport),
tp->snd_cwnd, tp->left_out,
tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out);
}
......@@ -2570,15 +2571,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
__set_current_state(TASK_RUNNING);
local_bh_enable();
if (skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov,
chunk)) {
sk->err = EFAULT;
sk->error_report(sk);
if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
eaten = (chunk == skb->len && !th->fin);
}
local_bh_disable();
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
eaten = (chunk == skb->len && !th->fin);
}
if (eaten <= 0) {
......@@ -3178,17 +3176,8 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
tp->ucopy.iov);
if (!err) {
update:
tp->ucopy.len -= chunk;
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
local_bh_disable();
return 0;
}
if (err == -EFAULT) {
sk->err = EFAULT;
sk->error_report(sk);
goto update;
}
local_bh_disable();
......@@ -3327,19 +3316,16 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tp->copied_seq == tp->rcv_nxt &&
len - tcp_header_len <= tp->ucopy.len &&
sk->lock.users) {
eaten = 1;
NET_INC_STATS_BH(TCPHPHitsToUser);
__set_current_state(TASK_RUNNING);
if (tcp_copy_to_iovec(sk, skb, tcp_header_len))
goto csum_error;
__skb_pull(skb,tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
} else {
if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
__skb_pull(skb, tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
NET_INC_STATS_BH(TCPHPHitsToUser);
eaten = 1;
}
}
if (!eaten) {
if (tcp_checksum_complete_user(sk, skb))
goto csum_error;
......
This diff is collapsed.
......@@ -75,17 +75,16 @@ void tcp_timewait_kill(struct tcp_tw_bucket *tw)
/* Disassociate with bind bucket. */
bhead = &tcp_bhash[tcp_bhashfn(tw->num)];
spin_lock(&bhead->lock);
if ((tb = tw->tb) != NULL) {
if(tw->bind_next)
tw->bind_next->bind_pprev = tw->bind_pprev;
*(tw->bind_pprev) = tw->bind_next;
tw->tb = NULL;
if (tb->owners == NULL) {
if (tb->next)
tb->next->pprev = tb->pprev;
*(tb->pprev) = tb->next;
kmem_cache_free(tcp_bucket_cachep, tb);
}
tb = tw->tb;
if(tw->bind_next)
tw->bind_next->bind_pprev = tw->bind_pprev;
*(tw->bind_pprev) = tw->bind_next;
tw->tb = NULL;
if (tb->owners == NULL) {
if (tb->next)
tb->next->pprev = tb->pprev;
*(tb->pprev) = tb->next;
kmem_cache_free(tcp_bucket_cachep, tb);
}
spin_unlock(&bhead->lock);
......@@ -304,9 +303,23 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
struct tcp_bind_hashbucket *bhead;
struct sock **head, *sktw;
/* Step 1: Put TW into bind hash. Original socket stays there too.
Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
binding cache, even if it is closed.
*/
bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
spin_lock(&bhead->lock);
tw->tb = (struct tcp_bind_bucket *)sk->prev;
BUG_TRAP(sk->prev!=NULL);
if ((tw->bind_next = tw->tb->owners) != NULL)
tw->tb->owners->bind_pprev = &tw->bind_next;
tw->tb->owners = (struct sock*)tw;
tw->bind_pprev = &tw->tb->owners;
spin_unlock(&bhead->lock);
write_lock(&ehead->lock);
/* Step 1: Remove SK from established hash. */
/* Step 2: Remove SK from established hash. */
if (sk->pprev) {
if(sk->next)
sk->next->pprev = sk->pprev;
......@@ -315,7 +328,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
sock_prot_dec_use(sk->prot);
}
/* Step 2: Hash TW into TIMEWAIT half of established hash table. */
/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
head = &(ehead + tcp_ehash_size)->chain;
sktw = (struct sock *)tw;
if((sktw->next = *head) != NULL)
......@@ -325,20 +338,6 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
atomic_inc(&tw->refcnt);
write_unlock(&ehead->lock);
/* Step 3: Put TW into bind hash. Original socket stays there too.
Note, that any socket with sk->num!=0 MUST be bound in binding
cache, even if it is closed.
*/
bhead = &tcp_bhash[tcp_bhashfn(sk->num)];
spin_lock(&bhead->lock);
tw->tb = (struct tcp_bind_bucket *)sk->prev;
BUG_TRAP(sk->prev!=NULL);
if ((tw->bind_next = tw->tb->owners) != NULL)
tw->tb->owners->bind_pprev = &tw->bind_next;
tw->tb->owners = (struct sock*)tw;
tw->bind_pprev = &tw->tb->owners;
spin_unlock(&bhead->lock);
}
/*
......@@ -357,17 +356,18 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
if(tw != NULL) {
struct inet_opt *inet = inet_sk(sk);
int rto = (tp->rto<<2) - (tp->rto>>1);
/* Give us an identity. */
tw->daddr = sk->daddr;
tw->rcv_saddr = sk->rcv_saddr;
tw->daddr = inet->daddr;
tw->rcv_saddr = inet->rcv_saddr;
tw->bound_dev_if= sk->bound_dev_if;
tw->num = sk->num;
tw->num = inet->num;
tw->state = TCP_TIME_WAIT;
tw->substate = state;
tw->sport = sk->sport;
tw->dport = sk->dport;
tw->sport = inet->sport;
tw->dport = inet->dport;
tw->family = sk->family;
tw->reuse = sk->reuse;
tw->rcv_wscale = tp->rcv_wscale;
......@@ -660,7 +660,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newsk->prev = NULL;
/* Clone the TCP header template */
newsk->dport = req->rmt_port;
inet_sk(newsk)->dport = req->rmt_port;
sock_lock_init(newsk);
bh_lock_sock(newsk);
......
......@@ -188,6 +188,7 @@ static __inline__ u16 tcp_select_window(struct sock *sk)
int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
if(skb != NULL) {
struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
int tcp_header_size = tp->tcp_header_len;
......@@ -227,8 +228,8 @@ int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
skb_set_owner_w(skb, sk);
/* Build TCP header and checksum it. */
th->source = sk->sport;
th->dest = sk->dport;
th->source = inet->sport;
th->dest = inet->dport;
th->seq = htonl(tcb->seq);
th->ack_seq = htonl(tp->rcv_nxt);
*(((__u16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->flags);
......@@ -1120,7 +1121,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
th->syn = 1;
th->ack = 1;
TCP_ECN_make_synack(req, th);
th->source = sk->sport;
th->source = inet_sk(sk)->sport;
th->dest = req->rmt_port;
TCP_SKB_CB(skb)->seq = req->snt_isn;
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
......
......@@ -334,10 +334,12 @@ static void tcp_retransmit_timer(struct sock *sk)
* we cannot allow such beasts to hang infinitely.
*/
#ifdef TCP_DEBUG
if (net_ratelimit())
if (net_ratelimit()) {
struct inet_opt *inet = inet_sk(sk);
printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n",
NIPQUAD(sk->daddr), htons(sk->dport), sk->num,
tp->snd_una, tp->snd_nxt);
NIPQUAD(inet->daddr), htons(inet->dport),
inet->num, tp->snd_una, tp->snd_nxt);
}
#endif
if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
tcp_write_err(sk);
......
......@@ -108,6 +108,8 @@ int udp_port_rover;
static int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
struct inet_opt *inet = inet_sk(sk);
write_lock_bh(&udp_hash_lock);
if (snum == 0) {
int best_size_so_far, best, result, i;
......@@ -118,11 +120,11 @@ static int udp_v4_get_port(struct sock *sk, unsigned short snum)
best_size_so_far = 32767;
best = result = udp_port_rover;
for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
struct sock *sk;
struct sock *sk2;
int size;
sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
if (!sk) {
sk2 = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
if (!sk2) {
if (result > sysctl_local_port_range[1])
result = sysctl_local_port_range[0] +
((result - sysctl_local_port_range[0]) &
......@@ -133,7 +135,7 @@ static int udp_v4_get_port(struct sock *sk, unsigned short snum)
do {
if (++size >= best_size_so_far)
goto next;
} while ((sk = sk->next) != NULL);
} while ((sk2 = sk2->next) != NULL);
best_size_so_far = size;
best = result;
next:;
......@@ -157,17 +159,19 @@ static int udp_v4_get_port(struct sock *sk, unsigned short snum)
for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
sk2 != NULL;
sk2 = sk2->next) {
if (sk2->num == snum &&
struct inet_opt *inet2 = inet_sk(sk2);
if (inet2->num == snum &&
sk2 != sk &&
sk2->bound_dev_if == sk->bound_dev_if &&
(!sk2->rcv_saddr ||
!sk->rcv_saddr ||
sk2->rcv_saddr == sk->rcv_saddr) &&
(!inet2->rcv_saddr ||
!inet->rcv_saddr ||
inet2->rcv_saddr == inet->rcv_saddr) &&
(!sk2->reuse || !sk->reuse))
goto fail;
}
}
sk->num = snum;
inet->num = snum;
if (sk->pprev == NULL) {
struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
if ((sk->next = *skp) != NULL)
......@@ -198,7 +202,7 @@ static void udp_v4_unhash(struct sock *sk)
sk->next->pprev = sk->pprev;
*sk->pprev = sk->next;
sk->pprev = NULL;
sk->num = 0;
inet_sk(sk)->num = 0;
sock_prot_dec_use(sk->prot);
__sock_put(sk);
}
......@@ -215,20 +219,22 @@ struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport, u32 daddr, u16 dport, i
int badness = -1;
for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
if(sk->num == hnum) {
struct inet_opt *inet = inet_sk(sk);
if (inet->num == hnum) {
int score = 0;
if(sk->rcv_saddr) {
if(sk->rcv_saddr != daddr)
if (inet->rcv_saddr) {
if (inet->rcv_saddr != daddr)
continue;
score++;
}
if(sk->daddr) {
if(sk->daddr != saddr)
if (inet->daddr) {
if (inet->daddr != saddr)
continue;
score++;
}
if(sk->dport) {
if(sk->dport != sport)
if (inet->dport) {
if (inet->dport != sport)
continue;
score++;
}
......@@ -269,10 +275,12 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
struct sock *s = sk;
unsigned short hnum = ntohs(loc_port);
for(; s; s = s->next) {
if ((s->num != hnum) ||
(s->daddr && s->daddr!=rmt_addr) ||
(s->dport != rmt_port && s->dport != 0) ||
(s->rcv_saddr && s->rcv_saddr != loc_addr) ||
struct inet_opt *inet = inet_sk(s);
if (inet->num != hnum ||
(inet->daddr && inet->daddr != rmt_addr) ||
(inet->dport != rmt_port && inet->dport) ||
(inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
(s->bound_dev_if && s->bound_dev_if != dif))
continue;
break;
......@@ -469,15 +477,15 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, int len)
} else {
if (sk->state != TCP_ESTABLISHED)
return -ENOTCONN;
ufh.daddr = sk->daddr;
ufh.uh.dest = sk->dport;
ufh.daddr = inet->daddr;
ufh.uh.dest = inet->dport;
/* Open fast path for connected socket.
Route will not be used, if at least one option is set.
*/
connected = 1;
}
ipc.addr = sk->saddr;
ufh.uh.source = sk->sport;
ipc.addr = inet->saddr;
ufh.uh.source = inet->sport;
ipc.opt = NULL;
ipc.oif = sk->bound_dev_if;
......@@ -728,7 +736,7 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_dst_reset(sk);
err = ip_route_connect(&rt, usin->sin_addr.s_addr, sk->saddr,
err = ip_route_connect(&rt, usin->sin_addr.s_addr, inet->saddr,
RT_CONN_FLAGS(sk), sk->bound_dev_if);
if (err)
return err;
......@@ -736,12 +744,12 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
ip_rt_put(rt);
return -EACCES;
}
if(!sk->saddr)
sk->saddr = rt->rt_src; /* Update source address */
if(!sk->rcv_saddr)
sk->rcv_saddr = rt->rt_src;
sk->daddr = rt->rt_dst;
sk->dport = usin->sin_port;
if (!inet->saddr)
inet->saddr = rt->rt_src; /* Update source address */
if (!inet->rcv_saddr)
inet->rcv_saddr = rt->rt_src;
inet->daddr = rt->rt_dst;
inet->dport = usin->sin_port;
sk->state = TCP_ESTABLISHED;
inet->id = jiffies;
......@@ -751,17 +759,17 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
int udp_disconnect(struct sock *sk, int flags)
{
struct inet_opt *inet = inet_sk(sk);
/*
* 1003.1g - break association.
*/
sk->state = TCP_CLOSE;
sk->daddr = 0;
sk->dport = 0;
inet->daddr = 0;
inet->dport = 0;
sk->bound_dev_if = 0;
if (!(sk->userlocks&SOCK_BINDADDR_LOCK)) {
sk->rcv_saddr = 0;
sk->saddr = 0;
inet->rcv_saddr = inet->saddr = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
......@@ -773,7 +781,7 @@ int udp_disconnect(struct sock *sk, int flags)
}
if (!(sk->userlocks&SOCK_BINDPORT_LOCK)) {
sk->prot->unhash(sk);
sk->sport = 0;
inet->sport = 0;
}
sk_dst_reset(sk);
return 0;
......@@ -962,15 +970,16 @@ int udp_rcv(struct sk_buff *skb)
static void get_udp_sock(struct sock *sp, char *tmpbuf, int i)
{
struct inet_opt *inet = inet_sk(sp);
unsigned int dest, src;
__u16 destp, srcp;
dest = sp->daddr;
src = sp->rcv_saddr;
destp = ntohs(sp->dport);
srcp = ntohs(sp->sport);
dest = inet->daddr;
src = inet->rcv_saddr;
destp = ntohs(inet->dport);
srcp = ntohs(inet->sport);
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
i, src, srcp, dest, destp, sp->state,
atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
0, 0L, 0,
......
......@@ -200,7 +200,7 @@ static int inet6_create(struct socket *sock, int protocol)
inet = inet_sk(sk);
if (SOCK_RAW == sock->type) {
sk->num = protocol;
inet->num = protocol;
if (IPPROTO_RAW == protocol)
inet->hdrincl = 1;
}
......@@ -241,12 +241,12 @@ static int inet6_create(struct socket *sock, int protocol)
#endif
MOD_INC_USE_COUNT;
if (sk->num) {
if (inet->num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
* creation time automatically shares.
*/
sk->sport = ntohs(sk->num);
inet->sport = ntohs(inet->num);
sk->prot->hash(sk);
}
if (sk->prot->init) {
......@@ -278,6 +278,7 @@ static int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr;
struct sock *sk = sock->sk;
struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
__u32 v4addr = 0;
unsigned short snum;
......@@ -318,8 +319,7 @@ static int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
lock_sock(sk);
/* Check these errors (active socket, double bind). */
if ((sk->state != TCP_CLOSE) ||
(sk->num != 0)) {
if (sk->state != TCP_CLOSE || inet->num) {
release_sock(sk);
return -EINVAL;
}
......@@ -340,8 +340,8 @@ static int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
}
}
sk->rcv_saddr = v4addr;
sk->saddr = v4addr;
inet->rcv_saddr = v4addr;
inet->saddr = v4addr;
ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
......@@ -350,8 +350,7 @@ static int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Make sure we are allowed to bind here. */
if (sk->prot->get_port(sk, snum) != 0) {
sk->rcv_saddr = 0;
sk->saddr = 0;
inet->rcv_saddr = inet->saddr = 0;
memset(&np->rcv_saddr, 0, sizeof(struct in6_addr));
memset(&np->saddr, 0, sizeof(struct in6_addr));
......@@ -363,9 +362,9 @@ static int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
sk->userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
sk->userlocks |= SOCK_BINDPORT_LOCK;
sk->sport = ntohs(sk->num);
sk->dport = 0;
sk->daddr = 0;
inet->sport = ntohs(inet->num);
inet->dport = 0;
inet->daddr = 0;
release_sock(sk);
return 0;
......@@ -421,17 +420,18 @@ static int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
{
struct sockaddr_in6 *sin=(struct sockaddr_in6 *)uaddr;
struct sock *sk = sock->sk;
struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
sin->sin6_scope_id = 0;
if (peer) {
if (!sk->dport)
if (!inet->dport)
return -ENOTCONN;
if (((1<<sk->state)&(TCPF_CLOSE|TCPF_SYN_SENT)) && peer == 1)
return -ENOTCONN;
sin->sin6_port = sk->dport;
sin->sin6_port = inet->dport;
memcpy(&sin->sin6_addr, &np->daddr, sizeof(struct in6_addr));
if (np->sndflow)
sin->sin6_flowinfo = np->flow_label;
......@@ -443,7 +443,7 @@ static int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
memcpy(&sin->sin6_addr, &np->rcv_saddr,
sizeof(struct in6_addr));
sin->sin6_port = sk->sport;
sin->sin6_port = inet->sport;
}
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin->sin6_scope_id = sk->bound_dev_if;
......@@ -675,6 +675,11 @@ static int __init inet6_init(void)
*/
inet6_register_protosw(&rawv6_protosw);
/* Register the family here so that the init calls below will
* be able to create sockets. (?? is this dangerous ??)
*/
(void) sock_register(&inet6_family_ops);
/*
* ipngwg API draft makes clear that the correct semantics
* for TCP and UDP is to consider one TCP and UDP instance
......@@ -719,9 +724,6 @@ static int __init inet6_init(void)
udpv6_init();
tcpv6_init();
/* Now the userspace is allowed to create INET6 sockets. */
(void) sock_register(&inet6_family_ops);
return 0;
#ifdef CONFIG_PROC_FS
......
......@@ -79,7 +79,7 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *))
struct ip6_ra_chain *ra, *new_ra, **rap;
/* RA packet may be delivered ONLY to IPPROTO_RAW socket */
if (sk->type != SOCK_RAW || sk->num != IPPROTO_RAW)
if (sk->type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW)
return -EINVAL;
new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
......@@ -283,7 +283,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
if (opt) {
struct tcp_opt *tp = tcp_sk(sk);
if (!((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
&& sk->daddr != LOOPBACK4_IPV6) {
&& inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
tp->ext_header_len = opt->opt_flen + opt->opt_nflen;
tcp_sync_mss(sk, tp->pmtu_cookie);
}
......
......@@ -84,58 +84,63 @@ static int pndisc_constructor(struct pneigh_entry *n);
static void pndisc_destructor(struct pneigh_entry *n);
static void pndisc_redo(struct sk_buff *skb);
static struct neigh_ops ndisc_generic_ops =
{
AF_INET6,
NULL,
ndisc_solicit,
ndisc_error_report,
neigh_resolve_output,
neigh_connected_output,
dev_queue_xmit,
dev_queue_xmit
static struct neigh_ops ndisc_generic_ops = {
family: AF_INET6,
solicit: ndisc_solicit,
error_report: ndisc_error_report,
output: neigh_resolve_output,
connected_output: neigh_connected_output,
hh_output: dev_queue_xmit,
queue_xmit: dev_queue_xmit,
};
static struct neigh_ops ndisc_hh_ops =
{
AF_INET6,
NULL,
ndisc_solicit,
ndisc_error_report,
neigh_resolve_output,
neigh_resolve_output,
dev_queue_xmit,
dev_queue_xmit
static struct neigh_ops ndisc_hh_ops = {
family: AF_INET6,
solicit: ndisc_solicit,
error_report: ndisc_error_report,
output: neigh_resolve_output,
connected_output: neigh_resolve_output,
hh_output: dev_queue_xmit,
queue_xmit: dev_queue_xmit,
};
static struct neigh_ops ndisc_direct_ops =
{
AF_INET6,
NULL,
NULL,
NULL,
dev_queue_xmit,
dev_queue_xmit,
dev_queue_xmit,
dev_queue_xmit
static struct neigh_ops ndisc_direct_ops = {
family: AF_INET6,
output: dev_queue_xmit,
connected_output: dev_queue_xmit,
hh_output: dev_queue_xmit,
queue_xmit: dev_queue_xmit,
};
struct neigh_table nd_tbl =
{
NULL,
AF_INET6,
sizeof(struct neighbour) + sizeof(struct in6_addr),
sizeof(struct in6_addr),
ndisc_hash,
ndisc_constructor,
pndisc_constructor,
pndisc_destructor,
pndisc_redo,
"ndisc_cache",
{ NULL, NULL, &nd_tbl, 0, NULL, NULL,
30*HZ, 1*HZ, 60*HZ, 30*HZ, 5*HZ, 3, 3, 0, 3, 1*HZ, (8*HZ)/10, 64, 0 },
30*HZ, 128, 512, 1024,
/*
 * The IPv6 neighbour-discovery cache table.  Keys are full in6_addrs
 * (key_len) and each entry carries an in6_addr appended after the
 * generic neighbour struct (entry_size).  The embedded parms block
 * provides the default per-device ND timing/limits; parms.tbl points
 * back at this table.  gc_thresh1/2/3 are the usual soft/mid/hard
 * garbage-collection thresholds on total entry count.
 */
struct neigh_table nd_tbl = {
family: AF_INET6,
entry_size: sizeof(struct neighbour) + sizeof(struct in6_addr),
key_len: sizeof(struct in6_addr),
hash: ndisc_hash,
constructor: ndisc_constructor,
pconstructor: pndisc_constructor,
pdestructor: pndisc_destructor,
proxy_redo: pndisc_redo,
id: "ndisc_cache",
parms: {
tbl: &nd_tbl,
base_reachable_time: 30 * HZ,
retrans_time: 1 * HZ,
gc_staletime: 60 * HZ,
reachable_time: 30 * HZ,
delay_probe_time: 5 * HZ,
queue_len: 3,	/* max packets queued while resolving */
ucast_probes: 3,
mcast_probes: 3,
anycast_delay: 1 * HZ,
proxy_delay: (8 * HZ) / 10,
proxy_qlen: 64,
},
gc_interval: 30 * HZ,
gc_thresh1: 128,
gc_thresh2: 512,
gc_thresh3: 1024,
};
#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
......
......@@ -50,7 +50,8 @@ rwlock_t raw_v6_lock = RW_LOCK_UNLOCKED;
static void raw_v6_hash(struct sock *sk)
{
struct sock **skp = &raw_v6_htable[sk->num & (RAWV6_HTABLE_SIZE - 1)];
struct sock **skp = &raw_v6_htable[inet_sk(sk)->num &
(RAWV6_HTABLE_SIZE - 1)];
write_lock_bh(&raw_v6_lock);
if ((sk->next = *skp) != NULL)
......@@ -85,7 +86,7 @@ struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
int addr_type = ipv6_addr_type(loc_addr);
for(s = sk; s; s = s->next) {
if(s->num == num) {
if (inet_sk(s)->num == num) {
struct ipv6_pinfo *np = inet6_sk(s);
if (!ipv6_addr_any(&np->daddr) &&
......@@ -186,6 +187,7 @@ struct sock * ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
__u32 v4addr = 0;
......@@ -233,8 +235,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
}
sk->rcv_saddr = v4addr;
sk->saddr = v4addr;
inet->rcv_saddr = inet->saddr = v4addr;
ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
if (!(addr_type & IPV6_ADDR_MULTICAST))
ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
......@@ -439,6 +440,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
{
struct ipv6_txoptions opt_space;
struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
......@@ -478,7 +480,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
proto = ntohs(sin6->sin6_port);
if (!proto)
proto = sk->num;
proto = inet->num;
if (proto > 255)
return(-EINVAL);
......@@ -507,7 +509,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
if (sk->state != TCP_ESTABLISHED)
return(-EINVAL);
proto = sk->num;
proto = inet->num;
daddr = &np->daddr;
fl.fl6_flowlabel = np->flow_label;
}
......@@ -635,7 +637,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
break;
case SOL_ICMPV6:
if (sk->num != IPPROTO_ICMPV6)
if (inet_sk(sk)->num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval,
optlen);
......@@ -678,7 +680,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
break;
case SOL_ICMPV6:
if (sk->num != IPPROTO_ICMPV6)
if (inet_sk(sk)->num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval,
optlen);
......@@ -741,7 +743,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
static void rawv6_close(struct sock *sk, long timeout)
{
if (sk->num == IPPROTO_RAW)
if (inet_sk(sk)->num == IPPROTO_RAW)
ip6_ra_control(sk, -1, NULL);
inet_sock_release(sk);
......@@ -764,10 +766,10 @@ static void get_raw6_sock(struct sock *sp, char *tmpbuf, int i)
dest = &np->daddr;
src = &np->rcv_saddr;
destp = 0;
srcp = sp->num;
srcp = inet_sk(sp)->num;
sprintf(tmpbuf,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
......
This diff is collapsed.
......@@ -65,11 +65,11 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum)
best_size_so_far = 32767;
best = result = udp_port_rover;
for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
struct sock *sk;
struct sock *sk2;
int size;
sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
if (!sk) {
sk2 = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
if (!sk2) {
if (result > sysctl_local_port_range[1])
result = sysctl_local_port_range[0] +
((result - sysctl_local_port_range[0]) &
......@@ -80,7 +80,7 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum)
do {
if (++size >= best_size_so_far)
goto next;
} while ((sk = sk->next) != NULL);
} while ((sk2 = sk2->next) != NULL);
best_size_so_far = size;
best = result;
next:;
......@@ -104,23 +104,24 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum)
for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
sk2 != NULL;
sk2 = sk2->next) {
struct inet_opt *inet2 = inet_sk(sk2);
struct ipv6_pinfo *np2 = inet6_sk(sk2);
if (sk2->num == snum &&
if (inet2->num == snum &&
sk2 != sk &&
sk2->bound_dev_if == sk->bound_dev_if &&
(!sk2->rcv_saddr ||
(!inet2->rcv_saddr ||
addr_type == IPV6_ADDR_ANY ||
!ipv6_addr_cmp(&np->rcv_saddr, &np2->rcv_saddr) ||
(addr_type == IPV6_ADDR_MAPPED &&
sk2->family == AF_INET &&
sk->rcv_saddr == sk2->rcv_saddr)) &&
inet_sk(sk)->rcv_saddr == inet2->rcv_saddr)) &&
(!sk2->reuse || !sk->reuse))
goto fail;
}
}
sk->num = snum;
inet_sk(sk)->num = snum;
if (sk->pprev == NULL) {
struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
if ((sk->next = *skp) != NULL)
......@@ -151,7 +152,7 @@ static void udp_v6_unhash(struct sock *sk)
sk->next->pprev = sk->pprev;
*sk->pprev = sk->next;
sk->pprev = NULL;
sk->num = 0;
inet_sk(sk)->num = 0;
sock_prot_dec_use(sk->prot);
__sock_put(sk);
}
......@@ -167,12 +168,13 @@ static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
read_lock(&udp_hash_lock);
for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
if((sk->num == hnum) &&
(sk->family == PF_INET6)) {
struct inet_opt *inet = inet_sk(sk);
if (inet->num == hnum && sk->family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
int score = 0;
if(sk->dport) {
if(sk->dport != sport)
if (inet->dport) {
if (inet->dport != sport)
continue;
score++;
}
......@@ -213,6 +215,7 @@ static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *daddr;
struct in6_addr saddr;
......@@ -268,16 +271,16 @@ int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (err < 0)
return err;
ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), sk->daddr);
ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr);
if (ipv6_addr_any(&np->saddr)) {
ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff),
sk->saddr);
inet->saddr);
}
if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff),
sk->rcv_saddr);
inet->rcv_saddr);
}
return 0;
}
......@@ -300,7 +303,7 @@ int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
ipv6_addr_copy(&np->daddr, daddr);
np->flow_label = fl.fl6_flowlabel;
sk->dport = usin->sin6_port;
inet->dport = usin->sin6_port;
/*
* Check for a route to destination an obtain the
......@@ -311,8 +314,8 @@ int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
fl.fl6_dst = &np->daddr;
fl.fl6_src = &saddr;
fl.oif = sk->bound_dev_if;
fl.uli_u.ports.dport = sk->dport;
fl.uli_u.ports.sport = sk->sport;
fl.uli_u.ports.dport = inet->dport;
fl.uli_u.ports.sport = inet->sport;
if (flowlabel) {
if (flowlabel->opt && flowlabel->opt->srcrt) {
......@@ -344,7 +347,7 @@ int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_copy(&np->rcv_saddr, &saddr);
sk->rcv_saddr = LOOPBACK4_IPV6;
inet->rcv_saddr = LOOPBACK4_IPV6;
}
sk->state = TCP_ESTABLISHED;
}
......@@ -528,10 +531,12 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
struct sock *s = sk;
unsigned short num = ntohs(loc_port);
for(; s; s = s->next) {
if(s->num == num) {
struct inet_opt *inet = inet_sk(s);
if (inet->num == num) {
struct ipv6_pinfo *np = inet6_sk(s);
if(s->dport) {
if(s->dport != rmt_port)
if (inet->dport) {
if (inet->dport != rmt_port)
continue;
}
if (!ipv6_addr_any(&np->daddr) &&
......@@ -757,6 +762,7 @@ static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
{
struct ipv6_txoptions opt_space;
struct udpv6fakehdr udh;
struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct ipv6_txoptions *opt = NULL;
......@@ -818,7 +824,7 @@ static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
if (sk->state != TCP_ESTABLISHED)
return -ENOTCONN;
udh.uh.dest = sk->dport;
udh.uh.dest = inet->dport;
daddr = &np->daddr;
fl.fl6_flowlabel = np->flow_label;
}
......@@ -867,7 +873,7 @@ static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
if (opt && opt->srcrt)
udh.daddr = daddr;
udh.uh.source = sk->sport;
udh.uh.source = inet->sport;
udh.uh.len = len < 0x10000 ? htons(len) : 0;
udh.uh.check = 0;
udh.iov = msg->msg_iov;
......@@ -905,17 +911,18 @@ static struct inet6_protocol udpv6_protocol = {
static void get_udp6_sock(struct sock *sp, char *tmpbuf, int i)
{
struct inet_opt *inet = inet_sk(sp);
struct ipv6_pinfo *np = inet6_sk(sp);
struct in6_addr *dest, *src;
__u16 destp, srcp;
dest = &np->daddr;
src = &np->rcv_saddr;
destp = ntohs(sp->dport);
srcp = ntohs(sp->sport);
destp = ntohs(inet->dport);
srcp = ntohs(inet->sport);
sprintf(tmpbuf,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
......
......@@ -1700,7 +1700,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
if (sk->state == TCP_ESTABLISHED) {
if ((self->tx_flow == FLOW_START) &&
(sk->sndbuf - (int)atomic_read(&sk->wmem_alloc) >= SOCK_MIN_WRITE_SPACE))
sock_writeable(sk))
{
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
}
......@@ -1708,13 +1708,13 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
break;
case SOCK_SEQPACKET:
if ((self->tx_flow == FLOW_START) &&
(sk->sndbuf - (int)atomic_read(&sk->wmem_alloc) >= SOCK_MIN_WRITE_SPACE))
sock_writeable(sk))
{
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
}
break;
case SOCK_DGRAM:
if (sk->sndbuf - (int)atomic_read(&sk->wmem_alloc) >= SOCK_MIN_WRITE_SPACE)
if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
break;
default:
......
......@@ -180,6 +180,7 @@ struct packet_opt
spinlock_t bind_lock;
char running; /* prot_hook is attached*/
int ifindex; /* bound device */
unsigned short num;
struct tpacket_stats stats;
#ifdef CONFIG_PACKET_MULTICAST
struct packet_mclist *mclist;
......@@ -678,8 +679,10 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, int len,
*/
if (saddr == NULL) {
ifindex = pkt_sk(sk)->ifindex;
proto = sk->num;
struct packet_opt *po = pkt_sk(sk);
ifindex = po->ifindex;
proto = po->num;
addr = NULL;
} else {
err = -EINVAL;
......@@ -839,7 +842,7 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, int protocol)
po->running = 0;
}
sk->num = protocol;
po->num = protocol;
po->prot_hook.type = protocol;
po->prot_hook.dev = dev;
......@@ -894,7 +897,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
dev = dev_get_by_name(name);
if (dev) {
err = packet_do_bind(sk, dev, sk->num);
err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
dev_put(dev);
}
return err;
......@@ -924,7 +927,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
if (dev == NULL)
goto out;
}
err = packet_do_bind(sk, dev, sll->sll_protocol ? : sk->num);
err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
if (dev)
dev_put(dev);
......@@ -972,7 +975,7 @@ static int packet_create(struct socket *sock, int protocol)
goto out_free;
memset(po, 0, sizeof(*po));
sk->family = PF_PACKET;
sk->num = protocol;
po->num = protocol;
sk->destruct = packet_sock_destruct;
atomic_inc(&packet_socks_nr);
......@@ -1131,7 +1134,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
sll->sll_family = AF_PACKET;
sll->sll_ifindex = po->ifindex;
sll->sll_protocol = sk->num;
sll->sll_protocol = po->num;
dev = dev_get_by_index(po->ifindex);
if (dev) {
sll->sll_hatype = dev->type;
......@@ -1410,7 +1413,8 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
break;
case NETDEV_UP:
spin_lock(&po->bind_lock);
if (dev->ifindex == po->ifindex && sk->num && po->running==0) {
if (dev->ifindex == po->ifindex && po->num &&
!po->running) {
dev_add_pack(&po->prot_hook);
sock_hold(sk);
po->running = 1;
......@@ -1861,7 +1865,7 @@ static int packet_read_proc(char *buffer, char **start, off_t offset,
s,
atomic_read(&s->refcnt),
s->type,
ntohs(s->num),
ntohs(po->num),
po->ifindex,
po->running,
atomic_read(&s->rmem_alloc),
......
......@@ -7,7 +7,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: J Hadi Salim (hadi@nortelnetworks.com) 1998,1999
* Authors: J Hadi Salim (hadi@cyberus.ca) 1998-2002
*
* 991129: - Bug fix with grio mode
* - a better sing. AvgQ mode with Grio(WRED)
......@@ -436,7 +436,7 @@ static int gred_change(struct Qdisc *sch, struct rtattr *opt)
if (table->tab[table->def] == NULL) {
table->tab[table->def]=
kmalloc(sizeof(struct gred_sched_data), GFP_KERNEL);
if (NULL == table->tab[ctl->DP])
if (NULL == table->tab[table->def])
return -ENOMEM;
memset(table->tab[table->def], 0,
......@@ -498,7 +498,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
unsigned long qave;
struct rtattr *rta;
struct tc_gred_qopt *opt;
struct tc_gred_qopt *opt = NULL ;
struct tc_gred_qopt *dst;
struct gred_sched *table = (struct gred_sched *)sch->data;
struct gred_sched_data *q;
......@@ -520,7 +520,6 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
if (!table->initd) {
DPRINTK("NO GRED Queues setup!\n");
return -1;
}
for (i=0;i<MAX_DPs;i++) {
......@@ -577,9 +576,12 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
RTA_PUT(skb, TCA_GRED_PARMS, sizeof(struct tc_gred_qopt)*MAX_DPs, opt);
rta->rta_len = skb->tail - b;
kfree(opt);
return skb->len;
rtattr_failure:
if (opt)
kfree(opt);
DPRINTK("gred_dump: FAILURE!!!!\n");
/* also free the opt struct here */
......
......@@ -1161,7 +1161,8 @@ if (svsk->sk_sk == NULL)
/* Register socket with portmapper */
if (*errp >= 0 && pmap_register)
*errp = svc_register(serv, inet->protocol, ntohs(inet->sport));
*errp = svc_register(serv, inet->protocol,
ntohs(inet_sk(inet)->sport));
if (*errp < 0) {
inet->user_data = NULL;
......
......@@ -67,9 +67,6 @@
#include <asm/uaccess.h>
/* Following value should be > 32k + RPC overhead */
#define XPRT_MIN_WRITE_SPACE (35000 + SOCK_MIN_WRITE_SPACE)
extern spinlock_t rpc_queue_lock;
/*
......@@ -1099,9 +1096,8 @@ udp_write_space(struct sock *sk)
if (xprt->shutdown)
return;
/* Wait until we have enough socket memory */
if (sock_wspace(sk) < min_t(int, sk->sndbuf,XPRT_MIN_WRITE_SPACE))
/* Wait until we have enough socket memory. */
if (sock_writeable(sk))
return;
if (!xprt_test_and_set_wspace(xprt)) {
......
......@@ -1767,7 +1767,7 @@ static int unix_read_proc(char *buffer, char **start, off_t offset,
struct unix_sock *u = unix_sk(s);
unix_state_rlock(s);
len+=sprintf(buffer+len,"%p: %08X %08X %08X %04X %02X %5ld",
len+=sprintf(buffer+len,"%p: %08X %08X %08X %04X %02X %5lu",
s,
atomic_read(&s->refcnt),
0,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment