Commit 21f130a2 authored by David S. Miller

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

parents bc5e8fdf c58ec932
......@@ -2211,6 +2211,15 @@ D: OV511 driver
S: (address available on request)
S: USA
N: Ian McDonald
E: iam4@cs.waikato.ac.nz
E: imcdnzl@gmail.com
W: http://wand.net.nz/~iam4
W: http://imcdnzl.blogspot.com
D: DCCP, CCID3
S: Hamilton
S: New Zealand
N: Patrick McHardy
E: kaber@trash.net
P: 1024D/12155E80 B128 7DE6 FF0A C2B2 48BE AB4C C9D4 964E 1215 5E80
......@@ -2246,19 +2255,12 @@ S: D-90453 Nuernberg
S: Germany
N: Arnaldo Carvalho de Melo
E: acme@conectiva.com.br
E: acme@kernel.org
E: acme@gnu.org
W: http://bazar2.conectiva.com.br/~acme
W: http://advogato.org/person/acme
E: acme@mandriva.com
E: acme@ghostprotocols.net
W: http://oops.ghostprotocols.net:81/blog/
P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD 841A B6AB 4681 9224 DF01
D: wanrouter hacking
D: misc Makefile, Config.in, drivers and network stacks fixes
D: IPX & LLC network stacks maintainer
D: Cyclom 2X synchronous card driver
D: wl3501 PCMCIA wireless card driver
D: i18n for minicom, net-tools, util-linux, fetchmail, etc
S: Conectiva S.A.
D: IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
S: Mandriva
S: R. Tocantins, 89 - Cristo Rei
S: 80050-430 - Curitiba - Paraná
S: Brazil
......
......@@ -686,6 +686,13 @@ P: Guennadi Liakhovetski
M: g.liakhovetski@gmx.de
S: Maintained
DCCP PROTOCOL
P: Arnaldo Carvalho de Melo
M: acme@mandriva.com
L: dccp@vger.kernel.org
W: http://www.wlug.org.nz/DCCP
S: Maintained
DECnet NETWORK LAYER
P: Patrick Caulfield
M: patrick@tykepenguin.com
......@@ -2271,12 +2278,6 @@ M: R.E.Wolff@BitWizard.nl
L: linux-kernel@vger.kernel.org ?
S: Supported
SPX NETWORK LAYER
P: Jay Schulist
M: jschlst@samba.org
L: netdev@vger.kernel.org
S: Supported
SRM (Alpha) environment access
P: Jan-Benedict Glaw
M: jbglaw@lug-owl.de
......
......@@ -67,8 +67,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.39"
#define DRV_MODULE_RELDATE "September 5, 2005"
#define DRV_MODULE_VERSION "3.40"
#define DRV_MODULE_RELDATE "September 15, 2005"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
......@@ -3442,31 +3442,47 @@ static void tg3_tx_timeout(struct net_device *dev)
schedule_work(&tp->reset_task);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
u32 base = (u32) mapping & 0xffffffff;
return ((base > 0xffffdcc0) &&
(base + len + 8 < base));
}
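For illustration only (not part of this commit): a standalone sketch of the arithmetic behind tg3_4g_overflow_test. The constant 0xffffdcc0 and the +8 slack are taken from the function above; the two sample mappings are hypothetical. The test fires exactly when the low 32 bits of the DMA address would wrap past a 4 GB multiple once the buffer length and guard are added.

/*
 * Standalone illustration, not driver code: the check flags a DMA
 * mapping whose low 32 bits would wrap once len + 8 is added, i.e.
 * the buffer straddles a 4 GB multiple (4G, 8G, ...).
 */
#include <stdint.h>
#include <stdio.h>

static int crosses_4g(uint64_t mapping, int len)
{
	uint32_t base = (uint32_t)mapping;		/* low 32 bits only */

	return base > 0xffffdcc0u &&			/* close enough to the edge... */
	       (uint32_t)(base + len + 8) < base;	/* ...and the addition wraps */
}

int main(void)
{
	/* hypothetical mappings: only the second straddles a 4 GB line */
	printf("%d\n", crosses_4g(0xfffd0000ULL, 0x1000));	/* prints 0 */
	printf("%d\n", crosses_4g(0xfffff000ULL, 0x2000));	/* prints 1 */
	return 0;
}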
static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
u32 guilty_entry, int guilty_len,
u32 last_plus_one, u32 *start, u32 mss)
u32 last_plus_one, u32 *start,
u32 base_flags, u32 mss)
{
struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
dma_addr_t new_addr;
dma_addr_t new_addr = 0;
u32 entry = *start;
int i;
int i, ret = 0;
if (!new_skb) {
dev_kfree_skb(skb);
return -1;
ret = -1;
} else {
/* New SKB is guaranteed to be linear. */
entry = *start;
new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
PCI_DMA_TODEVICE);
/* Make sure new skb does not cross any 4G boundaries.
* Drop the packet if it does.
*/
if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
ret = -1;
dev_kfree_skb(new_skb);
new_skb = NULL;
} else {
tg3_set_txd(tp, entry, new_addr, new_skb->len,
base_flags, 1 | (mss << 1));
*start = NEXT_TX(entry);
}
}
/* New SKB is guaranteed to be linear. */
entry = *start;
new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
PCI_DMA_TODEVICE);
tg3_set_txd(tp, entry, new_addr, new_skb->len,
(skb->ip_summed == CHECKSUM_HW) ?
TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
*start = NEXT_TX(entry);
/* Now clean up the sw ring entries. */
i = 0;
while (entry != last_plus_one) {
......@@ -3491,7 +3507,7 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
dev_kfree_skb(skb);
return 0;
return ret;
}
static void tg3_set_txd(struct tg3 *tp, int entry,
......@@ -3517,19 +3533,10 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
u32 base = (u32) mapping & 0xffffffff;
return ((base > 0xffffdcc0) &&
(base + len + 8 < base));
}
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
dma_addr_t mapping;
unsigned int i;
u32 len, entry, base_flags, mss;
int would_hit_hwbug;
......@@ -3624,7 +3631,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
would_hit_hwbug = 0;
if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = entry + 1;
would_hit_hwbug = 1;
tg3_set_txd(tp, entry, mapping, len, base_flags,
(skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
......@@ -3648,12 +3655,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
tp->tx_buffers[entry].skb = NULL;
pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
if (tg3_4g_overflow_test(mapping, len)) {
/* Only one should match. */
if (would_hit_hwbug)
BUG();
would_hit_hwbug = entry + 1;
}
if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tg3_set_txd(tp, entry, mapping, len,
......@@ -3669,34 +3672,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (would_hit_hwbug) {
u32 last_plus_one = entry;
u32 start;
unsigned int len = 0;
would_hit_hwbug -= 1;
entry = entry - 1 - skb_shinfo(skb)->nr_frags;
entry &= (TG3_TX_RING_SIZE - 1);
start = entry;
i = 0;
while (entry != last_plus_one) {
if (i == 0)
len = skb_headlen(skb);
else
len = skb_shinfo(skb)->frags[i-1].size;
if (entry == would_hit_hwbug)
break;
i++;
entry = NEXT_TX(entry);
}
start = entry - 1 - skb_shinfo(skb)->nr_frags;
start &= (TG3_TX_RING_SIZE - 1);
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
if (tigon3_4gb_hwbug_workaround(tp, skb,
entry, len,
last_plus_one,
&start, mss))
if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
&start, base_flags, mss))
goto out_unlock;
entry = start;
......@@ -9271,6 +9255,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_K8_NB) },
{ },
};
u32 misc_ctrl_reg;
......@@ -9285,7 +9271,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
#endif
/* If we have an AMD 762 chipset, write
/* If we have an AMD 762 or K8 chipset, write
* reordering to the mailbox registers done by the host
* controller can cause major troubles. We read back from
* every mailbox register write to force the writes to be
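For illustration only (not part of this commit): a minimal sketch of the read-back idiom the comment describes, assuming a hypothetical ioremap()ed mailbox address; the real driver routes this through its indirect mailbox write helpers. The dummy readl() forces the preceding posted write out of the host bridge before the CPU issues the next one, which is what defeats the AMD 762/K8 write reordering.

#include <linux/types.h>
#include <asm/io.h>

/*
 * Illustrative only: write a mailbox register, then read it straight
 * back so the host bridge cannot hold or reorder the posted write.
 * 'mbox' is a hypothetical ioremap()ed register address.
 */
static inline void mbox_write_flush(void __iomem *mbox, u32 val)
{
	writel(val, mbox);
	(void)readl(mbox);	/* force the posted write to complete in order */
}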
......@@ -9532,7 +9518,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->write32_rx_mbox = tg3_write_indirect_mbox;
iounmap(tp->regs);
tp->regs = 0;
tp->regs = NULL;
pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
pci_cmd &= ~PCI_COMMAND_MEMORY;
......@@ -10680,7 +10666,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err_out_iounmap:
if (tp->regs) {
iounmap(tp->regs);
tp->regs = 0;
tp->regs = NULL;
}
err_out_free_dev:
......@@ -10705,7 +10691,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
if (tp->regs) {
iounmap(tp->regs);
tp->regs = 0;
tp->regs = NULL;
}
free_netdev(dev);
pci_release_regions(pdev);
......
......@@ -4,16 +4,6 @@
#include <linux/types.h>
#include <asm/byteorder.h>
/* Structure describing an Internet (DCCP) socket address. */
struct sockaddr_dccp {
__u16 sdccp_family; /* Address family */
__u16 sdccp_port; /* Port number */
__u32 sdccp_addr; /* Internet address */
__u32 sdccp_service; /* Service */
/* Pad to size of `struct sockaddr': 16 bytes . */
__u32 sdccp_pad;
};
/**
* struct dccp_hdr - generic part of DCCP packet header
*
......@@ -188,6 +178,9 @@ enum {
/* DCCP socket options */
#define DCCP_SOCKOPT_PACKET_SIZE 1
#define DCCP_SOCKOPT_SERVICE 2
#define DCCP_SERVICE_LIST_MAX_LEN 32
#ifdef __KERNEL__
......@@ -382,6 +375,25 @@ enum dccp_role {
DCCP_ROLE_SERVER,
};
struct dccp_service_list {
__u32 dccpsl_nr;
__u32 dccpsl_list[0];
};
#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
static inline int dccp_list_has_service(const struct dccp_service_list *sl,
const u32 service)
{
if (likely(sl != NULL)) {
u32 i = sl->dccpsl_nr;
while (i--)
if (sl->dccpsl_list[i] == service)
return 1;
}
return 0;
}
/**
* struct dccp_sock - DCCP socket state
*
......@@ -417,7 +429,8 @@ struct dccp_sock {
__u64 dccps_gss;
__u64 dccps_gsr;
__u64 dccps_gar;
unsigned long dccps_service;
__u32 dccps_service;
struct dccp_service_list *dccps_service_list;
struct timeval dccps_timestamp_time;
__u32 dccps_timestamp_echo;
__u32 dccps_packet_size;
......@@ -443,6 +456,11 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk)
return (struct dccp_sock *)sk;
}
static inline int dccp_service_not_initialized(const struct sock *sk)
{
return dccp_sk(sk)->dccps_service == DCCP_SERVICE_INVALID_VALUE;
}
static inline const char *dccp_role(const struct sock *sk)
{
switch (dccp_sk(sk)->dccps_role) {
......
......@@ -491,6 +491,7 @@
#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060
#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_DEVICE_ID_AMD_K8_NB 0x1100
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
#define PCI_DEVICE_ID_AMD_SCSI 0x2020
......
......@@ -140,6 +140,7 @@ config BRIDGE_NETFILTER
If unsure, say N.
source "net/netfilter/Kconfig"
source "net/ipv4/netfilter/Kconfig"
source "net/ipv6/netfilter/Kconfig"
source "net/decnet/netfilter/Kconfig"
......@@ -206,8 +207,6 @@ config NET_PKTGEN
To compile this code as a module, choose M here: the
module will be called pktgen.
source "net/netfilter/Kconfig"
endmenu
endmenu
......
......@@ -258,13 +258,12 @@ extern int dccp_v4_send_reset(struct sock *sk,
extern void dccp_send_close(struct sock *sk, const int active);
struct dccp_skb_cb {
__u8 dccpd_type;
__u8 dccpd_reset_code;
__u8 dccpd_service;
__u8 dccpd_ccval;
__u8 dccpd_type:4;
__u8 dccpd_ccval:4;
__u8 dccpd_reset_code;
__u16 dccpd_opt_len;
__u64 dccpd_seq;
__u64 dccpd_ack_seq;
int dccpd_opt_len;
};
#define DCCP_SKB_CB(__skb) ((struct dccp_skb_cb *)&((__skb)->cb[0]))
......
......@@ -384,9 +384,9 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
}
out_invalid_packet:
return 1; /* dccp_v4_do_rcv will send a reset, but...
FIXME: the reset code should be
DCCP_RESET_CODE_PACKET_ERROR */
/* dccp_v4_do_rcv will send a reset */
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
return 1;
}
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
......@@ -433,6 +433,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct dccp_hdr *dh, unsigned len)
{
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
const int old_state = sk->sk_state;
int queued = 0;
......@@ -473,7 +474,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (dh->dccph_type == DCCP_PKT_RESET)
goto discard;
/* Caller (dccp_v4_do_rcv) will send Reset(No Connection)*/
/* Caller (dccp_v4_do_rcv) will send Reset */
dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
return 1;
}
......@@ -487,8 +489,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (dccp_parse_options(sk, skb))
goto discard;
if (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
DCCP_PKT_WITHOUT_ACK_SEQ)
if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
dccp_event_ack_recv(sk, skb);
ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
......@@ -500,7 +501,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
*/
if (dp->dccps_options.dccpo_send_ack_vector) {
if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts, sk,
DCCP_SKB_CB(skb)->dccpd_seq,
dcb->dccpd_seq,
DCCP_ACKPKTS_STATE_RECEIVED))
goto discard;
/*
......@@ -551,8 +552,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
dh->dccph_type == DCCP_PKT_REQUEST) ||
(sk->sk_state == DCCP_RESPOND &&
dh->dccph_type == DCCP_PKT_DATA)) {
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_PKT_SYNC);
dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
goto discard;
} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
dccp_rcv_closereq(sk, skb);
......@@ -563,13 +563,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
}
if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_PKT_SYNCACK);
dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
goto discard;
}
switch (sk->sk_state) {
case DCCP_CLOSED:
dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
return 1;
case DCCP_REQUESTING:
......
......@@ -246,6 +246,9 @@ static int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
dp->dccps_role = DCCP_ROLE_CLIENT;
if (dccp_service_not_initialized(sk))
return -EPROTO;
if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL;
......@@ -661,6 +664,16 @@ static inline u64 dccp_v4_init_sequence(const struct sock *sk,
dccp_hdr(skb)->dccph_sport);
}
static inline int dccp_bad_service_code(const struct sock *sk,
const __u32 service)
{
const struct dccp_sock *dp = dccp_sk(sk);
if (dp->dccps_service == service)
return 0;
return !dccp_list_has_service(dp->dccps_service_list, service);
}
int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct inet_request_sock *ireq;
......@@ -669,13 +682,22 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
struct dccp_request_sock *dreq;
const __u32 saddr = skb->nh.iph->saddr;
const __u32 daddr = skb->nh.iph->daddr;
const __u32 service = dccp_hdr_request(skb)->dccph_req_service;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
struct dst_entry *dst = NULL;
/* Never answer to DCCP_PKT_REQUESTs sent to broadcast or multicast */
if (((struct rtable *)skb->dst)->rt_flags &
(RTCF_BROADCAST | RTCF_MULTICAST))
(RTCF_BROADCAST | RTCF_MULTICAST)) {
reset_code = DCCP_RESET_CODE_NO_CONNECTION;
goto drop;
}
if (dccp_bad_service_code(sk, service)) {
reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
}
/*
* TW buckets are converted to open requests without
* limitations, they conserve resources and peer is
......@@ -718,9 +740,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* dccp_create_openreq_child.
*/
dreq = dccp_rsk(req);
dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq;
dreq->dreq_iss = dccp_v4_init_sequence(sk, skb);
dreq->dreq_service = dccp_hdr_request(skb)->dccph_req_service;
dreq->dreq_isr = dcb->dccpd_seq;
dreq->dreq_iss = dccp_v4_init_sequence(sk, skb);
dreq->dreq_service = service;
if (dccp_v4_send_response(sk, req, dst))
goto drop_and_free;
......@@ -735,6 +757,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
__reqsk_free(req);
drop:
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
dcb->dccpd_reset_code = reset_code;
return -1;
}
......@@ -1005,7 +1028,6 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
dccp_v4_ctl_send_reset(skb);
discard:
kfree_skb(skb);
......@@ -1280,6 +1302,7 @@ static int dccp_v4_init_sock(struct sock *sk)
sk->sk_write_space = dccp_write_space;
dp->dccps_mss_cache = 536;
dp->dccps_role = DCCP_ROLE_UNDEFINED;
dp->dccps_service = DCCP_SERVICE_INVALID_VALUE;
return 0;
}
......@@ -1301,6 +1324,11 @@ static int dccp_v4_destroy_sock(struct sock *sk)
if (inet_csk(sk)->icsk_bind_hash != NULL)
inet_put_port(&dccp_hashinfo, sk);
if (dp->dccps_service_list != NULL) {
kfree(dp->dccps_service_list);
dp->dccps_service_list = NULL;
}
ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
dccp_ackpkts_free(dp->dccps_hc_rx_ackpkts);
......
......@@ -93,9 +93,11 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
struct inet_connection_sock *newicsk = inet_csk(sk);
struct dccp_sock *newdp = dccp_sk(newsk);
newdp->dccps_role = DCCP_ROLE_SERVER;
newdp->dccps_hc_rx_ackpkts = NULL;
newdp->dccps_role = DCCP_ROLE_SERVER;
newicsk->icsk_rto = DCCP_TIMEOUT_INIT;
newdp->dccps_service_list = NULL;
newdp->dccps_service = dreq->dreq_service;
newicsk->icsk_rto = DCCP_TIMEOUT_INIT;
do_gettimeofday(&newdp->dccps_epoch);
if (newdp->dccps_options.dccpo_send_ack_vector) {
......
......@@ -85,7 +85,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
switch (dcb->dccpd_type) {
case DCCP_PKT_REQUEST:
dccp_hdr_request(skb)->dccph_req_service =
dcb->dccpd_service;
dp->dccps_service;
break;
case DCCP_PKT_RESET:
dccp_hdr_reset(skb)->dccph_reset_code =
......@@ -270,6 +270,7 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
struct request_sock *req)
{
struct dccp_hdr *dh;
struct dccp_request_sock *dreq;
const int dccp_header_size = sizeof(struct dccp_hdr) +
sizeof(struct dccp_hdr_ext) +
sizeof(struct dccp_hdr_response);
......@@ -285,8 +286,9 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
skb->dst = dst_clone(dst);
skb->csum = 0;
dreq = dccp_rsk(req);
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
DCCP_SKB_CB(skb)->dccpd_seq = dccp_rsk(req)->dreq_iss;
DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss;
dccp_insert_options(sk, skb);
skb->h.raw = skb_push(skb, dccp_header_size);
......@@ -300,8 +302,9 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
dh->dccph_type = DCCP_PKT_RESPONSE;
dh->dccph_x = 1;
dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss);
dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dccp_rsk(req)->dreq_isr);
dccp_hdr_set_seq(dh, dreq->dreq_iss);
dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
inet_rsk(req)->rmt_addr);
......@@ -397,9 +400,6 @@ int dccp_connect(struct sock *sk)
skb_reserve(skb, MAX_DCCP_HEADER);
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
/* FIXME: set service to something meaningful, coming
* from userspace*/
DCCP_SKB_CB(skb)->dccpd_service = 0;
skb->csum = 0;
skb_set_owner_w(skb, sk);
......
......@@ -94,7 +94,15 @@ EXPORT_SYMBOL_GPL(dccp_state_name);
static inline int dccp_listen_start(struct sock *sk)
{
dccp_sk(sk)->dccps_role = DCCP_ROLE_LISTEN;
struct dccp_sock *dp = dccp_sk(sk);
dp->dccps_role = DCCP_ROLE_LISTEN;
/*
* Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
* before calling listen()
*/
if (dccp_service_not_initialized(sk))
return -EPROTO;
return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}
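For illustration only (not part of this commit): a hedged userspace sketch of the ordering the comment above requires. SOCK_DCCP, IPPROTO_DCCP, SOL_DCCP and DCCP_SOCKOPT_SERVICE are assumed to be visible to the application (the numeric fallbacks below and the example service code 42 are assumptions). Additional service codes may be appended to the same optval buffer, which dccp_setsockopt_service() further down parses into a dccp_service_list.

#include <arpa/inet.h>
#include <stdint.h>
#include <sys/socket.h>

/* Assumed values for a self-contained sketch; normally they come from
 * the kernel/libc headers. */
#ifndef SOCK_DCCP
#define SOCK_DCCP		6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP		33
#endif
#ifndef SOL_DCCP
#define SOL_DCCP		269
#endif
#ifndef DCCP_SOCKOPT_SERVICE
#define DCCP_SOCKOPT_SERVICE	2
#endif

static int dccp_listen_example(void)
{
	uint32_t service = htonl(42);	/* arbitrary example service code */
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0)
		return -1;

	/* Must happen before listen(); otherwise listen() fails with EPROTO. */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &service, sizeof(service)) < 0)
		return -1;

	/* bind() to a local address/port would normally go here. */
	return listen(fd, 5);
}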
......@@ -202,6 +210,42 @@ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
return -ENOIOCTLCMD;
}
static int dccp_setsockopt_service(struct sock *sk, const u32 service,
char __user *optval, int optlen)
{
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_service_list *sl = NULL;
if (service == DCCP_SERVICE_INVALID_VALUE ||
optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
return -EINVAL;
if (optlen > sizeof(service)) {
sl = kmalloc(optlen, GFP_KERNEL);
if (sl == NULL)
return -ENOMEM;
sl->dccpsl_nr = optlen / sizeof(u32) - 1;
if (copy_from_user(sl->dccpsl_list,
optval + sizeof(service),
optlen - sizeof(service)) ||
dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
kfree(sl);
return -EFAULT;
}
}
lock_sock(sk);
dp->dccps_service = service;
if (dp->dccps_service_list != NULL)
kfree(dp->dccps_service_list);
dp->dccps_service_list = sl;
release_sock(sk);
return 0;
}
int dccp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, int optlen)
{
......@@ -218,8 +262,10 @@ int dccp_setsockopt(struct sock *sk, int level, int optname,
if (get_user(val, (int __user *)optval))
return -EFAULT;
lock_sock(sk);
if (optname == DCCP_SOCKOPT_SERVICE)
return dccp_setsockopt_service(sk, val, optval, optlen);
lock_sock(sk);
dp = dccp_sk(sk);
err = 0;
......@@ -236,6 +282,37 @@ int dccp_setsockopt(struct sock *sk, int level, int optname,
return err;
}
static int dccp_getsockopt_service(struct sock *sk, int len,
u32 __user *optval,
int __user *optlen)
{
const struct dccp_sock *dp = dccp_sk(sk);
const struct dccp_service_list *sl;
int err = -ENOENT, slen = 0, total_len = sizeof(u32);
lock_sock(sk);
if (dccp_service_not_initialized(sk))
goto out;
if ((sl = dp->dccps_service_list) != NULL) {
slen = sl->dccpsl_nr * sizeof(u32);
total_len += slen;
}
err = -EINVAL;
if (total_len > len)
goto out;
err = 0;
if (put_user(total_len, optlen) ||
put_user(dp->dccps_service, optval) ||
(sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
err = -EFAULT;
out:
release_sock(sk);
return err;
}
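For illustration only (not part of this commit): a sketch of reading the service list back from userspace, matching the layout dccp_getsockopt_service() writes above: the primary code first, any additional codes after it, with *optlen rewritten to the total byte length. The SOL_DCCP fallback value is an assumption, as in the earlier sketch.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_DCCP
#define SOL_DCCP		269	/* assumption, see the earlier sketch */
#endif
#ifndef DCCP_SOCKOPT_SERVICE
#define DCCP_SOCKOPT_SERVICE	2
#endif

/* Prints the service code(s) of an already set-up DCCP socket 'fd'. */
static int print_dccp_services(int fd)
{
	uint32_t codes[1 + 32];		/* 1 + DCCP_SERVICE_LIST_MAX_LEN */
	socklen_t len = sizeof(codes);
	unsigned int i, n_extra;

	if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, codes, &len) < 0)
		return -1;

	n_extra = len / sizeof(uint32_t) - 1;
	printf("primary service: %u\n", ntohl(codes[0]));
	for (i = 0; i < n_extra; i++)
		printf("extra service:   %u\n", ntohl(codes[1 + i]));
	return 0;
}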
int dccp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
......@@ -248,6 +325,10 @@ int dccp_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
if (optname == DCCP_SOCKOPT_SERVICE)
return dccp_getsockopt_service(sk, len,
(u32 __user *)optval, optlen);
len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
......
......@@ -8,6 +8,7 @@ menu "IP: Netfilter Configuration"
# connection tracking, helpers and protocols
config IP_NF_CONNTRACK
tristate "Connection tracking (required for masq/NAT)"
select NETFILTER_NETLINK if IP_NF_CONNTRACK_NETLINK!=n
---help---
Connection tracking keeps a record of what packets have passed
through your machine, in order to figure out how they are related
......@@ -51,6 +52,15 @@ config IP_NF_CONNTRACK_EVENTS
IF unsure, say `N'.
config IP_NF_CONNTRACK_NETLINK
tristate 'Connection tracking netlink interface'
depends on IP_NF_CONNTRACK && NETFILTER_NETLINK
default IP_NF_CONNTRACK if NETFILTER_NETLINK=y
default m if NETFILTER_NETLINK=m
help
This option enables support for a netlink-based userspace interface
config IP_NF_CT_PROTO_SCTP
tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)'
depends on IP_NF_CONNTRACK && EXPERIMENTAL
......@@ -774,11 +784,5 @@ config IP_NF_ARP_MANGLE
Allows altering the ARP packet payload: source and destination
hardware and network addresses.
config IP_NF_CONNTRACK_NETLINK
tristate 'Connection tracking netlink interface'
depends on IP_NF_CONNTRACK && NETFILTER_NETLINK
help
This option enables support for a netlink-based userspace interface
endmenu
......@@ -1143,7 +1143,10 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
if (del_timer(&ct->timeout)) {
ct->timeout.expires = jiffies + extra_jiffies;
add_timer(&ct->timeout);
ip_conntrack_event_cache(IPCT_REFRESH, skb);
/* FIXME: We lose some REFRESH events if this function
* is called without an skb. I'll fix this later -HW */
if (skb)
ip_conntrack_event_cache(IPCT_REFRESH, skb);
}
ct_add_counters(ct, ctinfo, skb);
write_unlock_bh(&ip_conntrack_lock);
......
......@@ -1862,7 +1862,8 @@ asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned int flag
if (err < 0)
goto out_freeiov;
}
err = __put_user(msg_sys.msg_flags, COMPAT_FLAGS(msg));
err = __put_user((msg_sys.msg_flags & ~MSG_CMSG_COMPAT),
COMPAT_FLAGS(msg));
if (err)
goto out_freeiov;
if (MSG_CMSG_COMPAT & flags)
......