Commit e4cb0407 authored by Linus Torvalds

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [INET_DIAG]: Fix oops in netlink_rcv_skb
  [IPv6]: Fix NULL pointer dereference in ip6_flush_pending_frames
  [NETFILTER]: Fix/improve deadlock condition on module removal netfilter
  [NETFILTER]: nf_conntrack_ipv4: fix "Frag of proto ..." messages
  [NET] DOC: Update networking/multiqueue.txt with correct information.
  [IPV6]: Freeing alive inet6 address
  [DECNET]: Fix interface address listing regression.
  [IPV4] devinet: show all addresses assigned to interface
  [NET]: Do not dereference iov if length is zero
  [TG3]: Workaround MSI bug on 5714/5780.
  [Bluetooth] Fix parameter list for event filter command
  [Bluetooth] Update security filter for Bluetooth 2.1
  [Bluetooth] Add compat handling for timestamp structure
  [Bluetooth] Add missing stat.byte_rx counter modification
parents cabe4569 0a9c7301
......@@ -58,9 +58,13 @@ software, so it's a straight round-robin qdisc. It uses the same syntax and
classification priomap that sch_prio uses, so it should be intuitive to
configure for people who've used sch_prio.
-The PRIO qdisc naturally plugs into a multiqueue device. If PRIO has been
-built with NET_SCH_PRIO_MQ, then upon load, it will make sure the number of
-bands requested is equal to the number of queues on the hardware. If they
+In order to utilize the multiqueue features of the qdiscs, the network
+device layer needs to enable multiple queue support. This can be done by
+selecting NETDEVICES_MULTIQUEUE under Drivers.
+
+The PRIO qdisc naturally plugs into a multiqueue device. If
+NETDEVICES_MULTIQUEUE is selected, then on qdisc load, the number of
+bands requested is compared to the number of queues on the hardware. If they
are equal, it sets a one-to-one mapping up between the queues and bands. If
they're not equal, it will not load the qdisc. This is the same behavior
for RR. Once the association is made, any skb that is classified will have
......
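The band-to-queue association described above amounts to stamping each classified skb with its band index so a multiqueue-aware driver can pick the matching TX ring. A minimal sketch of that idea, assuming the skb_set_queue_mapping() helper from the same 2.6.23 multiqueue work; the function name and the hw_queues parameter are hypothetical illustrations, not code from this commit:

	#include <linux/skbuff.h>
	#include <linux/errno.h>

	/* Hypothetical sketch: after classification picks a band, record it
	 * in the skb so the driver transmits on the matching hardware ring.
	 * Rejecting band >= hw_queues mirrors the load-time rule above that
	 * the qdisc refuses to load when bands and queues disagree. */
	static int prio_band_to_queue(struct sk_buff *skb, u16 band, u16 hw_queues)
	{
		if (band >= hw_queues)
			return -EINVAL;
		skb_set_queue_mapping(skb, band);	/* one-to-one band -> queue */
		return 0;
	}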
......@@ -691,15 +691,18 @@ static void hci_usb_rx_complete(struct urb *urb)
urb->iso_frame_desc[i].offset,
urb->iso_frame_desc[i].actual_length);
-if (!urb->iso_frame_desc[i].status)
+if (!urb->iso_frame_desc[i].status) {
+husb->hdev->stat.byte_rx += urb->iso_frame_desc[i].actual_length;
hci_recv_fragment(husb->hdev, _urb->type,
urb->transfer_buffer + urb->iso_frame_desc[i].offset,
urb->iso_frame_desc[i].actual_length);
+}
}
#else
;
#endif
} else {
+husb->hdev->stat.byte_rx += count;
err = hci_recv_fragment(husb->hdev, _urb->type, urb->transfer_buffer, count);
if (err < 0) {
BT_ERR("%s corrupted packet: type %d count %d",
......
......@@ -64,8 +64,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.80"
-#define DRV_MODULE_RELDATE "August 2, 2007"
+#define DRV_MODULE_VERSION "3.81"
+#define DRV_MODULE_RELDATE "September 5, 2007"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
......@@ -7127,6 +7127,10 @@ static int tg3_open(struct net_device *dev)
} else if (pci_enable_msi(tp->pdev) == 0) {
u32 msi_mode;
+/* Hardware bug - MSI won't work if INTX disabled. */
+if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+pci_intx(tp->pdev, 1);
msi_mode = tr32(MSGINT_MODE);
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
......@@ -12172,6 +12176,11 @@ static int tg3_resume(struct pci_dev *pdev)
if (err)
return err;
+/* Hardware bug - MSI won't work if INTX disabled. */
+if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
+(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
+pci_intx(tp->pdev, 1);
netif_device_attach(dev);
tg3_full_lock(tp, 0);
......
......@@ -88,9 +88,8 @@ struct nf_sockopt_ops
int (*compat_get)(struct sock *sk, int optval,
void __user *user, int *len);
-/* Number of users inside set() or get(). */
-unsigned int use;
-struct task_struct *cleanup_task;
+/* Use the module struct to lock set/get code in place */
+struct module *owner;
};
/* Each queued (to userspace) skbuff has one of these. */
......
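With the use counter and cleanup_task gone, each registrant simply names its owning module, which is what the .owner = THIS_MODULE one-liners later in this merge do. A hypothetical registration under the new scheme (the EXAMPLE_* constants and do_example_get_ctl are made-up placeholders, not code from this commit):

	static struct nf_sockopt_ops example_sockopts = {
		.pf         = PF_INET,
		.get_optmin = EXAMPLE_BASE_CTL,       /* hypothetical */
		.get_optmax = EXAMPLE_SO_GET_MAX + 1, /* hypothetical */
		.get        = do_example_get_ctl,     /* hypothetical */
		.owner      = THIS_MODULE, /* lets nf_sockopt() pin this module */
	};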
......@@ -183,6 +183,7 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
struct sk_buff *skb;
__le16 param;
+__u8 flt_type;
BT_DBG("%s %ld", hdev->name, opt);
......@@ -233,11 +234,8 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
/* Optional initialization */
/* Clear Event Filters */
-{
-struct hci_cp_set_event_flt cp;
-cp.flt_type = HCI_FLT_CLEAR_ALL;
-hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
-}
+flt_type = HCI_FLT_CLEAR_ALL;
+hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type);
/* Page timeout ~20 secs */
param = cpu_to_le16(0x8000);
......
......@@ -37,6 +37,7 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>
......@@ -70,15 +71,15 @@ static struct hci_sec_filter hci_sec_filter = {
{
{ 0x0 },
/* OGF_LINK_CTL */
-{ 0xbe000006, 0x00000001, 0x000000, 0x00 },
+{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
/* OGF_LINK_POLICY */
-{ 0x00005200, 0x00000000, 0x000000, 0x00 },
+{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
/* OGF_HOST_CTL */
-{ 0xaab00200, 0x2b402aaa, 0x020154, 0x00 },
+{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
/* OGF_INFO_PARAM */
-{ 0x000002be, 0x00000000, 0x000000, 0x00 },
+{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
/* OGF_STATUS_PARAM */
-{ 0x000000ea, 0x00000000, 0x000000, 0x00 }
+{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
}
};
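For reference, each row above holds four 32-bit words covering OCFs 0 through 127, and the kernel checks a command against them with a plain bit test; the sketch below condenses hci_test_bit() from this same file into a hypothetical wrapper. Widening the third OGF_HOST_CTL word from 0x020154 to 0x05220154 therefore whitelists the new Bluetooth 2.1 commands whose OCFs land in that word.

	/* Condensed sketch: bit (ocf & 31) of word (ocf >> 5) in an OGF's
	 * row decides whether unprivileged sockets may send that command. */
	static int hci_cmd_permitted(const __u32 *ogf_row, __u16 ocf)
	{
		return ogf_row[ocf >> 5] & ((__u32) 1 << (ocf & 31));
	}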
......@@ -342,9 +343,23 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
if (mask & HCI_CMSG_TSTAMP) {
struct timeval tv;
+void *data;
+int len;
skb_get_timestamp(skb, &tv);
-put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(tv), &tv);
+if (msg->msg_flags & MSG_CMSG_COMPAT) {
+struct compat_timeval ctv;
+ctv.tv_sec = tv.tv_sec;
+ctv.tv_usec = tv.tv_usec;
+data = &ctv;
+len = sizeof(ctv);
+} else {
+data = &tv;
+len = sizeof(tv);
+}
+put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
}
}
......
......@@ -1513,6 +1513,7 @@ static struct nf_sockopt_ops ebt_sockopts =
.get_optmin = EBT_BASE_CTL,
.get_optmax = EBT_SO_GET_MAX + 1,
.get = do_ebt_get_ctl,
+.owner = THIS_MODULE,
};
static int __init ebtables_init(void)
......
......@@ -450,6 +450,9 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
__wsum csum;
int chunk = skb->len - hlen;
+if (!chunk)
+return 0;
/* Skip filled elements.
* Pretty silly, look at memcpy_toiovec, though 8)
*/
......
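For context on the hunk above: the early return guards the "skip filled elements" scan mentioned in the comment, because a zero-byte datagram may arrive with no non-empty iovec slot to find. A simplified illustration of the pre-patch hazard, wrapped in a hypothetical helper (not the full kernel function):

	#include <linux/uio.h>

	/* With chunk == 0 there is nothing to copy, so a caller-supplied
	 * iovec of only zero-length entries gives this scan no terminator
	 * and it walks off the end of the array. */
	static const struct iovec *first_nonempty_iov(const struct iovec *iov)
	{
		while (!iov->iov_len)
			iov++;
		return iov;
	}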
......@@ -814,7 +814,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
ifa = ifa->ifa_next, dn_idx++) {
if (dn_idx < skip_naddr)
-goto cont;
+continue;
if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWADDR,
......
......@@ -1193,7 +1193,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
ifa = ifa->ifa_next, ip_idx++) {
if (ip_idx < s_ip_idx)
-goto cont;
+continue;
if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWADDR, NLM_F_MULTI) <= 0)
......
......@@ -836,12 +836,16 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return inet_diag_get_exact(skb, nlh);
}
+static DEFINE_MUTEX(inet_diag_mutex);
static void inet_diag_rcv(struct sock *sk, int len)
{
unsigned int qlen = 0;
do {
+mutex_lock(&inet_diag_mutex);
netlink_run_queue(sk, &qlen, &inet_diag_rcv_msg);
+mutex_unlock(&inet_diag_mutex);
} while (qlen);
}
......
......@@ -2339,6 +2339,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
.get_optmin = IP_VS_BASE_CTL,
.get_optmax = IP_VS_SO_GET_MAX+1,
.get = do_ip_vs_get_ctl,
+.owner = THIS_MODULE,
};
......
......@@ -1161,6 +1161,7 @@ static struct nf_sockopt_ops arpt_sockopts = {
.get_optmin = ARPT_BASE_CTL,
.get_optmax = ARPT_SO_GET_MAX+1,
.get = do_arpt_get_ctl,
+.owner = THIS_MODULE,
};
static int __init arp_tables_init(void)
......
......@@ -2296,6 +2296,7 @@ static struct nf_sockopt_ops ipt_sockopts = {
#ifdef CONFIG_COMPAT
.compat_get = compat_do_ipt_get_ctl,
#endif
+.owner = THIS_MODULE,
};
static struct xt_match icmp_matchstruct __read_mostly = {
......
......@@ -87,14 +87,10 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
if (iph == NULL)
return -NF_DROP;
-/* Never happen */
-if (iph->frag_off & htons(IP_OFFSET)) {
-if (net_ratelimit()) {
-printk(KERN_ERR "ipv4_get_l4proto: Frag of proto %u\n",
-iph->protocol);
-}
+/* Conntrack defragments packets, we might still see fragments
+ * inside ICMP packets though. */
+if (iph->frag_off & htons(IP_OFFSET))
return -NF_DROP;
-}
*dataoff = nhoff + (iph->ihl << 2);
*protonum = iph->protocol;
......@@ -403,6 +399,7 @@ static struct nf_sockopt_ops so_getorigdst = {
.get_optmin = SO_ORIGINAL_DST,
.get_optmax = SO_ORIGINAL_DST+1,
.get = &getorigdst,
+.owner = THIS_MODULE,
};
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
......
......@@ -1427,8 +1427,9 @@ void ip6_flush_pending_frames(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
-IP6_INC_STATS(ip6_dst_idev(skb->dst),
-IPSTATS_MIB_OUTDISCARDS);
+if (skb->dst)
+IP6_INC_STATS(ip6_dst_idev(skb->dst),
+IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
}
......
......@@ -736,7 +736,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
* so fail our DAD process
*/
addrconf_dad_failure(ifp);
-goto out;
+return;
} else {
/*
* This is not a dad solicitation.
......
......@@ -1462,6 +1462,7 @@ static struct nf_sockopt_ops ip6t_sockopts = {
.get_optmin = IP6T_BASE_CTL,
.get_optmax = IP6T_SO_GET_MAX+1,
.get = do_ip6t_get_ctl,
+.owner = THIS_MODULE,
};
static struct xt_match icmp6_matchstruct __read_mostly = {
......
......@@ -55,18 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
-/* No point being interruptible: we're probably in cleanup_module() */
-restart:
mutex_lock(&nf_sockopt_mutex);
-if (reg->use != 0) {
-/* To be woken by nf_sockopt call... */
-/* FIXME: Stuart Young's name appears gratuitously. */
-set_current_state(TASK_UNINTERRUPTIBLE);
-reg->cleanup_task = current;
-mutex_unlock(&nf_sockopt_mutex);
-schedule();
-goto restart;
-}
list_del(&reg->list);
mutex_unlock(&nf_sockopt_mutex);
}
......@@ -86,10 +75,11 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
list_for_each(i, &nf_sockopts) {
ops = (struct nf_sockopt_ops *)i;
if (ops->pf == pf) {
+if (!try_module_get(ops->owner))
+goto out_nosup;
if (get) {
if (val >= ops->get_optmin
&& val < ops->get_optmax) {
-ops->use++;
mutex_unlock(&nf_sockopt_mutex);
ret = ops->get(sk, val, opt, len);
goto out;
......@@ -97,23 +87,20 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
} else {
if (val >= ops->set_optmin
&& val < ops->set_optmax) {
-ops->use++;
mutex_unlock(&nf_sockopt_mutex);
ret = ops->set(sk, val, opt, *len);
goto out;
}
}
+module_put(ops->owner);
}
}
+out_nosup:
mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT;
out:
-mutex_lock(&nf_sockopt_mutex);
-ops->use--;
-if (ops->cleanup_task)
-wake_up_process(ops->cleanup_task);
-mutex_unlock(&nf_sockopt_mutex);
+module_put(ops->owner);
return ret;
}
......@@ -144,10 +131,12 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
list_for_each(i, &nf_sockopts) {
ops = (struct nf_sockopt_ops *)i;
if (ops->pf == pf) {
+if (!try_module_get(ops->owner))
+goto out_nosup;
if (get) {
if (val >= ops->get_optmin
&& val < ops->get_optmax) {
-ops->use++;
mutex_unlock(&nf_sockopt_mutex);
if (ops->compat_get)
ret = ops->compat_get(sk,
......@@ -160,7 +149,6 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
} else {
if (val >= ops->set_optmin
&& val < ops->set_optmax) {
-ops->use++;
mutex_unlock(&nf_sockopt_mutex);
if (ops->compat_set)
ret = ops->compat_set(sk,
......@@ -171,17 +159,15 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
goto out;
}
}
+module_put(ops->owner);
}
}
+out_nosup:
mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT;
out:
-mutex_lock(&nf_sockopt_mutex);
-ops->use--;
-if (ops->cleanup_task)
-wake_up_process(ops->cleanup_task);
-mutex_unlock(&nf_sockopt_mutex);
+module_put(ops->owner);
return ret;
}
......
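Taken together, the nf_sockopt.c hunks replace the hand-rolled use count and cleanup_task handshake with module reference counting: the handler's module is pinned before the mutex is dropped, so nf_unregister_sockopt() reduces to lock, list_del(), unlock and never needs to sleep. A condensed sketch of the resulting get-path flow, not the verbatim kernel code (the range checks are elided and the function name is hypothetical):

	/* Condensed sketch of the post-patch nf_sockopt() get path: pinning
	 * ops->owner keeps the module (and thus ops->get) alive after the
	 * mutex is released, which is what previously required use++/use--
	 * plus waking cleanup_task on the way out. */
	static int nf_sockopt_get_sketch(struct sock *sk, int pf, int val,
					 void __user *opt, int *len)
	{
		struct nf_sockopt_ops *ops;
		int ret;

		mutex_lock(&nf_sockopt_mutex);
		list_for_each_entry(ops, &nf_sockopts, list) {
			if (ops->pf != pf)
				continue;
			if (!try_module_get(ops->owner))
				continue;	/* owner is mid-unload; skip */
			mutex_unlock(&nf_sockopt_mutex);
			ret = ops->get(sk, val, opt, len);
			module_put(ops->owner);
			return ret;
		}
		mutex_unlock(&nf_sockopt_mutex);
		return -ENOPROTOOPT;
	}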