Commit 51a26ae7 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "Just a small pile of fixes"

 1) Fix race conditions in IP fragmentation LRU list handling, from
    Konstantin Khlebnikov.

 2) vfree() is no longer verboten in interrupts, so deferring is
    pointless, from Al Viro.

 3) Conversion from mutex to semaphore in netpoll left trylock test
    inverted, caught by Dan Carpenter.

 4) 3c59x uses wrong base address when releasing regions, from Sergei
    Shtylyov.

 5) Bounds checking in TIPC from Dan Carpenter.

 6) Fastopen cookies should not be expired as aggressively as other TCP
    metrics.  From Eric Dumazet.

 7) Fix retrieval of MAC address in ibmveth, from Ben Herrenschmidt.

 8) Don't use "u16" in virtio user headers, from Stephen Hemminger.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  tipc: potential divide by zero in tipc_link_recv_fragment()
  tipc: add a bounds check in link_recv_changeover_msg()
  net/usb: new driver for RTL8152
  3c59x: fix freeing nonexistent resource on driver unload
  netpoll: inverted down_trylock() test
  rps_dev_flow_table_release(): no need to delay vfree()
  fib_trie: no need to delay vfree()
  net: frag, fix race conditions in LRU list maintenance
  tcp: do not expire TCP fastopen cookies
  net/eth/ibmveth: Fixup retrieval of MAC address
  virtio: don't expose u16 in userspace api
parents 2b69703f 6bf15191
@@ -951,7 +951,7 @@ static int vortex_eisa_remove(struct device *device)
 	unregister_netdev(dev);
 	iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
-	release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
+	release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
 	free_netdev(dev);
 	return 0;
@@ -1322,7 +1322,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
 static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
-	int rc, i;
+	int rc, i, mac_len;
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
 	unsigned char *mac_addr_p;
@@ -1332,11 +1332,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 		dev->unit_address);
 	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
-							NULL);
+							&mac_len);
 	if (!mac_addr_p) {
 		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
 		return -EINVAL;
 	}
+	/* Workaround for old/broken pHyp */
+	if (mac_len == 8)
+		mac_addr_p += 2;
+	else if (mac_len != 6) {
+		dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
+			mac_len);
+		return -EINVAL;
+	}
 	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
 						VETH_MCAST_FILTER_SIZE, NULL);
@@ -1361,17 +1369,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
-	/*
-	 * Some older boxes running PHYP non-natively have an OF that returns
-	 * a 8-byte local-mac-address field (and the first 2 bytes have to be
-	 * ignored) while newer boxes' OF return a 6-byte field. Note that
-	 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
-	 * The RPA doc specifies that the first byte must be 10b, so we'll
-	 * just look for it to solve this 8 vs. 6 byte field issue
-	 */
-	if ((*mac_addr_p & 0x3) != 0x02)
-		mac_addr_p += 2;
 	adapter->mac_addr = 0;
 	memcpy(&adapter->mac_addr, mac_addr_p, 6);
@@ -93,6 +93,17 @@ config USB_RTL8150
 	  To compile this driver as a module, choose M here: the
 	  module will be called rtl8150.
 
+config USB_RTL8152
+	tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
+	select NET_CORE
+	select MII
+	help
+	  This option adds support for Realtek RTL8152 based USB 2.0
+	  10/100 Ethernet adapters.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called r8152.
+
 config USB_USBNET
 	tristate "Multi-purpose USB Networking Framework"
 	select NET_CORE
@@ -6,6 +6,7 @@ obj-$(CONFIG_USB_CATC) += catc.o
 obj-$(CONFIG_USB_KAWETH) += kaweth.o
 obj-$(CONFIG_USB_PEGASUS) += pegasus.o
 obj-$(CONFIG_USB_RTL8150) += rtl8150.o
+obj-$(CONFIG_USB_RTL8152) += r8152.o
 obj-$(CONFIG_USB_HSO) += hso.o
 obj-$(CONFIG_USB_NET_AX8817X) += asix.o
 asix-y := asix_devices.o asix_common.o ax88172a.o
@@ -479,6 +479,7 @@ static const struct driver_info wwan_info = {
 #define NOVATEL_VENDOR_ID 0x1410
 #define ZTE_VENDOR_ID 0x19D2
 #define DELL_VENDOR_ID 0x413C
+#define REALTEK_VENDOR_ID 0x0bda
 static const struct usb_device_id products [] = {
 /*
@@ -619,6 +620,15 @@ static const struct usb_device_id products [] = {
 	.driver_info = 0,
 },
+/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
+#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
+{
+	USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+#endif
+
 /*
  * WHITELIST!!!
  *
@@ -593,7 +593,6 @@ struct rps_dev_flow {
 struct rps_dev_flow_table {
 	unsigned int mask;
 	struct rcu_head rcu;
-	struct work_struct free_work;
 	struct rps_dev_flow flows[0];
 };
 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
@@ -141,14 +141,15 @@ static inline int sum_frag_mem_limit(struct netns_frags *nf)
 static inline void inet_frag_lru_move(struct inet_frag_queue *q)
 {
 	spin_lock(&q->net->lru_lock);
-	list_move_tail(&q->lru_list, &q->net->lru_list);
+	if (!list_empty(&q->lru_list))
+		list_move_tail(&q->lru_list, &q->net->lru_list);
 	spin_unlock(&q->net->lru_lock);
 }
 static inline void inet_frag_lru_del(struct inet_frag_queue *q)
 {
 	spin_lock(&q->net->lru_lock);
-	list_del(&q->lru_list);
+	list_del_init(&q->lru_list);
 	q->net->nqueues--;
 	spin_unlock(&q->net->lru_lock);
 }
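A note on the LRU race fixed above: once inet_frag_lru_del() has unlinked a queue, a concurrent inet_frag_lru_move() must not re-link it. With plain list_del() the unlinked node still points at its old neighbours, so there is no cheap way to tell it apart from a live one; list_del_init() resets the node to the empty state, which is exactly what the new list_empty() check tests (and the INIT_LIST_HEAD() added in inet_frag_alloc(), further down, gives a freshly allocated queue the same well-defined state). A minimal userspace sketch of that idea, re-implementing the list_head operations for illustration rather than using <linux/list.h>:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->prev = h;
	h->next = h;
}

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void __list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* list_del() would leave the node's pointers stale; list_del_init()
 * resets them, so a later list_empty(node) means "already unlinked". */
static void list_del_init(struct list_head *n)
{
	__list_del(n);
	INIT_LIST_HEAD(n);
}

static bool list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_move_tail(struct list_head *n, struct list_head *head)
{
	__list_del(n);
	list_add_tail(n, head);
}

int main(void)
{
	struct list_head lru, q;

	INIT_LIST_HEAD(&lru);
	INIT_LIST_HEAD(&q);		/* as inet_frag_alloc() now does */

	list_add_tail(&q, &lru);	/* queue placed on the LRU list */
	list_del_init(&q);		/* inet_frag_lru_del() on eviction */

	/* inet_frag_lru_move() after the fix: skip already-unlinked queues */
	if (!list_empty(&q))
		list_move_tail(&q, &lru);

	printf("LRU still empty after racy move attempt: %s\n",
	       list_empty(&lru) ? "yes" : "no");
	return 0;
}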
@@ -191,7 +191,7 @@ struct virtio_net_ctrl_mac {
  * specified.
  */
 struct virtio_net_ctrl_mq {
-	u16 virtqueue_pairs;
+	__u16 virtqueue_pairs;
 };
 #define VIRTIO_NET_CTRL_MQ 4
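Background on the one-line virtio change: this header is exported to userspace, where the kernel-internal u16 typedef does not exist, so userspace-visible structures have to use the double-underscore fixed-width types from <linux/types.h>. A tiny illustrative userspace snippet (the struct name here is made up, not the virtio API):

#include <linux/types.h>
#include <stdio.h>

/* Hypothetical uapi-style struct mirroring the fix: __u16, never u16. */
struct example_ctrl_mq {
	__u16 virtqueue_pairs;
};

int main(void)
{
	struct example_ctrl_mq mq = { .virtqueue_pairs = 4 };

	printf("%u (size %zu)\n", mq.virtqueue_pairs, sizeof(mq));
	return 0;
}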
@@ -606,21 +606,11 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 	return sprintf(buf, "%lu\n", val);
 }
-static void rps_dev_flow_table_release_work(struct work_struct *work)
-{
-	struct rps_dev_flow_table *table = container_of(work,
-	    struct rps_dev_flow_table, free_work);
-	vfree(table);
-}
 static void rps_dev_flow_table_release(struct rcu_head *rcu)
 {
 	struct rps_dev_flow_table *table = container_of(rcu,
 	    struct rps_dev_flow_table, rcu);
-	INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
-	schedule_work(&table->free_work);
+	vfree(table);
 }
 static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
@@ -206,7 +206,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 	 * the dev_open/close paths use this to block netpoll activity
 	 * while changing device state
 	 */
-	if (!down_trylock(&ni->dev_lock))
+	if (down_trylock(&ni->dev_lock))
		return;
 	if (!netif_running(dev)) {
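Why this one-character fix matters: mutex_trylock() returns non-zero when the lock was taken, while down_trylock() returns zero on success and non-zero on contention, so the "!" from the old mutex-based test has to be dropped after the conversion to a semaphore. A small userspace sketch of the two return conventions (stub functions only, not the real kernel API):

#include <stdio.h>

/* Stubs modelling the kernel return conventions (illustrative only):
 *   mutex_trylock() -> 1 if acquired, 0 if contended
 *   down_trylock()  -> 0 if acquired, non-zero if contended
 */
static int mutex_trylock_stub(int *locked)
{
	if (*locked)
		return 0;		/* contended */
	*locked = 1;
	return 1;			/* acquired */
}

static int down_trylock_stub(int *count)
{
	if (*count <= 0)
		return 1;		/* contended */
	(*count)--;
	return 0;			/* acquired */
}

int main(void)
{
	int locked = 1, count = 0;	/* pretend the lock is contended */

	/* Old mutex-based test: bail out when the lock cannot be taken. */
	if (!mutex_trylock_stub(&locked))
		printf("mutex: contended, bail out\n");

	/* The equivalent semaphore test needs no '!': non-zero already
	 * means "could not acquire", which is the bail-out case. */
	if (down_trylock_stub(&count))
		printf("semaphore: contended, bail out\n");

	return 0;
}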
@@ -125,7 +125,6 @@ struct tnode {
 	unsigned int empty_children; /* KEYLENGTH bits needed */
 	union {
 		struct rcu_head rcu;
-		struct work_struct work;
 		struct tnode *tnode_free;
 	};
 	struct rt_trie_node __rcu *child[0];
@@ -383,12 +382,6 @@ static struct tnode *tnode_alloc(size_t size)
 		return vzalloc(size);
 }
-static void __tnode_vfree(struct work_struct *arg)
-{
-	struct tnode *tn = container_of(arg, struct tnode, work);
-	vfree(tn);
-}
 static void __tnode_free_rcu(struct rcu_head *head)
 {
 	struct tnode *tn = container_of(head, struct tnode, rcu);
@@ -397,10 +390,8 @@ static void __tnode_free_rcu(struct rcu_head *head)
 	if (size <= PAGE_SIZE)
 		kfree(tn);
-	else {
-		INIT_WORK(&tn->work, __tnode_vfree);
-		schedule_work(&tn->work);
-	}
+	else
+		vfree(tn);
 }
 static inline void tnode_free(struct tnode *tn)
@@ -305,6 +305,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
 	spin_lock_init(&q->lock);
 	atomic_set(&q->refcnt, 1);
+	INIT_LIST_HEAD(&q->lru_list);
 	return q;
 }
@@ -96,7 +96,8 @@ struct tcpm_hash_bucket {
 static DEFINE_SPINLOCK(tcp_metrics_lock);
-static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
+static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
+			  bool fastopen_clear)
 {
 	u32 val;
@@ -122,9 +123,11 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
 	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
 	tm->tcpm_ts = 0;
 	tm->tcpm_ts_stamp = 0;
-	tm->tcpm_fastopen.mss = 0;
-	tm->tcpm_fastopen.syn_loss = 0;
-	tm->tcpm_fastopen.cookie.len = 0;
+	if (fastopen_clear) {
+		tm->tcpm_fastopen.mss = 0;
+		tm->tcpm_fastopen.syn_loss = 0;
+		tm->tcpm_fastopen.cookie.len = 0;
+	}
 }
 static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
@@ -154,7 +157,7 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 	}
 	tm->tcpm_addr = *addr;
-	tcpm_suck_dst(tm, dst);
+	tcpm_suck_dst(tm, dst, true);
 	if (likely(!reclaim)) {
 		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
@@ -171,7 +174,7 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
 {
 	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
-		tcpm_suck_dst(tm, dst);
+		tcpm_suck_dst(tm, dst, false);
 }
 #define TCP_METRICS_RECLAIM_DEPTH 5
@@ -2306,8 +2306,11 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
 	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
 	u32 msg_typ = msg_type(tunnel_msg);
 	u32 msg_count = msg_msgcnt(tunnel_msg);
+	u32 bearer_id = msg_bearer_id(tunnel_msg);
-	dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
+	if (bearer_id >= MAX_BEARERS)
+		goto exit;
+	dest_link = (*l_ptr)->owner->links[bearer_id];
 	if (!dest_link)
 		goto exit;
 	if (dest_link == *l_ptr) {
@@ -2521,14 +2524,16 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
 		struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
 		u32 msg_sz = msg_size(imsg);
 		u32 fragm_sz = msg_data_sz(fragm);
-		u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
+		u32 exp_fragm_cnt;
 		u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
 		if (msg_type(imsg) == TIPC_MCAST_MSG)
 			max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
-		if (msg_size(imsg) > max) {
+		if (fragm_sz == 0 || msg_size(imsg) > max) {
 			kfree_skb(fbuf);
 			return 0;
 		}
+		exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
 		pbuf = tipc_buf_acquire(msg_size(imsg));
 		if (pbuf != NULL) {
 			pbuf->next = *pending;
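On the divide-by-zero fix above: the expected fragment count uses the ceil-division idiom msg_sz / fragm_sz + !!(msg_sz % fragm_sz), and fragm_sz is taken from the received fragment header, so it has to be validated before the division runs. A tiny illustrative sketch of that ordering (made-up values, not the TIPC code):

#include <stdint.h>
#include <stdio.h>

/* Ceil-division only after the peer-supplied fragment size is validated. */
static uint32_t expected_fragments(uint32_t msg_sz, uint32_t fragm_sz)
{
	if (fragm_sz == 0)
		return 0;	/* malformed header: drop instead of dividing */
	return msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
}

int main(void)
{
	printf("%u\n", expected_fragments(1000, 300));	/* 4 fragments */
	printf("%u\n", expected_fragments(1000, 0));	/* rejected, no crash */
	return 0;
}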