Commit e304dfdb authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking from David Miller:

1) IPV4 routing metrics can become stale when routes are changed by the
   administrator, fix from Steffen Klassert.

2) atl1c does "val |= XXX;" where XXX is a bit number, not a bit mask;
   fix by using set_bit.  From Dan Carpenter.

3) Memory accounting bug in carl9170 driver results in wedged TX queue.
   Fix from Nicolas Cavallari.

4) iwlwifi accidentally uses "sizeof(ptr)" instead of "sizeof(*ptr)", fix
   from Johannes Berg.

5) Openvswitch doesn't honor dp_ifindex when doing vport lookups, fix
   from Ben Pfaff.

6) ehea conversion to 64-bit stats lost multicast and rx_errors
   accounting, fix from Eric Dumazet.

7) Bridge state transition logging in br_stp_disable_port() is busted,
   it's emitted at the wrong time and the message is in the wrong tense,
   fix from Paulius Zaleckas.

8) mlx4 device erroneously invokes the queue resize firmware operation
   twice, fix from Jack Morgenstein.

9) Fix deadlock in usbnet, need to drop lock when invoking usb_unlink_urb()
   otherwise we recurse into taking it again.  Fix from Sebastian Siewior.

10) hyperv network driver uses the wrong driver name string, fix from
    Haiyang Zhang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net/hyperv: Use the built-in macro KBUILD_MODNAME for this driver
  net/usbnet: avoid recursive locking in usbnet_stop()
  route: Remove redirect_genid
  inetpeer: Invalidate the inetpeer tree along with the routing cache
  mlx4_core: fix bug in modify_cq wrapper for resize flow.
  atl1c: set ATL1C_WORK_EVENT_RESET bit correctly
  bridge: fix state reporting when port is disabled
  bridge: br_log_state() s/entering/entered/
  ehea: restore multicast and rx_errors fields
  openvswitch: Fix checksum update for actions on UDP packets.
  openvswitch: Honor dp_ifindex, when specified, for vport lookup by name.
  iwlwifi: fix wowlan suspend
  mwifiex: reset encryption mode flag before association
  carl9170: fix frame delivery if sta is in powersave mode
  carl9170: Fix memory accounting when sta is in power-save mode.
parents 9f8050c4 d31b20fc
@@ -1710,7 +1710,7 @@ static irqreturn_t atl1c_intr(int irq, void *data)
 			"atl1c hardware error (status = 0x%x)\n",
 			status & ISR_ERROR);
 		/* reset MAC */
-		adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+		set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
 		schedule_work(&adapter->common_task);
 		return IRQ_HANDLED;
 	}
...
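Editorial aside, not part of the patch above: ATL1C_WORK_EVENT_RESET is a bit
index, so OR-ing it into the flags word sets whatever bit happens to share that
numeric value rather than the intended flag, which is why the fix switches to
set_bit().  A minimal stand-alone C sketch of the difference (the index value 1
is hypothetical, chosen only for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned long flags = 0;
            const int EVENT_RESET = 1;      /* hypothetical bit index */

            flags |= EVENT_RESET;           /* buggy: ORs in the value 1, i.e. sets bit 0 */
            printf("or'd index : 0x%lx\n", flags);

            flags = 0;
            flags |= 1UL << EVENT_RESET;    /* what set_bit(EVENT_RESET, &flags) sets (atomically, in the kernel) */
            printf("shifted bit: 0x%lx\n", flags);
            return 0;
    }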
@@ -336,7 +336,9 @@ static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
 	stats->tx_bytes = tx_bytes;
 	stats->rx_packets = rx_packets;
 
-	return &port->stats;
+	stats->multicast = port->stats.multicast;
+	stats->rx_errors = port->stats.rx_errors;
+	return stats;
 }
 
 static void ehea_update_stats(struct work_struct *work)
...
@@ -2255,8 +2255,7 @@ int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
 
 	if (vhcr->op_modifier == 0) {
 		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
-		if (err)
-			goto ex_put;
+		goto ex_put;
 	}
 
 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
...
@@ -313,7 +313,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 static void netvsc_get_drvinfo(struct net_device *net,
 			       struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, "hv_netvsc");
+	strcpy(info->driver, KBUILD_MODNAME);
 	strcpy(info->version, HV_DRV_VERSION);
 	strcpy(info->fw_version, "N/A");
 }
@@ -485,7 +485,7 @@ MODULE_DEVICE_TABLE(vmbus, id_table);
 
 /* The one and only one */
 static struct hv_driver netvsc_drv = {
-	.name = "netvsc",
+	.name = KBUILD_MODNAME,
 	.id_table = id_table,
 	.probe = netvsc_probe,
 	.remove = netvsc_remove,
...
@@ -589,6 +589,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
 		entry = (struct skb_data *) skb->cb;
 		urb = entry->urb;
 
+		spin_unlock_irqrestore(&q->lock, flags);
 		// during some PM-driven resume scenarios,
 		// these (async) unlinks complete immediately
 		retval = usb_unlink_urb (urb);
@@ -596,6 +597,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
 			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
 		else
 			count++;
+		spin_lock_irqsave(&q->lock, flags);
 	}
 	spin_unlock_irqrestore (&q->lock, flags);
 	return count;
...
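Editorial aside on the usbnet change above: usb_unlink_urb() can run the URB
completion handler synchronously, and that handler takes q->lock itself, so
calling it while already holding the lock self-deadlocks; hence the
unlock/relock around the call.  A minimal user-space sketch of the same shape
(the pthread lock and the helper names are illustrative, not the driver's API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for the URB completion handler, which needs q_lock itself. */
    static void completion_handler(void)
    {
            pthread_mutex_lock(&q_lock);
            puts("completion handler ran under q_lock");
            pthread_mutex_unlock(&q_lock);
    }

    /* Stand-in for usb_unlink_urb(): may invoke the completion synchronously. */
    static void unlink_one(void)
    {
            completion_handler();
    }

    int main(void)
    {
            pthread_mutex_lock(&q_lock);
            /* Calling unlink_one() here, with q_lock held, would deadlock on a
             * non-recursive lock.  Drop the lock around the call and re-take it
             * afterwards -- the same shape as the usbnet fix above. */
            pthread_mutex_unlock(&q_lock);
            unlink_one();
            pthread_mutex_lock(&q_lock);
            pthread_mutex_unlock(&q_lock);
            return 0;
    }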
@@ -1234,6 +1234,7 @@ static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
 {
 	struct ieee80211_sta *sta;
 	struct carl9170_sta_info *sta_info;
+	struct ieee80211_tx_info *tx_info;
 
 	rcu_read_lock();
 	sta = __carl9170_get_tx_sta(ar, skb);
@@ -1241,16 +1242,18 @@ static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
 		goto out_rcu;
 
 	sta_info = (void *) sta->drv_priv;
-	if (unlikely(sta_info->sleeping)) {
-		struct ieee80211_tx_info *tx_info;
+	tx_info = IEEE80211_SKB_CB(skb);
 
+	if (unlikely(sta_info->sleeping) &&
+	    !(tx_info->flags & (IEEE80211_TX_CTL_POLL_RESPONSE |
+				IEEE80211_TX_CTL_CLEAR_PS_FILT))) {
 		rcu_read_unlock();
 
-		tx_info = IEEE80211_SKB_CB(skb);
 		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
 			atomic_dec(&ar->tx_ampdu_upload);
 
 		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+		carl9170_release_dev_space(ar, skb);
 		carl9170_tx_status(ar, skb, false);
 		return true;
 	}
...
@@ -1240,7 +1240,7 @@ int iwlagn_suspend(struct iwl_priv *priv,
 		.flags = CMD_SYNC,
 		.data[0] = key_data.rsc_tsc,
 		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-		.len[0] = sizeof(key_data.rsc_tsc),
+		.len[0] = sizeof(*key_data.rsc_tsc),
 	};
 
 	ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
...
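Editorial aside on the iwlwifi change above: key_data.rsc_tsc is a pointer, so
sizeof(key_data.rsc_tsc) is the size of the pointer (4 or 8 bytes), not the
size of the structure it points to.  A tiny stand-alone C sketch of the
distinction (the struct name and its size are made up for illustration):

    #include <stdio.h>

    struct rsc_tsc_data {                   /* hypothetical stand-in structure */
            unsigned char blob[512];
    };

    int main(void)
    {
            struct rsc_tsc_data *rsc_tsc = NULL;

            /* buggy form: size of the pointer itself (typically 4 or 8) */
            printf("sizeof(rsc_tsc)  = %zu\n", sizeof(rsc_tsc));
            /* fixed form: size of the pointed-to structure (512 here) */
            printf("sizeof(*rsc_tsc) = %zu\n", sizeof(*rsc_tsc));
            return 0;
    }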
@@ -846,6 +846,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
 	priv->sec_info.wpa_enabled = false;
 	priv->sec_info.wpa2_enabled = false;
 	priv->wep_key_curr_index = 0;
+	priv->sec_info.encryption_mode = 0;
 	ret = mwifiex_set_encode(priv, NULL, 0, 0, 1);
 
 	if (mode == NL80211_IFTYPE_ADHOC) {
...
@@ -35,12 +35,12 @@ struct inet_peer {
 	u32			metrics[RTAX_MAX];
 	u32			rate_tokens;	/* rate limiting for ICMP */
-	int			redirect_genid;
 	unsigned long		rate_last;
 	unsigned long		pmtu_expires;
 	u32			pmtu_orig;
 	u32			pmtu_learned;
 	struct inetpeer_addr_base redirect_learned;
+	struct list_head	gc_list;
 	/*
 	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
 	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
@@ -96,6 +96,8 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
 extern void inet_putpeer(struct inet_peer *p);
 extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
+extern void inetpeer_invalidate_tree(int family);
+
 /*
  * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
  * tcp_ts_stamp if no refcount is taken on inet_peer
...
@@ -31,7 +31,7 @@ static const char *const br_port_state_names[] = {
 
 void br_log_state(const struct net_bridge_port *p)
 {
-	br_info(p->br, "port %u(%s) entering %s state\n",
+	br_info(p->br, "port %u(%s) entered %s state\n",
 		(unsigned) p->port_no, p->dev->name,
 		br_port_state_names[p->state]);
 }
...
@@ -98,14 +98,13 @@ void br_stp_disable_port(struct net_bridge_port *p)
 	struct net_bridge *br = p->br;
 	int wasroot;
 
-	br_log_state(p);
 	wasroot = br_is_root_bridge(br);
 	br_become_designated_port(p);
 	p->state = BR_STATE_DISABLED;
 	p->topology_change_ack = 0;
 	p->config_pending = 0;
 
+	br_log_state(p);
 	br_ifinfo_notify(RTM_NEWLINK, p);
 
 	del_timer(&p->message_age_timer);
...
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/net.h>
+#include <linux/workqueue.h>
 #include <net/ip.h>
 #include <net/inetpeer.h>
 #include <net/secure_seq.h>
@@ -66,6 +67,11 @@
 
 static struct kmem_cache *peer_cachep __read_mostly;
 
+static LIST_HEAD(gc_list);
+static const int gc_delay = 60 * HZ;
+static struct delayed_work gc_work;
+static DEFINE_SPINLOCK(gc_lock);
+
 #define node_height(x) x->avl_height
 
 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
@@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries m
 int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
 
+static void inetpeer_gc_worker(struct work_struct *work)
+{
+	struct inet_peer *p, *n;
+	LIST_HEAD(list);
+
+	spin_lock_bh(&gc_lock);
+	list_replace_init(&gc_list, &list);
+	spin_unlock_bh(&gc_lock);
+
+	if (list_empty(&list))
+		return;
+
+	list_for_each_entry_safe(p, n, &list, gc_list) {
+
+		if(need_resched())
+			cond_resched();
+
+		if (p->avl_left != peer_avl_empty) {
+			list_add_tail(&p->avl_left->gc_list, &list);
+			p->avl_left = peer_avl_empty;
+		}
+
+		if (p->avl_right != peer_avl_empty) {
+			list_add_tail(&p->avl_right->gc_list, &list);
+			p->avl_right = peer_avl_empty;
+		}
+
+		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
+
+		if (!atomic_read(&p->refcnt)) {
+			list_del(&p->gc_list);
+			kmem_cache_free(peer_cachep, p);
+		}
+	}
+
+	if (list_empty(&list))
+		return;
+
+	spin_lock_bh(&gc_lock);
+	list_splice(&list, &gc_list);
+	spin_unlock_bh(&gc_lock);
+
+	schedule_delayed_work(&gc_work, gc_delay);
+}
+
 /* Called from ip_output.c:ip_init */
 void __init inet_initpeers(void)
@@ -126,6 +176,7 @@ void __init inet_initpeers(void)
 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 			NULL);
 
+	INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -447,9 +498,8 @@ struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
 		p->rate_last = 0;
 		p->pmtu_expires = 0;
 		p->pmtu_orig = 0;
-		p->redirect_genid = 0;
 		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
-
+		INIT_LIST_HEAD(&p->gc_list);
 
 		/* Link the node. */
 		link_to_pool(p, base);
@@ -509,3 +559,30 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
 	return rc;
 }
 EXPORT_SYMBOL(inet_peer_xrlim_allow);
+
+void inetpeer_invalidate_tree(int family)
+{
+	struct inet_peer *old, *new, *prev;
+	struct inet_peer_base *base = family_to_base(family);
+
+	write_seqlock_bh(&base->lock);
+
+	old = base->root;
+	if (old == peer_avl_empty_rcu)
+		goto out;
+
+	new = peer_avl_empty_rcu;
+
+	prev = cmpxchg(&base->root, old, new);
+	if (prev == old) {
+		base->total = 0;
+		spin_lock(&gc_lock);
+		list_add_tail(&prev->gc_list, &gc_list);
+		spin_unlock(&gc_lock);
+		schedule_delayed_work(&gc_work, gc_delay);
+	}
+
+out:
+	write_sequnlock_bh(&base->lock);
+}
+EXPORT_SYMBOL(inetpeer_invalidate_tree);
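Editorial aside on the inetpeer changes above: the invalidation works by
atomically swapping the tree root for the empty sentinel (cmpxchg under the
base seqlock), then handing the detached tree to a deferred worker that walks
it and frees peers whose refcount has dropped to zero.  A rough stand-alone
sketch of that detach-now, reclaim-later shape using C11 atomics (all names
and the two-node "tree" are illustrative only):

    #include <stdatomic.h>
    #include <stdio.h>

    struct node { int v; struct node *left, *right; };

    static struct node leaf_a = {1, NULL, NULL};
    static struct node leaf_b = {2, NULL, NULL};
    static struct node old_root = {0, &leaf_a, &leaf_b};

    /* Shared root pointer; NULL plays the role of the empty sentinel here. */
    static _Atomic(struct node *) root = &old_root;

    int main(void)
    {
            struct node *expected = atomic_load(&root);

            /* Detach the whole tree in one atomic step; lookups that already
             * hold the old pointer keep seeing a consistent (if stale) tree,
             * and the detached nodes are reclaimed later -- in the kernel
             * patch, by the deferred inetpeer_gc_worker(). */
            if (atomic_compare_exchange_strong(&root, &expected,
                                               (struct node *)NULL))
                    printf("detached tree rooted at node %d for deferred gc\n",
                           expected->v);
            return 0;
    }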
@@ -132,7 +132,6 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly	= 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly	= 256;
 static int rt_chain_length_max __read_mostly	= 20;
-static int redirect_genid;
 
 static struct delayed_work expires_work;
 static unsigned long expires_ljiffies;
@@ -937,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
 
 	get_random_bytes(&shuffle, sizeof(shuffle));
 	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
-	redirect_genid++;
+	inetpeer_invalidate_tree(AF_INET);
 }
 
 /*
@@ -1485,10 +1484,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
 	peer = rt->peer;
 	if (peer) {
-		if (peer->redirect_learned.a4 != new_gw ||
-		    peer->redirect_genid != redirect_genid) {
+		if (peer->redirect_learned.a4 != new_gw) {
 			peer->redirect_learned.a4 = new_gw;
-			peer->redirect_genid = redirect_genid;
 			atomic_inc(&__rt_peer_genid);
 		}
 
 		check_peer_redir(&rt->dst, peer);
@@ -1793,8 +1790,6 @@ static void ipv4_validate_peer(struct rtable *rt)
 		if (peer) {
 			check_peer_pmtu(&rt->dst, peer);
 
-			if (peer->redirect_genid != redirect_genid)
-				peer->redirect_learned.a4 = 0;
 			if (peer->redirect_learned.a4 &&
 			    peer->redirect_learned.a4 != rt->rt_gateway)
 				check_peer_redir(&rt->dst, peer);
@@ -1958,8 +1953,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
 		dst_init_metrics(&rt->dst, peer->metrics, false);
 
 		check_peer_pmtu(&rt->dst, peer);
-		if (peer->redirect_genid != redirect_genid)
-			peer->redirect_learned.a4 = 0;
+
 		if (peer->redirect_learned.a4 &&
 		    peer->redirect_learned.a4 != rt->rt_gateway) {
 			rt->rt_gateway = peer->redirect_learned.a4;
...
 /*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira Networks.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -145,9 +145,16 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
 		inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
 					 *addr, new_addr, 1);
 	} else if (nh->protocol == IPPROTO_UDP) {
-		if (likely(transport_len >= sizeof(struct udphdr)))
-			inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
-						 *addr, new_addr, 1);
+		if (likely(transport_len >= sizeof(struct udphdr))) {
+			struct udphdr *uh = udp_hdr(skb);
+
+			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+				inet_proto_csum_replace4(&uh->check, skb,
+							 *addr, new_addr, 1);
+				if (!uh->check)
+					uh->check = CSUM_MANGLED_0;
+			}
+		}
 	}
 
 	csum_replace4(&nh->check, *addr, new_addr);
@@ -197,8 +204,22 @@ static void set_tp_port(struct sk_buff *skb, __be16 *port,
 	skb->rxhash = 0;
 }
 
-static int set_udp_port(struct sk_buff *skb,
-			const struct ovs_key_udp *udp_port_key)
+static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
+{
+	struct udphdr *uh = udp_hdr(skb);
+
+	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+		set_tp_port(skb, port, new_port, &uh->check);
+
+		if (!uh->check)
+			uh->check = CSUM_MANGLED_0;
+	} else {
+		*port = new_port;
+		skb->rxhash = 0;
+	}
+}
+
+static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
 {
 	struct udphdr *uh;
 	int err;
@@ -210,16 +231,15 @@ static int set_udp_port(struct sk_buff *skb,
 
 	uh = udp_hdr(skb);
 	if (udp_port_key->udp_src != uh->source)
-		set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+		set_udp_port(skb, &uh->source, udp_port_key->udp_src);
 
 	if (udp_port_key->udp_dst != uh->dest)
-		set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
 
 	return 0;
 }
 
-static int set_tcp_port(struct sk_buff *skb,
-			const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
 {
 	struct tcphdr *th;
 	int err;
@@ -328,11 +348,11 @@ static int execute_set_action(struct sk_buff *skb,
 		break;
 
 	case OVS_KEY_ATTR_TCP:
-		err = set_tcp_port(skb, nla_data(nested_attr));
+		err = set_tcp(skb, nla_data(nested_attr));
 		break;
 
 	case OVS_KEY_ATTR_UDP:
-		err = set_udp_port(skb, nla_data(nested_attr));
+		err = set_udp(skb, nla_data(nested_attr));
 		break;
 	}
...
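Editorial aside on the openvswitch checksum change above: a UDP checksum field
of 0 on the wire means "no checksum was computed" (RFC 768), so datagrams sent
without a checksum are left untouched, and when an incremental update of a
real checksum comes out as 0 the code stores CSUM_MANGLED_0 (0xffff), which is
numerically equivalent in one's-complement arithmetic but distinguishable from
"none".  A minimal stand-alone sketch of that folding rule (the helper name is
made up; only the constant's value matches the kernel's):

    #include <stdio.h>
    #include <stdint.h>

    /* Same value as the kernel's CSUM_MANGLED_0: equivalent to 0 in
     * one's-complement arithmetic, but not the "no checksum" marker. */
    #define CSUM_MANGLED_0 ((uint16_t)0xffff)

    /* Hypothetical helper: fold a freshly computed UDP checksum for the wire. */
    static uint16_t fold_udp_check(uint16_t computed)
    {
            return computed ? computed : CSUM_MANGLED_0;
    }

    int main(void)
    {
            printf("0x%04x -> 0x%04x\n", 0x1234, fold_udp_check(0x1234));
            printf("0x%04x -> 0x%04x\n", 0x0000, fold_udp_check(0x0000));
            return 0;
    }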
@@ -1521,6 +1521,9 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
 		vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
 		if (!vport)
 			return ERR_PTR(-ENODEV);
+		if (ovs_header->dp_ifindex &&
+		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
+			return ERR_PTR(-ENODEV);
 		return vport;
 	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
 		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
...