Commit 8f18e4d0 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Various ipvlan fixes from Eric Dumazet and Mahesh Bandewar.

    The most important is to not assume the packet is RX just because
    the destination address matches that of the device. Such an
    assumption causes problems when an interface is put into loopback
    mode. (A sketch of this approach follows the list below.)

 2) If we retry when creating a new tc entry (because we dropped the
    RTNL mutex in order to load a module, for example) we end up with
    -EAGAIN and then loop trying to replay the request. But we didn't
    reset some state when looping back to the top like this, and if
    another thread meanwhile inserted the same tc entry we were trying
    to, we re-link it, creating an endless loop in the tc chain. Fix
    from Daniel Borkmann. (A sketch of this retry pattern follows the
    list below.)

 3) There are two different WRITE bits in the MDIO address register for
    the stmmac chip, depending upon the chip variant. Due to a bug we
    could set them both, fix from Hock Leong Kweh. (Sketch below.)

 4) Fix mlx4 bug in XDP_TX handling, from Tariq Toukan.
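
Illustration for item 1 (not part of the patches themselves): a minimal userspace C sketch of the idea the ipvlan fix uses, recording per packet at enqueue time whether it came from the TX or RX path, instead of re-deriving that later from MAC addresses (which goes wrong with loopback or hairpin setups). All identifiers here (pkt, pkt_cb, PKT_CB, enqueue_multicast, process_multicast) are made up for illustration; only the pattern mirrors the IPVL_SKB_CB change in the diff below.

/* Sketch of the "stash direction in a per-packet control block" pattern. */
#include <stdbool.h>
#include <stdio.h>

struct pkt {
    char cb[48];            /* per-packet scratch space, like skb->cb */
    const char *payload;
};

struct pkt_cb {
    bool tx_pkt;            /* true if queued from the transmit path */
};

#define PKT_CB(p) ((struct pkt_cb *)&((p)->cb[0]))

static void enqueue_multicast(struct pkt *p, bool tx_pkt)
{
    /* Record the direction once, while we still know it for sure. */
    PKT_CB(p)->tx_pkt = tx_pkt;
}

static void process_multicast(struct pkt *p)
{
    /* Later, trust the recorded flag rather than guessing from headers. */
    if (PKT_CB(p)->tx_pkt)
        printf("forward to local slaves and send out the master: %s\n", p->payload);
    else
        printf("deliver up the stack as RX: %s\n", p->payload);
}

int main(void)
{
    struct pkt a = { .payload = "queued from tx path" };
    struct pkt b = { .payload = "queued from rx path" };

    enqueue_multicast(&a, true);
    enqueue_multicast(&b, false);
    process_multicast(&a);
    process_multicast(&b);
    return 0;
}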
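Illustration for item 2: a self-contained C sketch, under assumed names (handle_request, lookup, create, obj_created), of why per-attempt state must be re-initialized at the replay label. The real fix in the cls_api.c hunk below moves the initialization of tp_created after the replay: label.

/* Sketch of the retry/replay pitfall: state from a failed attempt must not
 * leak into the next attempt, or a later attempt can wrongly believe it
 * owns an object that another thread inserted in the meantime.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int attempts;

static int lookup(void) { return attempts > 1 ? 0 : -ENOENT; } /* found only on retry */
static int create(void) { return -EAGAIN; }                    /* forces a replay */

static int handle_request(void)
{
    bool obj_created;       /* must NOT carry over between attempts */

replay:
    obj_created = false;    /* the essence of the fix: reset per attempt */
    attempts++;

    if (lookup() == 0) {
        if (obj_created)
            printf("attempt %d: linking our freshly created entry\n", attempts);
        else
            printf("attempt %d: reusing entry inserted by someone else\n", attempts);
        return 0;
    }

    if (create() == -EAGAIN) {
        /* We dropped our lock (e.g. to load a module) and must replay. */
        obj_created = true;
        goto replay;
    }
    return 0;   /* error handling omitted in this sketch */
}

int main(void)
{
    return handle_request();
}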
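Illustration for item 3: a small C sketch of building an MDIO command word with exactly one WRITE bit, chosen by chip variant. The bit positions and names below are placeholders, not the real dwmac register layout; the actual change is in the stmmac_mdio_write() hunk further down.

/* Sketch: pick exactly one write bit per chip variant. The original bug was
 * starting from MII_WRITE | MII_BUSY and then OR-ing in MII_GMAC4_WRITE as
 * well on GMAC4, leaving both write bits set.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MII_BUSY        (1u << 0)
#define MII_WRITE       (1u << 1)   /* write bit on older variants (placeholder) */
#define MII_GMAC4_WRITE (1u << 2)   /* write bit on the GMAC4 variant (placeholder) */

static uint32_t mdio_write_cmd(bool has_gmac4)
{
    uint32_t value = MII_BUSY;

    if (has_gmac4)
        value |= MII_GMAC4_WRITE;
    else
        value |= MII_WRITE;

    return value;
}

int main(void)
{
    printf("gmac4: 0x%x, legacy: 0x%x\n", mdio_write_cmd(true), mdio_write_cmd(false));
    return 0;
}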

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: stmmac: fix incorrect bit set in gmac4 mdio addr register
  r8169: add support for RTL8168 series add-on card.
  net: xdp: remove unused bpf_warn_invalid_xdp_buffer()
  openvswitch: upcall: Fix vlan handling.
  ipv4: Namespaceify tcp_tw_reuse knob
  net: korina: Fix NAPI versus resources freeing
  net, sched: fix soft lockup in tc_classify
  net/mlx4_en: Fix user prio field in XDP forward
  tipc: don't send FIN message from connectionless socket
  ipvlan: fix multicast processing
  ipvlan: fix various issues in ipvlan_process_multicast()
parents 0dad3a30 5799fc90
@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
               DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
               &lp->rx_dma_regs->dmasm);
-       korina_free_ring(dev);
        napi_disable(&lp->napi);
+       korina_free_ring(dev);
        if (korina_init(dev) < 0) {
                printk(KERN_ERR "%s: cannot restart device\n", dev->name);
                return;
@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
        tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
        writel(tmp, &lp->rx_dma_regs->dmasm);
-       korina_free_ring(dev);
        napi_disable(&lp->napi);
        cancel_work_sync(&lp->restart_task);
+       korina_free_ring(dev);
        free_irq(lp->rx_irq, dev);
        free_irq(lp->tx_irq, dev);
        free_irq(lp->ovr_irq, dev);
...
@@ -1638,7 +1638,8 @@ int mlx4_en_start_port(struct net_device *dev)
        /* Configure tx cq's and rings */
        for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
-               u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;
+               u8 num_tx_rings_p_up = t == TX ?
+                       priv->num_tx_rings_p_up : priv->tx_ring_num[t];
                for (i = 0; i < priv->tx_ring_num[t]; i++) {
                        /* Configure cq */
...
@@ -326,6 +326,7 @@ enum cfg_version {
 static const struct pci_device_id rtl8169_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
+       { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
...
@@ -116,7 +116,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
        unsigned int mii_address = priv->hw->mii.addr;
        unsigned int mii_data = priv->hw->mii.data;
-       u32 value = MII_WRITE | MII_BUSY;
+       u32 value = MII_BUSY;
        value |= (phyaddr << priv->hw->mii.addr_shift)
                & priv->hw->mii.addr_mask;
@@ -126,6 +126,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
                & priv->hw->mii.clk_csr_mask;
        if (priv->plat->has_gmac4)
                value |= MII_GMAC4_WRITE;
+       else
+               value |= MII_WRITE;
        /* Wait until any existing MII operation is complete */
        if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
...
@@ -99,6 +99,11 @@ struct ipvl_port {
        int                     count;
 };
+struct ipvl_skb_cb {
+       bool tx_pkt;
+};
+
+#define IPVL_SKB_CB(_skb) ((struct ipvl_skb_cb *)&((_skb)->cb[0]))
 static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
 {
        return rcu_dereference(d->rx_handler_data);
...
@@ -198,7 +198,7 @@ void ipvlan_process_multicast(struct work_struct *work)
        unsigned int mac_hash;
        int ret;
        u8 pkt_type;
-       bool hlocal, dlocal;
+       bool tx_pkt;
        __skb_queue_head_init(&list);
@@ -207,8 +207,11 @@ void ipvlan_process_multicast(struct work_struct *work)
        spin_unlock_bh(&port->backlog.lock);
        while ((skb = __skb_dequeue(&list)) != NULL) {
+               struct net_device *dev = skb->dev;
+               bool consumed = false;
+
                ethh = eth_hdr(skb);
-               hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
+               tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
                mac_hash = ipvlan_mac_hash(ethh->h_dest);
                if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
@@ -216,41 +219,45 @@ void ipvlan_process_multicast(struct work_struct *work)
                else
                        pkt_type = PACKET_MULTICAST;
-               dlocal = false;
                rcu_read_lock();
                list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
-                       if (hlocal && (ipvlan->dev == skb->dev)) {
-                               dlocal = true;
+                       if (tx_pkt && (ipvlan->dev == skb->dev))
                                continue;
-                       }
                        if (!test_bit(mac_hash, ipvlan->mac_filters))
                                continue;
+                       if (!(ipvlan->dev->flags & IFF_UP))
+                               continue;
                        ret = NET_RX_DROP;
                        len = skb->len + ETH_HLEN;
                        nskb = skb_clone(skb, GFP_ATOMIC);
-                       if (!nskb)
-                               goto acct;
-                       nskb->pkt_type = pkt_type;
-                       nskb->dev = ipvlan->dev;
-                       if (hlocal)
-                               ret = dev_forward_skb(ipvlan->dev, nskb);
-                       else
-                               ret = netif_rx(nskb);
-acct:
+                       local_bh_disable();
+                       if (nskb) {
+                               consumed = true;
+                               nskb->pkt_type = pkt_type;
+                               nskb->dev = ipvlan->dev;
+                               if (tx_pkt)
+                                       ret = dev_forward_skb(ipvlan->dev, nskb);
+                               else
+                                       ret = netif_rx(nskb);
+                       }
                        ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+                       local_bh_enable();
                }
                rcu_read_unlock();
-               if (dlocal) {
+               if (tx_pkt) {
                        /* If the packet originated here, send it out. */
                        skb->dev = port->dev;
                        skb->pkt_type = pkt_type;
                        dev_queue_xmit(skb);
                } else {
-                       kfree_skb(skb);
+                       if (consumed)
+                               consume_skb(skb);
+                       else
+                               kfree_skb(skb);
                }
+               if (dev)
+                       dev_put(dev);
        }
 }
@@ -470,15 +477,24 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
 }
 static void ipvlan_multicast_enqueue(struct ipvl_port *port,
-                                    struct sk_buff *skb)
+                                    struct sk_buff *skb, bool tx_pkt)
 {
        if (skb->protocol == htons(ETH_P_PAUSE)) {
                kfree_skb(skb);
                return;
        }
+       /* Record whether the deferred packet is from the TX or the RX path.
+        * Looking at mac-addresses on the packet would lead to erroneous
+        * decisions. (This would be true for a loopback-mode on the master
+        * device or a hair-pin mode of the switch.)
+        */
+       IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;
+
        spin_lock(&port->backlog.lock);
        if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
+               if (skb->dev)
+                       dev_hold(skb->dev);
                __skb_queue_tail(&port->backlog, skb);
                spin_unlock(&port->backlog.lock);
                schedule_work(&port->wq);
@@ -537,7 +553,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
        } else if (is_multicast_ether_addr(eth->h_dest)) {
                ipvlan_skb_crossing_ns(skb, NULL);
-               ipvlan_multicast_enqueue(ipvlan->port, skb);
+               ipvlan_multicast_enqueue(ipvlan->port, skb, true);
                return NET_XMIT_SUCCESS;
        }
@@ -634,7 +650,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
         */
        if (nskb) {
                ipvlan_skb_crossing_ns(nskb, NULL);
-               ipvlan_multicast_enqueue(port, nskb);
+               ipvlan_multicast_enqueue(port, nskb, false);
        }
        }
 } else {
...
@@ -135,6 +135,7 @@ static int ipvlan_port_create(struct net_device *dev)
 static void ipvlan_port_destroy(struct net_device *dev)
 {
        struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
+       struct sk_buff *skb;
        dev->priv_flags &= ~IFF_IPVLAN_MASTER;
        if (port->mode == IPVLAN_MODE_L3S) {
@@ -144,7 +145,11 @@ static void ipvlan_port_destroy(struct net_device *dev)
        }
        netdev_rx_handler_unregister(dev);
        cancel_work_sync(&port->wq);
-       __skb_queue_purge(&port->backlog);
+       while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
+               if (skb->dev)
+                       dev_put(skb->dev);
+               kfree_skb(skb);
+       }
        kfree(port);
 }
...
@@ -610,7 +610,6 @@ bool bpf_helper_changes_pkt_data(void *func);
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 void bpf_warn_invalid_xdp_action(u32 act);
-void bpf_warn_invalid_xdp_buffer(void);
 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;
...
@@ -110,6 +110,7 @@ struct netns_ipv4 {
        int sysctl_tcp_orphan_retries;
        int sysctl_tcp_fin_timeout;
        unsigned int sysctl_tcp_notsent_lowat;
+       int sysctl_tcp_tw_reuse;
        int sysctl_igmp_max_memberships;
        int sysctl_igmp_max_msf;
...
@@ -252,7 +252,6 @@ extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
 extern int sysctl_tcp_adv_win_scale;
-extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
 extern int sysctl_tcp_low_latency;
 extern int sysctl_tcp_nometrics_save;
...
@@ -2972,12 +2972,6 @@ void bpf_warn_invalid_xdp_action(u32 act)
 }
 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
-void bpf_warn_invalid_xdp_buffer(void)
-{
-       WARN_ONCE(1, "Illegal XDP buffer encountered, expect throughput degradation\n");
-}
-EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_buffer);
-
 static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                                        int src_reg, int ctx_off,
                                        struct bpf_insn *insn_buf,
...
@@ -432,13 +432,6 @@ static struct ctl_table ipv4_table[] = {
                .extra1         = &tcp_adv_win_scale_min,
                .extra2         = &tcp_adv_win_scale_max,
        },
-       {
-               .procname       = "tcp_tw_reuse",
-               .data           = &sysctl_tcp_tw_reuse,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_frto",
                .data           = &sysctl_tcp_frto,
@@ -960,6 +953,13 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "tcp_tw_reuse",
+               .data           = &init_net.ipv4.sysctl_tcp_tw_reuse,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        {
                .procname       = "fib_multipath_use_neigh",
...
@@ -84,7 +84,6 @@
 #include <crypto/hash.h>
 #include <linux/scatterlist.h>
-int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
 #ifdef CONFIG_TCP_MD5SIG
@@ -120,7 +119,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
           and use initial timestamp retrieved from peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
-           (!twp || (sysctl_tcp_tw_reuse &&
+           (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
                             get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
@@ -2456,6 +2455,7 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_orphan_retries = 0;
        net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
        net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
+       net->ipv4.sysctl_tcp_tw_reuse = 0;
        return 0;
 fail:
...
@@ -606,7 +606,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        rcu_assign_pointer(flow->sf_acts, acts);
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;
-       packet->protocol = flow->key.eth.type;
        rcu_read_lock();
        dp = get_dp_rcu(net, ovs_header->dp_ifindex);
...
@@ -312,7 +312,8 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
  * Returns 0 if it encounters a non-vlan or incomplete packet.
  * Returns 1 after successfully parsing vlan tag.
  */
-static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
+static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
+                          bool untag_vlan)
 {
        struct vlan_head *vh = (struct vlan_head *)skb->data;
@@ -330,7 +331,20 @@ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
        key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
        key_vh->tpid = vh->tpid;
+       if (unlikely(untag_vlan)) {
+               int offset = skb->data - skb_mac_header(skb);
+               u16 tci;
+               int err;
+
+               __skb_push(skb, offset);
+               err = __skb_vlan_pop(skb, &tci);
+               __skb_pull(skb, offset);
+               if (err)
+                       return err;
+
+               __vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
+       } else {
                __skb_pull(skb, sizeof(struct vlan_head));
+       }
        return 1;
 }
@@ -351,13 +365,13 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
                key->eth.vlan.tpid = skb->vlan_proto;
        } else {
                /* Parse outer vlan tag in the non-accelerated case. */
-               res = parse_vlan_tag(skb, &key->eth.vlan);
+               res = parse_vlan_tag(skb, &key->eth.vlan, true);
                if (res <= 0)
                        return res;
        }
        /* Parse inner vlan tag. */
-       res = parse_vlan_tag(skb, &key->eth.cvlan);
+       res = parse_vlan_tag(skb, &key->eth.cvlan, false);
        if (res <= 0)
                return res;
@@ -800,29 +814,15 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
        if (err)
                return err;
-       if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
-               /* key_extract assumes that skb->protocol is set-up for
-                * layer 3 packets which is the case for other callers,
-                * in particular packets received from the network stack.
-                * Here the correct value can be set from the metadata
-                * extracted above.
-                */
-               skb->protocol = key->eth.type;
-       } else {
-               struct ethhdr *eth;
-
-               skb_reset_mac_header(skb);
-               eth = eth_hdr(skb);
-
-               /* Normally, setting the skb 'protocol' field would be
-                * handled by a call to eth_type_trans(), but it assumes
-                * there's a sending device, which we may not have.
-                */
-               if (eth_proto_is_802_3(eth->h_proto))
-                       skb->protocol = eth->h_proto;
-               else
-                       skb->protocol = htons(ETH_P_802_2);
-       }
+       /* key_extract assumes that skb->protocol is set up for
+        * layer 3 packets, which is the case for other callers,
+        * in particular packets received from the network stack.
+        * Here the correct value can be set from the metadata
+        * extracted above.
+        * For L2 packets the key eth type would be zero; skb protocol
+        * would be set to the correct value later during key extraction.
+        */
+       skb->protocol = key->eth.type;
        return key_extract(skb, key);
 }
...
@@ -148,13 +148,15 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
        unsigned long cl;
        unsigned long fh;
        int err;
-       int tp_created = 0;
+       int tp_created;
        if ((n->nlmsg_type != RTM_GETTFILTER) &&
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 replay:
+       tp_created = 0;
+
        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
        if (err < 0)
                return err;
...
@@ -441,7 +441,8 @@ static void __tipc_shutdown(struct socket *sock, int error)
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (TIPC_SKB_CB(skb)->bytes_read) {
                        kfree_skb(skb);
-               } else {
+                       continue;
+               }
                if (!tipc_sk_type_connectionless(sk) &&
                    sk->sk_state != TIPC_DISCONNECTING) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
@@ -449,7 +450,10 @@ static void __tipc_shutdown(struct socket *sock, int error)
                }
                tipc_sk_respond(sk, skb, error);
        }
-       }
+
+       if (tipc_sk_type_connectionless(sk))
+               return;
+
        if (sk->sk_state != TIPC_DISCONNECTING) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
@@ -457,11 +461,9 @@ static void __tipc_shutdown(struct socket *sock, int error)
                                      tsk->portid, error);
                if (skb)
                        tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
-               if (!tipc_sk_type_connectionless(sk)) {
                tipc_node_remove_conn(net, dnode, tsk->portid);
                tipc_set_sk_state(sk, TIPC_DISCONNECTING);
        }
-       }
 }

 /**
...