Commit 0ffb01d9 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "A quick set of fixes, some to deal with fallout from yesterday's
  net-next merge.

   1) Fix compilation of bnx2x driver with CONFIG_BNX2X_SRIOV disabled,
      from Dmitry Kravkov.

   2) Fix a bnx2x regression caused by one of Dave Jones's mistaken
      braces changes, from Eilon Greenstein.

   3) Add some protective filtering in the netlink tap code, from Daniel
      Borkmann.

   4) Fix TCP congestion window growth regression after timeouts, from
      Yuchung Cheng.

   5) Correctly adjust TCP's rcv_ssthresh for out of order packets, from
      Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  tcp: properly increase rcv_ssthresh for ofo packets
  net: add documentation for BQL helpers
  mlx5: remove unused MLX5_DEBUG param in Kconfig
  bnx2x: Restore a call to config_init
  bnx2x: fix broken compilation with CONFIG_BNX2X_SRIOV is not set
  tcp: fix no cwnd growth after timeout
  net: netlink: filter particular protocols from analyzers
parents 7b4022fa 4e4f1fc2
@@ -6501,13 +6501,10 @@ static int bnx2x_link_initialize(struct link_params *params,
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
 		if (vars->line_speed == SPEED_AUTO_NEG &&
 		    (CHIP_IS_E1x(bp) ||
-		     CHIP_IS_E2(bp))) {
+		     CHIP_IS_E2(bp)))
 			bnx2x_set_parallel_detection(phy, params);
-			if (params->phy[INT_PHY].config_init)
-				params->phy[INT_PHY].config_init(phy,
-								 params,
-								 vars);
-		}
+		if (params->phy[INT_PHY].config_init)
+			params->phy[INT_PHY].config_init(phy, params, vars);
 	}

 	/* Init external phy*/
...
@@ -816,6 +816,8 @@ static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp
 static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
 static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
 					u8 vf_qid, bool set) {return 0; }
+static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+					struct bnx2x_config_rss_params *params) {return 0; }
 static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
 static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
 static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
...
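Note: the two lines added above are the !CONFIG_BNX2X_SRIOV stub for bnx2x_vfpf_config_rss(); the SR-IOV code had started calling this helper, so builds with SR-IOV disabled broke. A rough sketch of the stub pattern this header uses is shown below; the declaration under the #ifdef is paraphrased for illustration and is not part of this patch.

/* Sketch of the stub pattern in bnx2x_vfpf.h: real implementations exist
 * only when SR-IOV support is compiled in; otherwise callers get an
 * inline no-op so they need no #ifdefs of their own.
 */
#ifdef CONFIG_BNX2X_SRIOV
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
			  struct bnx2x_config_rss_params *params);
#else
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
			struct bnx2x_config_rss_params *params) {return 0; }
#endif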
@@ -6,13 +6,3 @@ config MLX5_CORE
 	tristate
 	depends on PCI && X86
 	default n
-
-config MLX5_DEBUG
-	bool "Verbose debugging output" if (MLX5_CORE && EXPERT)
-	depends on MLX5_CORE
-	default y
-	---help---
-	  This option causes debugging code to be compiled into the
-	  mlx5_core driver. The output can be turned on via the
-	  debug_mask module parameter (which can also be set after
-	  the driver is loaded through sysfs).
@@ -2101,6 +2101,15 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 #endif
 }

+/**
+ *	netdev_sent_queue - report the number of bytes queued to hardware
+ *	@dev: network device
+ *	@bytes: number of bytes queued to the hardware device queue
+ *
+ *	Report the number of bytes queued for sending/completion to the network
+ *	device hardware queue. @bytes should be a good approximation and should
+ *	exactly match netdev_completed_queue() @bytes
+ */
 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
 {
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
@@ -2130,6 +2139,16 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 #endif
 }

+/**
+ *	netdev_completed_queue - report bytes and packets completed by device
+ *	@dev: network device
+ *	@pkts: actual number of packets sent over the medium
+ *	@bytes: actual number of bytes sent over the medium
+ *
+ *	Report the number of bytes and packets transmitted by the network device
+ *	hardware queue over the physical medium, @bytes must exactly match the
+ *	@bytes amount passed to netdev_sent_queue()
+ */
 static inline void netdev_completed_queue(struct net_device *dev,
 					  unsigned int pkts, unsigned int bytes)
 {
@@ -2144,6 +2163,13 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
 #endif
 }

+/**
+ *	netdev_reset_queue - reset the packets and bytes count of a network device
+ *	@dev_queue: network device
+ *
+ *	Reset the bytes and packet count of a network device and clear the
+ *	software flow control OFF bit for this network device
+ */
 static inline void netdev_reset_queue(struct net_device *dev_queue)
 {
 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
...
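As a rough illustration of how a driver pairs the BQL helpers documented above, a minimal single-queue TX path might look like the sketch below. foo_start_xmit() and foo_tx_complete() are hypothetical names; only netdev_sent_queue() and netdev_completed_queue() are the real helpers from the hunks above.

/* Hypothetical single-queue driver TX path pairing the BQL helpers.
 * The foo_* names are made up for illustration only.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... post the skb to the hardware TX ring here ... */

	/* Tell BQL how many bytes were handed to the hardware. */
	netdev_sent_queue(dev, len);
	return NETDEV_TX_OK;
}

/* TX completion (e.g. from the device interrupt handler): report exactly
 * the byte count previously passed to netdev_sent_queue() so the two
 * counters stay balanced, as the comments above require.
 */
static void foo_tx_complete(struct net_device *dev,
			    unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);
}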
@@ -3162,16 +3162,14 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	/* If reordering is high then always grow cwnd whenever data is
 	 * delivered regardless of its ordering. Otherwise stay conservative
-	 * and only grow cwnd on in-order delivery in Open state, and retain
-	 * cwnd in Disordered state (RFC5681). A stretched ACK with
+	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
 	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
 		return flag & FLAG_FORWARD_PROGRESS;

-	return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
-		flag & FLAG_DATA_ACKED;
+	return flag & FLAG_DATA_ACKED;
 }

 /* Check that window update is acceptable.
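For readability, here is how the helper reads after the hunk above (consolidated from the +/- lines; not an additional change):

static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
	/* With high reordering, grow cwnd whenever data is delivered,
	 * regardless of ordering; otherwise only grow on in-order
	 * delivery (RFC5681). tcp_fastretrans_alert() may still reduce
	 * cwnd later based on more state.
	 */
	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
		return flag & FLAG_FORWARD_PROGRESS;

	return flag & FLAG_DATA_ACKED;
}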
@@ -4141,6 +4139,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
 			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 		} else {
+			tcp_grow_window(sk, skb);
 			kfree_skb_partial(skb, fragstolen);
 			skb = NULL;
 		}
@@ -4216,8 +4215,10 @@
 	if (tcp_is_sack(tp))
 		tcp_sack_new_ofo_skb(sk, seq, end_seq);
 end:
-	if (skb)
+	if (skb) {
+		tcp_grow_window(sk, skb);
 		skb_set_owner_r(skb, sk);
+	}
 }

 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
...
@@ -168,16 +168,43 @@ int netlink_remove_tap(struct netlink_tap *nt)
 }
 EXPORT_SYMBOL_GPL(netlink_remove_tap);

+static bool netlink_filter_tap(const struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	bool pass = false;
+
+	/* We take the more conservative approach and
+	 * whitelist socket protocols that may pass.
+	 */
+	switch (sk->sk_protocol) {
+	case NETLINK_ROUTE:
+	case NETLINK_USERSOCK:
+	case NETLINK_SOCK_DIAG:
+	case NETLINK_NFLOG:
+	case NETLINK_XFRM:
+	case NETLINK_FIB_LOOKUP:
+	case NETLINK_NETFILTER:
+	case NETLINK_GENERIC:
+		pass = true;
+		break;
+	}
+
+	return pass;
+}
+
 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 				     struct net_device *dev)
 {
 	struct sk_buff *nskb;
+	struct sock *sk = skb->sk;
 	int ret = -ENOMEM;

 	dev_hold(dev);
 	nskb = skb_clone(skb, GFP_ATOMIC);
 	if (nskb) {
 		nskb->dev = dev;
+		nskb->protocol = htons((u16) sk->sk_protocol);
 		ret = dev_queue_xmit(nskb);
 		if (unlikely(ret > 0))
 			ret = net_xmit_errno(ret);
@@ -192,6 +219,9 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
 	int ret;
 	struct netlink_tap *tmp;

+	if (!netlink_filter_tap(skb))
+		return;
+
 	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 		if (unlikely(ret))
...
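For context, analyzers attach to the tap list that this filter guards via netlink_add_tap()/netlink_remove_tap(). A minimal sketch of a tap consumer is shown below; my_nlmon_dev is a hypothetical net_device assumed to be registered elsewhere by the module (the in-tree nlmon driver does this for real).

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

/* Assumed to be allocated and registered elsewhere by this module.
 * Once the tap is added, only skbs whose sk_protocol passes
 * netlink_filter_tap() above are cloned and queued to this device.
 */
static struct net_device *my_nlmon_dev;

static struct netlink_tap my_tap = {
	.module = THIS_MODULE,
};

static int __init my_tap_init(void)
{
	my_tap.dev = my_nlmon_dev;
	return netlink_add_tap(&my_tap);
}

static void __exit my_tap_exit(void)
{
	netlink_remove_tap(&my_tap);
}

module_init(my_tap_init);
module_exit(my_tap_exit);
MODULE_LICENSE("GPL");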