Commit a84a8ab9 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix divide by zero in mlx5, from Talat Batheesh.

 2) Guard against invalid GSO packets coming from untrusted guests and
    arriving in qdisc_pkt_len_init(), from Eric Dumazet.

 3) Similarly add such protection to the various protocol GSO handlers,
    from Willem de Bruijn. (A sketch of the shared guard follows this
    list.)

 4) Fix regression added to IGMP source address checking for IGMPv3
    reports, from Felix Fietkau.
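
A minimal sketch of the guard that (2) and (3) add, with SKB_GSO_TCPV4
standing in for whichever gso_type bit a given handler owns (hypothetical
handler, not code from the series):

    static struct sk_buff *example_gso_segment(struct sk_buff *skb,
                                               netdev_features_t features)
    {
            /* gso_type is sender-controlled when SKB_GSO_DODGY is set,
             * so refuse to segment unless it names this handler's own
             * protocol.
             */
            if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
                    return ERR_PTR(-EINVAL);

            return NULL;    /* a real handler would build the segment list */
    }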

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  tls: Correct length of scatterlist in tls_sw_sendpage
  be2net: restore properly promisc mode after queues reconfiguration
  net: igmp: fix source address check for IGMPv3 reports
  gso: validate gso_type in GSO handlers
  net: qdisc_pkt_len_init() should be more robust
  ibmvnic: Allocate and request vpd in init_resources
  ibmvnic: Revert to previous mtu when unsupported value requested
  ibmvnic: Modify buffer size and number of queues on failover
  rds: tcp: compute m_ack_seq as offset from ->write_seq
  usbnet: silence an unnecessary warning
  cxgb4: fix endianness for vlan value in cxgb4_tc_flower
  cxgb4: set filter type to 1 for ETH_P_IPV6
  net/mlx5e: Fix fixpoint divide exception in mlx5e_am_stats_compare
parents 19952667 7a8c4dd9
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
 			ethtype_mask = 0;
 		}
 
+		if (ethtype_key == ETH_P_IPV6)
+			fs->type = 1;
+
 		fs->val.ethtype = ethtype_key;
 		fs->mask.ethtype = ethtype_mask;
 		fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
 				       VLAN_PRIO_SHIFT);
 		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
 				 VLAN_PRIO_SHIFT);
-		fs->val.ivlan = cpu_to_be16(vlan_tci);
-		fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
+		fs->val.ivlan = vlan_tci;
+		fs->mask.ivlan = vlan_tci_mask;
 
 		/* Chelsio adapters use ivlan_vld bit to match vlan packets
 		 * as 802.1Q. Also, when vlan tag is present in packets,
...
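
Note on the fix above: fs->val.ivlan and fs->mask.ivlan now stay in host
byte order; the cpu_to_be16() swaps were the bug, since any endianness
conversion belongs later in the filter path. For reference, the 16-bit
VLAN TCI being assembled packs its fields like this (VLAN_PRIO_SHIFT is
13 in <linux/if_vlan.h>):

    /* illustration only: priority in bits 15:13, VLAN ID in bits 11:0 */
    u16 vlan_tci = vlan_id | (vlan_priority << VLAN_PRIO_SHIFT);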
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
 
 	be_schedule_worker(adapter);
 
+	/*
+	 * The IF was destroyed and re-created. We need to clear
+	 * all promiscuous flags valid for the destroyed IF.
+	 * Without this promisc mode is not restored during
+	 * be_open() because the driver thinks that it is
+	 * already enabled in HW.
+	 */
+	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+
 	if (netif_running(netdev))
 		status = be_open(netdev);
 
...
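
The comment in the hunk explains the failure mode; as a sketch of the
general pattern (hypothetical names, not be2net code), a driver that
trusts a cached flag silently skips reprogramming hardware that was
reset behind its back:

    static void example_set_promisc(struct example_adapter *adapter)
    {
            /* Stale cache: after the IF is destroyed and re-created,
             * the flag still reads "already on" and the HW write is
             * skipped.
             */
            if (adapter->if_flags & EXAMPLE_IF_FLAGS_PROMISCUOUS)
                    return;

            example_program_promisc_hw(adapter);
            adapter->if_flags |= EXAMPLE_IF_FLAGS_PROMISCUOUS;
    }

Clearing BE_IF_FLAGS_ALL_PROMISCUOUS before be_open() invalidates that
cache so the mode gets programmed again.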
@@ -410,6 +410,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 	struct ibmvnic_rx_pool *rx_pool;
 	int rx_scrqs;
 	int i, j, rc;
+	u64 *size_array;
+
+	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
 
 	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 	for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +421,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 
 		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
 
-		rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+			free_long_term_buff(adapter, &rx_pool->long_term_buff);
+			rx_pool->buff_size = be64_to_cpu(size_array[i]);
+			alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+					     rx_pool->size *
+					     rx_pool->buff_size);
+		} else {
+			rc = reset_long_term_buff(adapter,
+						  &rx_pool->long_term_buff);
+		}
+
 		if (rc)
 			return rc;
@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 static void release_rx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_rx_pool *rx_pool;
-	int rx_scrqs;
 	int i, j;
 
 	if (!adapter->rx_pool)
 		return;
 
-	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-	for (i = 0; i < rx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 		rx_pool = &adapter->rx_pool[i];
 
 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 
 	kfree(adapter->rx_pool);
 	adapter->rx_pool = NULL;
+	adapter->num_active_rx_pools = 0;
 }
 
 static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
 		return -1;
 	}
 
+	adapter->num_active_rx_pools = 0;
+
 	for (i = 0; i < rxadd_subcrqs; i++) {
 		rx_pool = &adapter->rx_pool[i];
@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
 		rx_pool->next_free = 0;
 	}
 
+	adapter->num_active_rx_pools = rxadd_subcrqs;
+
 	return 0;
 }
@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
 static void release_tx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_tx_pool *tx_pool;
-	int i, tx_scrqs;
+	int i;
 
 	if (!adapter->tx_pool)
 		return;
 
-	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
-	for (i = 0; i < tx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_tx_pools; i++) {
 		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
 		tx_pool = &adapter->tx_pool[i];
 		kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
 
 	kfree(adapter->tx_pool);
 	adapter->tx_pool = NULL;
+	adapter->num_active_tx_pools = 0;
 }
 
 static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
 	if (!adapter->tx_pool)
 		return -1;
 
+	adapter->num_active_tx_pools = 0;
+
 	for (i = 0; i < tx_subcrqs; i++) {
 		tx_pool = &adapter->tx_pool[i];
@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
 		tx_pool->producer_index = 0;
 	}
 
+	adapter->num_active_tx_pools = tx_subcrqs;
+
 	return 0;
 }
@@ -860,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 	if (adapter->vpd->buff)
 		len = adapter->vpd->len;
 
-	reinit_completion(&adapter->fw_done);
+	init_completion(&adapter->fw_done);
 	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
 	crq.get_vpd_size.cmd = GET_VPD_SIZE;
 	ibmvnic_send_crq(adapter, &crq);
@@ -922,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
 	if (!adapter->vpd)
 		return -ENOMEM;
 
+	/* Vital Product Data (VPD) */
+	rc = ibmvnic_get_vpd(adapter);
+	if (rc) {
+		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
+		return rc;
+	}
+
 	adapter->map_id = 1;
 	adapter->napi = kcalloc(adapter->req_rx_queues,
 				sizeof(struct napi_struct), GFP_KERNEL);
@@ -995,7 +1023,7 @@ static int __ibmvnic_open(struct net_device *netdev)
 static int ibmvnic_open(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	int rc, vpd;
+	int rc;
 
 	mutex_lock(&adapter->reset_lock);
@@ -1018,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
 	rc = __ibmvnic_open(netdev);
 	netif_carrier_on(netdev);
 
-	/* Vital Product Data (VPD) */
-	vpd = ibmvnic_get_vpd(adapter);
-	if (vpd)
-		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
-
 	mutex_unlock(&adapter->reset_lock);
 
 	return rc;
@@ -1548,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 static int do_reset(struct ibmvnic_adapter *adapter,
 		    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
+	u64 old_num_rx_queues, old_num_tx_queues;
 	struct net_device *netdev = adapter->netdev;
 	int i, rc;
@@ -1557,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	netif_carrier_off(netdev);
 	adapter->reset_reason = rwi->reset_reason;
 
+	old_num_rx_queues = adapter->req_rx_queues;
+	old_num_tx_queues = adapter->req_tx_queues;
+
 	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
 		rc = ibmvnic_reenable_crq_queue(adapter);
 		if (rc)
@@ -1601,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 			rc = init_resources(adapter);
 			if (rc)
 				return rc;
+		} else if (adapter->req_rx_queues != old_num_rx_queues ||
+			   adapter->req_tx_queues != old_num_tx_queues) {
+			release_rx_pools(adapter);
+			release_tx_pools(adapter);
+			init_rx_pools(netdev);
+			init_tx_pools(netdev);
 		} else {
 			rc = reset_tx_pools(adapter);
 			if (rc)
@@ -3592,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
 			 *req_value,
 			 (long int)be64_to_cpu(crq->request_capability_rsp.
 					       number), name);
-		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
+
+		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
+		    REQ_MTU) {
+			pr_err("mtu of %llu is not supported. Reverting.\n",
+			       *req_value);
+			*req_value = adapter->fallback.mtu;
+		} else {
+			*req_value =
+				be64_to_cpu(crq->request_capability_rsp.number);
+		}
+
 		ibmvnic_send_req_caps(adapter, 1);
 		return;
 	default:
...
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
 	u8 map_id;
+	u64 num_active_rx_pools;
+	u64 num_active_tx_pools;
 
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
...
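
Taken together, the ibmvnic hunks make teardown trust only what was
actually allocated: num_active_rx_pools/num_active_tx_pools are zeroed
before each allocation loop, set once the loop completes, and consulted
on release instead of re-reading a login response that a failover may
have refreshed to different queue counts. A condensed sketch of the
invariant, with init_one_rx_pool() as a hypothetical stand-in for the
per-pool setup in init_rx_pools():

    adapter->num_active_rx_pools = 0;             /* nothing allocated yet */
    for (i = 0; i < rxadd_subcrqs; i++)
            init_one_rx_pool(adapter, i);         /* hypothetical helper */
    adapter->num_active_rx_pools = rxadd_subcrqs; /* now safe to release */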
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
 		return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
 						   MLX5E_AM_STATS_WORSE;
 
+	if (!prev->ppms)
+		return curr->ppms ? MLX5E_AM_STATS_BETTER :
+				    MLX5E_AM_STATS_SAME;
+
 	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
 		return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
 						   MLX5E_AM_STATS_WORSE;
 
+	if (!prev->epms)
+		return MLX5E_AM_STATS_SAME;
+
 	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
 		return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
...
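
The two new guards exist because the comparison macro divides by its
reference argument; in en_rx_am.c it is defined along these lines
(hedged reconstruction, not part of this diff):

    #define IS_SIGNIFICANT_DIFF(val, ref) \
            (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% */

With prev->ppms or prev->epms equal to zero, the division faults (the
reported fixpoint divide exception), so each use is now preceded by an
explicit zero check.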
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 void usbnet_defer_kevent (struct usbnet *dev, int work)
 {
 	set_bit (work, &dev->flags);
-	if (!schedule_work (&dev->kevent)) {
-		if (net_ratelimit())
-			netdev_err(dev->net, "kevent %d may have been dropped\n", work);
-	} else {
+	if (!schedule_work (&dev->kevent))
+		netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+	else
 		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
-	}
 }
 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
...
@@ -3151,10 +3151,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
 
 		/* + transport layer */
-		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-			hdr_len += tcp_hdrlen(skb);
-		else
-			hdr_len += sizeof(struct udphdr);
+		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+			const struct tcphdr *th;
+			struct tcphdr _tcphdr;
+
+			th = skb_header_pointer(skb, skb_transport_offset(skb),
+						sizeof(_tcphdr), &_tcphdr);
+			if (likely(th))
+				hdr_len += __tcp_hdrlen(th);
+		} else {
+			struct udphdr _udphdr;
+
+			if (skb_header_pointer(skb, skb_transport_offset(skb),
+					       sizeof(_udphdr), &_udphdr))
+				hdr_len += sizeof(struct udphdr);
+		}
 
 		if (shinfo->gso_type & SKB_GSO_DODGY)
 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
...
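
The helper doing the work above is skb_header_pointer(): it returns a
pointer straight into the skb's linear area when the requested bytes are
already there, copies them into the caller's stack buffer when they sit
in frag pages, and returns NULL when the packet is shorter than
requested, which is exactly the malformed case an untrusted guest can
produce. Usage shape (illustrative fragment):

    struct udphdr _uh;
    const struct udphdr *uh;

    uh = skb_header_pointer(skb, skb_transport_offset(skb),
                            sizeof(_uh), &_uh);
    if (!uh)
            return;         /* truncated header: don't grow hdr_len */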
@@ -122,6 +122,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
 	if (!xo)
 		goto out;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+		goto out;
+
 	seq = xo->seq.low;
 
 	x = skb->sp->xvec[skb->sp->len - 1];
...
@@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
 		return htonl(INADDR_ANY);
 
 	for_ifa(in_dev) {
-		if (inet_ifa_match(fl4->saddr, ifa))
+		if (fl4->saddr == ifa->ifa_local)
 			return fl4->saddr;
 	} endfor_ifa(in_dev);
...
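
The regression came from inet_ifa_match() being a masked, same-subnet
test; roughly, from <linux/inetdevice.h>:

    static inline bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
    {
            return !((addr ^ ifa->ifa_address) & ifa->ifa_mask);
    }

so any source address inside the interface's subnet was accepted as
local. Comparing against ifa->ifa_local instead requires an exact match
on a configured address.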
@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
 					netdev_features_t features)
 {
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
+		return ERR_PTR(-EINVAL);
+
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		return ERR_PTR(-EINVAL);
...
@@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 		goto out;
 	}
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+		goto out;
+
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 		goto out;
...
@@ -149,6 +149,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
 	if (!xo)
 		goto out;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+		goto out;
+
 	seq = xo->seq.low;
 
 	x = skb->sp->xvec[skb->sp->len - 1];
...
@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
 {
 	struct tcphdr *th;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
+		return ERR_PTR(-EINVAL);
+
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		return ERR_PTR(-EINVAL);
...
@@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 	const struct ipv6hdr *ipv6h;
 	struct udphdr *uh;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+		goto out;
+
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 		goto out;
...
@@ -90,9 +90,10 @@ void rds_tcp_nonagle(struct socket *sock)
 			      sizeof(val));
 }
 
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
 {
-	return tcp_sk(tc->t_sock->sk)->snd_nxt;
+	/* seq# of the last byte of data in tcp send buffer */
+	return tcp_sk(tc->t_sock->sk)->write_seq;
 }
 
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
...
@@ -54,7 +54,7 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_restore_callbacks(struct socket *sock,
 			       struct rds_tcp_connection *tc);
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
 extern struct rds_transport rds_tcp_transport;
...
@@ -86,7 +86,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 		 * m_ack_seq is set to the sequence number of the last byte of
 		 * header and data.  see rds_tcp_is_acked().
 		 */
-		tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
+		tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
 		rm->m_ack_seq = tc->t_last_sent_nxt +
 				sizeof(struct rds_header) +
 				be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
@@ -98,7 +98,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
 
 		rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
-			 rm, rds_tcp_snd_nxt(tc),
+			 rm, rds_tcp_write_seq(tc),
 			 (unsigned long long)rm->m_ack_seq);
 	}
...
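
Why write_seq rather than snd_nxt: write_seq is the sequence number just
past the last byte queued into the socket's send buffer, while snd_nxt
only advances as segments go out on the wire, so it lags whenever data
is queued but unsent. A worked example under that assumption:

    /* write_seq == 1000 when a message with a 48-byte payload is queued,
     * so its final byte occupies sequence number:
     */
    u32 last_byte = 1000 + sizeof(struct rds_header) + 48 - 1;

Basing m_ack_seq on a lagging snd_nxt put the ack threshold before the
message's true last byte, so rds_tcp_is_acked() could report delivery
prematurely.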
@@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct sctphdr *sh;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
+		goto out;
+
 	sh = sctp_hdr(skb);
 	if (!pskb_may_pull(skb, sizeof(*sh)))
 		goto out;
...
@@ -577,6 +577,8 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 		get_page(page);
 		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
 		sg_set_page(sg, page, copy, offset);
+		sg_unmark_end(sg);
+
 		ctx->sg_plaintext_num_elem++;
 
 		sk_mem_charge(sk, copy);
...
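
Background for the one-liner: a scatterlist's termination is a flag bit
carried on an entry itself (see sg_mark_end()/sg_unmark_end() in
<linux/scatterlist.h>), not a separate element count. Reusing a slot
that previously terminated the list therefore needs the mark cleared, or
walkers stop early and the record's scatterlist comes out short:

    sg_set_page(sg, page, copy, offset);    /* fill the reused entry */
    sg_unmark_end(sg);                      /* it no longer ends the list */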