Commit 5c509549 authored by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-next-2.6

parents 4d586b82 def57687
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -343,32 +343,6 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
 	return res;
 }
 
-/**
- * bond_has_challenged_slaves
- * @bond: the bond we're working on
- *
- * Searches the slave list. Returns 1 if a vlan challenged slave
- * was found, 0 otherwise.
- *
- * Assumes bond->lock is held.
- */
-static int bond_has_challenged_slaves(struct bonding *bond)
-{
-	struct slave *slave;
-	int i;
-
-	bond_for_each_slave(bond, slave, i) {
-		if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
-			pr_debug("found VLAN challenged slave - %s\n",
-				 slave->dev->name);
-			return 1;
-		}
-	}
-
-	pr_debug("no VLAN challenged slaves found\n");
-	return 0;
-}
-
 /**
  * bond_next_vlan - safely skip to the next item in the vlans list.
  * @bond: the bond we're working on
@@ -1406,52 +1380,68 @@ static int bond_sethwaddr(struct net_device *bond_dev,
 	return 0;
 }
 
-#define BOND_VLAN_FEATURES \
-	(NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \
-	 NETIF_F_HW_VLAN_FILTER)
-
-/*
- * Compute the common dev->feature set available to all slaves. Some
- * feature bits are managed elsewhere, so preserve those feature bits
- * on the master device.
- */
-static int bond_compute_features(struct bonding *bond)
+static u32 bond_fix_features(struct net_device *dev, u32 features)
 {
 	struct slave *slave;
-	struct net_device *bond_dev = bond->dev;
-	u32 features = bond_dev->features;
-	u32 vlan_features = 0;
-	unsigned short max_hard_header_len = max((u16)ETH_HLEN,
-						 bond_dev->hard_header_len);
+	struct bonding *bond = netdev_priv(dev);
+	u32 mask;
 	int i;
 
-	features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
-	features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_NOCACHE_COPY;
+	read_lock(&bond->lock);
 
-	if (!bond->first_slave)
-		goto done;
+	if (!bond->first_slave) {
+		/* Disable adding VLANs to empty bond. But why? --mq */
+		features |= NETIF_F_VLAN_CHALLENGED;
+		goto out;
+	}
 
+	mask = features;
 	features &= ~NETIF_F_ONE_FOR_ALL;
+	features |= NETIF_F_ALL_FOR_ALL;
 
-	vlan_features = bond->first_slave->dev->vlan_features;
 	bond_for_each_slave(bond, slave, i) {
 		features = netdev_increment_features(features,
 						     slave->dev->features,
-						     NETIF_F_ONE_FOR_ALL);
+						     mask);
+	}
+
+out:
+	read_unlock(&bond->lock);
+	return features;
+}
+
+#define BOND_VLAN_FEATURES	(NETIF_F_ALL_TX_OFFLOADS | \
+				 NETIF_F_SOFT_FEATURES | \
+				 NETIF_F_LRO)
+
+static void bond_compute_features(struct bonding *bond)
+{
+	struct slave *slave;
+	struct net_device *bond_dev = bond->dev;
+	u32 vlan_features = BOND_VLAN_FEATURES;
+	unsigned short max_hard_header_len = ETH_HLEN;
+	int i;
+
+	read_lock(&bond->lock);
+
+	if (!bond->first_slave)
+		goto done;
+
+	bond_for_each_slave(bond, slave, i) {
 		vlan_features = netdev_increment_features(vlan_features,
-							  slave->dev->vlan_features,
-							  NETIF_F_ONE_FOR_ALL);
+			slave->dev->vlan_features, BOND_VLAN_FEATURES);
+
 		if (slave->dev->hard_header_len > max_hard_header_len)
 			max_hard_header_len = slave->dev->hard_header_len;
 	}
 
 done:
-	features |= (bond_dev->features & BOND_VLAN_FEATURES);
-	bond_dev->features = netdev_fix_features(bond_dev, features);
-	bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
+	bond_dev->vlan_features = vlan_features;
 	bond_dev->hard_header_len = max_hard_header_len;
 
-	return 0;
+	read_unlock(&bond->lock);
+
+	netdev_change_features(bond_dev);
 }
 
 static void bond_setup_by_slave(struct net_device *bond_dev,
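Note on the two halves of the rework above: bond_fix_features() is the new ndo_fix_features hook, which only masks the feature set the core proposes against what every slave supports, while bond_compute_features() now just recomputes vlan_features and hard_header_len and triggers netdev_change_features() so stacked VLAN devices re-evaluate themselves. The folding is done by netdev_increment_features(); as a rough stand-alone model of its semantics (an illustrative sketch, not the kernel implementation), bits in the one-for-all mask survive if any slave offers them, all other bits only if every slave does:

	#include <stdint.h>

	/* Illustrative model: fold one slave's feature word into the
	 * accumulated set; 'one_for_all' plays the role of the mask
	 * argument (e.g. NETIF_F_ONE_FOR_ALL). */
	static uint32_t fold_slave_features(uint32_t all, uint32_t one,
					    uint32_t one_for_all)
	{
		uint32_t or_bits  = (all | one) & one_for_all;  /* any slave */
		uint32_t and_bits = (all & one) & ~one_for_all; /* all slaves */

		return or_bits | and_bits;
	}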
@@ -1544,7 +1534,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	struct netdev_hw_addr *ha;
 	struct sockaddr addr;
 	int link_reporting;
-	int old_features = bond_dev->features;
 	int res = 0;
 
 	if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
@@ -1577,16 +1566,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 			pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
 				   bond_dev->name, slave_dev->name,
 				   slave_dev->name, bond_dev->name);
-			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 		}
 	} else {
 		pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
-		if (bond->slave_cnt == 0) {
-			/* First slave, and it is not VLAN challenged,
-			 * so remove the block of adding VLANs over the bond.
-			 */
-			bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
-		}
 	}
 
 	/*
@@ -1775,10 +1757,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	new_slave->delay = 0;
 	new_slave->link_failure_count = 0;
 
-	bond_compute_features(bond);
-
 	write_unlock_bh(&bond->lock);
 
+	bond_compute_features(bond);
+
 	read_lock(&bond->lock);
 
 	new_slave->last_arp_rx = jiffies;
@@ -1958,7 +1940,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	kfree(new_slave);
 
 err_undo_flags:
-	bond_dev->features = old_features;
+	bond_compute_features(bond);
 
 	return res;
 }
@@ -1979,6 +1961,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave, *oldcurrent;
 	struct sockaddr addr;
+	u32 old_features = bond_dev->features;
 
 	/* slave is not a slave or master is not master of this slave */
 	if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2039,8 +2022,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	/* release the slave from its bond */
 	bond_detach_slave(bond, slave);
 
-	bond_compute_features(bond);
-
 	if (bond->primary_slave == slave)
 		bond->primary_slave = NULL;
@@ -2084,24 +2065,23 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		 */
 		memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-		if (!bond->vlgrp) {
-			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-		} else {
+		if (bond->vlgrp) {
 			pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
 				   bond_dev->name, bond_dev->name);
 			pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
 				   bond_dev->name);
 		}
-	} else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
-		   !bond_has_challenged_slaves(bond)) {
-		pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
-			bond_dev->name, slave_dev->name, bond_dev->name);
-		bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
 	}
 
 	write_unlock_bh(&bond->lock);
 	unblock_netpoll_tx();
 
+	bond_compute_features(bond);
+	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
+	    (old_features & NETIF_F_VLAN_CHALLENGED))
+		pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+			bond_dev->name, slave_dev->name, bond_dev->name);
+
 	/* must do this from outside any spinlocks */
 	bond_destroy_slave_symlinks(bond_dev, slave_dev);
@@ -2219,8 +2199,6 @@ static int bond_release_all(struct net_device *bond_dev)
 			bond_alb_deinit_slave(bond, slave);
 		}
 
-		bond_compute_features(bond);
-
 		bond_destroy_slave_symlinks(bond_dev, slave_dev);
 		bond_del_vlans_from_slave(bond, slave_dev);
@@ -2269,9 +2247,7 @@ static int bond_release_all(struct net_device *bond_dev)
 	 */
 	memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-	if (!bond->vlgrp) {
-		bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-	} else {
+	if (bond->vlgrp) {
 		pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
 			   bond_dev->name, bond_dev->name);
 		pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2282,6 +2258,9 @@ static int bond_release_all(struct net_device *bond_dev)
 out:
 	write_unlock_bh(&bond->lock);
 
+	bond_compute_features(bond);
+
 	return 0;
 }
@@ -4337,11 +4316,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
 static const struct ethtool_ops bond_ethtool_ops = {
 	.get_drvinfo		= bond_ethtool_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
-	.get_tx_csum		= ethtool_op_get_tx_csum,
-	.get_sg			= ethtool_op_get_sg,
-	.get_tso		= ethtool_op_get_tso,
-	.get_ufo		= ethtool_op_get_ufo,
-	.get_flags		= ethtool_op_get_flags,
 };
 
 static const struct net_device_ops bond_netdev_ops = {
@@ -4367,6 +4341,7 @@ static const struct net_device_ops bond_netdev_ops = {
 #endif
 	.ndo_add_slave		= bond_enslave,
 	.ndo_del_slave		= bond_release,
+	.ndo_fix_features	= bond_fix_features,
 };
 
 static void bond_destructor(struct net_device *bond_dev)
@@ -4422,14 +4397,14 @@ static void bond_setup(struct net_device *bond_dev)
 	 * when there are slaves that are not hw accel
 	 * capable
 	 */
-	bond_dev->features |= (NETIF_F_HW_VLAN_TX |
-			       NETIF_F_HW_VLAN_RX |
-			       NETIF_F_HW_VLAN_FILTER);
-
-	/* By default, we enable GRO on bonding devices.
-	 * Actual support requires lowlevel drivers are GRO ready.
-	 */
-	bond_dev->features |= NETIF_F_GRO;
+	bond_dev->hw_features = BOND_VLAN_FEATURES |
+				NETIF_F_HW_VLAN_TX |
+				NETIF_F_HW_VLAN_RX |
+				NETIF_F_HW_VLAN_FILTER;
+
+	bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+	bond_dev->features |= bond_dev->hw_features;
 }
 
 static void bond_work_cancel_all(struct bonding *bond)
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3373,8 +3373,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 		tg3_phy_copper_begin(tp);
 
 		tg3_readphy(tp, MII_BMSR, &bmsr);
-		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
-		    (bmsr & BMSR_LSTATUS))
+		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
+		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
 			current_link_up = 1;
 	}
@@ -6309,6 +6309,42 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
+static void tg3_set_loopback(struct net_device *dev, u32 features)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (features & NETIF_F_LOOPBACK) {
+		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+			return;
+
+		/*
+		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
+		 * loopback mode if Half-Duplex mode was negotiated earlier.
+		 */
+		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+
+		/* Enable internal MAC loopback mode */
+		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		netif_carrier_on(tp->dev);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+	} else {
+		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+			return;
+
+		/* Disable internal MAC loopback mode */
+		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		/* Force link status check */
+		tg3_setup_phy(tp, 1);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+	}
+}
+
 static u32 tg3_fix_features(struct net_device *dev, u32 features)
 {
 	struct tg3 *tp = netdev_priv(dev);
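With NETIF_F_LOOPBACK exposed in hw_features (see the tg3_init_one hunk further down), internal MAC loopback becomes togglable from userspace like any other offload flag. A hypothetical smoke test, assuming the interface is called eth0 and has already been switched into loopback mode: send one raw frame and wait for it to come back, skipping the copies of our own transmissions that packet sockets also deliver. (Illustrative only; needs root.)

	#include <arpa/inet.h>
	#include <linux/if_packet.h>
	#include <net/ethernet.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		struct sockaddr_ll sll = { .sll_family = AF_PACKET };
		unsigned char frame[60] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		socklen_t len = sizeof(sll);

		frame[12] = 0x88; frame[13] = 0xb5;	/* local experimental ethertype */
		sll.sll_protocol = htons(ETH_P_ALL);
		sll.sll_ifindex = if_nametoindex("eth0");	/* assumed name */
		bind(fd, (struct sockaddr *)&sll, sizeof(sll));
		send(fd, frame, sizeof(frame), 0);

		while (recvfrom(fd, frame, sizeof(frame), 0,
				(struct sockaddr *)&sll, &len) > 0) {
			if (sll.sll_pkttype != PACKET_OUTGOING) {
				puts("frame came back: loopback works");
				break;
			}
		}
		close(fd);
		return 0;
	}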
@@ -6319,6 +6355,16 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
 	return features;
 }
 
+static int tg3_set_features(struct net_device *dev, u32 features)
+{
+	u32 changed = dev->features ^ features;
+
+	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+		tg3_set_loopback(dev, features);
+
+	return 0;
+}
+
 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
 			       int new_mtu)
 {
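tg3_set_features() deliberately acts only on bits that actually changed. For orientation, the feature rework that both the bonding and tg3 patches build on funnels every toggle through one core path; the following is a simplified model of that ordering (a sketch under assumptions, not the actual net/core/dev.c source): the user's request is clipped to hw_features, ndo_fix_features may veto combinations, and ndo_set_features programs the hardware before the new set becomes live.

	/* Sketch: how a feature toggle request is reconciled and applied.
	 * 'wanted' is what the user asked for (e.g. via ethtool -K). */
	static void update_features_model(struct net_device *dev, u32 wanted)
	{
		u32 features;

		/* only bits listed in hw_features may be changed by the user */
		features = (dev->features & ~dev->hw_features) |
			   (wanted & dev->hw_features);

		/* let the driver veto/adjust incompatible combinations */
		if (dev->netdev_ops->ndo_fix_features)
			features = dev->netdev_ops->ndo_fix_features(dev, features);

		if (features == dev->features)
			return;		/* nothing to do */

		/* driver programs the hardware, then the new set goes live */
		if (dev->netdev_ops->ndo_set_features)
			dev->netdev_ops->ndo_set_features(dev, features);
		dev->features = features;
	}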
@@ -9485,6 +9531,13 @@ static int tg3_open(struct net_device *dev)
 
 	netif_tx_start_all_queues(dev);
 
+	/*
+	 * Reset loopback feature if it was turned on while the device was down
+	 * make sure that it's installed properly now.
+	 */
+	if (dev->features & NETIF_F_LOOPBACK)
+		tg3_set_loopback(dev, dev->features);
+
 	return 0;
 
 err_out3:
@@ -15033,6 +15086,7 @@ static const struct net_device_ops tg3_netdev_ops = {
 	.ndo_tx_timeout		= tg3_tx_timeout,
 	.ndo_change_mtu		= tg3_change_mtu,
 	.ndo_fix_features	= tg3_fix_features,
+	.ndo_set_features	= tg3_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tg3_poll_controller,
 #endif
@@ -15049,6 +15103,7 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
 	.ndo_do_ioctl		= tg3_ioctl,
 	.ndo_tx_timeout		= tg3_tx_timeout,
 	.ndo_change_mtu		= tg3_change_mtu,
+	.ndo_set_features	= tg3_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tg3_poll_controller,
 #endif
@@ -15246,6 +15301,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	dev->features |= hw_features;
 	dev->vlan_features |= hw_features;
 
+	/*
+	 * Add loopback capability only for a subset of devices that support
+	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
+	 * loopback for the remaining devices.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+	    !tg3_flag(tp, CPMU_PRESENT))
+		/* Add the loopback capability */
+		dev->hw_features |= NETIF_F_LOOPBACK;
+
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
 	    !tg3_flag(tp, TSO_CAPABLE) &&
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,13 +54,13 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define	DRIVER_VERSION				"23-Apr-2011"
+#define	DRIVER_VERSION				"06-May-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
 
 /* Maximum NTB length */
-#define	CDC_NCM_NTB_MAX_SIZE_TX			(16384 + 4) /* bytes, must be short terminated */
+#define	CDC_NCM_NTB_MAX_SIZE_TX			16384	/* bytes */
 #define	CDC_NCM_NTB_MAX_SIZE_RX			16384	/* bytes */
 
 /* Minimum value for MaxDatagramSize, ch. 6.2.9 */
@@ -722,7 +722,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 	} else {
 		/* reset variables */
-		skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
+		skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
 		if (skb_out == NULL) {
 			if (skb != NULL) {
 				dev_kfree_skb_any(skb);
@@ -861,8 +861,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 	/* store last offset */
 	last_offset = offset;
 
-	if ((last_offset < ctx->tx_max) && ((last_offset %
-			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
+	if (((last_offset < ctx->tx_max) && ((last_offset %
+			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
+	    (((last_offset == ctx->tx_max) && ((ctx->tx_max %
+		le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
+			(ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
 		/* force short packet */
 		*(((u8 *)skb_out->data) + last_offset) = 0;
 		last_offset++;
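Both cdc_ncm changes serve the same end: a USB bulk transfer whose length is an exact multiple of the endpoint's wMaxPacketSize does not terminate on its own, so the sender must either issue a zero-length packet or grow the transfer by one byte so it ends in a short packet. The extra byte in alloc_skb(ctx->tx_max + 1) reserves room for that pad. A stand-alone sketch of the decision (illustrative; the names are not from the driver):

	#include <stdbool.h>
	#include <stdint.h>

	/* Does a bulk-OUT transfer of 'len' bytes need one pad byte so it
	 * ends in a short packet? Only if it is packet-aligned and there
	 * is still room below the device's maximum NTB size. */
	static bool needs_pad_byte(uint32_t len, uint16_t max_packet,
				   uint32_t ntb_out_max)
	{
		if (max_packet == 0 || len % max_packet != 0)
			return false;	/* already ends in a short packet */

		return len + 1 <= ntb_out_max;
	}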
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2884,6 +2884,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	int num_tx_queues;
 	int num_rx_queues;
 
+	if (!pci_msi_enabled())
+		enable_mq = 0;
+
 #ifdef VMXNET3_RSS
 	if (enable_mq)
 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.0.25.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.1.9.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01001900
+#define VMXNET3_DRIVER_VERSION_NUM      0x01010900
 
 #if defined(CONFIG_PCI_MSI)
 /* RSS only makes sense if MSI-X is supported. */
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1097,10 +1097,14 @@ struct net_device {
 
 #define NETIF_F_ALL_TSO		(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
 
+#define NETIF_F_ALL_FCOE	(NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+				 NETIF_F_FSO)
+
 #define NETIF_F_ALL_TX_OFFLOADS	(NETIF_F_ALL_CSUM | NETIF_F_SG | \
 				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
 				 NETIF_F_HIGHDMA | \
-				 NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)
+				 NETIF_F_SCTP_CSUM | \
+				 NETIF_F_ALL_FCOE)
 
 /*
  * If one device supports one of these features, then enable them
@@ -2561,6 +2565,7 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask);
 u32 netdev_fix_features(struct net_device *dev, u32 features);
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
+void netdev_change_features(struct net_device *dev);
 
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -104,6 +104,7 @@ struct garp_applicant {
 	struct sk_buff_head	queue;
 	struct sk_buff		*pdu;
 	struct rb_root		gid;
+	struct rcu_head		rcu;
 };
 
 struct garp_port {
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -665,9 +665,7 @@ struct ip_vs_dest {
 	struct dst_entry	*dst_cache;	/* destination cache entry */
 	u32			dst_rtos;	/* RT_TOS(tos) for dst */
 	u32			dst_cookie;
-#ifdef CONFIG_IP_VS_IPV6
-	struct in6_addr		dst_saddr;
-#endif
+	union nf_inet_addr	dst_saddr;
 
 	/* for virtual service */
 	struct ip_vs_service	*svc;		/* service it belongs to */
@@ -1253,7 +1251,8 @@ extern int ip_vs_tunnel_xmit
 extern int ip_vs_dr_xmit
 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern int ip_vs_icmp_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset);
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
+ int offset, unsigned int hooknum);
 extern void ip_vs_dst_reset(struct ip_vs_dest *dest);
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1267,7 +1266,7 @@ extern int ip_vs_dr_xmit_v6
 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern int ip_vs_icmp_xmit_v6
 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
- int offset);
+ int offset, unsigned int hooknum);
 #endif
 
 #ifdef CONFIG_SYSCTL
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -603,6 +603,11 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
 }
 EXPORT_SYMBOL_GPL(garp_init_applicant);
 
+static void garp_app_kfree_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct garp_applicant, rcu));
+}
+
 void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
 {
 	struct garp_port *port = rtnl_dereference(dev->garp_port);
@@ -611,7 +616,6 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	ASSERT_RTNL();
 
 	rcu_assign_pointer(port->applicants[appl->type], NULL);
-	synchronize_rcu();
 
 	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
 	 * all pending messages before the applicant is gone. */
@@ -621,7 +625,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	garp_queue_xmit(app);
 
 	dev_mc_del(dev, appl->proto.group_address);
-	kfree(app);
+	call_rcu(&app->rcu, garp_app_kfree_rcu);
 	garp_release_port(dev);
 }
 EXPORT_SYMBOL_GPL(garp_uninit_applicant);
@@ -639,3 +643,9 @@ void garp_unregister_application(struct garp_application *appl)
 	stp_proto_unregister(&appl->proto);
 }
 EXPORT_SYMBOL_GPL(garp_unregister_application);
+
+static void __exit garp_cleanup_module(void)
+{
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+}
+module_exit(garp_cleanup_module);
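Taken together, the garp.c hunks convert a blocking teardown (synchronize_rcu() before kfree()) into the asynchronous call_rcu() pattern, with rcu_barrier() at module exit to drain callbacks that would otherwise run after the module text is gone. A minimal sketch of that pattern (illustrative, not the garp code itself):

	#include <linux/module.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct obj {
		int payload;
		struct rcu_head rcu;
	};

	static void obj_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct obj, rcu));
	}

	static void obj_unpublish(struct obj __rcu **slot, struct obj *o)
	{
		rcu_assign_pointer(*slot, NULL);   /* readers stop finding it */
		call_rcu(&o->rcu, obj_free_rcu);   /* freed after a grace period */
	}

	static void __exit obj_exit(void)
	{
		rcu_barrier();	/* wait for all pending call_rcu() callbacks */
	}
	module_exit(obj_exit);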
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -528,7 +528,7 @@ static int vlan_dev_init(struct net_device *dev)
 					  (1<<__LINK_STATE_DORMANT))) |
 		      (1<<__LINK_STATE_PRESENT);
 
-	dev->hw_features = real_dev->vlan_features & NETIF_F_ALL_TX_OFFLOADS;
+	dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
 	dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
@@ -587,9 +587,11 @@ static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
 {
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
 
-	features &= (real_dev->features | NETIF_F_LLTX);
+	features &= real_dev->features;
+	features &= real_dev->vlan_features;
 	if (dev_ethtool_get_rx_csum(real_dev))
 		features |= NETIF_F_RXCSUM;
+	features |= NETIF_F_LLTX;
 
 	return features;
 }
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5289,6 +5289,14 @@ int __netdev_update_features(struct net_device *dev)
 	return 1;
 }
 
+/**
+ *	netdev_update_features - recalculate device features
+ *	@dev: the device to check
+ *
+ *	Recalculate dev->features set and send notifications if it
+ *	has changed. Should be called after driver or hardware dependent
+ *	conditions might have changed that influence the features.
+ */
 void netdev_update_features(struct net_device *dev)
 {
 	if (__netdev_update_features(dev))
@@ -5296,6 +5304,23 @@ void netdev_update_features(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_update_features);
 
+/**
+ *	netdev_change_features - recalculate device features
+ *	@dev: the device to check
+ *
+ *	Recalculate dev->features set and send notifications even
+ *	if they have not changed. Should be called instead of
+ *	netdev_update_features() if also dev->vlan_features might
+ *	have changed to allow the changes to be propagated to stacked
+ *	VLAN devices.
+ */
+void netdev_change_features(struct net_device *dev)
+{
+	__netdev_update_features(dev);
+	netdev_features_change(dev);
+}
+EXPORT_SYMBOL(netdev_change_features);
+
 /**
  *	netif_stacked_transfer_operstate - transfer operstate
  *	@rootdev: the root or lower level device to transfer state from
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -361,7 +361,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
 	/* NETIF_F_NTUPLE */         "rx-ntuple-filter",
 	/* NETIF_F_RXHASH */         "rx-hashing",
 	/* NETIF_F_RXCSUM */         "rx-checksum",
-	/* NETIF_F_NOCACHE_COPY */   "tx-nocache-copy"
+	/* NETIF_F_NOCACHE_COPY */   "tx-nocache-copy",
 	/* NETIF_F_LOOPBACK */       "loopback",
 };
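The one-character ethtool.c fix matters because adjacent C string literals concatenate: without the comma, the two table entries silently collapse into a single string and every later feature name shifts by one slot. In miniature:

	static const char *broken[] = { "tx-nocache-copy"  "loopback" };
	/* 1 entry: "tx-nocache-copyloopback" */
	static const char *fixed[]  = { "tx-nocache-copy", "loopback" };
	/* 2 entries, as intended */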
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 
-	if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+	if (opt->is_strictroute && iph->daddr != rt->rt_gateway)
 		goto sr_failed;
 
 	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -601,7 +601,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 	unsigned long orefdst;
 	int err;
 
-	if (!opt->srr || !rt)
+	if (!rt)
 		return 0;
 
 	if (skb->pkt_type != PACKET_HOST)
@@ -635,7 +635,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 		if (rt2->rt_type != RTN_LOCAL)
 			break;
 		/* Superfast 8) loopback forward */
-		memcpy(&iph->daddr, &optptr[srrptr-1], 4);
+		iph->daddr = nexthop;
 		opt->is_changed = 1;
 	}
 	if (srrptr <= srrspace) {
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -38,6 +38,7 @@
 #include <linux/seq_file.h>
 #include <linux/termios.h>
 #include <linux/tty.h>
+#include <linux/tty_flip.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>		/* for MODULE_ALIAS_CHARDEV_MAJOR */
@@ -1132,7 +1133,6 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
 				      struct sk_buff *skb)
 {
 	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
-	struct tty_ldisc *ld;
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
@@ -1161,15 +1161,11 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
 	}
 
 	/*
-	 * Just give it over to the line discipline. There is no need to
-	 * involve the flip buffers, since we are not running in an interrupt
-	 * handler
+	 * Use flip buffer functions since the code may be called from interrupt
+	 * context
 	 */
-
-	ld = tty_ldisc_ref(self->tty);
-	if (ld)
-		ld->ops->receive_buf(self->tty, skb->data, NULL, skb->len);
-	tty_ldisc_deref(ld);
+	tty_insert_flip_string(self->tty, skb->data, skb->len);
+	tty_flip_buffer_push(self->tty);
 
 	/* No need to kfree_skb - see ircomm_ttp_data_indication() */
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1435,16 +1435,15 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 
 	/* Add tunnel to our list */
 	INIT_LIST_HEAD(&tunnel->list);
-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-	synchronize_rcu();
 	atomic_inc(&l2tp_tunnel_count);
 
 	/* Bump the reference count. The tunnel context is deleted
-	 * only when this drops to zero.
+	 * only when this drops to zero. Must be done before list insertion
 	 */
 	l2tp_tunnel_inc_refcount(tunnel);
+	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
 
 	err = 0;
 err:
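The reordering in l2tp_tunnel_create() closes a window in which an RCU reader could look the tunnel up on the list before its reference count had been raised. The rule of thumb: fully initialize and reference an object before list_add_rcu() publishes it. Sketch (illustrative, not the l2tp code):

	#include <linux/atomic.h>
	#include <linux/rculist.h>
	#include <linux/spinlock.h>

	struct tun {
		atomic_t	refcnt;
		struct list_head list;
	};

	static void tun_publish(struct tun *t, struct list_head *head,
				spinlock_t *lock)
	{
		atomic_inc(&t->refcnt);		/* hold a ref *before* publishing */

		spin_lock_bh(lock);
		list_add_rcu(&t->list, head);	/* readers may see it from here on */
		spin_unlock_bh(lock);
	}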
@@ -1636,7 +1635,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 		hlist_add_head_rcu(&session->global_hlist,
 				   l2tp_session_id_hash_2(pn, session_id));
 		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-		synchronize_rcu();
 	}
 
 	/* Ignore management session in session count value */
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1382,15 +1382,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 	ip_vs_in_stats(cp, skb);
 	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
 		offset += 2 * sizeof(__u16);
-	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset);
-	/* LOCALNODE from FORWARD hook is not supported */
-	if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
-	    skb_rtable(skb)->rt_flags & RTCF_LOCAL) {
-		IP_VS_DBG(1, "%s(): "
-			  "local delivery to %pI4 but in FORWARD\n",
-			  __func__, &skb_rtable(skb)->rt_dst);
-		verdict = NF_DROP;
-	}
+	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum);
 
 out:
 	__ip_vs_conn_put(cp);
@@ -1412,7 +1404,6 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	unsigned int offset, verdict;
-	struct rt6_info *rt;
 
 	*related = 1;
@@ -1474,23 +1465,12 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 	if (!cp)
 		return NF_ACCEPT;
 
-	verdict = NF_DROP;
-
 	/* do the statistics and put it back */
 	ip_vs_in_stats(cp, skb);
 	if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
 	    IPPROTO_SCTP == cih->nexthdr)
 		offset += 2 * sizeof(__u16);
-	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
-	/* LOCALNODE from FORWARD hook is not supported */
-	if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
-	    (rt = (struct rt6_info *) skb_dst(skb)) &&
-	    rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK) {
-		IP_VS_DBG(1, "%s(): "
-			  "local delivery to %pI6 but in FORWARD\n",
-			  __func__, &rt->rt6i_dst);
-		verdict = NF_DROP;
-	}
+	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum);
 
 	__ip_vs_conn_put(cp);
[one file's diff is collapsed by the web viewer and not shown here]
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1496,7 +1496,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 	struct sctp_chunk *chunk;
 	union sctp_addr to;
 	struct sockaddr *msg_name = NULL;
-	struct sctp_sndrcvinfo default_sinfo = { 0 };
+	struct sctp_sndrcvinfo default_sinfo;
 	struct sctp_sndrcvinfo *sinfo;
 	struct sctp_initmsg *sinit;
 	sctp_assoc_t associd = 0;
@@ -1760,6 +1760,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 	/* If the user didn't specify SNDRCVINFO, make up one with
 	 * some defaults.
 	 */
+	memset(&default_sinfo, 0, sizeof(default_sinfo));
 	default_sinfo.sinfo_stream = asoc->default_stream;
 	default_sinfo.sinfo_flags = asoc->default_flags;
 	default_sinfo.sinfo_ppid = asoc->default_ppid;
@@ -1790,12 +1791,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 		goto out_free;
 	}
 
-	if (sinfo) {
-		/* Check for invalid stream. */
-		if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
-			err = -EINVAL;
-			goto out_free;
-		}
+	/* Check for invalid stream. */
+	if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
+		err = -EINVAL;
+		goto out_free;
 	}
 
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);