Commit 72da3bc0 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (22 commits)
  netlink: bug fix: wrong size was calculated for vfinfo list blob
  netlink: bug fix: don't overrun skbs on vf_port dump
  xt_tee: use skb_dst_drop()
  netdev/fec: fix ifconfig eth0 down hang issue
  cnic: Fix context memory init. on 5709.
  drivers/net: Eliminate a NULL pointer dereference
  drivers/net/hamradio: Eliminate a NULL pointer dereference
  be2net: Patch removes redundant while statement in loop.
  ipv6: Add GSO support on forwarding path
  net: fix __neigh_event_send()
  vhost: fix the memory leak which will happen when memory_access_ok fails
  vhost-net: fix to check the return value of copy_to/from_user() correctly
  vhost: fix to check the return value of copy_to/from_user() correctly
  vhost: Fix host panic if ioctl called with wrong index
  net: fix lock_sock_bh/unlock_sock_bh
  net/iucv: Add missing spin_unlock
  net: ll_temac: fix checksum offload logic
  net: ll_temac: fix interrupt bug when interrupt 0 is used
  sctp: dubious bitfields in sctp_transport
  ipmr: off by one in __ipmr_fill_mroute()
  ...
parents 8507bb00 045de01a
...@@ -551,8 +551,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id) ...@@ -551,8 +551,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
void __iomem *shmem; void __iomem *shmem;
if (dev == NULL) { if (dev == NULL) {
pr_err("%s: net_interrupt(): irq %d for unknown device.\n", pr_err("net_interrupt(): irq %d for unknown device.\n", irq);
dev->name, irq);
return IRQ_NONE; return IRQ_NONE;
} }
......
...@@ -294,7 +294,7 @@ int be_cmd_POST(struct be_adapter *adapter) ...@@ -294,7 +294,7 @@ int be_cmd_POST(struct be_adapter *adapter)
} else { } else {
return 0; return 0;
} }
} while (timeout < 20); } while (timeout < 40);
dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
return -1; return -1;
......
...@@ -1861,7 +1861,7 @@ static int be_setup(struct be_adapter *adapter) ...@@ -1861,7 +1861,7 @@ static int be_setup(struct be_adapter *adapter)
goto if_destroy; goto if_destroy;
} }
vf++; vf++;
} while (vf < num_vfs); }
} else if (!be_physfn(adapter)) { } else if (!be_physfn(adapter)) {
status = be_cmd_mac_addr_query(adapter, mac, status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
......
...@@ -3367,13 +3367,9 @@ static int cnic_cm_shutdown(struct cnic_dev *dev) ...@@ -3367,13 +3367,9 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
static void cnic_init_context(struct cnic_dev *dev, u32 cid) static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{ {
struct cnic_local *cp = dev->cnic_priv;
u32 cid_addr; u32 cid_addr;
int i; int i;
if (CHIP_NUM(cp) == CHIP_NUM_5709)
return;
cid_addr = GET_CID_ADDR(cid); cid_addr = GET_CID_ADDR(cid);
for (i = 0; i < CTX_SIZE; i += 4) for (i = 0; i < CTX_SIZE; i += 4)
...@@ -3530,14 +3526,11 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) ...@@ -3530,14 +3526,11 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
sb_id = cp->status_blk_num; sb_id = cp->status_blk_num;
tx_cid = 20; tx_cid = 20;
cnic_init_context(dev, tx_cid);
cnic_init_context(dev, tx_cid + 1);
cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
struct status_block_msix *sblk = cp->status_blk.bnx2; struct status_block_msix *sblk = cp->status_blk.bnx2;
tx_cid = TX_TSS_CID + sb_id - 1; tx_cid = TX_TSS_CID + sb_id - 1;
cnic_init_context(dev, tx_cid);
CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
(TX_TSS_CID << 7)); (TX_TSS_CID << 7));
cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
...@@ -3556,6 +3549,9 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) ...@@ -3556,6 +3549,9 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
} else { } else {
cnic_init_context(dev, tx_cid);
cnic_init_context(dev, tx_cid + 1);
offset0 = BNX2_L2CTX_TYPE; offset0 = BNX2_L2CTX_TYPE;
offset1 = BNX2_L2CTX_CMD_TYPE; offset1 = BNX2_L2CTX_CMD_TYPE;
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
......
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H #ifndef CNIC_IF_H
#define CNIC_IF_H #define CNIC_IF_H
#define CNIC_MODULE_VERSION "2.1.1" #define CNIC_MODULE_VERSION "2.1.2"
#define CNIC_MODULE_RELDATE "Feb 22, 2010" #define CNIC_MODULE_RELDATE "May 26, 2010"
#define CNIC_ULP_RDMA 0 #define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1 #define CNIC_ULP_ISCSI 1
......
...@@ -681,6 +681,8 @@ static int fec_enet_mii_probe(struct net_device *dev) ...@@ -681,6 +681,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
struct phy_device *phy_dev = NULL; struct phy_device *phy_dev = NULL;
int phy_addr; int phy_addr;
fep->phy_dev = NULL;
/* find the first phy */ /* find the first phy */
for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
if (fep->mii_bus->phy_map[phy_addr]) { if (fep->mii_bus->phy_map[phy_addr]) {
...@@ -711,6 +713,11 @@ static int fec_enet_mii_probe(struct net_device *dev) ...@@ -711,6 +713,11 @@ static int fec_enet_mii_probe(struct net_device *dev)
fep->link = 0; fep->link = 0;
fep->full_duplex = 0; fep->full_duplex = 0;
printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
"(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
fep->phy_dev->irq);
return 0; return 0;
} }
...@@ -756,13 +763,8 @@ static int fec_enet_mii_init(struct platform_device *pdev) ...@@ -756,13 +763,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (mdiobus_register(fep->mii_bus)) if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq; goto err_out_free_mdio_irq;
if (fec_enet_mii_probe(dev) != 0)
goto err_out_unregister_bus;
return 0; return 0;
err_out_unregister_bus:
mdiobus_unregister(fep->mii_bus);
err_out_free_mdio_irq: err_out_free_mdio_irq:
kfree(fep->mii_bus->irq); kfree(fep->mii_bus->irq);
err_out_free_mdiobus: err_out_free_mdiobus:
...@@ -915,7 +917,12 @@ fec_enet_open(struct net_device *dev) ...@@ -915,7 +917,12 @@ fec_enet_open(struct net_device *dev)
if (ret) if (ret)
return ret; return ret;
/* schedule a link state check */ /* Probe and connect to PHY when open the interface */
ret = fec_enet_mii_probe(dev);
if (ret) {
fec_enet_free_buffers(dev);
return ret;
}
phy_start(fep->phy_dev); phy_start(fep->phy_dev);
netif_start_queue(dev); netif_start_queue(dev);
fep->opened = 1; fep->opened = 1;
...@@ -929,10 +936,12 @@ fec_enet_close(struct net_device *dev) ...@@ -929,10 +936,12 @@ fec_enet_close(struct net_device *dev)
/* Don't know what to do yet. */ /* Don't know what to do yet. */
fep->opened = 0; fep->opened = 0;
phy_stop(fep->phy_dev);
netif_stop_queue(dev); netif_stop_queue(dev);
fec_stop(dev); fec_stop(dev);
if (fep->phy_dev)
phy_disconnect(fep->phy_dev);
fec_enet_free_buffers(dev); fec_enet_free_buffers(dev);
return 0; return 0;
...@@ -1316,11 +1325,6 @@ fec_probe(struct platform_device *pdev) ...@@ -1316,11 +1325,6 @@ fec_probe(struct platform_device *pdev)
if (ret) if (ret)
goto failed_register; goto failed_register;
printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
"(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
fep->phy_dev->irq);
return 0; return 0;
failed_register: failed_register:
......
...@@ -1151,8 +1151,7 @@ static int __init yam_init_driver(void) ...@@ -1151,8 +1151,7 @@ static int __init yam_init_driver(void)
dev = alloc_netdev(sizeof(struct yam_port), name, dev = alloc_netdev(sizeof(struct yam_port), name,
yam_setup); yam_setup);
if (!dev) { if (!dev) {
printk(KERN_ERR "yam: cannot allocate net device %s\n", pr_err("yam: cannot allocate net device\n");
dev->name);
err = -ENOMEM; err = -ENOMEM;
goto error; goto error;
} }
......
...@@ -295,6 +295,10 @@ This option defaults to enabled (set) */ ...@@ -295,6 +295,10 @@ This option defaults to enabled (set) */
#define MULTICAST_CAM_TABLE_NUM 4 #define MULTICAST_CAM_TABLE_NUM 4
/* TEMAC Synthesis features */
#define TEMAC_FEATURE_RX_CSUM (1 << 0)
#define TEMAC_FEATURE_TX_CSUM (1 << 1)
/* TX/RX CURDESC_PTR points to first descriptor */ /* TX/RX CURDESC_PTR points to first descriptor */
/* TX/RX TAILDESC_PTR points to last descriptor in linked list */ /* TX/RX TAILDESC_PTR points to last descriptor in linked list */
...@@ -353,6 +357,7 @@ struct temac_local { ...@@ -353,6 +357,7 @@ struct temac_local {
struct mutex indirect_mutex; struct mutex indirect_mutex;
u32 options; /* Current options word */ u32 options; /* Current options word */
int last_link; int last_link;
unsigned int temac_features;
/* Buffer descriptors */ /* Buffer descriptors */
struct cdmac_bd *tx_bd_v; struct cdmac_bd *tx_bd_v;
......
...@@ -245,7 +245,7 @@ static int temac_dma_bd_init(struct net_device *ndev) ...@@ -245,7 +245,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
CHNL_CTRL_IRQ_COAL_EN); CHNL_CTRL_IRQ_COAL_EN);
/* 0x10220483 */ /* 0x10220483 */
/* 0x00100483 */ /* 0x00100483 */
lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 | lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN | CHNL_CTRL_IRQ_COAL_EN |
...@@ -574,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev) ...@@ -574,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev)
if (cur_p->app4) if (cur_p->app4)
dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
cur_p->app0 = 0; cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app3 = 0;
cur_p->app4 = 0;
ndev->stats.tx_packets++; ndev->stats.tx_packets++;
ndev->stats.tx_bytes += cur_p->len; ndev->stats.tx_bytes += cur_p->len;
...@@ -589,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev) ...@@ -589,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev)
netif_wake_queue(ndev); netif_wake_queue(ndev);
} }
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
struct cdmac_bd *cur_p;
int tail;
tail = lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[tail];
do {
if (cur_p->app0)
return NETDEV_TX_BUSY;
tail++;
if (tail >= TX_BD_NUM)
tail = 0;
cur_p = &lp->tx_bd_v[tail];
num_frag--;
} while (num_frag >= 0);
return 0;
}
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{ {
struct temac_local *lp = netdev_priv(ndev); struct temac_local *lp = netdev_priv(ndev);
...@@ -603,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -603,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (cur_p->app0 & STS_CTRL_APP0_CMPLT) { if (temac_check_tx_bd_space(lp, num_frag)) {
if (!netif_queue_stopped(ndev)) { if (!netif_queue_stopped(ndev)) {
netif_stop_queue(ndev); netif_stop_queue(ndev);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
...@@ -613,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -613,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 = 0; cur_p->app0 = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = ip_hdr(skb); unsigned int csum_start_off = skb_transport_offset(skb);
int length = 0, start = 0, insert = 0; unsigned int csum_index_off = csum_start_off + skb->csum_offset;
switch (ip->protocol) { cur_p->app0 |= 1; /* TX Checksum Enabled */
case IPPROTO_TCP: cur_p->app1 = (csum_start_off << 16) | csum_index_off;
start = sizeof(struct iphdr) + ETH_HLEN; cur_p->app2 = 0; /* initial checksum seed */
insert = sizeof(struct iphdr) + ETH_HLEN + 16;
length = ip->tot_len - sizeof(struct iphdr);
break;
case IPPROTO_UDP:
start = sizeof(struct iphdr) + ETH_HLEN;
insert = sizeof(struct iphdr) + ETH_HLEN + 6;
length = ip->tot_len - sizeof(struct iphdr);
break;
default:
break;
}
cur_p->app1 = ((start << 16) | insert);
cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
length, ip->protocol, 0);
skb->data[insert] = 0;
skb->data[insert + 1] = 0;
} }
cur_p->app0 |= STS_CTRL_APP0_SOP; cur_p->app0 |= STS_CTRL_APP0_SOP;
cur_p->len = skb_headlen(skb); cur_p->len = skb_headlen(skb);
cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
...@@ -699,6 +711,15 @@ static void ll_temac_recv(struct net_device *ndev) ...@@ -699,6 +711,15 @@ static void ll_temac_recv(struct net_device *ndev)
skb->protocol = eth_type_trans(skb, ndev); skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
/* if we're doing rx csum offload, set it up */
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
(skb->protocol == __constant_htons(ETH_P_IP)) &&
(skb->len > 64)) {
skb->csum = cur_p->app3 & 0xFFFF;
skb->ip_summed = CHECKSUM_COMPLETE;
}
netif_rx(skb); netif_rx(skb);
ndev->stats.rx_packets++; ndev->stats.rx_packets++;
...@@ -883,6 +904,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) ...@@ -883,6 +904,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
struct temac_local *lp; struct temac_local *lp;
struct net_device *ndev; struct net_device *ndev;
const void *addr; const void *addr;
__be32 *p;
int size, rc = 0; int size, rc = 0;
/* Init network device structure */ /* Init network device structure */
...@@ -926,6 +948,18 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) ...@@ -926,6 +948,18 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
goto nodev; goto nodev;
} }
/* Setup checksum offload, but default to off if not specified */
lp->temac_features = 0;
p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
if (p && be32_to_cpu(*p)) {
lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
}
p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
if (p && be32_to_cpu(*p))
lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
if (!np) { if (!np) {
...@@ -950,7 +984,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) ...@@ -950,7 +984,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
lp->rx_irq = irq_of_parse_and_map(np, 0); lp->rx_irq = irq_of_parse_and_map(np, 0);
lp->tx_irq = irq_of_parse_and_map(np, 1); lp->tx_irq = irq_of_parse_and_map(np, 1);
if (!lp->rx_irq || !lp->tx_irq) { if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
dev_err(&op->dev, "could not determine irqs\n"); dev_err(&op->dev, "could not determine irqs\n");
rc = -ENOMEM; rc = -ENOMEM;
goto nodev; goto nodev;
......
...@@ -593,17 +593,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl, ...@@ -593,17 +593,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
int r; int r;
switch (ioctl) { switch (ioctl) {
case VHOST_NET_SET_BACKEND: case VHOST_NET_SET_BACKEND:
r = copy_from_user(&backend, argp, sizeof backend); if (copy_from_user(&backend, argp, sizeof backend))
if (r < 0) return -EFAULT;
return r;
return vhost_net_set_backend(n, backend.index, backend.fd); return vhost_net_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES: case VHOST_GET_FEATURES:
features = VHOST_FEATURES; features = VHOST_FEATURES;
return copy_to_user(featurep, &features, sizeof features); if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES: case VHOST_SET_FEATURES:
r = copy_from_user(&features, featurep, sizeof features); if (copy_from_user(&features, featurep, sizeof features))
if (r < 0) return -EFAULT;
return r;
if (features & ~VHOST_FEATURES) if (features & ~VHOST_FEATURES)
return -EOPNOTSUPP; return -EOPNOTSUPP;
return vhost_net_set_features(n, features); return vhost_net_set_features(n, features);
......
...@@ -320,10 +320,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) ...@@ -320,10 +320,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{ {
struct vhost_memory mem, *newmem, *oldmem; struct vhost_memory mem, *newmem, *oldmem;
unsigned long size = offsetof(struct vhost_memory, regions); unsigned long size = offsetof(struct vhost_memory, regions);
long r; if (copy_from_user(&mem, m, size))
r = copy_from_user(&mem, m, size); return -EFAULT;
if (r)
return r;
if (mem.padding) if (mem.padding)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
...@@ -333,15 +331,16 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) ...@@ -333,15 +331,16 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -ENOMEM; return -ENOMEM;
memcpy(newmem, &mem, size); memcpy(newmem, &mem, size);
r = copy_from_user(newmem->regions, m->regions, if (copy_from_user(newmem->regions, m->regions,
mem.nregions * sizeof *m->regions); mem.nregions * sizeof *m->regions)) {
if (r) {
kfree(newmem); kfree(newmem);
return r; return -EFAULT;
} }
if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
kfree(newmem);
return -EFAULT; return -EFAULT;
}
oldmem = d->memory; oldmem = d->memory;
rcu_assign_pointer(d->memory, newmem); rcu_assign_pointer(d->memory, newmem);
synchronize_rcu(); synchronize_rcu();
...@@ -374,7 +373,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ...@@ -374,7 +373,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
r = get_user(idx, idxp); r = get_user(idx, idxp);
if (r < 0) if (r < 0)
return r; return r;
if (idx > d->nvqs) if (idx >= d->nvqs)
return -ENOBUFS; return -ENOBUFS;
vq = d->vqs + idx; vq = d->vqs + idx;
...@@ -389,9 +388,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ...@@ -389,9 +388,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EBUSY; r = -EBUSY;
break; break;
} }
r = copy_from_user(&s, argp, sizeof s); if (copy_from_user(&s, argp, sizeof s)) {
if (r < 0) r = -EFAULT;
break; break;
}
if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
r = -EINVAL; r = -EINVAL;
break; break;
...@@ -405,9 +405,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ...@@ -405,9 +405,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EBUSY; r = -EBUSY;
break; break;
} }
r = copy_from_user(&s, argp, sizeof s); if (copy_from_user(&s, argp, sizeof s)) {
if (r < 0) r = -EFAULT;
break; break;
}
if (s.num > 0xffff) { if (s.num > 0xffff) {
r = -EINVAL; r = -EINVAL;
break; break;
...@@ -419,12 +420,14 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ...@@ -419,12 +420,14 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
case VHOST_GET_VRING_BASE: case VHOST_GET_VRING_BASE:
s.index = idx; s.index = idx;
s.num = vq->last_avail_idx; s.num = vq->last_avail_idx;
r = copy_to_user(argp, &s, sizeof s); if (copy_to_user(argp, &s, sizeof s))
r = -EFAULT;
break; break;
case VHOST_SET_VRING_ADDR: case VHOST_SET_VRING_ADDR:
r = copy_from_user(&a, argp, sizeof a); if (copy_from_user(&a, argp, sizeof a)) {
if (r < 0) r = -EFAULT;
break; break;
}
if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
r = -EOPNOTSUPP; r = -EOPNOTSUPP;
break; break;
...@@ -477,9 +480,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ...@@ -477,9 +480,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
vq->used = (void __user *)(unsigned long)a.used_user_addr; vq->used = (void __user *)(unsigned long)a.used_user_addr;
break; break;
case VHOST_SET_VRING_KICK: case VHOST_SET_VRING_KICK:
r = copy_from_user(&f, argp, sizeof f); if (copy_from_user(&f, argp, sizeof f)) {
if (r < 0) r = -EFAULT;
break; break;
}
eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) { if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp); r = PTR_ERR(eventfp);
...@@ -492,9 +496,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ...@@ -492,9 +496,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
filep = eventfp; filep = eventfp;
break; break;
case VHOST_SET_VRING_CALL: case VHOST_SET_VRING_CALL:
r = copy_from_user(&f, argp, sizeof f); if (copy_from_user(&f, argp, sizeof f)) {
if (r < 0) r = -EFAULT;
break; break;
}
eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) { if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp); r = PTR_ERR(eventfp);
...@@ -510,9 +515,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ...@@ -510,9 +515,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
filep = eventfp; filep = eventfp;
break; break;
case VHOST_SET_VRING_ERR: case VHOST_SET_VRING_ERR:
r = copy_from_user(&f, argp, sizeof f); if (copy_from_user(&f, argp, sizeof f)) {
if (r < 0) r = -EFAULT;
break; break;
}
eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) { if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp); r = PTR_ERR(eventfp);
...@@ -575,9 +581,10 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg) ...@@ -575,9 +581,10 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
r = vhost_set_memory(d, argp); r = vhost_set_memory(d, argp);
break; break;
case VHOST_SET_LOG_BASE: case VHOST_SET_LOG_BASE:
r = copy_from_user(&p, argp, sizeof p); if (copy_from_user(&p, argp, sizeof p)) {
if (r < 0) r = -EFAULT;
break; break;
}
if ((u64)(unsigned long)p != p) { if ((u64)(unsigned long)p != p) {
r = -EFAULT; r = -EFAULT;
break; break;
......
...@@ -39,7 +39,7 @@ extern int net_cls_subsys_id; ...@@ -39,7 +39,7 @@ extern int net_cls_subsys_id;
static inline u32 task_cls_classid(struct task_struct *p) static inline u32 task_cls_classid(struct task_struct *p)
{ {
int id; int id;
u32 classid; u32 classid = 0;
if (in_interrupt()) if (in_interrupt())
return 0; return 0;
......
...@@ -876,7 +876,7 @@ struct sctp_transport { ...@@ -876,7 +876,7 @@ struct sctp_transport {
/* Reference counting. */ /* Reference counting. */
atomic_t refcnt; atomic_t refcnt;
int dead:1, __u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA /* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being * chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0, * used to compute a RTT. If this flag is 0,
......
...@@ -1026,15 +1026,23 @@ extern void release_sock(struct sock *sk); ...@@ -1026,15 +1026,23 @@ extern void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING) SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
static inline void lock_sock_bh(struct sock *sk) extern bool lock_sock_fast(struct sock *sk);
/**
* unlock_sock_fast - complement of lock_sock_fast
* @sk: socket
* @slow: slow mode
*
* fast unlock socket for user context.
* If slow mode is on, we call regular release_sock()
*/
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{ {
spin_lock_bh(&sk->sk_lock.slock); if (slow)
release_sock(sk);
else
spin_unlock_bh(&sk->sk_lock.slock);
} }
static inline void unlock_sock_bh(struct sock *sk)
{
spin_unlock_bh(&sk->sk_lock.slock);
}
extern struct sock *sk_alloc(struct net *net, int family, extern struct sock *sk_alloc(struct net *net, int family,
gfp_t priority, gfp_t priority,
......
...@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram); ...@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{ {
bool slow;
if (likely(atomic_read(&skb->users) == 1)) if (likely(atomic_read(&skb->users) == 1))
smp_rmb(); smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users))) else if (likely(!atomic_dec_and_test(&skb->users)))
return; return;
lock_sock_bh(sk); slow = lock_sock_fast(sk);
skb_orphan(skb); skb_orphan(skb);
sk_mem_reclaim_partial(sk); sk_mem_reclaim_partial(sk);
unlock_sock_bh(sk); unlock_sock_fast(sk, slow);
/* skb is now orphaned, can be freed outside of locked section */ /* skb is now orphaned, can be freed outside of locked section */
__kfree_skb(skb); __kfree_skb(skb);
......
...@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) ...@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(buff); kfree_skb(buff);
NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
} }
skb_dst_force(skb);
__skb_queue_tail(&neigh->arp_queue, skb); __skb_queue_tail(&neigh->arp_queue, skb);
} }
rc = 1; rc = 1;
......
...@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev) ...@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
int num_vfs = dev_num_vf(dev->dev.parent); int num_vfs = dev_num_vf(dev->dev.parent);
size_t size = nlmsg_total_size(sizeof(struct nlattr)); size_t size = nla_total_size(sizeof(struct nlattr));
size += nlmsg_total_size(num_vfs * sizeof(struct nlattr)); size += nla_total_size(num_vfs * sizeof(struct nlattr));
size += num_vfs * (sizeof(struct ifla_vf_mac) + size += num_vfs *
sizeof(struct ifla_vf_vlan) + (nla_total_size(sizeof(struct ifla_vf_mac)) +
sizeof(struct ifla_vf_tx_rate)); nla_total_size(sizeof(struct ifla_vf_vlan)) +
nla_total_size(sizeof(struct ifla_vf_tx_rate)));
return size; return size;
} else } else
return 0; return 0;
...@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) ...@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
vf_port = nla_nest_start(skb, IFLA_VF_PORT); vf_port = nla_nest_start(skb, IFLA_VF_PORT);
if (!vf_port) { if (!vf_port)
nla_nest_cancel(skb, vf_ports); goto nla_put_failure;
return -EMSGSIZE;
}
NLA_PUT_U32(skb, IFLA_PORT_VF, vf); NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
if (err == -EMSGSIZE)
goto nla_put_failure;
if (err) { if (err) {
nla_put_failure:
nla_nest_cancel(skb, vf_port); nla_nest_cancel(skb, vf_port);
continue; continue;
} }
...@@ -739,6 +739,10 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) ...@@ -739,6 +739,10 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
nla_nest_end(skb, vf_ports); nla_nest_end(skb, vf_ports);
return 0; return 0;
nla_put_failure:
nla_nest_cancel(skb, vf_ports);
return -EMSGSIZE;
} }
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
...@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) ...@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
if (err) { if (err) {
nla_nest_cancel(skb, port_self); nla_nest_cancel(skb, port_self);
return err; return (err == -EMSGSIZE) ? err : 0;
} }
nla_nest_end(skb, port_self); nla_nest_end(skb, port_self);
......
...@@ -2007,6 +2007,39 @@ void release_sock(struct sock *sk) ...@@ -2007,6 +2007,39 @@ void release_sock(struct sock *sk)
} }
EXPORT_SYMBOL(release_sock); EXPORT_SYMBOL(release_sock);
/**
* lock_sock_fast - fast version of lock_sock
* @sk: socket
*
* This version should be used for very small section, where process wont block
* return false if fast path is taken
* sk_lock.slock locked, owned = 0, BH disabled
* return true if slow path is taken
* sk_lock.slock unlocked, owned = 1, BH enabled
*/
bool lock_sock_fast(struct sock *sk)
{
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
if (!sk->sk_lock.owned)
/*
* Note : We must disable BH
*/
return false;
__lock_sock(sk);
sk->sk_lock.owned = 1;
spin_unlock(&sk->sk_lock.slock);
/*
* The sk_lock has mutex_lock() semantics here:
*/
mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
local_bh_enable();
return true;
}
EXPORT_SYMBOL(lock_sock_fast);
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{ {
struct timeval tv; struct timeval tv;
......
...@@ -1911,7 +1911,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, ...@@ -1911,7 +1911,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct rtattr *mp_head; struct rtattr *mp_head;
/* If cache is unresolved, don't try to parse IIF and OIF */ /* If cache is unresolved, don't try to parse IIF and OIF */
if (c->mfc_parent > MAXVIFS) if (c->mfc_parent >= MAXVIFS)
return -ENOENT; return -ENOENT;
if (VIF_EXISTS(mrt, c->mfc_parent)) if (VIF_EXISTS(mrt, c->mfc_parent))
......
...@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk) ...@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
spin_unlock_bh(&rcvq->lock); spin_unlock_bh(&rcvq->lock);
if (!skb_queue_empty(&list_kill)) { if (!skb_queue_empty(&list_kill)) {
lock_sock_bh(sk); bool slow = lock_sock_fast(sk);
__skb_queue_purge(&list_kill); __skb_queue_purge(&list_kill);
sk_mem_reclaim_partial(sk); sk_mem_reclaim_partial(sk);
unlock_sock_bh(sk); unlock_sock_fast(sk, slow);
} }
return res; return res;
} }
...@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ...@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int peeked; int peeked;
int err; int err;
int is_udplite = IS_UDPLITE(sk); int is_udplite = IS_UDPLITE(sk);
bool slow;
/* /*
* Check any passed addresses * Check any passed addresses
...@@ -1197,10 +1199,10 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ...@@ -1197,10 +1199,10 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
return err; return err;
csum_copy_err: csum_copy_err:
lock_sock_bh(sk); slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) if (!skb_kill_datagram(sk, skb, flags))
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
unlock_sock_bh(sk); unlock_sock_fast(sk, slow);
if (noblock) if (noblock)
return -EAGAIN; return -EAGAIN;
...@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb) ...@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
void udp_destroy_sock(struct sock *sk) void udp_destroy_sock(struct sock *sk)
{ {
lock_sock_bh(sk); bool slow = lock_sock_fast(sk);
udp_flush_pending_frames(sk); udp_flush_pending_frames(sk);
unlock_sock_bh(sk); unlock_sock_fast(sk, slow);
} }
/* /*
......
...@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb) ...@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb)
if (mtu < IPV6_MIN_MTU) if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU; mtu = IPV6_MIN_MTU;
if (skb->len > mtu) { if (skb->len > mtu && !skb_is_gso(skb)) {
/* Again, force OUTPUT device used as source address */ /* Again, force OUTPUT device used as source address */
skb->dev = dst->dev; skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
......
...@@ -2017,7 +2017,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, ...@@ -2017,7 +2017,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
struct rtattr *mp_head; struct rtattr *mp_head;
/* If cache is unresolved, don't try to parse IIF and OIF */ /* If cache is unresolved, don't try to parse IIF and OIF */
if (c->mf6c_parent > MAXMIFS) if (c->mf6c_parent >= MAXMIFS)
return -ENOENT; return -ENOENT;
if (MIF_EXISTS(mrt, c->mf6c_parent)) if (MIF_EXISTS(mrt, c->mf6c_parent))
......
...@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, ...@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
int err; int err;
int is_udplite = IS_UDPLITE(sk); int is_udplite = IS_UDPLITE(sk);
int is_udp4; int is_udp4;
bool slow;
if (addr_len) if (addr_len)
*addr_len=sizeof(struct sockaddr_in6); *addr_len=sizeof(struct sockaddr_in6);
...@@ -424,7 +425,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, ...@@ -424,7 +425,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
return err; return err;
csum_copy_err: csum_copy_err:
lock_sock_bh(sk); slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) { if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4) if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk), UDP_INC_STATS_USER(sock_net(sk),
...@@ -433,7 +434,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, ...@@ -433,7 +434,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
UDP6_INC_STATS_USER(sock_net(sk), UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite); UDP_MIB_INERRORS, is_udplite);
} }
unlock_sock_bh(sk); unlock_sock_fast(sk, slow);
if (flags & MSG_DONTWAIT) if (flags & MSG_DONTWAIT)
return -EAGAIN; return -EAGAIN;
......
...@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) ...@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
save_message: save_message:
save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
if (!save_msg) if (!save_msg)
return; goto out_unlock;
save_msg->path = path; save_msg->path = path;
save_msg->msg = *msg; save_msg->msg = *msg;
......
...@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info) ...@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
if (ip_route_output_key(net, &rt, &fl) != 0) if (ip_route_output_key(net, &rt, &fl) != 0)
return false; return false;
dst_release(skb_dst(skb)); skb_dst_drop(skb);
skb_dst_set(skb, &rt->u.dst); skb_dst_set(skb, &rt->u.dst);
skb->dev = rt->u.dst.dev; skb->dev = rt->u.dst.dev;
skb->protocol = htons(ETH_P_IP); skb->protocol = htons(ETH_P_IP);
...@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info) ...@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
if (dst == NULL) if (dst == NULL)
return false; return false;
dst_release(skb_dst(skb)); skb_dst_drop(skb);
skb_dst_set(skb, dst); skb_dst_set(skb, dst);
skb->dev = dst->dev; skb->dev = dst->dev;
skb->protocol = htons(ETH_P_IPV6); skb->protocol = htons(ETH_P_IPV6);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment