Commit cec05562 authored by Neil Horman, committed by David S. Miller

vmxnet3: avoid calling pskb_may_pull with interrupts disabled

vmxnet3 has a function vmxnet3_parse_and_copy_hdr which, among other operations,
uses pskb_may_pull to linearize the header portion of an skb.  That operation
eventually uses local_bh_disable/enable to ensure that it doesn't race with the
driver's bottom half handler.  Unfortunately, vmxnet3 performs this
parse-and-copy operation with a spinlock held and interrupts disabled.  This
causes us to run afoul of the WARN_ON_ONCE(irqs_disabled()) warning in
local_bh_enable, resulting in this:

WARNING: at kernel/softirq.c:159 local_bh_enable+0x59/0x90() (Not tainted)
Hardware name: VMware Virtual Platform
Modules linked in: ipv6 ppdev parport_pc parport microcode e1000 vmware_balloon
vmxnet3 i2c_piix4 sg ext4 jbd2 mbcache sd_mod crc_t10dif sr_mod cdrom mptspi
mptscsih mptbase scsi_transport_spi pata_acpi ata_generic ata_piix vmwgfx ttm
drm_kms_helper drm i2c_core dm_mirror dm_region_hash dm_log dm_mod [last
unloaded: mperf]
Pid: 6229, comm: sshd Not tainted 2.6.32-616.el6.i686 #1
Call Trace:
 [<c04624d9>] ? warn_slowpath_common+0x89/0xe0
 [<c0469e99>] ? local_bh_enable+0x59/0x90
 [<c046254b>] ? warn_slowpath_null+0x1b/0x20
 [<c0469e99>] ? local_bh_enable+0x59/0x90
 [<c07bb936>] ? skb_copy_bits+0x126/0x210
 [<f8d1d9fe>] ? ext4_ext_find_extent+0x24e/0x2d0 [ext4]
 [<c07bc49e>] ? __pskb_pull_tail+0x6e/0x2b0
 [<f95a6164>] ? vmxnet3_xmit_frame+0xba4/0xef0 [vmxnet3]
 [<c05d15a6>] ? selinux_ip_postroute+0x56/0x320
 [<c0615988>] ? cfq_add_rq_rb+0x98/0x110
 [<c0852df8>] ? packet_rcv+0x48/0x350
 [<c07c5839>] ? dev_queue_xmit_nit+0xc9/0x140
...
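
Condensed, the old ordering in vmxnet3_tq_xmit() looks roughly like this (an
outline only, with error handling and the surrounding code omitted):

	spin_lock_irqsave(&tq->tx_lock, flags);	/* IRQs now disabled on this CPU */

	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
		/* -> pskb_may_pull()
		 * -> __pskb_pull_tail() -> skb_copy_bits()
		 * -> local_bh_disable()/local_bh_enable(), where local_bh_enable()
		 *    trips WARN_ON_ONCE(irqs_disabled()) in kernel/softirq.c */

	spin_unlock_irqrestore(&tq->tx_lock, flags);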

Fix it by splitting vmxnet3_parse_and_copy_hdr into two functions:

vmxnet3_parse_hdr, which sets up the internal/on-stack ctx data structure and
pulls the skb (both of which can be done without holding the spinlock with
irqs disabled),

and

vmxnet3_copy_hdr, which just copies the skb headers to the tx ring safely
under the lock.
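
Condensed from the patch below, the new ordering in vmxnet3_tq_xmit() becomes
(the header-size checks and some statistics updates are omitted):

	/* parse/pull the headers first, with no lock held */
	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
	if (ret < 0) {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	/* only now take the lock and disable IRQs */
	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* plain memcpy of the already-pulled headers into the tx data ring */
	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);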

Tested and shown to correct the described problem.  Applies cleanly to the head
of the net tree.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Shrikrishna Khare <skhare@vmware.com>
CC: "VMware, Inc." <pv-drivers@vmware.com>
CC: "David S. Miller" <davem@davemloft.net>
Acked-by: Shrikrishna Khare <skhare@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7024b68e
@@ -814,7 +814,7 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
 
 
 /*
- *    parse and copy relevant protocol headers:
+ *    parse relevant protocol headers:
  *      For a tso pkt, relevant headers are L2/3/4 including options
  *      For a pkt requesting csum offloading, they are L2/3 and may include L4
  *      if it's a TCP/UDP pkt
@@ -827,15 +827,14 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
  * Other effects:
  *    1. related *ctx fields are updated.
  *    2. ctx->copy_size is # of bytes copied
- *    3. the portion copied is guaranteed to be in the linear part
+ *    3. the portion to be copied is guaranteed to be in the linear part
  *
  */
 static int
-vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
-			   struct vmxnet3_tx_ctx *ctx,
-			   struct vmxnet3_adapter *adapter)
+vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+		  struct vmxnet3_tx_ctx *ctx,
+		  struct vmxnet3_adapter *adapter)
 {
-	struct Vmxnet3_TxDataDesc *tdd;
 	u8 protocol = 0;
 
 	if (ctx->mss) {	/* TSO */
@@ -892,16 +891,34 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		return 0;
 	}
 
+	return 1;
+err:
+	return -1;
+}
+
+/*
+ *    copy relevant protocol headers to the transmit ring:
+ *      For a tso pkt, relevant headers are L2/3/4 including options
+ *      For a pkt requesting csum offloading, they are L2/3 and may include L4
+ *      if it's a TCP/UDP pkt
+ *
+ *
+ *    Note that this requires that vmxnet3_parse_hdr be called first to set the
+ *      appropriate bits in ctx first
+ */
+static void
+vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+		 struct vmxnet3_tx_ctx *ctx,
+		 struct vmxnet3_adapter *adapter)
+{
+	struct Vmxnet3_TxDataDesc *tdd;
+
 	tdd = tq->data_ring.base + tq->tx_ring.next2fill;
 
 	memcpy(tdd->data, skb->data, ctx->copy_size);
 	netdev_dbg(adapter->netdev,
 		   "copy %u bytes to dataRing[%u]\n",
 		   ctx->copy_size, tq->tx_ring.next2fill);
-	return 1;
-
-err:
-	return -1;
 }
 
 
@@ -998,22 +1015,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		}
 	}
 
-	spin_lock_irqsave(&tq->tx_lock, flags);
-
-	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
-		tq->stats.tx_ring_full++;
-		netdev_dbg(adapter->netdev,
-			   "tx queue stopped on %s, next2comp %u"
-			   " next2fill %u\n", adapter->netdev->name,
-			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
-		vmxnet3_tq_stop(tq, adapter);
-		spin_unlock_irqrestore(&tq->tx_lock, flags);
-		return NETDEV_TX_BUSY;
-	}
-
-	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
+	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
 	if (ret >= 0) {
 		BUG_ON(ret <= 0 && ctx.copy_size != 0);
 		/* hdrs parsed, check against other limits */
@@ -1033,9 +1035,26 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		}
 	} else {
 		tq->stats.drop_hdr_inspect_err++;
-		goto unlock_drop_pkt;
+		goto drop_pkt;
 	}
 
+	spin_lock_irqsave(&tq->tx_lock, flags);
+
+	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+		tq->stats.tx_ring_full++;
+		netdev_dbg(adapter->netdev,
+			   "tx queue stopped on %s, next2comp %u"
+			   " next2fill %u\n", adapter->netdev->name,
+			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+		vmxnet3_tq_stop(tq, adapter);
+		spin_unlock_irqrestore(&tq->tx_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+
+	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
+
 	/* fill tx descs related to addr & len */
 	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
 		goto unlock_drop_pkt;