Commit f230332f authored by David S. Miller

Merge branch 'enic-next'

Govindarajulu Varadarajan says:

====================
enic: Check for DMA mapping error

After DMA-mapping its buffers, enic does not call dma_mapping_error() to
check whether the mapping succeeded.

This series fixes the issue by checking the return value of
pci_dma_mapping_error() after each pci_map_single(); a minimal sketch of
the pattern follows this message.

This was reported by Red Hat here:
https://bugzilla.redhat.com/show_bug.cgi?id=1145016
====================
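For reference, the fix applies the standard map-then-check idiom: map a
buffer, verify the mapping before posting the address to hardware, and
report -ENOMEM on failure. A minimal sketch, using a hypothetical helper
rather than the driver's exact code:

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    /* Map an skb's linear data and verify the mapping before use. */
    static int example_map_and_check(struct pci_dev *pdev, struct sk_buff *skb,
                                     unsigned int len, dma_addr_t *dma_addr)
    {
            *dma_addr = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE);
            if (pci_dma_mapping_error(pdev, *dma_addr))
                    return -ENOMEM; /* caller frees the skb and unwinds */
            return 0;
    }

On failure the caller must also release any descriptors already queued for
the packet, which is why the series threads error returns through the
enic_queue_wq_skb_*() helpers changed below.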
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5164172f 58feff07
@@ -188,6 +188,7 @@ struct enic {
 	struct enic_rfs_flw_tbl rfs_h;
 	u32 rx_copybreak;
 	u8 rss_key[ENIC_RSS_LEN];
+	struct vnic_gen_stats gen_stats;
 };

 static inline struct device *enic_get_dev(struct enic *enic)
@@ -242,6 +243,19 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
 	return enic->rq_count + enic->wq_count + 1;
 }

+static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
+{
+	if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
+				     enic->netdev->name);
+		enic->gen_stats.dma_map_error++;
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 void enic_reset_addr_lists(struct enic *enic);
 int enic_sriov_enabled(struct enic *enic);
 int enic_is_valid_vf(struct enic *enic, int vf);
......
@@ -24,6 +24,7 @@
 #include "enic_dev.h"
 #include "enic_clsf.h"
 #include "vnic_rss.h"
+#include "vnic_stats.h"

 struct enic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -40,6 +41,11 @@ struct enic_stat {
 	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
 }

+#define ENIC_GEN_STAT(stat) { \
+	.name = #stat, \
+	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
+}
+
 static const struct enic_stat enic_tx_stats[] = {
 	ENIC_TX_STAT(tx_frames_ok),
 	ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -78,8 +84,13 @@ static const struct enic_stat enic_rx_stats[] = {
 	ENIC_RX_STAT(rx_frames_to_max),
 };

+static const struct enic_stat enic_gen_stats[] = {
+	ENIC_GEN_STAT(dma_map_error),
+};
+
 static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
 static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);

 void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
 {
@@ -146,6 +157,10 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
 			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
 			data += ETH_GSTRING_LEN;
 		}
+		for (i = 0; i < enic_n_gen_stats; i++) {
+			memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
 		break;
 	}
 }
@@ -154,7 +169,7 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
 {
 	switch (sset) {
 	case ETH_SS_STATS:
-		return enic_n_tx_stats + enic_n_rx_stats;
+		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -173,6 +188,8 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
 		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
 	for (i = 0; i < enic_n_rx_stats; i++)
 		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
+	for (i = 0; i < enic_n_gen_stats; i++)
+		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
 }

 static u32 enic_get_msglevel(struct net_device *netdev)
......
@@ -351,80 +351,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
 	return IRQ_HANDLED;
 }

-static inline void enic_queue_wq_skb_cont(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	unsigned int len_left, int loopback)
+static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
+				  struct sk_buff *skb, unsigned int len_left,
+				  int loopback)
 {
 	const skb_frag_t *frag;
+	dma_addr_t dma_addr;

 	/* Queue additional data fragments */
 	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
 		len_left -= skb_frag_size(frag);
-		enic_queue_wq_desc_cont(wq, skb,
-			skb_frag_dma_map(&enic->pdev->dev,
-					 frag, 0, skb_frag_size(frag),
-					 DMA_TO_DEVICE),
-			skb_frag_size(frag),
-			(len_left == 0),	/* EOP? */
-			loopback);
+		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
+					    skb_frag_size(frag),
+					    DMA_TO_DEVICE);
+		if (unlikely(enic_dma_map_check(enic, dma_addr)))
+			return -ENOMEM;
+		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
+					(len_left == 0),	/* EOP? */
+					loopback);
 	}
+
+	return 0;
 }

-static inline void enic_queue_wq_skb_vlan(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
+				  struct sk_buff *skb, int vlan_tag_insert,
+				  unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;

 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
+			   vlan_tag, eop, loopback);

 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }

-static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
+				     struct sk_buff *skb, int vlan_tag_insert,
+				     unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
 	unsigned int hdr_len = skb_checksum_start_offset(skb);
 	unsigned int csum_offset = hdr_len + skb->csum_offset;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;

 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc_csum_l4(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		csum_offset,
-		hdr_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
+				   hdr_len, vlan_tag_insert, vlan_tag, eop,
+				   loopback);

 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }

-static inline void enic_queue_wq_skb_tso(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+				 struct sk_buff *skb, unsigned int mss,
+				 int vlan_tag_insert, unsigned int vlan_tag,
+				 int loopback)
 {
 	unsigned int frag_len_left = skb_headlen(skb);
 	unsigned int len_left = skb->len - frag_len_left;
@@ -454,20 +468,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 	 */
 	while (frag_len_left) {
 		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
-		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
-			len, PCI_DMA_TODEVICE);
-		enic_queue_wq_desc_tso(wq, skb,
-			dma_addr,
-			len,
-			mss, hdr_len,
-			vlan_tag_insert, vlan_tag,
-			eop && (len == frag_len_left), loopback);
+		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
+					  PCI_DMA_TODEVICE);
+		if (unlikely(enic_dma_map_check(enic, dma_addr)))
+			return -ENOMEM;
+		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
+				       vlan_tag_insert, vlan_tag,
+				       eop && (len == frag_len_left), loopback);
 		frag_len_left -= len;
 		offset += len;
 	}

 	if (eop)
-		return;
+		return 0;

 	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
 	 * for additional data fragments
@@ -483,16 +496,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
 						    offset, len,
 						    DMA_TO_DEVICE);
-			enic_queue_wq_desc_cont(wq, skb,
-				dma_addr,
-				len,
-				(len_left == 0) &&
-				(len == frag_len_left),		/* EOP? */
-				loopback);
+			if (unlikely(enic_dma_map_check(enic, dma_addr)))
+				return -ENOMEM;
+			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						 (len == frag_len_left),/*EOP*/
						loopback);
 			frag_len_left -= len;
 			offset += len;
 		}
 	}
+
+	return 0;
 }

 static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,6 +517,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
 	unsigned int vlan_tag = 0;
 	int vlan_tag_insert = 0;
 	int loopback = 0;
+	int err;

 	if (vlan_tx_tag_present(skb)) {
 		/* VLAN tag from trunking driver */
@@ -513,14 +529,30 @@ static inline void enic_queue_wq_skb(struct enic *enic,
 	}

 	if (mss)
-		enic_queue_wq_skb_tso(enic, wq, skb, mss,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
+					    vlan_tag_insert, vlan_tag,
+					    loopback);
 	else if (skb->ip_summed == CHECKSUM_PARTIAL)
-		enic_queue_wq_skb_csum_l4(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
+						vlan_tag, loopback);
 	else
-		enic_queue_wq_skb_vlan(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
+					     vlan_tag, loopback);
+
+	if (unlikely(err)) {
+		struct vnic_wq_buf *buf;
+
+		buf = wq->to_use->prev;
+		/* while not EOP of previous pkt && queue not empty.
+		 * For all non EOP bufs, os_buf is NULL.
+		 */
+		while (!buf->os_buf && (buf->next != wq->to_clean)) {
+			enic_free_wq_buf(wq, buf);
+			wq->ring.desc_avail++;
+			buf = buf->prev;
+		}
+		wq->to_use = buf->next;
+		dev_kfree_skb(skb);
+	}
 }

 /* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -950,8 +982,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	if (!skb)
 		return -ENOMEM;

-	dma_addr = pci_map_single(enic->pdev, skb->data,
-		len, PCI_DMA_FROMDEVICE);
+	dma_addr = pci_map_single(enic->pdev, skb->data, len,
+				  PCI_DMA_FROMDEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}

 	enic_queue_rq_desc(rq, skb, os_buf_index,
 		dma_addr, len);
......
@@ -62,6 +62,11 @@ struct vnic_rx_stats {
 	u64 rsvd[16];
 };

+/* Generic statistics */
+struct vnic_gen_stats {
+	u64 dma_map_error;
+};
+
 struct vnic_stats {
 	struct vnic_tx_stats tx;
 	struct vnic_rx_stats rx;
......
@@ -47,11 +47,14 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 				wq->ring.desc_size * buf->index;
 			if (buf->index + 1 == count) {
 				buf->next = wq->bufs[0];
+				buf->next->prev = buf;
 				break;
 			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
 				buf->next = wq->bufs[i + 1];
+				buf->next->prev = buf;
 			} else {
 				buf->next = buf + 1;
+				buf->next->prev = buf;
 				buf++;
 			}
 		}
......
@@ -62,6 +62,7 @@ struct vnic_wq_buf {
 	uint8_t cq_entry; /* Gets completion event from hw */
 	uint8_t desc_skip_cnt; /* Num descs to occupy */
 	uint8_t compressed_send; /* Both hdr and payload in one desc */
+	struct vnic_wq_buf *prev;
 };

 /* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
......
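Why the vnic_wq changes above add a prev pointer: when a mapping fails
partway through a multi-descriptor packet, the TX path must walk backward
from wq->to_use and reclaim the descriptors it already queued. A
self-contained sketch of that unwind, using hypothetical stand-in types
rather than the driver's structs:

    /* Circular, doubly linked buffer ring; only a packet's final (EOP)
     * descriptor carries a non-NULL os_buf, mirroring the driver's ring.
     */
    struct buf {
            void *os_buf;           /* non-NULL only on EOP descriptors */
            struct buf *next;
            struct buf *prev;       /* back link added by this series */
    };

    struct wq {
            struct buf *to_use;     /* next free slot */
            struct buf *to_clean;   /* oldest in-flight slot */
            unsigned int desc_avail;
    };

    /* Walk back from the last queued descriptor, releasing every non-EOP
     * descriptor of the failed packet, then rewind to_use. Stops at the
     * previous packet's EOP descriptor or when the ring is empty.
     */
    static void unwind_partial_pkt(struct wq *wq)
    {
            struct buf *buf = wq->to_use->prev;

            while (!buf->os_buf && buf->next != wq->to_clean) {
                    /* a real driver would dma-unmap and free buf here */
                    wq->desc_avail++;
                    buf = buf->prev;
            }
            wq->to_use = buf->next;
    }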