Commit c3de991f authored by David S. Miller

Merge branch 'enic'

Govindarajulu Varadarajan says:

====================
The following patches add multi TX queue support to the enic driver.
Signed-off-by: Nishank Trivedi <nistrive@cisco.com>
Signed-off-by: Christian Benvenuti <benve@cisco.com>
Signed-off-by: Govindarajulu Varadarajan <govindarajulu90@gmail.com>
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3c3769e6 001e1c1d
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2076,7 +2076,8 @@ F: drivers/usb/chipidea/
 CISCO VIC ETHERNET NIC DRIVER
 M: Christian Benvenuti <benve@cisco.com>
-M: Roopa Prabhu <roprabhu@cisco.com>
+M: Sujith Sankar <ssujith@cisco.com>
+M: Govindarajulu Varadarajan <govindarajulu90@gmail.com>
 M: Neel Patel <neepatel@cisco.com>
 M: Nishank Trivedi <nistrive@cisco.com>
 S: Supported
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,12 +32,12 @@
 #define DRV_NAME "enic"
 #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.43"
+#define DRV_VERSION "2.1.1.50"
 #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX 6
 
-#define ENIC_WQ_MAX 1
+#define ENIC_WQ_MAX 8
 #define ENIC_RQ_MAX 8
 #define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
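Raising ENIC_WQ_MAX from 1 to 8 is the change that lets the driver expose up to eight hardware TX queues; ENIC_CQ_MAX and ENIC_INTR_MAX scale with it because every WQ and RQ gets its own completion queue, plus two extra interrupt vectors for errors and notifications. A minimal sketch of the index layout this arithmetic implies, assuming RQ completion queues come first; the helper names (mirroring the driver's enic_cq_wq()/enic_msix_wq_intr()) are illustrative, not the driver's exact code:

/* Illustrative index math only: CQs 0..rq_count-1 belong to RQs,
 * the next wq_count CQs to WQs, and each CQ is assumed to map 1:1
 * onto an MSI-X vector, with the final two vectors reserved for
 * the error and notify interrupts counted by ENIC_INTR_MAX.
 */
static inline unsigned int ex_cq_wq(unsigned int rq_count, unsigned int wq)
{
        return rq_count + wq;
}

static inline unsigned int ex_msix_wq_intr(unsigned int rq_count, unsigned int wq)
{
        return ex_cq_wq(rq_count, wq);  /* same ordering as the CQs */
}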
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -128,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
                completed_index, enic_wq_free_buf,
                opaque);
 
-       if (netif_queue_stopped(enic->netdev) &&
+       if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
            vnic_wq_desc_avail(&enic->wq[q_number]) >=
            (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
-               netif_wake_queue(enic->netdev);
+               netif_wake_subqueue(enic->netdev, q_number);
 
        spin_unlock(&enic->wq_lock[q_number]);
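With multiple WQs, the completion path can no longer stop or wake the whole device; it checks and wakes only the subqueue whose descriptors it just reclaimed. A hedged sketch of that per-queue wake pattern, where desc_avail and wake_thresh stand in for vnic_wq_desc_avail() and MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS:

#include <linux/netdevice.h>

/* Wake TX subqueue q only if it was stopped and the ring now has
 * room for a worst-case packet (wake_thresh descriptors).
 */
static void ex_tx_complete_wake(struct net_device *netdev, unsigned int q,
                                unsigned int desc_avail,
                                unsigned int wake_thresh)
{
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, q);

        if (netif_tx_queue_stopped(txq) && desc_avail >= wake_thresh)
                netif_wake_subqueue(netdev, q);
}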
@@ -292,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
 static irqreturn_t enic_isr_msix_wq(int irq, void *data)
 {
        struct enic *enic = data;
-       unsigned int cq = enic_cq_wq(enic, 0);
-       unsigned int intr = enic_msix_wq_intr(enic, 0);
+       unsigned int cq;
+       unsigned int intr;
        unsigned int wq_work_to_do = -1; /* no limit */
        unsigned int wq_work_done;
+       unsigned int wq_irq;
+
+       wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
+       cq = enic_cq_wq(enic, wq_irq);
+       intr = enic_msix_wq_intr(enic, wq_irq);
 
        wq_work_done = vnic_cq_service(&enic->cq[cq],
                wq_work_to_do, enic_wq_service, NULL);
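Since each WQ now owns its own MSI-X vector, the shared handler recovers which WQ fired by subtracting the vector number of WQ 0 from the irq it was invoked with. That only works because the WQ vectors sit contiguously in enic->msix_entry[]; a sketch of the same arithmetic under that assumption:

/* base_vector is the MSI-X vector assigned to WQ 0; WQ vectors are
 * assumed contiguous, so the offset from it identifies the queue.
 */
static unsigned int ex_wq_from_irq(int irq, int base_vector)
{
        return (unsigned int)(irq - base_vector);
}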
@@ -511,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        struct net_device *netdev)
 {
        struct enic *enic = netdev_priv(netdev);
-       struct vnic_wq *wq = &enic->wq[0];
+       struct vnic_wq *wq;
        unsigned long flags;
+       unsigned int txq_map;
 
        if (skb->len <= 0) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
 
+       txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+       wq = &enic->wq[txq_map];
+
        /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
         * which is very likely.  In the off chance it's going to take
         * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
@@ -531,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       spin_lock_irqsave(&enic->wq_lock[0], flags);
+       spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
 
        if (vnic_wq_desc_avail(wq) <
            skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-               netif_stop_queue(netdev);
+               netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
                /* This is a hard error, log it */
                netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
-               spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+               spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
                return NETDEV_TX_BUSY;
        }
 
        enic_queue_wq_skb(enic, wq, skb);
 
        if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-               netif_stop_queue(netdev);
+               netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 
-       spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+       spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 
        return NETDEV_TX_OK;
 }
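Taken together, the xmit path now trusts the queue mapping the stack recorded in the skb (clamped by enic->wq_count) and stops only the subqueue whose ring filled up, instead of freezing all TX. A minimal sketch of the same ndo_start_xmit shape; ex_ring_space() and ex_post_skb() are hypothetical ring helpers, not enic functions:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* hypothetical ring helpers, declared only for illustration */
static unsigned int ex_ring_space(struct net_device *netdev, unsigned int q);
static void ex_post_skb(struct net_device *netdev, unsigned int q,
                        struct sk_buff *skb);

/* Pick the TX ring from the skb's queue mapping and apply per-queue
 * flow control, mirroring the pattern in the hunk above.
 */
static netdev_tx_t ex_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        unsigned int q = skb_get_queue_mapping(skb);
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, q);

        if (ex_ring_space(netdev, q) < MAX_SKB_FRAGS + 1) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY; /* ring full: stack will requeue */
        }

        ex_post_skb(netdev, q, skb);

        if (ex_ring_space(netdev, q) < MAX_SKB_FRAGS + 1)
                netif_tx_stop_queue(txq); /* stop early, wake on completion */

        return NETDEV_TX_OK;
}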
@@ -1025,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                skb_put(skb, bytes_written);
                skb->protocol = eth_type_trans(skb, netdev);
+               skb_record_rx_queue(skb, q_number);
+               if (netdev->features & NETIF_F_RXHASH) {
+                       skb->rxhash = rss_hash;
+                       if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+                                       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+                                       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
+                               skb->l4_rxhash = true;
+               }
 
                if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
                        skb->csum = htons(checksum);
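On receive, the driver now records which RQ a packet arrived on and hands the hardware RSS hash to the stack, flagging it as an L4 hash when it came from one of the TCP hash types so flow steering can use it. skb->rxhash and skb->l4_rxhash are this kernel generation's fields; later kernels fold them into skb_set_hash(). A sketch of the equivalent call on those newer kernels:

#include <linux/skbuff.h>

/* is_l4 corresponds to the TCP hash-type test in the hunk above. */
static void ex_record_rx_hash(struct sk_buff *skb, u32 rss_hash, bool is_l4)
{
        skb_set_hash(skb, rss_hash,
                     is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}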
@@ -1369,7 +1386,7 @@ static int enic_open(struct net_device *netdev)
 
        enic_set_rx_mode(netdev);
 
-       netif_wake_queue(netdev);
+       netif_tx_wake_all_queues(netdev);
 
        for (i = 0; i < enic->rq_count; i++)
                napi_enable(&enic->napi[i]);
@@ -2032,7 +2049,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * instance data is initialized to zero.
         */
 
-       netdev = alloc_etherdev(sizeof(struct enic));
+       netdev = alloc_etherdev_mqs(sizeof(struct enic),
+                                   ENIC_RQ_MAX, ENIC_WQ_MAX);
        if (!netdev)
                return -ENOMEM;
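alloc_etherdev_mqs() sizes the netdev for a maximum number of TX and RX queues up front (its signature is alloc_etherdev_mqs(sizeof_priv, txqs, rxqs); with both enic maxima at 8, the argument order is moot here). The number of queues actually usable is reported later with netif_set_real_num_*_queues(), as a hunk further down does. A sketch of that allocate-then-trim pattern, with illustrative constants:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define EX_WQ_MAX 8 /* hardware maxima, illustrative */
#define EX_RQ_MAX 8

static struct net_device *ex_alloc_netdev(size_t priv_size,
                                          unsigned int wq_count,
                                          unsigned int rq_count)
{
        struct net_device *netdev;

        /* allocate for the hardware maximum ... */
        netdev = alloc_etherdev_mqs(priv_size, EX_WQ_MAX, EX_RQ_MAX);
        if (!netdev)
                return NULL;

        /* ... then report how many queues are actually configured */
        netif_set_real_num_tx_queues(netdev, wq_count);
        netif_set_real_num_rx_queues(netdev, rq_count);

        return netdev;
}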
@@ -2062,11 +2080,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_master(pdev);
 
        /* Query PCI controller on system for DMA addressing
-        * limitation for the device.  Try 40-bit first, and
+        * limitation for the device.  Try 64-bit first, and
         * fail to 32-bit.
         */
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
@@ -2080,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        goto err_out_release_regions;
                }
        } else {
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(dev, "Unable to obtain %u-bit DMA "
-                               "for consistent allocations, aborting\n", 40);
+                               "for consistent allocations, aborting\n", 64);
                        goto err_out_release_regions;
                }
                using_dac = 1;
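The DMA mask is widened from 40 to 64 bits, still falling back to 32-bit addressing when the platform can't do better. On kernels that have it (v3.13+), the paired pci_set_dma_mask()/pci_set_consistent_dma_mask() calls collapse into one helper; a sketch of the same policy in that form:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Try 64-bit DMA first and fall back to 32-bit, as above. */
static int ex_set_dma_masks(struct pci_dev *pdev)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err)
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

        return err;
}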
@@ -2198,6 +2216,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_dev_close;
        }
 
+       netif_set_real_num_tx_queues(netdev, enic->wq_count);
+       netif_set_real_num_rx_queues(netdev, enic->rq_count);
+
        /* Setup notification timer, HW reset task, and wq locks
         */
@@ -2246,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ENIC_SETTING(enic, TSO))
                netdev->hw_features |= NETIF_F_TSO |
                        NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+       if (ENIC_SETTING(enic, RSS))
+               netdev->hw_features |= NETIF_F_RXHASH;
        if (ENIC_SETTING(enic, RXCSUM))
                netdev->hw_features |= NETIF_F_RXCSUM;
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -175,6 +175,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
 {
        return vdev->res[type].count;
 }
+EXPORT_SYMBOL(vnic_dev_get_res_count);
 
 void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
        unsigned int index)
@@ -193,6 +194,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
                return (char __iomem *)vdev->res[type].vaddr;
        }
 }
+EXPORT_SYMBOL(vnic_dev_get_res);
 
 static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
        unsigned int desc_count, unsigned int desc_size)
@@ -942,6 +944,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
                kfree(vdev);
        }
 }
+EXPORT_SYMBOL(vnic_dev_unregister);
 
 struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
        void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
@@ -969,6 +972,13 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
        vnic_dev_unregister(vdev);
        return NULL;
 }
+EXPORT_SYMBOL(vnic_dev_register);
+
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
+{
+       return vdev->pdev;
+}
+EXPORT_SYMBOL(vnic_dev_get_pdev);
 
 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
 {
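The new EXPORT_SYMBOL() lines, plus the small vnic_dev_get_pdev() accessor, make the vnic_dev resource helpers callable from outside the object file that defines them, i.e. from other kernel modules built against this driver. A hypothetical consumer, purely to illustrate why the exports are needed (not an in-tree user):

#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_resource.h"

/* Hypothetical external module querying a vNIC's resources through
 * the newly exported helpers.
 */
static void ex_report_wq_count(struct vnic_dev *vdev)
{
        struct pci_dev *pdev = vnic_dev_get_pdev(vdev);

        dev_info(&pdev->dev, "vNIC has %u work queues\n",
                 vnic_dev_get_res_count(vdev, RES_TYPE_WQ));
}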
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -127,6 +127,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
 struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
        void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
        unsigned int num_bars);
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev);
 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
 int vnic_dev_enable2(struct vnic_dev *vdev, int active);
 int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);