Commit 58308451 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to i40e only.

Alex provides the majority of the patches against i40e, where he cleans up
the Tx and Rx queues and aligns the code with the known good Tx/Rx queue
code in the ixgbe driver.

Anjali provides an i40e patch to update link events to not print to
the log until the device is administratively up.

Catherine provides a patch to update the driver version.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b44084c2 d04795d6
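
The common thread in Alex's cleanup below is converting the VSI's Tx/Rx ring
members from embedded arrays into arrays of ring pointers, with each queue
pair's Tx and Rx rings carved out of a single per-pair allocation (the Rx ring
sits immediately after the Tx ring), and reading the per-ring packet/byte
counters under rcu_read_lock() with u64_stats syncp retries. What follows is a
minimal userspace sketch of that ring layout only, not the driver code itself;
the struct and function names are simplified stand-ins for the i40e structures
touched in the diff.

    #include <stdlib.h>

    struct ring {
            int queue_index;
            unsigned long long packets;     /* stands in for the per-ring stats */
            unsigned long long bytes;
    };

    struct vsi {
            int alloc_queue_pairs;
            struct ring **tx_rings;         /* arrays of ring pointers, not of rings */
            struct ring **rx_rings;
    };

    /* Mirrors the i40e_vsi_mem_alloc()/i40e_alloc_rings() changes below: one
     * container holds both pointer arrays, then each queue pair gets a single
     * allocation holding its Tx ring followed by its Rx ring.
     */
    static int alloc_rings(struct vsi *vsi)
    {
            int i;

            vsi->tx_rings = calloc(2 * vsi->alloc_queue_pairs,
                                   sizeof(*vsi->tx_rings));
            if (!vsi->tx_rings)
                    return -1;
            vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

            for (i = 0; i < vsi->alloc_queue_pairs; i++) {
                    struct ring *tx_ring = calloc(2, sizeof(*tx_ring));

                    if (!tx_ring)
                            return -1;      /* caller unwinds, as i40e_vsi_clear_rings() does */
                    tx_ring[0].queue_index = i;
                    tx_ring[1].queue_index = i;     /* Rx ring is &tx_ring[1] */
                    vsi->tx_rings[i] = tx_ring;
                    vsi->rx_rings[i] = &tx_ring[1];
            }
            return 0;
    }

This layout is why the stats readers in the diff can reach a pair's Rx ring as
&tx_ring[1], and why the ring pointer arrays are dereferenced under
rcu_read_lock() while the rings themselves are freed with kfree_rcu().
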
@@ -347,9 +347,9 @@ struct i40e_vsi {
u32 rx_buf_failed; u32 rx_buf_failed;
u32 rx_page_failed; u32 rx_page_failed;
/* These are arrays of rings, allocated at run-time */ /* These are containers of ring pointers, allocated at run-time */
struct i40e_ring *rx_rings; struct i40e_ring **rx_rings;
struct i40e_ring *tx_rings; struct i40e_ring **tx_rings;
u16 work_limit; u16 work_limit;
/* high bit set means dynamic, use accessor routines to read/write. /* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +366,7 @@ struct i40e_vsi {
u8 dtype; u8 dtype;
/* List of q_vectors allocated to this VSI */ /* List of q_vectors allocated to this VSI */
struct i40e_q_vector *q_vectors; struct i40e_q_vector **q_vectors;
int num_q_vectors; int num_q_vectors;
int base_vector; int base_vector;
@@ -422,8 +422,9 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */ u8 num_ringpairs; /* total number of ring pairs in vector */
char name[IFNAMSIZ + 9];
cpumask_t affinity_mask; cpumask_t affinity_mask;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
/* lan device */ /* lan device */
...
@@ -258,12 +258,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
len = sizeof(struct i40e_tx_buffer); len = sizeof(struct i40e_tx_buffer);
memcpy(p, vsi->tx_rings[i].tx_bi, len); memcpy(p, vsi->tx_rings[i]->tx_bi, len);
p += len; p += len;
} }
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
len = sizeof(struct i40e_rx_buffer); len = sizeof(struct i40e_rx_buffer);
memcpy(p, vsi->rx_rings[i].rx_bi, len); memcpy(p, vsi->rx_rings[i]->rx_bi, len);
p += len; p += len;
} }
@@ -484,100 +484,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy, vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed); vsi->rx_buf_failed, vsi->rx_page_failed);
if (vsi->rx_rings) { rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
dev_info(&pf->pdev->dev, struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
" rx_rings[%i]: desc = %p\n", if (!rx_ring)
i, vsi->rx_rings[i].desc); continue;
dev_info(&pf->pdev->dev,
" rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", dev_info(&pf->pdev->dev,
i, vsi->rx_rings[i].dev, " rx_rings[%i]: desc = %p\n",
vsi->rx_rings[i].netdev, i, rx_ring->desc);
vsi->rx_rings[i].rx_bi); dev_info(&pf->pdev->dev,
dev_info(&pf->pdev->dev, " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
" rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", i, rx_ring->dev,
i, vsi->rx_rings[i].state, rx_ring->netdev,
vsi->rx_rings[i].queue_index, rx_ring->rx_bi);
vsi->rx_rings[i].reg_idx); dev_info(&pf->pdev->dev,
dev_info(&pf->pdev->dev, " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
" rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", i, rx_ring->state,
i, vsi->rx_rings[i].rx_hdr_len, rx_ring->queue_index,
vsi->rx_rings[i].rx_buf_len, rx_ring->reg_idx);
vsi->rx_rings[i].dtype); dev_info(&pf->pdev->dev,
dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
" rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, rx_ring->rx_hdr_len,
i, vsi->rx_rings[i].hsplit, rx_ring->rx_buf_len,
vsi->rx_rings[i].next_to_use, rx_ring->dtype);
vsi->rx_rings[i].next_to_clean, dev_info(&pf->pdev->dev,
vsi->rx_rings[i].ring_active); " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
dev_info(&pf->pdev->dev, i, rx_ring->hsplit,
" rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", rx_ring->next_to_use,
i, vsi->rx_rings[i].rx_stats.packets, rx_ring->next_to_clean,
vsi->rx_rings[i].rx_stats.bytes, rx_ring->ring_active);
vsi->rx_rings[i].rx_stats.non_eop_descs); dev_info(&pf->pdev->dev,
dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
" rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n", i, rx_ring->stats.packets,
i, rx_ring->stats.bytes,
vsi->rx_rings[i].rx_stats.alloc_rx_page_failed, rx_ring->rx_stats.non_eop_descs);
vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed); dev_info(&pf->pdev->dev,
dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
" rx_rings[%i]: size = %i, dma = 0x%08lx\n", i,
i, vsi->rx_rings[i].size, rx_ring->rx_stats.alloc_rx_page_failed,
(long unsigned int)vsi->rx_rings[i].dma); rx_ring->rx_stats.alloc_rx_buff_failed);
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" rx_rings[%i]: vsi = %p, q_vector = %p\n", " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, vsi->rx_rings[i].vsi, i, rx_ring->size,
vsi->rx_rings[i].q_vector); (long unsigned int)rx_ring->dma);
} dev_info(&pf->pdev->dev,
" rx_rings[%i]: vsi = %p, q_vector = %p\n",
i, rx_ring->vsi,
rx_ring->q_vector);
} }
if (vsi->tx_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) {
for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
dev_info(&pf->pdev->dev, if (!tx_ring)
" tx_rings[%i]: desc = %p\n", continue;
i, vsi->tx_rings[i].desc); dev_info(&pf->pdev->dev,
dev_info(&pf->pdev->dev, " tx_rings[%i]: desc = %p\n",
" tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", i, tx_ring->desc);
i, vsi->tx_rings[i].dev, dev_info(&pf->pdev->dev,
vsi->tx_rings[i].netdev, " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
vsi->tx_rings[i].tx_bi); i, tx_ring->dev,
dev_info(&pf->pdev->dev, tx_ring->netdev,
" tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", tx_ring->tx_bi);
i, vsi->tx_rings[i].state, dev_info(&pf->pdev->dev,
vsi->tx_rings[i].queue_index, " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
vsi->tx_rings[i].reg_idx); i, tx_ring->state,
dev_info(&pf->pdev->dev, tx_ring->queue_index,
" tx_rings[%i]: dtype = %d\n", tx_ring->reg_idx);
i, vsi->tx_rings[i].dtype); dev_info(&pf->pdev->dev,
dev_info(&pf->pdev->dev, " tx_rings[%i]: dtype = %d\n",
" tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, tx_ring->dtype);
i, vsi->tx_rings[i].hsplit, dev_info(&pf->pdev->dev,
vsi->tx_rings[i].next_to_use, " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
vsi->tx_rings[i].next_to_clean, i, tx_ring->hsplit,
vsi->tx_rings[i].ring_active); tx_ring->next_to_use,
dev_info(&pf->pdev->dev, tx_ring->next_to_clean,
" tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", tx_ring->ring_active);
i, vsi->tx_rings[i].tx_stats.packets, dev_info(&pf->pdev->dev,
vsi->tx_rings[i].tx_stats.bytes, " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
vsi->tx_rings[i].tx_stats.restart_queue); i, tx_ring->stats.packets,
dev_info(&pf->pdev->dev, tx_ring->stats.bytes,
" tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n", tx_ring->tx_stats.restart_queue);
i, dev_info(&pf->pdev->dev,
vsi->tx_rings[i].tx_stats.tx_busy, " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
vsi->tx_rings[i].tx_stats.completed, i,
vsi->tx_rings[i].tx_stats.tx_done_old); tx_ring->tx_stats.tx_busy,
dev_info(&pf->pdev->dev, tx_ring->tx_stats.tx_done_old);
" tx_rings[%i]: size = %i, dma = 0x%08lx\n", dev_info(&pf->pdev->dev,
i, vsi->tx_rings[i].size, " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
(long unsigned int)vsi->tx_rings[i].dma); i, tx_ring->size,
dev_info(&pf->pdev->dev, (long unsigned int)tx_ring->dma);
" tx_rings[%i]: vsi = %p, q_vector = %p\n", dev_info(&pf->pdev->dev,
i, vsi->tx_rings[i].vsi, " tx_rings[%i]: vsi = %p, q_vector = %p\n",
vsi->tx_rings[i].q_vector); i, tx_ring->vsi,
dev_info(&pf->pdev->dev, tx_ring->q_vector);
" tx_rings[%i]: DCB tc = %d\n", dev_info(&pf->pdev->dev,
i, vsi->tx_rings[i].dcb_tc); " tx_rings[%i]: DCB tc = %d\n",
} i, tx_ring->dcb_tc);
} }
rcu_read_unlock();
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
vsi->work_limit, vsi->rx_itr_setting, vsi->work_limit, vsi->rx_itr_setting,
@@ -587,15 +591,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
if (vsi->q_vectors) {
for (i = 0; i < vsi->num_q_vectors; i++) {
dev_info(&pf->pdev->dev,
" q_vectors[%i]: base index = %ld\n",
i, ((long int)*vsi->q_vectors[i].rx.ring-
(long int)*vsi->q_vectors[0].rx.ring)/
sizeof(struct i40e_ring));
}
}
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n", " num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector); vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +787,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
return; return;
} }
if (is_rx_ring) if (is_rx_ring)
ring = vsi->rx_rings[ring_id]; ring = *vsi->rx_rings[ring_id];
else else
ring = vsi->tx_rings[ring_id]; ring = *vsi->tx_rings[ring_id];
if (cnt == 2) { if (cnt == 2) {
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1996,7 +1991,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
goto netdev_ops_write_done; goto netdev_ops_write_done;
} }
for (i = 0; i < vsi->num_q_vectors; i++) for (i = 0; i < vsi->num_q_vectors; i++)
napi_schedule(&vsi->q_vectors[i].napi); napi_schedule(&vsi->q_vectors[i]->napi);
dev_info(&pf->pdev->dev, "napi called\n"); dev_info(&pf->pdev->dev, "napi called\n");
} else { } else {
dev_info(&pf->pdev->dev, "unknown command '%s'\n", dev_info(&pf->pdev->dev, "unknown command '%s'\n",
...
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring->rx_mini_max_pending = 0; ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0; ring->rx_jumbo_max_pending = 0;
ring->rx_pending = vsi->rx_rings[0].count; ring->rx_pending = vsi->rx_rings[0]->count;
ring->tx_pending = vsi->tx_rings[0].count; ring->tx_pending = vsi->tx_rings[0]->count;
ring->rx_mini_pending = 0; ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0; ring->rx_jumbo_pending = 0;
} }
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE); new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */ /* if nothing to do return success */
if ((new_tx_count == vsi->tx_rings[0].count) && if ((new_tx_count == vsi->tx_rings[0]->count) &&
(new_rx_count == vsi->rx_rings[0].count)) (new_rx_count == vsi->rx_rings[0]->count))
return 0; return 0;
while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (!netif_running(vsi->netdev)) { if (!netif_running(vsi->netdev)) {
/* simple case - set for the next time the netdev is started */ /* simple case - set for the next time the netdev is started */
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
vsi->tx_rings[i].count = new_tx_count; vsi->tx_rings[i]->count = new_tx_count;
vsi->rx_rings[i].count = new_rx_count; vsi->rx_rings[i]->count = new_rx_count;
} }
goto done; goto done;
} }
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/ */
/* alloc updated Tx resources */ /* alloc updated Tx resources */
if (new_tx_count != vsi->tx_rings[0].count) { if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev, netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n", "Changing Tx descriptor count from %d to %d.\n",
vsi->tx_rings[0].count, new_tx_count); vsi->tx_rings[0]->count, new_tx_count);
tx_rings = kcalloc(vsi->alloc_queue_pairs, tx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL); sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) { if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */ /* clone ring and setup updated count */
tx_rings[i] = vsi->tx_rings[i]; tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count; tx_rings[i].count = new_tx_count;
err = i40e_setup_tx_descriptors(&tx_rings[i]); err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) { if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
} }
/* alloc updated Rx resources */ /* alloc updated Rx resources */
if (new_rx_count != vsi->rx_rings[0].count) { if (new_rx_count != vsi->rx_rings[0]->count) {
netdev_info(netdev, netdev_info(netdev,
"Changing Rx descriptor count from %d to %d\n", "Changing Rx descriptor count from %d to %d\n",
vsi->rx_rings[0].count, new_rx_count); vsi->rx_rings[0]->count, new_rx_count);
rx_rings = kcalloc(vsi->alloc_queue_pairs, rx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL); sizeof(struct i40e_ring), GFP_KERNEL);
if (!rx_rings) { if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */ /* clone ring and setup updated count */
rx_rings[i] = vsi->rx_rings[i]; rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count; rx_rings[i].count = new_rx_count;
err = i40e_setup_rx_descriptors(&rx_rings[i]); err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) { if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (tx_rings) { if (tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_free_tx_resources(&vsi->tx_rings[i]); i40e_free_tx_resources(vsi->tx_rings[i]);
vsi->tx_rings[i] = tx_rings[i]; *vsi->tx_rings[i] = tx_rings[i];
} }
kfree(tx_rings); kfree(tx_rings);
tx_rings = NULL; tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) { if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_free_rx_resources(&vsi->rx_rings[i]); i40e_free_rx_resources(vsi->rx_rings[i]);
vsi->rx_rings[i] = rx_rings[i]; *vsi->rx_rings[i] = rx_rings[i];
} }
kfree(rx_rings); kfree(rx_rings);
rx_rings = NULL; rx_rings = NULL;
@@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
char *p; char *p;
int j; int j;
struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
unsigned int start;
i40e_update_stats(vsi); i40e_update_stats(vsi);
@@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p; sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
} }
for (j = 0; j < vsi->num_queue_pairs; j++) { rcu_read_lock();
data[i++] = vsi->tx_rings[j].tx_stats.packets; for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
data[i++] = vsi->tx_rings[j].tx_stats.bytes; struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
} struct i40e_ring *rx_ring;
for (j = 0; j < vsi->num_queue_pairs; j++) {
data[i++] = vsi->rx_rings[j].rx_stats.packets; if (!tx_ring)
data[i++] = vsi->rx_rings[j].rx_stats.bytes; continue;
/* process Tx ring statistics */
do {
start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
data[i] = tx_ring->stats.packets;
data[i + 1] = tx_ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
data[i + 2] = rx_ring->stats.packets;
data[i + 3] = rx_ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
} }
rcu_read_unlock();
if (vsi == pf->vsi[pf->lan_vsi]) { if (vsi == pf->vsi[pf->lan_vsi]) {
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
p = (char *)pf + i40e_gstrings_stats[j].stat_offset; p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
} }
vector = vsi->base_vector; vector = vsi->base_vector;
q_vector = vsi->q_vectors; for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) { q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
...
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3 #define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 9 #define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -347,14 +347,53 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
**/ **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev, struct net_device *netdev,
struct rtnl_link_stats64 *storage) struct rtnl_link_stats64 *stats)
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
u64 bytes, packets;
unsigned int start;
tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
*storage = *i40e_get_vsi_stats_struct(vsi); do {
start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
packets = tx_ring->stats.packets;
bytes = tx_ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
rx_ring = &tx_ring[1];
do {
start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
packets = rx_ring->stats.packets;
bytes = rx_ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
return storage; stats->rx_packets += packets;
stats->rx_bytes += bytes;
}
rcu_read_unlock();
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = vsi_stats->multicast;
stats->tx_errors = vsi_stats->tx_errors;
stats->tx_dropped = vsi_stats->tx_dropped;
stats->rx_errors = vsi_stats->rx_errors;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
return stats;
} }
/** /**
@@ -376,10 +415,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
if (vsi->rx_rings) if (vsi->rx_rings)
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
memset(&vsi->rx_rings[i].rx_stats, 0 , memset(&vsi->rx_rings[i]->stats, 0 ,
sizeof(vsi->rx_rings[i].rx_stats)); sizeof(vsi->rx_rings[i]->stats));
memset(&vsi->tx_rings[i].tx_stats, 0, memset(&vsi->rx_rings[i]->rx_stats, 0 ,
sizeof(vsi->tx_rings[i].tx_stats)); sizeof(vsi->rx_rings[i]->rx_stats));
memset(&vsi->tx_rings[i]->stats, 0 ,
sizeof(vsi->tx_rings[i]->stats));
memset(&vsi->tx_rings[i]->tx_stats, 0,
sizeof(vsi->tx_rings[i]->tx_stats));
} }
vsi->stat_offsets_loaded = false; vsi->stat_offsets_loaded = false;
} }
@@ -598,7 +641,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
continue; continue;
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *ring = &vsi->tx_rings[i]; struct i40e_ring *ring = vsi->tx_rings[i];
clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
} }
} }
@@ -652,7 +695,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
continue; continue;
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *ring = &vsi->tx_rings[i]; struct i40e_ring *ring = vsi->tx_rings[i];
tc = ring->dcb_tc; tc = ring->dcb_tc;
if (xoff[tc]) if (xoff[tc])
@@ -704,21 +747,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)
tx_restart = tx_busy = 0; tx_restart = tx_busy = 0;
rx_page = 0; rx_page = 0;
rx_buf = 0; rx_buf = 0;
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) { for (q = 0; q < vsi->num_queue_pairs; q++) {
struct i40e_ring *p; struct i40e_ring *p;
u64 bytes, packets;
unsigned int start;
p = &vsi->rx_rings[q]; /* locate Tx ring */
rx_b += p->rx_stats.bytes; p = ACCESS_ONCE(vsi->tx_rings[q]);
rx_p += p->rx_stats.packets;
rx_buf += p->rx_stats.alloc_rx_buff_failed;
rx_page += p->rx_stats.alloc_rx_page_failed;
p = &vsi->tx_rings[q]; do {
tx_b += p->tx_stats.bytes; start = u64_stats_fetch_begin_bh(&p->syncp);
tx_p += p->tx_stats.packets; packets = p->stats.packets;
bytes = p->stats.bytes;
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue; tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy; tx_busy += p->tx_stats.tx_busy;
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
do {
start = u64_stats_fetch_begin_bh(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
rx_buf += p->rx_stats.alloc_rx_buff_failed;
rx_page += p->rx_stats.alloc_rx_page_failed;
} }
rcu_read_unlock();
vsi->tx_restart = tx_restart; vsi->tx_restart = tx_restart;
vsi->tx_busy = tx_busy; vsi->tx_busy = tx_busy;
vsi->rx_page_failed = rx_page; vsi->rx_page_failed = rx_page;
@@ -1988,7 +2048,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
int i, err = 0; int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++) for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]); err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
return err; return err;
} }
@@ -2004,8 +2064,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
int i; int i;
for (i = 0; i < vsi->num_queue_pairs; i++) for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->tx_rings[i].desc) if (vsi->tx_rings[i]->desc)
i40e_free_tx_resources(&vsi->tx_rings[i]); i40e_free_tx_resources(vsi->tx_rings[i]);
} }
/** /**
@@ -2023,7 +2083,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
int i, err = 0; int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++) for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]); err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
return err; return err;
} }
@@ -2038,8 +2098,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
int i; int i;
for (i = 0; i < vsi->num_queue_pairs; i++) for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->rx_rings[i].desc) if (vsi->rx_rings[i]->desc)
i40e_free_rx_resources(&vsi->rx_rings[i]); i40e_free_rx_resources(vsi->rx_rings[i]);
} }
/** /**
@@ -2223,8 +2283,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
int err = 0; int err = 0;
u16 i; u16 i;
for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++) for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
err = i40e_configure_tx_ring(&vsi->tx_rings[i]); err = i40e_configure_tx_ring(vsi->tx_rings[i]);
return err; return err;
} }
@@ -2274,7 +2334,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
/* set up individual rings */ /* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++) for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_configure_rx_ring(&vsi->rx_rings[i]); err = i40e_configure_rx_ring(vsi->rx_rings[i]);
return err; return err;
} }
@@ -2298,8 +2358,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
qoffset = vsi->tc_config.tc_info[n].qoffset; qoffset = vsi->tc_config.tc_info[n].qoffset;
qcount = vsi->tc_config.tc_info[n].qcount; qcount = vsi->tc_config.tc_info[n].qcount;
for (i = qoffset; i < (qoffset + qcount); i++) { for (i = qoffset; i < (qoffset + qcount); i++) {
struct i40e_ring *rx_ring = &vsi->rx_rings[i]; struct i40e_ring *rx_ring = vsi->rx_rings[i];
struct i40e_ring *tx_ring = &vsi->tx_rings[i]; struct i40e_ring *tx_ring = vsi->tx_rings[i];
rx_ring->dcb_tc = n; rx_ring->dcb_tc = n;
tx_ring->dcb_tc = n; tx_ring->dcb_tc = n;
} }
@@ -2354,8 +2414,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
*/ */
qp = vsi->base_queue; qp = vsi->base_queue;
vector = vsi->base_vector; vector = vsi->base_vector;
q_vector = vsi->q_vectors; for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) { q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY; q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2435,7 +2495,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
**/ **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{ {
struct i40e_q_vector *q_vector = vsi->q_vectors; struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
u32 val; u32 val;
@@ -2512,7 +2572,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{ {
struct i40e_q_vector *q_vector = data; struct i40e_q_vector *q_vector = data;
if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED; return IRQ_HANDLED;
napi_schedule(&q_vector->napi); napi_schedule(&q_vector->napi);
@@ -2529,7 +2589,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
{ {
struct i40e_q_vector *q_vector = data; struct i40e_q_vector *q_vector = data;
if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED; return IRQ_HANDLED;
pr_info("fdir ring cleaning needed\n"); pr_info("fdir ring cleaning needed\n");
@@ -2554,16 +2614,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
int vector, err; int vector, err;
for (vector = 0; vector < q_vectors; vector++) { for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]); struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) { if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1, snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++); "%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++; tx_int_idx++;
} else if (q_vector->rx.ring[0]) { } else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1, snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++); "%s-%s-%d", basename, "rx", rx_int_idx++);
} else if (q_vector->tx.ring[0]) { } else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1, snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++); "%s-%s-%d", basename, "tx", tx_int_idx++);
} else { } else {
@@ -2611,8 +2671,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
int i; int i;
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0); wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0); wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
} }
if (pf->flags & I40E_FLAG_MSIX_ENABLED) { if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2705,7 +2765,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
i40e_flush(hw); i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state)) if (!test_bit(__I40E_DOWN, &pf->state))
napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi); napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
} }
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2774,40 +2834,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
} }
/** /**
* i40e_map_vector_to_rxq - Assigns the Rx queue to the vector * i40e_map_vector_to_qp - Assigns the queue pair to the vector
* @vsi: the VSI being configured * @vsi: the VSI being configured
* @v_idx: vector index * @v_idx: vector index
* @r_idx: rx queue index * @qp_idx: queue pair index
**/ **/
static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx) static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{ {
struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]); struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]); struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
rx_ring->q_vector = q_vector;
q_vector->rx.ring[q_vector->rx.count] = rx_ring;
q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY;
q_vector->vsi = vsi;
}
/**
* i40e_map_vector_to_txq - Assigns the Tx queue to the vector
* @vsi: the VSI being configured
* @v_idx: vector index
* @t_idx: tx queue index
**/
static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
{
struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
tx_ring->q_vector = q_vector; tx_ring->q_vector = q_vector;
q_vector->tx.ring[q_vector->tx.count] = tx_ring; tx_ring->next = q_vector->tx.ring;
q_vector->tx.ring = tx_ring;
q_vector->tx.count++; q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
q_vector->num_ringpairs++; rx_ring->q_vector = q_vector;
q_vector->vsi = vsi; rx_ring->next = q_vector->rx.ring;
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
} }
/** /**
@@ -2823,7 +2869,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{ {
int qp_remaining = vsi->num_queue_pairs; int qp_remaining = vsi->num_queue_pairs;
int q_vectors = vsi->num_q_vectors; int q_vectors = vsi->num_q_vectors;
int qp_per_vector; int num_ringpairs;
int v_start = 0; int v_start = 0;
int qp_idx = 0; int qp_idx = 0;
@@ -2831,11 +2877,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
* group them so there are multiple queues per vector. * group them so there are multiple queues per vector.
*/ */
for (; v_start < q_vectors && qp_remaining; v_start++) { for (; v_start < q_vectors && qp_remaining; v_start++) {
qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
for (; qp_per_vector;
qp_per_vector--, qp_idx++, qp_remaining--) { num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
map_vector_to_rxq(vsi, v_start, qp_idx);
map_vector_to_txq(vsi, v_start, qp_idx); q_vector->num_ringpairs = num_ringpairs;
q_vector->rx.count = 0;
q_vector->tx.count = 0;
q_vector->rx.ring = NULL;
q_vector->tx.ring = NULL;
while (num_ringpairs--) {
map_vector_to_qp(vsi, v_start, qp_idx);
qp_idx++;
qp_remaining--;
} }
} }
} }
@@ -2887,7 +2943,7 @@ static void i40e_netpoll(struct net_device *netdev)
pf->flags |= I40E_FLAG_IN_NETPOLL; pf->flags |= I40E_FLAG_IN_NETPOLL;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) { if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = 0; i < vsi->num_q_vectors; i++) for (i = 0; i < vsi->num_q_vectors; i++)
i40e_msix_clean_rings(0, &vsi->q_vectors[i]); i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else { } else {
i40e_intr(pf->pdev->irq, netdev); i40e_intr(pf->pdev->irq, netdev);
} }
@@ -3073,14 +3129,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u16 vector = i + base; u16 vector = i + base;
/* free only the irqs that were actually requested */ /* free only the irqs that were actually requested */
if (vsi->q_vectors[i].num_ringpairs == 0) if (vsi->q_vectors[i]->num_ringpairs == 0)
continue; continue;
/* clear the affinity_mask in the IRQ descriptor */ /* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(pf->msix_entries[vector].vector, irq_set_affinity_hint(pf->msix_entries[vector].vector,
NULL); NULL);
free_irq(pf->msix_entries[vector].vector, free_irq(pf->msix_entries[vector].vector,
&vsi->q_vectors[i]); vsi->q_vectors[i]);
/* Tear down the interrupt queue link list /* Tear down the interrupt queue link list
* *
@@ -3163,6 +3219,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
} }
} }
/**
* i40e_free_q_vector - Free memory allocated for specific interrupt vector
* @vsi: the VSI being configured
* @v_idx: Index of vector to be freed
*
* This function frees the memory allocated to the q_vector. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
struct i40e_ring *ring;
if (!q_vector)
return;
/* disassociate q_vector from rings */
i40e_for_each_ring(ring, q_vector->tx)
ring->q_vector = NULL;
i40e_for_each_ring(ring, q_vector->rx)
ring->q_vector = NULL;
/* only VSI w/ an associated netdev is set up w/ NAPI */
if (vsi->netdev)
netif_napi_del(&q_vector->napi);
vsi->q_vectors[v_idx] = NULL;
kfree_rcu(q_vector, rcu);
}
/** /**
* i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
* @vsi: the VSI being un-configured * @vsi: the VSI being un-configured
@@ -3174,24 +3263,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{ {
int v_idx; int v_idx;
for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) { for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx]; i40e_free_q_vector(vsi, v_idx);
int r_idx;
if (!q_vector)
continue;
/* disassociate q_vector from rings */
for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
q_vector->tx.ring[r_idx]->q_vector = NULL;
for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
q_vector->rx.ring[r_idx]->q_vector = NULL;
/* only VSI w/ an associated netdev is set up w/ NAPI */
if (vsi->netdev)
netif_napi_del(&q_vector->napi);
}
kfree(vsi->q_vectors);
} }
/** /**
@@ -3241,7 +3314,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
return; return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
napi_enable(&vsi->q_vectors[q_idx].napi); napi_enable(&vsi->q_vectors[q_idx]->napi);
} }
/** /**
@@ -3256,7 +3329,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
return; return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
napi_disable(&vsi->q_vectors[q_idx].napi); napi_disable(&vsi->q_vectors[q_idx]->napi);
} }
/** /**
@@ -3703,8 +3776,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
(vsi->netdev)) { (vsi->netdev)) {
netdev_info(vsi->netdev, "NIC Link is Up\n");
netif_tx_start_all_queues(vsi->netdev); netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev); netif_carrier_on(vsi->netdev);
} else if (vsi->netdev) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
} }
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
@@ -3772,8 +3848,8 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_napi_disable_all(vsi); i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(&vsi->tx_rings[i]); i40e_clean_tx_ring(vsi->tx_rings[i]);
i40e_clean_rx_ring(&vsi->rx_rings[i]); i40e_clean_rx_ring(vsi->rx_rings[i]);
} }
} }
@@ -4153,8 +4229,9 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link) if (new_link == old_link)
return; return;
netdev_info(pf->vsi[pf->lan_vsi]->netdev, if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
"NIC Link is %s\n", (new_link ? "Up" : "Down")); netdev_info(pf->vsi[pf->lan_vsi]->netdev,
"NIC Link is %s\n", (new_link ? "Up" : "Down"));
/* Notify the base of the switch tree connected to /* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified. * the link. Floating VEBs are not notified.
@@ -4199,9 +4276,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
continue; continue;
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
set_check_for_tx_hang(&vsi->tx_rings[i]); set_check_for_tx_hang(vsi->tx_rings[i]);
if (test_bit(__I40E_HANG_CHECK_ARMED, if (test_bit(__I40E_HANG_CHECK_ARMED,
&vsi->tx_rings[i].state)) &vsi->tx_rings[i]->state))
armed++; armed++;
} }
@@ -4937,6 +5014,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{ {
int ret = -ENODEV; int ret = -ENODEV;
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
int sz_vectors;
int sz_rings;
int vsi_idx; int vsi_idx;
int i; int i;
@@ -4962,14 +5041,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi_idx = i; /* Found one! */ vsi_idx = i; /* Found one! */
} else { } else {
ret = -ENODEV; ret = -ENODEV;
goto err_alloc_vsi; /* out of VSI slots! */ goto unlock_pf; /* out of VSI slots! */
} }
pf->next_vsi = ++i; pf->next_vsi = ++i;
vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
if (!vsi) { if (!vsi) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_alloc_vsi; goto unlock_pf;
} }
vsi->type = type; vsi->type = type;
vsi->back = pf; vsi->back = pf;
@@ -4982,14 +5061,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->work_limit = I40E_DEFAULT_IRQ_WORK; vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list); INIT_LIST_HEAD(&vsi->mac_filter_list);
i40e_set_num_rings_in_vsi(vsi); ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
goto err_rings;
/* allocate memory for ring pointers */
sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
if (!vsi->tx_rings) {
ret = -ENOMEM;
goto err_rings;
}
vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
/* allocate memory for q_vector pointers */
sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
if (!vsi->q_vectors) {
ret = -ENOMEM;
goto err_vectors;
}
/* Setup default MSIX irq handler for VSI */ /* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
pf->vsi[vsi_idx] = vsi; pf->vsi[vsi_idx] = vsi;
ret = vsi_idx; ret = vsi_idx;
err_alloc_vsi: goto unlock_pf;
err_vectors:
kfree(vsi->tx_rings);
err_rings:
pf->next_vsi = i - 1;
kfree(vsi);
unlock_pf:
mutex_unlock(&pf->switch_mutex); mutex_unlock(&pf->switch_mutex);
return ret; return ret;
} }
@@ -5030,6 +5135,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
/* free the ring and vector containers */
kfree(vsi->q_vectors);
kfree(vsi->tx_rings);
pf->vsi[vsi->idx] = NULL; pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi) if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx; pf->next_vsi = vsi->idx;
@@ -5042,6 +5151,23 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
return 0; return 0;
} }
/**
* i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being cleaned
**/
static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
int i;
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
}
return 0;
}
/** /**
* i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being configured * @vsi: the VSI being configured
@@ -5049,28 +5175,16 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
static int i40e_alloc_rings(struct i40e_vsi *vsi) static int i40e_alloc_rings(struct i40e_vsi *vsi)
{ {
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
int ret = 0;
int i; int i;
vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!vsi->rx_rings) {
ret = -ENOMEM;
goto err_alloc_rings;
}
vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!vsi->tx_rings) {
ret = -ENOMEM;
kfree(vsi->rx_rings);
goto err_alloc_rings;
}
/* Set basic values in the rings to be used later during open() */ /* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) { for (i = 0; i < vsi->alloc_queue_pairs; i++) {
struct i40e_ring *rx_ring = &vsi->rx_rings[i]; struct i40e_ring *tx_ring;
struct i40e_ring *tx_ring = &vsi->tx_rings[i]; struct i40e_ring *rx_ring;
tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
if (!tx_ring)
goto err_out;
tx_ring->queue_index = i; tx_ring->queue_index = i;
tx_ring->reg_idx = vsi->base_queue + i; tx_ring->reg_idx = vsi->base_queue + i;
@@ -5081,7 +5195,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
tx_ring->count = vsi->num_desc; tx_ring->count = vsi->num_desc;
tx_ring->size = 0; tx_ring->size = 0;
tx_ring->dcb_tc = 0; tx_ring->dcb_tc = 0;
vsi->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
rx_ring->queue_index = i; rx_ring->queue_index = i;
rx_ring->reg_idx = vsi->base_queue + i; rx_ring->reg_idx = vsi->base_queue + i;
rx_ring->ring_active = false; rx_ring->ring_active = false;
@@ -5095,24 +5211,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
set_ring_16byte_desc_enabled(rx_ring); set_ring_16byte_desc_enabled(rx_ring);
else else
clear_ring_16byte_desc_enabled(rx_ring); clear_ring_16byte_desc_enabled(rx_ring);
} vsi->rx_rings[i] = rx_ring;
err_alloc_rings:
return ret;
}
/**
* i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being cleaned
**/
static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
if (vsi) {
kfree(vsi->rx_rings);
kfree(vsi->tx_rings);
} }
return 0; return 0;
err_out:
i40e_vsi_clear_rings(vsi);
return -ENOMEM;
} }
/** /**
@@ -5248,6 +5354,38 @@ static int i40e_init_msix(struct i40e_pf *pf)
return err; return err;
} }
/**
* i40e_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
struct i40e_q_vector *q_vector;
/* allocate q_vector */
q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, vsi->work_limit);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
/* tie q_vector and vsi together */
vsi->q_vectors[v_idx] = q_vector;
return 0;
}
/** /**
* i40e_alloc_q_vectors - Allocate memory for interrupt vectors * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured * @vsi: the VSI being configured
@@ -5259,6 +5397,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
{ {
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
int v_idx, num_q_vectors; int v_idx, num_q_vectors;
int err;
/* if not MSIX, give the one vector only to the LAN VSI */ /* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED) if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5268,22 +5407,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
else else
return -EINVAL; return -EINVAL;
vsi->q_vectors = kcalloc(num_q_vectors,
sizeof(struct i40e_q_vector),
GFP_KERNEL);
if (!vsi->q_vectors)
return -ENOMEM;
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
vsi->q_vectors[v_idx].vsi = vsi; err = i40e_alloc_q_vector(vsi, v_idx);
vsi->q_vectors[v_idx].v_idx = v_idx; if (err)
cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask); goto err_out;
if (vsi->netdev)
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
i40e_napi_poll, vsi->work_limit);
} }
return 0; return 0;
err_out:
while (v_idx--)
i40e_free_q_vector(vsi, v_idx);
return err;
} }
/** /**
@@ -5950,7 +6086,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
int ret = -ENOENT; int ret = -ENOENT;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
if (vsi->q_vectors) { if (vsi->q_vectors[0]) {
dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
vsi->seid); vsi->seid);
return -EEXIST; return -EEXIST;
...
@@ -64,7 +64,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
if (!vsi) if (!vsi)
return -ENOENT; return -ENOENT;
tx_ring = &vsi->tx_rings[0]; tx_ring = vsi->tx_rings[0];
dev = tx_ring->dev; dev = tx_ring->dev;
dma = dma_map_single(dev, fdir_data->raw_packet, dma = dma_map_single(dev, fdir_data->raw_packet,
@@ -73,11 +73,12 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
goto dma_fail; goto dma_fail;
/* grab the next descriptor */ /* grab the next descriptor */
fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); i = tx_ring->next_to_use;
tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
tx_ring->next_to_use++; tx_buf = &tx_ring->tx_bi[i];
if (tx_ring->next_to_use == tx_ring->count)
tx_ring->next_to_use = 0; i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
<< I40E_TXD_FLTR_QW0_QINDEX_SHIFT) << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
@@ -134,11 +135,11 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id); fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
/* Now program a dummy descriptor */ /* Now program a dummy descriptor */
tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use); i = tx_ring->next_to_use;
tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; tx_desc = I40E_TX_DESC(tx_ring, i);
tx_ring->next_to_use++;
if (tx_ring->next_to_use == tx_ring->count) i++;
tx_ring->next_to_use = 0; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_desc->buffer_addr = cpu_to_le64(dma); tx_desc->buffer_addr = cpu_to_le64(dma);
td_cmd = I40E_TX_DESC_CMD_EOP | td_cmd = I40E_TX_DESC_CMD_EOP |
@@ -148,9 +149,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
tx_desc->cmd_type_offset_bsz = tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
/* Mark the data descriptor to be watched */
tx_buf->next_to_watch = tx_desc;
/* Force memory writes to complete before letting h/w /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs, * applicable for weak-ordered memory model archs,
@@ -158,6 +156,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
*/ */
wmb(); wmb();
/* Mark the data descriptor to be watched */
tx_buf->next_to_watch = tx_desc;
writel(tx_ring->next_to_use, tx_ring->tail); writel(tx_ring->next_to_use, tx_ring->tail);
return 0; return 0;
@@ -188,27 +189,30 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
} }
/** /**
* i40e_unmap_tx_resource - Release a Tx buffer * i40e_unmap_and_free_tx_resource - Release a Tx buffer
* @ring: the ring that owns the buffer * @ring: the ring that owns the buffer
* @tx_buffer: the buffer to free * @tx_buffer: the buffer to free
**/ **/
static inline void i40e_unmap_tx_resource(struct i40e_ring *ring, static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer) struct i40e_tx_buffer *tx_buffer)
{ {
if (tx_buffer->dma) { if (tx_buffer->skb) {
if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE) dev_kfree_skb_any(tx_buffer->skb);
dma_unmap_page(ring->dev, if (dma_unmap_len(tx_buffer, len))
tx_buffer->dma,
tx_buffer->length,
DMA_TO_DEVICE);
else
dma_unmap_single(ring->dev, dma_unmap_single(ring->dev,
tx_buffer->dma, dma_unmap_addr(tx_buffer, dma),
tx_buffer->length, dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); DMA_TO_DEVICE);
} else if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
} }
tx_buffer->dma = 0; tx_buffer->next_to_watch = NULL;
tx_buffer->time_stamp = 0; tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* tx_buffer must be completely set up in the transmit path */
} }
/** /**
...@@ -217,7 +221,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring, ...@@ -217,7 +221,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
**/ **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring) void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{ {
struct i40e_tx_buffer *tx_buffer;
unsigned long bi_size; unsigned long bi_size;
u16 i; u16 i;
...@@ -226,13 +229,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) ...@@ -226,13 +229,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return; return;
/* Free all the Tx ring sk_buffs */ /* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) { for (i = 0; i < tx_ring->count; i++)
tx_buffer = &tx_ring->tx_bi[i]; i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
i40e_unmap_tx_resource(tx_ring, tx_buffer);
if (tx_buffer->skb)
dev_kfree_skb_any(tx_buffer->skb);
tx_buffer->skb = NULL;
}
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_bi, 0, bi_size); memset(tx_ring->tx_bi, 0, bi_size);
...@@ -242,6 +240,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) ...@@ -242,6 +240,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0; tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0; tx_ring->next_to_clean = 0;
if (!tx_ring->netdev)
return;
/* cleanup Tx queue statistics */
netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index));
} }
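The netdev_tx_reset_queue() added here completes the byte queue limit (BQL) accounting this patch introduces: the transmit path reports bytes handed to the hardware, the cleanup path reports what completed, and a ring clean drops whatever is still in flight. A short sketch of how the three hooks pair up (txq stands for the queue returned by netdev_get_tx_queue()):

	/* transmit path: bytes queued to the hardware ring */
	netdev_tx_sent_queue(txq, first->bytecount);

	/* interrupt/NAPI cleanup path: what the hardware finished */
	netdev_tx_completed_queue(txq, total_packets, total_bytes);

	/* ring teardown or reset: forget everything still outstanding */
	netdev_tx_reset_queue(txq);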
/** /**
...@@ -300,14 +305,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) ...@@ -300,14 +305,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion * run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet. * pending but without time to complete it yet.
*/ */
if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) && if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
tx_pending) { tx_pending) {
/* make sure it is true for two checks in a row */ /* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state); &tx_ring->state);
} else { } else {
/* update completed stats and disarm the hang check */ /* update completed stats and disarm the hang check */
tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets; tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
} }
...@@ -331,62 +336,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) ...@@ -331,62 +336,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_buf = &tx_ring->tx_bi[i]; tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i); tx_desc = I40E_TX_DESC(tx_ring, i);
i -= tx_ring->count;
for (; budget; budget--) { do {
struct i40e_tx_desc *eop_desc; struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
eop_desc = tx_buf->next_to_watch;
/* if next_to_watch is not set then there is no work pending */ /* if next_to_watch is not set then there is no work pending */
if (!eop_desc) if (!eop_desc)
break; break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
/* if the descriptor isn't done, no work yet to do */ /* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz & if (!(eop_desc->cmd_type_offset_bsz &
cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
break; break;
/* count the packet as being completed */ /* clear next_to_watch to prevent false hangs */
tx_ring->tx_stats.completed++;
tx_buf->next_to_watch = NULL; tx_buf->next_to_watch = NULL;
tx_buf->time_stamp = 0;
/* set memory barrier before eop_desc is verified */
rmb();
do { /* update the statistics for this packet */
i40e_unmap_tx_resource(tx_ring, tx_buf); total_bytes += tx_buf->bytecount;
total_packets += tx_buf->gso_segs;
/* clear dtype status */ /* free the skb */
tx_desc->cmd_type_offset_bsz &= dev_kfree_skb_any(tx_buf->skb);
~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
if (likely(tx_desc == eop_desc)) { /* unmap skb header data */
eop_desc = NULL; dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
dev_kfree_skb_any(tx_buf->skb); /* clear tx_buffer data */
tx_buf->skb = NULL; tx_buf->skb = NULL;
dma_unmap_len_set(tx_buf, len, 0);
total_bytes += tx_buf->bytecount; /* unmap remaining buffers */
total_packets += tx_buf->gso_segs; while (tx_desc != eop_desc) {
}
tx_buf++; tx_buf++;
tx_desc++; tx_desc++;
i++; i++;
if (unlikely(i == tx_ring->count)) { if (unlikely(!i)) {
i = 0; i -= tx_ring->count;
tx_buf = tx_ring->tx_bi; tx_buf = tx_ring->tx_bi;
tx_desc = I40E_TX_DESC(tx_ring, 0); tx_desc = I40E_TX_DESC(tx_ring, 0);
} }
} while (eop_desc);
}
/* unmap any remaining paged data */
if (dma_unmap_len(tx_buf, len)) {
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buf, len, 0);
}
}
/* move us one more past the eop_desc for start of next pkt */
tx_buf++;
tx_desc++;
i++;
if (unlikely(!i)) {
i -= tx_ring->count;
tx_buf = tx_ring->tx_bi;
tx_desc = I40E_TX_DESC(tx_ring, 0);
}
/* update budget accounting */
budget--;
} while (likely(budget));
i += tx_ring->count;
tx_ring->next_to_clean = i; tx_ring->next_to_clean = i;
tx_ring->tx_stats.bytes += total_bytes; u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.packets += total_packets; tx_ring->stats.bytes += total_bytes;
tx_ring->stats.packets += total_packets;
u64_stats_update_end(&tx_ring->syncp);
tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets; tx_ring->q_vector->tx.total_packets += total_packets;
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */ /* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
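The rewritten cleanup loop above also adopts the ixgbe-style biased index: i is offset by -tx_ring->count up front, the wrap test becomes a cheap compare against zero (if (unlikely(!i)) i -= tx_ring->count;), and the real position is recovered at the end with i += tx_ring->count. A tiny self-contained userspace illustration of the unsigned wrap arithmetic, shown with a 16-bit index and assumed values count = 512 and next_to_clean = 510:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t count = 512, i = 510;	/* assumed ring size and start slot */

		i -= count;			/* bias: the ring end now maps to i == 0 */
		for (int step = 0; step < 4; step++) {
			printf("processing slot %u\n", (uint16_t)(i + count));
			i++;
			if (!i)			/* walked past the last descriptor */
				i -= count;
		}
		return 0;			/* prints slots 510, 511, 0, 1 */
	}

The payoff is that the hot-path wrap check compares against zero instead of re-reading tx_ring->count on every descriptor.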
...@@ -414,6 +445,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) ...@@ -414,6 +445,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
return true; return true;
} }
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
...@@ -1042,8 +1077,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1042,8 +1077,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
} }
rx_ring->next_to_clean = i; rx_ring->next_to_clean = i;
rx_ring->rx_stats.packets += total_rx_packets; u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.bytes += total_rx_bytes; rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes; rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
...@@ -1067,27 +1104,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) ...@@ -1067,27 +1104,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_q_vector *q_vector = struct i40e_q_vector *q_vector =
container_of(napi, struct i40e_q_vector, napi); container_of(napi, struct i40e_q_vector, napi);
struct i40e_vsi *vsi = q_vector->vsi; struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
bool clean_complete = true; bool clean_complete = true;
int budget_per_ring; int budget_per_ring;
int i;
if (test_bit(__I40E_DOWN, &vsi->state)) { if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi); napi_complete(napi);
return 0; return 0;
} }
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx)
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
/* We attempt to distribute budget to each Rx queue fairly, but don't /* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early. * allow the budget to go below 1 because that would exit polling early.
* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/ */
budget_per_ring = max(budget/q_vector->num_ringpairs, 1); budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
for (i = 0; i < q_vector->num_ringpairs; i++) {
clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i], i40e_for_each_ring(ring, q_vector->rx)
vsi->work_limit); clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
budget_per_ring);
}
/* If work not completed, return budget and polling will return */ /* If work not completed, return budget and polling will return */
if (!clean_complete) if (!clean_complete)
...@@ -1144,6 +1182,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -1144,6 +1182,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct tcphdr *th; struct tcphdr *th;
unsigned int hlen; unsigned int hlen;
u32 flex_ptype, dtype_cmd; u32 flex_ptype, dtype_cmd;
u16 i;
/* make sure ATR is enabled */ /* make sure ATR is enabled */
if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED)) if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
...@@ -1183,10 +1222,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -1183,10 +1222,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->atr_count = 0; tx_ring->atr_count = 0;
/* grab the next descriptor */ /* grab the next descriptor */
fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); i = tx_ring->next_to_use;
tx_ring->next_to_use++; fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
if (tx_ring->next_to_use == tx_ring->count)
tx_ring->next_to_use = 0; i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
I40E_TXD_FLTR_QW0_QINDEX_MASK; I40E_TXD_FLTR_QW0_QINDEX_MASK;
...@@ -1275,27 +1315,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, ...@@ -1275,27 +1315,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
return 0; return 0;
} }
/**
* i40e_tx_csum - is checksum offload requested
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @tx_flags: the collected send information
* @protocol: the send protocol
*
* Returns true if checksum offload is requested
**/
static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, __be16 protocol)
{
if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
!(tx_flags & I40E_TX_FLAGS_TXSW)) {
if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
return false;
}
return skb->ip_summed == CHECKSUM_PARTIAL;
}
/** /**
* i40e_tso - set up the tso context descriptor * i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send * @tx_ring: ptr to the ring to send
...@@ -1482,15 +1501,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, ...@@ -1482,15 +1501,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
const u32 cd_tunneling, const u32 cd_l2tag2) const u32 cd_tunneling, const u32 cd_l2tag2)
{ {
struct i40e_tx_context_desc *context_desc; struct i40e_tx_context_desc *context_desc;
int i = tx_ring->next_to_use;
if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
return; return;
/* grab the next descriptor */ /* grab the next descriptor */
context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use); context_desc = I40E_TX_CTXTDESC(tx_ring, i);
tx_ring->next_to_use++;
if (tx_ring->next_to_use == tx_ring->count) i++;
tx_ring->next_to_use = 0; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* cpu_to_le32 and assign to struct fields */ /* cpu_to_le32 and assign to struct fields */
context_desc->tunneling_params = cpu_to_le32(cd_tunneling); context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
...@@ -1512,68 +1532,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -1512,68 +1532,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags, struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset) const u8 hdr_len, u32 td_cmd, u32 td_offset)
{ {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int data_len = skb->data_len; unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb); unsigned int size = skb_headlen(skb);
struct device *dev = tx_ring->dev; struct skb_frag_struct *frag;
u32 paylen = skb->len - hdr_len;
u16 i = tx_ring->next_to_use;
struct i40e_tx_buffer *tx_bi; struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc; struct i40e_tx_desc *tx_desc;
u32 buf_offset = 0; u16 i = tx_ring->next_to_use;
u32 td_tag = 0; u32 td_tag = 0;
dma_addr_t dma; dma_addr_t dma;
u16 gso_segs; u16 gso_segs;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto dma_error;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
I40E_TX_FLAGS_VLAN_SHIFT; I40E_TX_FLAGS_VLAN_SHIFT;
} }
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
gso_segs = skb_shinfo(skb)->gso_segs;
else
gso_segs = 1;
/* multiply data chunks by size of headers */
first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
first->gso_segs = gso_segs;
first->skb = skb;
first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
tx_desc = I40E_TX_DESC(tx_ring, i); tx_desc = I40E_TX_DESC(tx_ring, i);
for (;;) { tx_bi = first;
while (size > I40E_MAX_DATA_PER_TXD) {
tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
/* record length, and DMA address */
dma_unmap_len_set(tx_bi, len, size);
dma_unmap_addr_set(tx_bi, dma, dma);
tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz = tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, build_ctob(td_cmd, td_offset,
I40E_MAX_DATA_PER_TXD, td_tag); I40E_MAX_DATA_PER_TXD, td_tag);
buf_offset += I40E_MAX_DATA_PER_TXD;
size -= I40E_MAX_DATA_PER_TXD;
tx_desc++; tx_desc++;
i++; i++;
if (i == tx_ring->count) { if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0); tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0; i = 0;
} }
}
tx_bi = &tx_ring->tx_bi[i]; dma += I40E_MAX_DATA_PER_TXD;
tx_bi->length = buf_offset + size; size -= I40E_MAX_DATA_PER_TXD;
tx_bi->tx_flags = tx_flags;
tx_bi->dma = dma;
tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); tx_desc->buffer_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, }
size, td_tag);
if (likely(!data_len)) if (likely(!data_len))
break; break;
size = skb_frag_size(frag); tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
data_len -= size; size, td_tag);
buf_offset = 0;
tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto dma_error;
tx_desc++; tx_desc++;
i++; i++;
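A few lines up, the rework also fills in the first buffer before mapping, with first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len), i.e. the payload plus one copy of the headers per TSO segment; that is the value the completion path later adds into the byte counters. A worked example with assumed numbers (not taken from the patch):

	/* Assumed TSO frame:
	 *   skb->len = 64066  (66-byte headers + 64000 bytes of payload)
	 *   hdr_len  = 66
	 *   gso_size = 1448   ->  gso_segs = DIV_ROUND_UP(64000, 1448) = 45
	 *
	 * first->bytecount = (64066 - 66) + 45 * 66
	 *                  = 64000 + 2970
	 *                  = 66970 bytes actually placed on the wire
	 */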
...@@ -1582,31 +1605,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -1582,31 +1605,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0; i = 0;
} }
frag++; size = skb_frag_size(frag);
} data_len -= size;
tx_desc->cmd_type_offset_bsz |=
cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
i++; dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
if (i == tx_ring->count) DMA_TO_DEVICE);
i = 0;
tx_ring->next_to_use = i; tx_bi = &tx_ring->tx_bi[i];
}
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) tx_desc->cmd_type_offset_bsz =
gso_segs = skb_shinfo(skb)->gso_segs; build_ctob(td_cmd, td_offset, size, td_tag) |
else cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
gso_segs = 1;
/* multiply data chunks by size of headers */ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_bi->bytecount = paylen + (gso_segs * hdr_len); tx_ring->queue_index),
tx_bi->gso_segs = gso_segs; first->bytecount);
tx_bi->skb = skb;
/* set the timestamp and next to watch values */ /* set the timestamp */
first->time_stamp = jiffies; first->time_stamp = jiffies;
first->next_to_watch = tx_desc;
/* Force memory writes to complete before letting h/w /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only * know there are new descriptors to fetch. (Only
...@@ -1615,16 +1632,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -1615,16 +1632,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
*/ */
wmb(); wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
i++;
if (i == tx_ring->count)
i = 0;
tx_ring->next_to_use = i;
/* notify HW of packet */
writel(i, tx_ring->tail); writel(i, tx_ring->tail);
return; return;
dma_error: dma_error:
dev_info(dev, "TX DMA map failed\n"); dev_info(tx_ring->dev, "TX DMA map failed\n");
/* clear dma mappings for failed tx_bi map */ /* clear dma mappings for failed tx_bi map */
for (;;) { for (;;) {
tx_bi = &tx_ring->tx_bi[i]; tx_bi = &tx_ring->tx_bi[i];
i40e_unmap_tx_resource(tx_ring, tx_bi); i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
if (tx_bi == first) if (tx_bi == first)
break; break;
if (i == 0) if (i == 0)
...@@ -1632,8 +1660,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -1632,8 +1660,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i--; i--;
} }
dev_kfree_skb_any(skb);
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
} }
...@@ -1758,16 +1784,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -1758,16 +1784,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
/* Always offload the checksum, since it's in the data descriptor */ /* Always offload the checksum, since it's in the data descriptor */
if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol)) if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM; tx_flags |= I40E_TX_FLAGS_CSUM;
/* always enable offload insertion */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
if (tx_flags & I40E_TX_FLAGS_CSUM)
i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling); tx_ring, &cd_tunneling);
}
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2); cd_tunneling, cd_l2tag2);
...@@ -1801,7 +1827,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1801,7 +1827,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping]; struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames, hardware padding works /* hardware can't handle really short frames, hardware padding works
* beyond this point * beyond this point
......
...@@ -102,23 +102,20 @@ ...@@ -102,23 +102,20 @@
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) #define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) #define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7) #define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_TXSW (u32)(1 << 8)
#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16 #define I40E_TX_FLAGS_VLAN_SHIFT 16
struct i40e_tx_buffer { struct i40e_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
u16 length;
u32 tx_flags;
struct i40e_tx_desc *next_to_watch; struct i40e_tx_desc *next_to_watch;
unsigned long time_stamp;
struct sk_buff *skb;
unsigned int bytecount; unsigned int bytecount;
u16 gso_segs; unsigned short gso_segs;
u8 mapped_as_page; DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
}; };
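With the reordered i40e_tx_buffer, the DMA cookie now lives behind DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN(), so it is only touched through the dma_unmap_* helpers and compiles away entirely on kernels without CONFIG_NEED_DMA_MAP_STATE. The accessor pairing mirrors the i40e_tx_map()/i40e_unmap_and_free_tx_resource() hunks above:

	/* writer side: record the mapping when the buffer is filled */
	dma_unmap_len_set(tx_bi, len, size);
	dma_unmap_addr_set(tx_bi, dma, dma);

	/* reader side: unmap only if a length was actually recorded */
	if (dma_unmap_len(tx_bi, len)) {
		dma_unmap_single(dev,
				 dma_unmap_addr(tx_bi, dma),
				 dma_unmap_len(tx_bi, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_bi, len, 0);
	}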
struct i40e_rx_buffer { struct i40e_rx_buffer {
...@@ -129,18 +126,18 @@ struct i40e_rx_buffer { ...@@ -129,18 +126,18 @@ struct i40e_rx_buffer {
unsigned int page_offset; unsigned int page_offset;
}; };
struct i40e_tx_queue_stats { struct i40e_queue_stats {
u64 packets; u64 packets;
u64 bytes; u64 bytes;
};
struct i40e_tx_queue_stats {
u64 restart_queue; u64 restart_queue;
u64 tx_busy; u64 tx_busy;
u64 completed;
u64 tx_done_old; u64 tx_done_old;
}; };
struct i40e_rx_queue_stats { struct i40e_rx_queue_stats {
u64 packets;
u64 bytes;
u64 non_eop_descs; u64 non_eop_descs;
u64 alloc_rx_page_failed; u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed; u64 alloc_rx_buff_failed;
...@@ -183,6 +180,7 @@ enum i40e_ring_state_t { ...@@ -183,6 +180,7 @@ enum i40e_ring_state_t {
/* struct that defines a descriptor ring, associated with a VSI */ /* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring { struct i40e_ring {
struct i40e_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */ void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */ struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */ struct net_device *netdev; /* netdev ring maps to */
...@@ -219,6 +217,8 @@ struct i40e_ring { ...@@ -219,6 +217,8 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */ bool ring_active; /* is ring online or not */
/* stats structs */ /* stats structs */
struct i40e_queue_stats stats;
struct u64_stats_sync syncp;
union { union {
struct i40e_tx_queue_stats tx_stats; struct i40e_tx_queue_stats tx_stats;
struct i40e_rx_queue_stats rx_stats; struct i40e_rx_queue_stats rx_stats;
...@@ -229,6 +229,8 @@ struct i40e_ring { ...@@ -229,6 +229,8 @@ struct i40e_ring {
struct i40e_vsi *vsi; /* Backreference to associated VSI */ struct i40e_vsi *vsi; /* Backreference to associated VSI */
struct i40e_q_vector *q_vector; /* Backreference to associated vector */ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
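Since the ring's packet/byte counters are now guarded by a u64_stats_sync and the ring itself carries an rcu_head for deferred freeing, a statistics reader is expected to dereference the ring under rcu_read_lock() and retry its snapshot if an update raced with it. A hedged reader-side sketch (the exact stats path is not part of this hunk):

	unsigned int start;
	u64 packets, bytes;
	struct i40e_ring *ring;

	rcu_read_lock();
	ring = ACCESS_ONCE(vsi->tx_rings[i]);
	if (ring) {
		do {
			start   = u64_stats_fetch_begin(&ring->syncp);
			packets = ring->stats.packets;
			bytes   = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
	}
	rcu_read_unlock();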
enum i40e_latency_range { enum i40e_latency_range {
...@@ -238,9 +240,8 @@ enum i40e_latency_range { ...@@ -238,9 +240,8 @@ enum i40e_latency_range {
}; };
struct i40e_ring_container { struct i40e_ring_container {
#define I40E_MAX_RINGPAIR_PER_VECTOR 8
/* array of pointers to rings */ /* array of pointers to rings */
struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR]; struct i40e_ring *ring;
unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */ unsigned int total_packets; /* total packets processed this int */
u16 count; u16 count;
...@@ -248,6 +249,10 @@ struct i40e_ring_container { ...@@ -248,6 +249,10 @@ struct i40e_ring_container {
u16 itr; u16 itr;
}; };
/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
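The container no longer carries a fixed-size array of ring pointers; each i40e_ring is threaded onto its container through the new ->next member, and i40e_for_each_ring() walks that list. An illustrative sketch of chaining a Tx ring onto a vector and then cleaning it from NAPI (the chaining code is a sketch, not the driver's mapping routine):

	/* chain a ring onto the q_vector's Tx container */
	ring->q_vector = q_vector;
	ring->next = q_vector->tx.ring;
	q_vector->tx.ring = ring;
	q_vector->tx.count++;

	/* NAPI poll then visits every chained ring */
	i40e_for_each_ring(ring, q_vector->tx)
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);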
void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
......