Commit 58308451 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to i40e only.

Alex provides the majority of the patches against i40e, where he cleans
up the Tx and Rx queues and aligns the code with the known good Tx/Rx
queue code in the ixgbe driver.

Anjali provides an i40e patch to update link events so that they are not
printed to the log until the device is administratively up.

Catherine provides a patch to update the driver version.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b44084c2 d04795d6
@@ -347,9 +347,9 @@ struct i40e_vsi {
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
 
-	/* These are arrays of rings, allocated at run-time */
-	struct i40e_ring *rx_rings;
-	struct i40e_ring *tx_rings;
+	/* These are containers of ring pointers, allocated at run-time */
+	struct i40e_ring **rx_rings;
+	struct i40e_ring **tx_rings;
 
 	u16 work_limit;
 	/* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +366,7 @@ struct i40e_vsi {
 	u8 dtype;
 
 	/* List of q_vectors allocated to this VSI */
-	struct i40e_q_vector *q_vectors;
+	struct i40e_q_vector **q_vectors;
 	int num_q_vectors;
 	int base_vector;
 
@@ -422,8 +422,9 @@ struct i40e_q_vector {
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
 
-	char name[IFNAMSIZ + 9];
 	cpumask_t affinity_mask;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
+	char name[IFNAMSIZ + 9];
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
...
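With the VSI now holding arrays of ring pointers instead of arrays of rings, each Tx/Rx queue pair can be allocated and freed on its own; the ethtool stats hunk further down shows the Rx ring living at &tx_ring[1], i.e. the pair shares one allocation. A minimal sketch of per-pair allocation under that layout follows; example_alloc_rings() is a hypothetical helper for illustration, not the driver's actual setup path:

#include <linux/slab.h>

/* Hypothetical sketch: allocate each Tx/Rx queue pair as one block
 * and publish the two halves through the VSI's pointer arrays. */
static int example_alloc_rings(struct i40e_vsi *vsi)
{
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		/* one allocation per pair: Tx is [0], Rx is [1] */
		struct i40e_ring *ring = kzalloc(2 * sizeof(*ring),
						 GFP_KERNEL);

		if (!ring)
			return -ENOMEM;
		vsi->tx_rings[i] = &ring[0];
		vsi->rx_rings[i] = &ring[1];
	}
	return 0;
}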
@@ -258,12 +258,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			len = sizeof(struct i40e_tx_buffer);
-			memcpy(p, vsi->tx_rings[i].tx_bi, len);
+			memcpy(p, vsi->tx_rings[i]->tx_bi, len);
 			p += len;
 		}
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			len = sizeof(struct i40e_rx_buffer);
-			memcpy(p, vsi->rx_rings[i].rx_bi, len);
+			memcpy(p, vsi->rx_rings[i]->rx_bi, len);
 			p += len;
 		}
@@ -484,100 +484,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 		 " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
 		 vsi->tx_restart, vsi->tx_busy,
 		 vsi->rx_buf_failed, vsi->rx_page_failed);
-	if (vsi->rx_rings) {
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
+	rcu_read_lock();
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+
+		if (!rx_ring)
+			continue;
+
 		dev_info(&pf->pdev->dev,
 			 " rx_rings[%i]: desc = %p\n",
-			 i, vsi->rx_rings[i].desc);
+			 i, rx_ring->desc);
 		dev_info(&pf->pdev->dev,
 			 " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
-			 i, vsi->rx_rings[i].dev,
-			 vsi->rx_rings[i].netdev,
-			 vsi->rx_rings[i].rx_bi);
+			 i, rx_ring->dev,
+			 rx_ring->netdev,
+			 rx_ring->rx_bi);
 		dev_info(&pf->pdev->dev,
 			 " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-			 i, vsi->rx_rings[i].state,
-			 vsi->rx_rings[i].queue_index,
-			 vsi->rx_rings[i].reg_idx);
+			 i, rx_ring->state,
+			 rx_ring->queue_index,
+			 rx_ring->reg_idx);
 		dev_info(&pf->pdev->dev,
 			 " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
-			 i, vsi->rx_rings[i].rx_hdr_len,
-			 vsi->rx_rings[i].rx_buf_len,
-			 vsi->rx_rings[i].dtype);
+			 i, rx_ring->rx_hdr_len,
+			 rx_ring->rx_buf_len,
+			 rx_ring->dtype);
 		dev_info(&pf->pdev->dev,
 			 " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-			 i, vsi->rx_rings[i].hsplit,
-			 vsi->rx_rings[i].next_to_use,
-			 vsi->rx_rings[i].next_to_clean,
-			 vsi->rx_rings[i].ring_active);
+			 i, rx_ring->hsplit,
+			 rx_ring->next_to_use,
+			 rx_ring->next_to_clean,
+			 rx_ring->ring_active);
 		dev_info(&pf->pdev->dev,
 			 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
-			 i, vsi->rx_rings[i].rx_stats.packets,
-			 vsi->rx_rings[i].rx_stats.bytes,
-			 vsi->rx_rings[i].rx_stats.non_eop_descs);
+			 i, rx_ring->stats.packets,
+			 rx_ring->stats.bytes,
+			 rx_ring->rx_stats.non_eop_descs);
 		dev_info(&pf->pdev->dev,
 			 " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
 			 i,
-			 vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
-			 vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
+			 rx_ring->rx_stats.alloc_rx_page_failed,
+			 rx_ring->rx_stats.alloc_rx_buff_failed);
 		dev_info(&pf->pdev->dev,
 			 " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
-			 i, vsi->rx_rings[i].size,
-			 (long unsigned int)vsi->rx_rings[i].dma);
+			 i, rx_ring->size,
+			 (long unsigned int)rx_ring->dma);
 		dev_info(&pf->pdev->dev,
 			 " rx_rings[%i]: vsi = %p, q_vector = %p\n",
-			 i, vsi->rx_rings[i].vsi,
-			 vsi->rx_rings[i].q_vector);
-		}
+			 i, rx_ring->vsi,
+			 rx_ring->q_vector);
 	}
-	if (vsi->tx_rings) {
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+
+		if (!tx_ring)
+			continue;
+
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: desc = %p\n",
-			 i, vsi->tx_rings[i].desc);
+			 i, tx_ring->desc);
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
-			 i, vsi->tx_rings[i].dev,
-			 vsi->tx_rings[i].netdev,
-			 vsi->tx_rings[i].tx_bi);
+			 i, tx_ring->dev,
+			 tx_ring->netdev,
+			 tx_ring->tx_bi);
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-			 i, vsi->tx_rings[i].state,
-			 vsi->tx_rings[i].queue_index,
-			 vsi->tx_rings[i].reg_idx);
+			 i, tx_ring->state,
+			 tx_ring->queue_index,
+			 tx_ring->reg_idx);
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: dtype = %d\n",
-			 i, vsi->tx_rings[i].dtype);
+			 i, tx_ring->dtype);
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-			 i, vsi->tx_rings[i].hsplit,
-			 vsi->tx_rings[i].next_to_use,
-			 vsi->tx_rings[i].next_to_clean,
-			 vsi->tx_rings[i].ring_active);
+			 i, tx_ring->hsplit,
+			 tx_ring->next_to_use,
+			 tx_ring->next_to_clean,
+			 tx_ring->ring_active);
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
-			 i, vsi->tx_rings[i].tx_stats.packets,
-			 vsi->tx_rings[i].tx_stats.bytes,
-			 vsi->tx_rings[i].tx_stats.restart_queue);
+			 i, tx_ring->stats.packets,
+			 tx_ring->stats.bytes,
+			 tx_ring->tx_stats.restart_queue);
 		dev_info(&pf->pdev->dev,
-			 " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
+			 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
 			 i,
-			 vsi->tx_rings[i].tx_stats.tx_busy,
-			 vsi->tx_rings[i].tx_stats.completed,
-			 vsi->tx_rings[i].tx_stats.tx_done_old);
+			 tx_ring->tx_stats.tx_busy,
+			 tx_ring->tx_stats.tx_done_old);
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
-			 i, vsi->tx_rings[i].size,
-			 (long unsigned int)vsi->tx_rings[i].dma);
+			 i, tx_ring->size,
+			 (long unsigned int)tx_ring->dma);
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: vsi = %p, q_vector = %p\n",
-			 i, vsi->tx_rings[i].vsi,
-			 vsi->tx_rings[i].q_vector);
+			 i, tx_ring->vsi,
+			 tx_ring->q_vector);
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: DCB tc = %d\n",
-			 i, vsi->tx_rings[i].dcb_tc);
-		}
+			 i, tx_ring->dcb_tc);
 	}
+	rcu_read_unlock();
 	dev_info(&pf->pdev->dev,
 		 " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
 		 vsi->work_limit, vsi->rx_itr_setting,
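The dump loops above can race with ring teardown; they are safe because each ring pointer is sampled once with ACCESS_ONCE inside rcu_read_lock()/rcu_read_unlock(), NULL is tolerated, and the free side defers the actual kfree through the rcu_head added to struct i40e_ring. The matching free side might look roughly like the sketch below; example_clear_rings() is a hypothetical helper (the real teardown path is not part of this excerpt), and the pair-free assumes the one-block Tx/Rx allocation noted earlier:

/* Sketch (hypothetical helper): unpublish the ring pointers first,
 * then let RCU delay the kfree until readers such as the debugfs
 * dump above have left their read-side critical sections. */
static void example_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *ring = vsi->tx_rings[i];

		if (!ring)
			continue;
		vsi->tx_rings[i] = NULL;	/* unpublish before freeing */
		vsi->rx_rings[i] = NULL;	/* Rx is &ring[1], same block */
		kfree_rcu(ring, rcu);		/* free after a grace period */
	}
}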
@@ -587,15 +591,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 	dev_info(&pf->pdev->dev,
 		 " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
 		 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
-	if (vsi->q_vectors) {
-		for (i = 0; i < vsi->num_q_vectors; i++) {
-			dev_info(&pf->pdev->dev,
-				 " q_vectors[%i]: base index = %ld\n",
-				 i, ((long int)*vsi->q_vectors[i].rx.ring-
-				 (long int)*vsi->q_vectors[0].rx.ring)/
-				 sizeof(struct i40e_ring));
-		}
-	}
 	dev_info(&pf->pdev->dev,
 		 " num_q_vectors = %i, base_vector = %i\n",
 		 vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +787,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 		return;
 	}
 	if (is_rx_ring)
-		ring = vsi->rx_rings[ring_id];
+		ring = *vsi->rx_rings[ring_id];
 	else
-		ring = vsi->tx_rings[ring_id];
+		ring = *vsi->tx_rings[ring_id];
 	if (cnt == 2) {
 		dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
 			 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1996,7 +1991,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
 			goto netdev_ops_write_done;
 		}
 		for (i = 0; i < vsi->num_q_vectors; i++)
-			napi_schedule(&vsi->q_vectors[i].napi);
+			napi_schedule(&vsi->q_vectors[i]->napi);
 		dev_info(&pf->pdev->dev, "napi called\n");
 	} else {
 		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
...
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
 	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
-	ring->rx_pending = vsi->rx_rings[0].count;
-	ring->tx_pending = vsi->tx_rings[0].count;
+	ring->rx_pending = vsi->rx_rings[0]->count;
+	ring->tx_pending = vsi->tx_rings[0]->count;
 	ring->rx_mini_pending = 0;
 	ring->rx_jumbo_pending = 0;
 }
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
 
 	/* if nothing to do return success */
-	if ((new_tx_count == vsi->tx_rings[0].count) &&
-	    (new_rx_count == vsi->rx_rings[0].count))
+	if ((new_tx_count == vsi->tx_rings[0]->count) &&
+	    (new_rx_count == vsi->rx_rings[0]->count))
 		return 0;
 
 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	if (!netif_running(vsi->netdev)) {
 		/* simple case - set for the next time the netdev is started */
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			vsi->tx_rings[i].count = new_tx_count;
-			vsi->rx_rings[i].count = new_rx_count;
+			vsi->tx_rings[i]->count = new_tx_count;
+			vsi->rx_rings[i]->count = new_rx_count;
 		}
 		goto done;
 	}
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	 */
 
 	/* alloc updated Tx resources */
-	if (new_tx_count != vsi->tx_rings[0].count) {
+	if (new_tx_count != vsi->tx_rings[0]->count) {
 		netdev_info(netdev,
 			    "Changing Tx descriptor count from %d to %d.\n",
-			    vsi->tx_rings[0].count, new_tx_count);
+			    vsi->tx_rings[0]->count, new_tx_count);
 		tx_rings = kcalloc(vsi->alloc_queue_pairs,
 				   sizeof(struct i40e_ring), GFP_KERNEL);
 		if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			/* clone ring and setup updated count */
-			tx_rings[i] = vsi->tx_rings[i];
+			tx_rings[i] = *vsi->tx_rings[i];
 			tx_rings[i].count = new_tx_count;
 			err = i40e_setup_tx_descriptors(&tx_rings[i]);
 			if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	}
 
 	/* alloc updated Rx resources */
-	if (new_rx_count != vsi->rx_rings[0].count) {
+	if (new_rx_count != vsi->rx_rings[0]->count) {
 		netdev_info(netdev,
 			    "Changing Rx descriptor count from %d to %d\n",
-			    vsi->rx_rings[0].count, new_rx_count);
+			    vsi->rx_rings[0]->count, new_rx_count);
 		rx_rings = kcalloc(vsi->alloc_queue_pairs,
 				   sizeof(struct i40e_ring), GFP_KERNEL);
 		if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			/* clone ring and setup updated count */
-			rx_rings[i] = vsi->rx_rings[i];
+			rx_rings[i] = *vsi->rx_rings[i];
 			rx_rings[i].count = new_rx_count;
 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
 			if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	if (tx_rings) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			i40e_free_tx_resources(&vsi->tx_rings[i]);
-			vsi->tx_rings[i] = tx_rings[i];
+			i40e_free_tx_resources(vsi->tx_rings[i]);
+			*vsi->tx_rings[i] = tx_rings[i];
 		}
 		kfree(tx_rings);
 		tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	if (rx_rings) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			i40e_free_rx_resources(&vsi->rx_rings[i]);
-			vsi->rx_rings[i] = rx_rings[i];
+			i40e_free_rx_resources(vsi->rx_rings[i]);
+			*vsi->rx_rings[i] = rx_rings[i];
 		}
 		kfree(rx_rings);
 		rx_rings = NULL;
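The resize path above follows a clone-and-swap discipline: shadow copies of the live rings are struct-copied, given the new descriptor count, and fully set up with fresh descriptor memory before the interface is disturbed, so an allocation failure leaves the running rings untouched. Condensed sketch of the idea, with the error unwinding and the quiesce/restart calls around the commit elided (names as in the diff; the free_tx label is illustrative):

/* Sketch: build fully initialized shadow rings first ... */
for (i = 0; i < vsi->num_queue_pairs; i++) {
	tx_rings[i] = *vsi->tx_rings[i];	/* struct copy of live ring */
	tx_rings[i].count = new_tx_count;
	err = i40e_setup_tx_descriptors(&tx_rings[i]);
	if (err)
		goto free_tx;			/* old rings still intact */
}

/* ... then, with the VSI quiesced, commit them ring by ring */
for (i = 0; i < vsi->num_queue_pairs; i++) {
	i40e_free_tx_resources(vsi->tx_rings[i]);
	*vsi->tx_rings[i] = tx_rings[i];
}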
@@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 	char *p;
 	int j;
 	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+	unsigned int start;
 
 	i40e_update_stats(vsi);
 
@@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
-	for (j = 0; j < vsi->num_queue_pairs; j++) {
-		data[i++] = vsi->tx_rings[j].tx_stats.packets;
-		data[i++] = vsi->tx_rings[j].tx_stats.bytes;
-	}
-	for (j = 0; j < vsi->num_queue_pairs; j++) {
-		data[i++] = vsi->rx_rings[j].rx_stats.packets;
-		data[i++] = vsi->rx_rings[j].rx_stats.bytes;
-	}
+	rcu_read_lock();
+	for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+		struct i40e_ring *rx_ring;
+
+		if (!tx_ring)
+			continue;
+
+		/* process Tx ring statistics */
+		do {
+			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+			data[i] = tx_ring->stats.packets;
+			data[i + 1] = tx_ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+		/* Rx ring is the 2nd half of the queue pair */
+		rx_ring = &tx_ring[1];
+		do {
+			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+			data[i + 2] = rx_ring->stats.packets;
+			data[i + 3] = rx_ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+	}
+	rcu_read_unlock();
 	if (vsi == pf->vsi[pf->lan_vsi]) {
 		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
 			p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
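The fetch_begin/fetch_retry loops above are the reader half of the u64_stats_sync protocol; on 32-bit hosts they retry until a consistent 64-bit snapshot is read, while on 64-bit hosts they cost nothing. The complementary writer side, which the Tx/Rx clean paths would run when folding a batch of completions into ring->stats, looks roughly like this sketch (not the literal hot-path code; total_packets/total_bytes are illustrative locals):

/* Sketch: writer side of the u64_stats_sync pairing.  On 64-bit
 * builds this compiles down to plain stores; on 32-bit it bumps a
 * sequence counter so readers can detect a torn update. */
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.packets += total_packets;
tx_ring->stats.bytes += total_bytes;
u64_stats_update_end(&tx_ring->syncp);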
@@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
 			p += ETH_GSTRING_LEN;
 			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
 			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
 			p += ETH_GSTRING_LEN;
 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
 	}
 
 	vector = vsi->base_vector;
-	q_vector = vsi->q_vectors;
-	for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+		q_vector = vsi->q_vectors[i];
 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
 		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
...
(two further file diffs collapsed, not shown)
@@ -102,23 +102,20 @@
 #define I40E_TX_FLAGS_IPV6	(u32)(1 << 5)
 #define I40E_TX_FLAGS_FCCRC	(u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO	(u32)(1 << 7)
-#define I40E_TX_FLAGS_TXSW	(u32)(1 << 8)
-#define I40E_TX_FLAGS_MAPPED_AS_PAGE	(u32)(1 << 9)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
 #define I40E_TX_FLAGS_VLAN_SHIFT	16
 
 struct i40e_tx_buffer {
-	struct sk_buff *skb;
-	dma_addr_t dma;
-	unsigned long time_stamp;
-	u16 length;
-	u32 tx_flags;
 	struct i40e_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct sk_buff *skb;
 	unsigned int bytecount;
-	u16 gso_segs;
-	u8 mapped_as_page;
+	unsigned short gso_segs;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	u32 tx_flags;
 };
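DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() declare the dma/len fields only on configurations that actually need DMA map state preserved for unmap, so the buffer struct shrinks elsewhere; together with the gso_segs narrowing they replace the open-coded dma/length/mapped_as_page trio. Roughly how such a tx_buffer is filled at map time and torn down on completion, as a sketch (tx_buf, size and the error label are illustrative, not the driver's exact transmit code):

/* Sketch: pair the unmap macros with the generic accessors. */
dma_addr_t dma = dma_map_single(tx_ring->dev, skb->data, size,
				DMA_TO_DEVICE);
if (dma_mapping_error(tx_ring->dev, dma))
	goto dma_error;

/* record what unmap will need, via the macro-declared fields */
dma_unmap_addr_set(tx_buf, dma, dma);
dma_unmap_len_set(tx_buf, len, size);

/* ... later, on completion ... */
dma_unmap_single(tx_ring->dev,
		 dma_unmap_addr(tx_buf, dma),
		 dma_unmap_len(tx_buf, len),
		 DMA_TO_DEVICE);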
 struct i40e_rx_buffer {
@@ -129,18 +126,18 @@ struct i40e_rx_buffer {
 	unsigned int page_offset;
 };
 
-struct i40e_tx_queue_stats {
+struct i40e_queue_stats {
 	u64 packets;
 	u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
 	u64 restart_queue;
 	u64 tx_busy;
-	u64 completed;
 	u64 tx_done_old;
 };
 
 struct i40e_rx_queue_stats {
-	u64 packets;
-	u64 bytes;
 	u64 non_eop_descs;
 	u64 alloc_rx_page_failed;
 	u64 alloc_rx_buff_failed;
@@ -183,6 +180,7 @@ enum i40e_ring_state_t {
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
+	struct i40e_ring *next;	/* pointer to next ring in q_vector */
 	void *desc;		/* Descriptor ring memory */
 	struct device *dev;	/* Used for DMA mapping */
 	struct net_device *netdev;	/* netdev ring maps to */
@@ -219,6 +217,8 @@ struct i40e_ring {
 	bool ring_active;	/* is ring online or not */
 
 	/* stats structs */
+	struct i40e_queue_stats	stats;
+	struct u64_stats_sync syncp;
 	union {
 		struct i40e_tx_queue_stats tx_stats;
 		struct i40e_rx_queue_stats rx_stats;
@@ -229,6 +229,8 @@ struct i40e_ring {
 
 	struct i40e_vsi *vsi;	/* Backreference to associated VSI */
 	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
+
+	struct rcu_head rcu;	/* to avoid race on free */
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -238,9 +240,8 @@ enum i40e_latency_range {
 };
 
 struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
 	/* array of pointers to rings */
-	struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+	struct i40e_ring *ring;
 	unsigned int total_bytes;	/* total bytes processed this int */
 	unsigned int total_packets;	/* total packets processed this int */
 	u16 count;
@@ -248,6 +249,10 @@ struct i40e_ring_container {
 	u16 itr;
 };
 
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
 void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
...
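Replacing the fixed I40E_MAX_RINGPAIR_PER_VECTOR array with a singly linked list (the single ring head plus the new next field in struct i40e_ring) removes the eight-pairs-per-vector cap and matches the ixgbe layout. A NAPI poll routine would then walk a vector's rings roughly as in the sketch below; example_napi_poll() is hypothetical, and i40e_clean_tx_irq()/i40e_clean_rx_irq() stand in for the driver's clean routines, which are not part of this excerpt:

/* Sketch: service every ring chained onto this q_vector */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
		container_of(napi, struct i40e_q_vector, napi);
	bool clean_complete = true;
	struct i40e_ring *ring;

	i40e_for_each_ring(ring, q_vector->tx)
		clean_complete &= i40e_clean_tx_irq(ring, budget);

	i40e_for_each_ring(ring, q_vector->rx)
		clean_complete &= i40e_clean_rx_irq(ring, budget);

	if (!clean_complete)
		return budget;	/* more work: stay in polling mode */

	napi_complete(napi);
	return 0;
}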