Commit 717ecc27 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to igb only.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7146b2d9 c9f14bf3
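
Note: several hunks below convert igb's Tx buffer bookkeeping from open-coded dma/length fields to the generic DMA-unmap helpers (DEFINE_DMA_UNMAP_ADDR/DEFINE_DMA_UNMAP_LEN, dma_unmap_addr(), dma_unmap_len()). The following is only an illustrative sketch of that pattern with a hypothetical structure and function names; it is not code from this series:

        /* Illustrative sketch only: a hypothetical Tx buffer using the generic
         * DMA-unmap bookkeeping macros (see <linux/dma-mapping.h> and
         * <linux/skbuff.h>).  The fields compile away entirely on
         * architectures that do not need unmap state.
         */
        struct example_tx_buffer {
                struct sk_buff *skb;
                DEFINE_DMA_UNMAP_ADDR(dma);
                DEFINE_DMA_UNMAP_LEN(len);
        };

        static void example_unmap(struct device *dev, struct example_tx_buffer *buf)
        {
                /* a length of zero means nothing is currently mapped */
                if (dma_unmap_len(buf, len)) {
                        dma_unmap_single(dev,
                                         dma_unmap_addr(buf, dma),
                                         dma_unmap_len(buf, len),
                                         DMA_TO_DEVICE);
                        dma_unmap_len_set(buf, len, 0);
                }
        }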
@@ -101,7 +101,6 @@ struct vf_data_storage {
         u16 pf_vlan; /* When set, guest VLAN config not allowed. */
         u16 pf_qos;
         u16 tx_rate;
-        struct pci_dev *vfdev;
 };

 #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -169,8 +168,8 @@ struct igb_tx_buffer {
         unsigned int bytecount;
         u16 gso_segs;
         __be16 protocol;
-        dma_addr_t dma;
-        u32 length;
+        DEFINE_DMA_UNMAP_ADDR(dma);
+        DEFINE_DMA_UNMAP_LEN(len);
         u32 tx_flags;
 };
@@ -214,7 +213,6 @@ struct igb_q_vector {
         struct igb_ring_container rx, tx;

         struct napi_struct napi;
-        int numa_node;

         u16 itr_val;
         u8 set_itr;
@@ -259,7 +257,6 @@ struct igb_ring {
         };
         /* Items past this point are only used during ring alloc / free */
         dma_addr_t dma;                /* phys address of the ring */
-        int numa_node;                 /* node to alloc ring memory on */
 };

 enum e1000_ring_flags_t {
@@ -374,7 +371,6 @@ struct igb_adapter {
         int vf_rate_link_speed;
         u32 rss_queues;
         u32 wvbr;
-        int node;
         u32 *shadow_vfta;

 #ifdef CONFIG_IGB_PTP
...
@@ -172,8 +172,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);

 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_find_enabled_vfs(struct igb_adapter *adapter);
-static int igb_check_vf_assignment(struct igb_adapter *adapter);
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
 #endif

 #ifdef CONFIG_PM
@@ -404,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter)
                 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
                 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
                         n, tx_ring->next_to_use, tx_ring->next_to_clean,
-                        (u64)buffer_info->dma,
-                        buffer_info->length,
+                        (u64)dma_unmap_addr(buffer_info, dma),
+                        dma_unmap_len(buffer_info, len),
                         buffer_info->next_to_watch,
                         (u64)buffer_info->time_stamp);
         }
@@ -456,8 +455,8 @@ static void igb_dump(struct igb_adapter *adapter)
                         " %04X %p %016llX %p%s\n", i,
                         le64_to_cpu(u0->a),
                         le64_to_cpu(u0->b),
-                        (u64)buffer_info->dma,
-                        buffer_info->length,
+                        (u64)dma_unmap_addr(buffer_info, dma),
+                        dma_unmap_len(buffer_info, len),
                         buffer_info->next_to_watch,
                         (u64)buffer_info->time_stamp,
                         buffer_info->skb, next_desc);
@@ -466,7 +465,8 @@ static void igb_dump(struct igb_adapter *adapter)
                         print_hex_dump(KERN_INFO, "",
                                 DUMP_PREFIX_ADDRESS,
                                 16, 1, buffer_info->skb->data,
-                                buffer_info->length, true);
+                                dma_unmap_len(buffer_info, len),
+                                true);
                 }
         }
@@ -683,52 +683,29 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 {
         struct igb_ring *ring;
         int i;
-        int orig_node = adapter->node;

         for (i = 0; i < adapter->num_tx_queues; i++) {
-                if (orig_node == -1) {
-                        int cur_node = next_online_node(adapter->node);
-                        if (cur_node == MAX_NUMNODES)
-                                cur_node = first_online_node;
-                        adapter->node = cur_node;
-                }
-                ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
-                                    adapter->node);
-                if (!ring)
-                        ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                 if (!ring)
                         goto err;
                 ring->count = adapter->tx_ring_count;
                 ring->queue_index = i;
                 ring->dev = &adapter->pdev->dev;
                 ring->netdev = adapter->netdev;
-                ring->numa_node = adapter->node;
                 /* For 82575, context index must be unique per ring. */
                 if (adapter->hw.mac.type == e1000_82575)
                         set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
                 adapter->tx_ring[i] = ring;
         }
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;

         for (i = 0; i < adapter->num_rx_queues; i++) {
-                if (orig_node == -1) {
-                        int cur_node = next_online_node(adapter->node);
-                        if (cur_node == MAX_NUMNODES)
-                                cur_node = first_online_node;
-                        adapter->node = cur_node;
-                }
-                ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
-                                    adapter->node);
-                if (!ring)
-                        ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                 if (!ring)
                         goto err;
                 ring->count = adapter->rx_ring_count;
                 ring->queue_index = i;
                 ring->dev = &adapter->pdev->dev;
                 ring->netdev = adapter->netdev;
-                ring->numa_node = adapter->node;
                 /* set flag indicating ring supports SCTP checksum offload */
                 if (adapter->hw.mac.type >= e1000_82576)
                         set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
@@ -742,16 +719,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter)

                 adapter->rx_ring[i] = ring;
         }
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;

         igb_cache_ring_register(adapter);

         return 0;

 err:
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;
         igb_free_queues(adapter);

         return -ENOMEM;
@@ -1117,24 +1090,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
         struct igb_q_vector *q_vector;
         struct e1000_hw *hw = &adapter->hw;
         int v_idx;
-        int orig_node = adapter->node;

         for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
-                if ((adapter->num_q_vectors == (adapter->num_rx_queues +
-                                                adapter->num_tx_queues)) &&
-                    (adapter->num_rx_queues == v_idx))
-                        adapter->node = orig_node;
-                if (orig_node == -1) {
-                        int cur_node = next_online_node(adapter->node);
-                        if (cur_node == MAX_NUMNODES)
-                                cur_node = first_online_node;
-                        adapter->node = cur_node;
-                }
-                q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
-                                        adapter->node);
-                if (!q_vector)
-                        q_vector = kzalloc(sizeof(struct igb_q_vector),
-                                           GFP_KERNEL);
+                q_vector = kzalloc(sizeof(struct igb_q_vector),
+                                   GFP_KERNEL);
                 if (!q_vector)
                         goto err_out;
                 q_vector->adapter = adapter;
@@ -1143,14 +1102,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
                 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                 adapter->q_vector[v_idx] = q_vector;
         }
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;

         return 0;

 err_out:
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;
         igb_free_q_vectors(adapter);
         return -ENOMEM;
 }
@@ -2300,11 +2255,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
         /* reclaim resources allocated to VFs */
         if (adapter->vf_data) {
                 /* disable iov and allow time for transactions to clear */
-                if (!igb_check_vf_assignment(adapter)) {
+                if (igb_vfs_are_assigned(adapter)) {
+                        dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+                } else {
                         pci_disable_sriov(pdev);
                         msleep(500);
-                } else {
-                        dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
                 }

                 kfree(adapter->vf_data);
@@ -2344,7 +2299,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
 #ifdef CONFIG_PCI_IOV
         struct pci_dev *pdev = adapter->pdev;
         struct e1000_hw *hw = &adapter->hw;
-        int old_vfs = igb_find_enabled_vfs(adapter);
+        int old_vfs = pci_num_vf(adapter->pdev);
         int i;

         /* Virtualization features not supported on i210 family. */
@@ -2424,8 +2379,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
                                   VLAN_HLEN;
         adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

-        adapter->node = -1;
-
         spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
         switch (hw->mac.type) {
@@ -2672,13 +2625,11 @@ static int igb_close(struct net_device *netdev)
 int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
         struct device *dev = tx_ring->dev;
-        int orig_node = dev_to_node(dev);
         int size;

         size = sizeof(struct igb_tx_buffer) * tx_ring->count;
-        tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
-        if (!tx_ring->tx_buffer_info)
-                tx_ring->tx_buffer_info = vzalloc(size);
+
+        tx_ring->tx_buffer_info = vzalloc(size);
         if (!tx_ring->tx_buffer_info)
                 goto err;
@@ -2686,18 +2637,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
         tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
         tx_ring->size = ALIGN(tx_ring->size, 4096);

-        set_dev_node(dev, tx_ring->numa_node);
         tx_ring->desc = dma_alloc_coherent(dev,
                                            tx_ring->size,
                                            &tx_ring->dma,
                                            GFP_KERNEL);
-        set_dev_node(dev, orig_node);
-        if (!tx_ring->desc)
-                tx_ring->desc = dma_alloc_coherent(dev,
-                                                   tx_ring->size,
-                                                   &tx_ring->dma,
-                                                   GFP_KERNEL);
-
         if (!tx_ring->desc)
                 goto err;
@@ -2708,8 +2651,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)

 err:
         vfree(tx_ring->tx_buffer_info);
-        dev_err(dev,
-                "Unable to allocate memory for the transmit descriptor ring\n");
+        tx_ring->tx_buffer_info = NULL;
+        dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
         return -ENOMEM;
 }
@@ -2826,34 +2769,23 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
         struct device *dev = rx_ring->dev;
-        int orig_node = dev_to_node(dev);
-        int size, desc_len;
+        int size;

         size = sizeof(struct igb_rx_buffer) * rx_ring->count;
-        rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
-        if (!rx_ring->rx_buffer_info)
-                rx_ring->rx_buffer_info = vzalloc(size);
+
+        rx_ring->rx_buffer_info = vzalloc(size);
         if (!rx_ring->rx_buffer_info)
                 goto err;

-        desc_len = sizeof(union e1000_adv_rx_desc);
-
         /* Round up to nearest 4K */
-        rx_ring->size = rx_ring->count * desc_len;
+        rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
         rx_ring->size = ALIGN(rx_ring->size, 4096);

-        set_dev_node(dev, rx_ring->numa_node);
         rx_ring->desc = dma_alloc_coherent(dev,
                                            rx_ring->size,
                                            &rx_ring->dma,
                                            GFP_KERNEL);
-        set_dev_node(dev, orig_node);
-        if (!rx_ring->desc)
-                rx_ring->desc = dma_alloc_coherent(dev,
-                                                   rx_ring->size,
-                                                   &rx_ring->dma,
-                                                   GFP_KERNEL);

         if (!rx_ring->desc)
                 goto err;
@@ -2865,8 +2797,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 err:
         vfree(rx_ring->rx_buffer_info);
         rx_ring->rx_buffer_info = NULL;
-        dev_err(dev, "Unable to allocate memory for the receive descriptor"
-                " ring\n");
+        dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
         return -ENOMEM;
 }
@@ -2904,57 +2835,48 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 {
         struct e1000_hw *hw = &adapter->hw;
         u32 mrqc, rxcsum;
-        u32 j, num_rx_queues, shift = 0, shift2 = 0;
-        union e1000_reta {
-                u32 dword;
-                u8  bytes[4];
-        } reta;
-        static const u8 rsshash[40] = {
-                0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
-                0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
-                0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
-                0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+        u32 j, num_rx_queues, shift = 0;
+        static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+                                        0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+                                        0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+                                        0xFA01ACBE };

         /* Fill out hash function seeds */
-        for (j = 0; j < 10; j++) {
-                u32 rsskey = rsshash[(j * 4)];
-                rsskey |= rsshash[(j * 4) + 1] << 8;
-                rsskey |= rsshash[(j * 4) + 2] << 16;
-                rsskey |= rsshash[(j * 4) + 3] << 24;
-                array_wr32(E1000_RSSRK(0), j, rsskey);
-        }
+        for (j = 0; j < 10; j++)
+                wr32(E1000_RSSRK(j), rsskey[j]);

         num_rx_queues = adapter->rss_queues;

-        if (adapter->vfs_allocated_count) {
-                /* 82575 and 82576 supports 2 RSS queues for VMDq */
-                switch (hw->mac.type) {
-                case e1000_i350:
-                case e1000_82580:
-                        num_rx_queues = 1;
-                        shift = 0;
-                        break;
-                case e1000_82576:
+        switch (hw->mac.type) {
+        case e1000_82575:
+                shift = 6;
+                break;
+        case e1000_82576:
+                /* 82576 supports 2 RSS queues for SR-IOV */
+                if (adapter->vfs_allocated_count) {
                         shift = 3;
                         num_rx_queues = 2;
-                        break;
-                case e1000_82575:
-                        shift = 2;
-                        shift2 = 6;
-                default:
-                        break;
                 }
-        } else {
-                if (hw->mac.type == e1000_82575)
-                        shift = 6;
+                break;
+        default:
+                break;
         }

-        for (j = 0; j < (32 * 4); j++) {
-                reta.bytes[j & 3] = (j % num_rx_queues) << shift;
-                if (shift2)
-                        reta.bytes[j & 3] |= num_rx_queues << shift2;
-                if ((j & 3) == 3)
-                        wr32(E1000_RETA(j >> 2), reta.dword);
+        /*
+         * Populate the indirection table 4 entries at a time.  To do this
+         * we are generating the results for n and n+2 and then interleaving
+         * those with the results with n+1 and n+3.
+         */
+        for (j = 0; j < 32; j++) {
+                /* first pass generates n and n+2 */
+                u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
+                u32 reta = (base & 0x07800780) >> (7 - shift);
+
+                /* second pass generates n+1 and n+3 */
+                base += 0x00010001 * num_rx_queues;
+                reta |= (base & 0x07800780) << (1 + shift);
+
+                wr32(E1000_RETA(j), reta);
         }

         /*
@@ -3277,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
 {
         if (tx_buffer->skb) {
                 dev_kfree_skb_any(tx_buffer->skb);
-                if (tx_buffer->dma)
+                if (dma_unmap_len(tx_buffer, len))
                         dma_unmap_single(ring->dev,
-                                         tx_buffer->dma,
-                                         tx_buffer->length,
+                                         dma_unmap_addr(tx_buffer, dma),
+                                         dma_unmap_len(tx_buffer, len),
                                          DMA_TO_DEVICE);
-        } else if (tx_buffer->dma) {
+        } else if (dma_unmap_len(tx_buffer, len)) {
                 dma_unmap_page(ring->dev,
-                               tx_buffer->dma,
-                               tx_buffer->length,
+                               dma_unmap_addr(tx_buffer, dma),
+                               dma_unmap_len(tx_buffer, len),
                                DMA_TO_DEVICE);
         }
         tx_buffer->next_to_watch = NULL;
         tx_buffer->skb = NULL;
-        tx_buffer->dma = 0;
+        dma_unmap_len_set(tx_buffer, len, 0);
         /* buffer_info must be completely set up in the transmit path */
 }
@@ -4285,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                         const u8 hdr_len)
 {
         struct sk_buff *skb = first->skb;
-        struct igb_tx_buffer *tx_buffer_info;
+        struct igb_tx_buffer *tx_buffer;
         union e1000_adv_tx_desc *tx_desc;
         dma_addr_t dma;
         struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -4306,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                 goto dma_error;

         /* record length, and DMA address */
-        first->length = size;
-        first->dma = dma;
+        dma_unmap_len_set(first, len, size);
+        dma_unmap_addr_set(first, dma, dma);
         tx_desc->read.buffer_addr = cpu_to_le64(dma);

         for (;;) {
@@ -4349,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                 if (dma_mapping_error(tx_ring->dev, dma))
                         goto dma_error;

-                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                tx_buffer_info->length = size;
-                tx_buffer_info->dma = dma;
+                tx_buffer = &tx_ring->tx_buffer_info[i];
+                dma_unmap_len_set(tx_buffer, len, size);
+                dma_unmap_addr_set(tx_buffer, dma, dma);

                 tx_desc->read.olinfo_status = 0;
                 tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -4402,9 +4324,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,

         /* clear dma mappings for failed tx_buffer_info map */
         for (;;) {
-                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-                if (tx_buffer_info == first)
+                tx_buffer = &tx_ring->tx_buffer_info[i];
+                igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+                if (tx_buffer == first)
                         break;
                 if (i == 0)
                         i = tx_ring->count;
@@ -4777,7 +4699,11 @@ void igb_update_stats(struct igb_adapter *adapter,
         reg = rd32(E1000_CTRL_EXT);
         if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
                 adapter->stats.rxerrc += rd32(E1000_RXERRC);
-                adapter->stats.tncrs += rd32(E1000_TNCRS);
+
+                /* this stat has invalid values on i210/i211 */
+                if ((hw->mac.type != e1000_i210) &&
+                    (hw->mac.type != e1000_i211))
+                        adapter->stats.tncrs += rd32(E1000_TNCRS);
         }

         adapter->stats.tsctc += rd32(E1000_TSCTC);
@@ -5037,102 +4963,43 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
 {
         unsigned char mac_addr[ETH_ALEN];
-        struct pci_dev *pdev = adapter->pdev;
-        struct e1000_hw *hw = &adapter->hw;
-        struct pci_dev *pvfdev;
-        unsigned int device_id;
-        u16 thisvf_devfn;

         eth_random_addr(mac_addr);
         igb_set_vf_mac(adapter, vf, mac_addr);

-        switch (adapter->hw.mac.type) {
-        case e1000_82576:
-                device_id = IGB_82576_VF_DEV_ID;
-                /* VF Stride for 82576 is 2 */
-                thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
-                        (pdev->devfn & 1);
-                break;
-        case e1000_i350:
-                device_id = IGB_I350_VF_DEV_ID;
-                /* VF Stride for I350 is 4 */
-                thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
-                                (pdev->devfn & 3);
-                break;
-        default:
-                device_id = 0;
-                thisvf_devfn = 0;
-                break;
-        }
-
-        pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
-        while (pvfdev) {
-                if (pvfdev->devfn == thisvf_devfn)
-                        break;
-                pvfdev = pci_get_device(hw->vendor_id,
-                                        device_id, pvfdev);
-        }
-
-        if (pvfdev)
-                adapter->vf_data[vf].vfdev = pvfdev;
-        else
-                dev_err(&pdev->dev,
-                        "Couldn't find pci dev ptr for VF %4.4x\n",
-                        thisvf_devfn);
-        return pvfdev != NULL;
+        return 0;
 }

-static int igb_find_enabled_vfs(struct igb_adapter *adapter)
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
 {
-        struct e1000_hw *hw = &adapter->hw;
         struct pci_dev *pdev = adapter->pdev;
-        struct pci_dev *pvfdev;
-        u16 vf_devfn = 0;
-        u16 vf_stride;
-        unsigned int device_id;
-        int vfs_found = 0;
+        struct pci_dev *vfdev;
+        int dev_id;

         switch (adapter->hw.mac.type) {
         case e1000_82576:
-                device_id = IGB_82576_VF_DEV_ID;
-                /* VF Stride for 82576 is 2 */
-                vf_stride = 2;
+                dev_id = IGB_82576_VF_DEV_ID;
                 break;
         case e1000_i350:
-                device_id = IGB_I350_VF_DEV_ID;
-                /* VF Stride for I350 is 4 */
-                vf_stride = 4;
+                dev_id = IGB_I350_VF_DEV_ID;
                 break;
         default:
-                device_id = 0;
-                vf_stride = 0;
-                break;
+                return false;
         }

-        vf_devfn = pdev->devfn + 0x80;
-        pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
-        while (pvfdev) {
-                if (pvfdev->devfn == vf_devfn &&
-                    (pvfdev->bus->number >= pdev->bus->number))
-                        vfs_found++;
-                vf_devfn += vf_stride;
-                pvfdev = pci_get_device(hw->vendor_id,
-                                        device_id, pvfdev);
-        }
-
-        return vfs_found;
-}
-
-static int igb_check_vf_assignment(struct igb_adapter *adapter)
-{
-        int i;
-        for (i = 0; i < adapter->vfs_allocated_count; i++) {
-                if (adapter->vf_data[i].vfdev) {
-                        if (adapter->vf_data[i].vfdev->dev_flags &
-                            PCI_DEV_FLAGS_ASSIGNED)
+        /* loop through all the VFs to see if we own any that are assigned */
+        vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+        while (vfdev) {
+                /* if we don't own it we don't care */
+                if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+                        /* if it is assigned we cannot release it */
+                        if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                 return true;
                 }
+
+                vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
         }
+
         return false;
 }
@@ -5815,7 +5682,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
         struct igb_adapter *adapter = q_vector->adapter;
         struct igb_ring *tx_ring = q_vector->tx.ring;
         struct igb_tx_buffer *tx_buffer;
-        union e1000_adv_tx_desc *tx_desc, *eop_desc;
+        union e1000_adv_tx_desc *tx_desc;
         unsigned int total_bytes = 0, total_packets = 0;
         unsigned int budget = q_vector->tx.work_limit;
         unsigned int i = tx_ring->next_to_clean;
@@ -5827,16 +5694,16 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
         tx_desc = IGB_TX_DESC(tx_ring, i);
         i -= tx_ring->count;

-        for (; budget; budget--) {
-                eop_desc = tx_buffer->next_to_watch;
-
-                /* prevent any other reads prior to eop_desc */
-                rmb();
+        do {
+                union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

                 /* if next_to_watch is not set then there is no work pending */
                 if (!eop_desc)
                         break;

+                /* prevent any other reads prior to eop_desc */
+                rmb();
+
                 /* if DD is not set pending work has not been completed */
                 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
                         break;
@@ -5850,18 +5717,19 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)

                 /* free the skb */
                 dev_kfree_skb_any(tx_buffer->skb);
-                tx_buffer->skb = NULL;

                 /* unmap skb header data */
                 dma_unmap_single(tx_ring->dev,
-                                 tx_buffer->dma,
-                                 tx_buffer->length,
+                                 dma_unmap_addr(tx_buffer, dma),
+                                 dma_unmap_len(tx_buffer, len),
                                  DMA_TO_DEVICE);

+                /* clear tx_buffer data */
+                tx_buffer->skb = NULL;
+                dma_unmap_len_set(tx_buffer, len, 0);
+
                 /* clear last DMA location and unmap remaining buffers */
                 while (tx_desc != eop_desc) {
-                        tx_buffer->dma = 0;
-
                         tx_buffer++;
                         tx_desc++;
                         i++;
@@ -5872,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                         }

                         /* unmap any remaining paged data */
-                        if (tx_buffer->dma) {
+                        if (dma_unmap_len(tx_buffer, len)) {
                                 dma_unmap_page(tx_ring->dev,
-                                               tx_buffer->dma,
-                                               tx_buffer->length,
+                                               dma_unmap_addr(tx_buffer, dma),
+                                               dma_unmap_len(tx_buffer, len),
                                                DMA_TO_DEVICE);
+                                dma_unmap_len_set(tx_buffer, len, 0);
                         }
                 }

-                /* clear last DMA location */
-                tx_buffer->dma = 0;
-
                 /* move us one more past the eop_desc for start of next pkt */
                 tx_buffer++;
                 tx_desc++;
@@ -5892,7 +5758,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                         tx_buffer = tx_ring->tx_buffer_info;
                         tx_desc = IGB_TX_DESC(tx_ring, 0);
                 }
-        }
+
+                /* issue prefetch for next Tx descriptor */
+                prefetch(tx_desc);
+
+                /* update budget accounting */
+                budget--;
+        } while (likely(budget));

         netdev_tx_completed_queue(txring_txq(tx_ring),
                                   total_packets, total_bytes);
@@ -5908,12 +5780,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
         if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
                 struct e1000_hw *hw = &adapter->hw;

-                eop_desc = tx_buffer->next_to_watch;
-
                 /* Detect a transmit hang in hardware, this serializes the
                  * check with the clearing of time_stamp and movement of i */
                 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
-                if (eop_desc &&
+                if (tx_buffer->next_to_watch &&
                     time_after(jiffies, tx_buffer->time_stamp +
                                (adapter->tx_timeout_factor * HZ)) &&
                     !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
@@ -5937,9 +5807,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                                 tx_ring->next_to_use,
                                 tx_ring->next_to_clean,
                                 tx_buffer->time_stamp,
-                                eop_desc,
+                                tx_buffer->next_to_watch,
                                 jiffies,
-                                eop_desc->wb.status);
+                                tx_buffer->next_to_watch->wb.status);
                         netif_stop_subqueue(tx_ring->netdev,
                                             tx_ring->queue_index);