Commit 0fe13151 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-09-12

This series contains updates to e1000, ixgbe and ixgbevf.

Mark provides two fixes to reduce compile warnings produced by ixgbe
and ixgbevf.

Alex provides two patches for ixgbe.  The first removes the receive
buffer allocation at the end of ixgbe_clean_rx_irq(); the reason for
removing it is to avoid the extra latency introduced by the MMIO write.
The second patch addresses several issues in the current ixgbe
implementation of busy poll sockets.  It was possible for frames to be
delivered out of order if they were held in GRO, so this is addressed
by flushing the GRO buffers before releasing the q_vector back to the
idle state.  Also, a spinlock had to be taken when changing the state
to and from idle; to resolve this, the state value is replaced with an
atomic, atomic_cmpxchg is used to change the value from idle, and a
simple atomic set restores it back to idle after the vector has been
acquired.  This way only a locked operation is needed to acquire the
vector, and no locked operation is needed to release it.
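
A condensed sketch of that acquire/release pattern (illustrative only;
the struct and enum names here are made up, and the real definitions
appear in the ixgbe.h hunk further down in this diff):

    #include <linux/atomic.h>

    enum qv_state { QV_IDLE = 0, QV_NAPI, QV_POLL, QV_DISABLE };

    struct qv {
            atomic_t state;
    };

    /* Acquire: a single locked cmpxchg; fails unless the vector is idle. */
    static inline bool qv_lock_napi(struct qv *q)
    {
            return atomic_cmpxchg(&q->state, QV_IDLE, QV_NAPI) == QV_IDLE;
    }

    /* Release: a plain atomic store, no locked operation required. */
    static inline void qv_unlock_napi(struct qv *q)
    {
            atomic_set(&q->state, QV_IDLE);
    }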

Florian Westphal provides several patches for e1000 that do some
cleanup and updating of the driver.  He moves e1000_tbi_adjust_stats()
so that the function can be made static, adds a helper function for
the TBI workaround that was duplicated in two different Rx clean
functions, adds an e1000_rx_buffer struct for use on receive (since
transmit and receive have different requirements), and updates e1000
to use the napi_gro_frags API.
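
A rough sketch of the napi_gro_frags() receive flow e1000 moves to
(condensed from the jumbo Rx hunks below; rx_deliver_page() is an
illustrative helper, not a function in the driver):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void rx_deliver_page(struct napi_struct *napi, struct page *page,
                                unsigned int len)
    {
            /* Borrow the per-NAPI skb instead of allocating one per packet. */
            struct sk_buff *skb = napi_get_frags(napi);

            if (unlikely(!skb))
                    return;

            /* Attach the received page as a fragment of that skb. */
            skb_fill_page_desc(skb, 0, page, 0, len);
            skb->len += len;
            skb->data_len += len;
            skb->truesize += PAGE_SIZE;

            /* Hand the frags to GRO, which builds and delivers the packet. */
            napi_gro_frags(napi);
    }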
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 54996b52 de591c78
......@@ -148,16 +148,23 @@ struct e1000_adapter;
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct e1000_buffer {
struct e1000_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
unsigned int segs;
bool mapped_as_page;
unsigned short segs;
unsigned int bytecount;
u16 mapped_as_page;
};
struct e1000_rx_buffer {
union {
struct page *page; /* jumbo: alloc_page */
u8 *data; /* else, netdev_alloc_frag */
} rxbuf;
dma_addr_t dma;
};
struct e1000_tx_ring {
......@@ -174,7 +181,7 @@ struct e1000_tx_ring {
/* next descriptor to check for DD status bit */
unsigned int next_to_clean;
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
struct e1000_tx_buffer *buffer_info;
u16 tdh;
u16 tdt;
......@@ -195,7 +202,7 @@ struct e1000_rx_ring {
/* next descriptor to check for DD status bit */
unsigned int next_to_clean;
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
struct e1000_rx_buffer *buffer_info;
struct sk_buff *rx_skb_top;
/* cpu for rx queue */
......
......@@ -968,10 +968,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rxdr->buffer_info[i].dma)
dma_unmap_single(&pdev->dev,
rxdr->buffer_info[i].dma,
rxdr->buffer_info[i].length,
E1000_RXBUFFER_2048,
DMA_FROM_DEVICE);
if (rxdr->buffer_info[i].skb)
dev_kfree_skb(rxdr->buffer_info[i].skb);
kfree(rxdr->buffer_info[i].rxbuf.data);
}
}
......@@ -1006,7 +1005,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
if (!txdr->count)
txdr->count = E1000_DEFAULT_TXD;
txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer),
txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer),
GFP_KERNEL);
if (!txdr->buffer_info) {
ret_val = 1;
......@@ -1065,7 +1064,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
if (!rxdr->count)
rxdr->count = E1000_DEFAULT_RXD;
rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer),
GFP_KERNEL);
if (!rxdr->buffer_info) {
ret_val = 5;
......@@ -1095,25 +1094,25 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
for (i = 0; i < rxdr->count; i++) {
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
struct sk_buff *skb;
u8 *buf;
skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
if (!skb) {
buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN,
GFP_KERNEL);
if (!buf) {
ret_val = 7;
goto err_nomem;
}
skb_reserve(skb, NET_IP_ALIGN);
rxdr->buffer_info[i].skb = skb;
rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
rxdr->buffer_info[i].rxbuf.data = buf;
rxdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data,
dma_map_single(&pdev->dev,
buf + NET_SKB_PAD + NET_IP_ALIGN,
E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}
return 0;
......@@ -1386,13 +1385,13 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb,
memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}
static int e1000_check_lbtest_frame(struct sk_buff *skb,
static int e1000_check_lbtest_frame(const unsigned char *data,
unsigned int frame_size)
{
frame_size &= ~1;
if (skb->data[3] == 0xFF) {
if (skb->data[frame_size / 2 + 10] == 0xBE &&
skb->data[frame_size / 2 + 12] == 0xAF) {
if (*(data + 3) == 0xFF) {
if ((*(data + frame_size / 2 + 10) == 0xBE) &&
(*(data + frame_size / 2 + 12) == 0xAF)) {
return 0;
}
}
......@@ -1440,11 +1439,12 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
do { /* receive the sent packets */
dma_sync_single_for_cpu(&pdev->dev,
rxdr->buffer_info[l].dma,
rxdr->buffer_info[l].length,
E1000_RXBUFFER_2048,
DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
rxdr->buffer_info[l].rxbuf.data +
NET_SKB_PAD + NET_IP_ALIGN,
1024);
if (!ret_val)
good_cnt++;
......
......@@ -4836,84 +4836,6 @@ void e1000_update_adaptive(struct e1000_hw *hw)
}
}
/**
* e1000_tbi_adjust_stats
* @hw: Struct containing variables accessed by shared code
* @frame_len: The length of the frame in question
* @mac_addr: The Ethernet destination address of the frame in question
*
* Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
*/
void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
u32 frame_len, u8 *mac_addr)
{
u64 carry_bit;
/* First adjust the frame length. */
frame_len--;
/* We need to adjust the statistics counters, since the hardware
* counters overcount this packet as a CRC error and undercount
* the packet as a good packet
*/
/* This packet should not be counted as a CRC error. */
stats->crcerrs--;
/* This packet does count as a Good Packet Received. */
stats->gprc++;
/* Adjust the Good Octets received counters */
carry_bit = 0x80000000 & stats->gorcl;
stats->gorcl += frame_len;
/* If the high bit of Gorcl (the low 32 bits of the Good Octets
* Received Count) was one before the addition,
* AND it is zero after, then we lost the carry out,
* need to add one to Gorch (Good Octets Received Count High).
* This could be simplified if all environments supported
* 64-bit integers.
*/
if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
stats->gorch++;
/* Is this a broadcast or multicast? Check broadcast first,
* since the test for a multicast frame will test positive on
* a broadcast frame.
*/
if (is_broadcast_ether_addr(mac_addr))
/* Broadcast packet */
stats->bprc++;
else if (is_multicast_ether_addr(mac_addr))
/* Multicast packet */
stats->mprc++;
if (frame_len == hw->max_frame_size) {
/* In this case, the hardware has overcounted the number of
* oversize frames.
*/
if (stats->roc > 0)
stats->roc--;
}
/* Adjust the bin counters when the extra byte put the frame in the
* wrong bin. Remember that the frame_len was adjusted above.
*/
if (frame_len == 64) {
stats->prc64++;
stats->prc127--;
} else if (frame_len == 127) {
stats->prc127++;
stats->prc255--;
} else if (frame_len == 255) {
stats->prc255++;
stats->prc511--;
} else if (frame_len == 511) {
stats->prc511++;
stats->prc1023--;
} else if (frame_len == 1023) {
stats->prc1023++;
stats->prc1522--;
} else if (frame_len == 1522) {
stats->prc1522++;
}
}
/**
* e1000_get_bus_info
* @hw: Struct containing variables accessed by shared code
......
......@@ -393,8 +393,6 @@ s32 e1000_blink_led_start(struct e1000_hw *hw);
/* Everything else */
void e1000_reset_adaptive(struct e1000_hw *hw);
void e1000_update_adaptive(struct e1000_hw *hw);
void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
u32 frame_len, u8 * mac_addr);
void e1000_get_bus_info(struct e1000_hw *hw);
void e1000_pci_set_mwi(struct e1000_hw *hw);
void e1000_pci_clear_mwi(struct e1000_hw *hw);
......
......@@ -1497,7 +1497,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
int size;
size = sizeof(struct e1000_buffer) * txdr->count;
size = sizeof(struct e1000_tx_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
if (!txdr->buffer_info)
return -ENOMEM;
......@@ -1687,7 +1687,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
int size, desc_len;
size = sizeof(struct e1000_buffer) * rxdr->count;
size = sizeof(struct e1000_rx_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
if (!rxdr->buffer_info)
return -ENOMEM;
......@@ -1947,8 +1947,9 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
struct e1000_tx_buffer *buffer_info)
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
......@@ -1977,7 +1978,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_buffer *buffer_info;
struct e1000_tx_buffer *buffer_info;
unsigned long size;
unsigned int i;
......@@ -1989,7 +1990,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
}
netdev_reset_queue(adapter->netdev);
size = sizeof(struct e1000_buffer) * tx_ring->count;
size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
memset(tx_ring->buffer_info, 0, size);
/* Zero out the descriptor ring */
......@@ -2053,6 +2054,28 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
static unsigned int e1000_frag_len(const struct e1000_adapter *a)
{
return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
static void *e1000_alloc_frag(const struct e1000_adapter *a)
{
unsigned int len = e1000_frag_len(a);
u8 *data = netdev_alloc_frag(len);
if (likely(data))
data += E1000_HEADROOM;
return data;
}
static void e1000_free_frag(const void *data)
{
put_page(virt_to_head_page(data));
}
/**
* e1000_clean_rx_ring - Free Rx Buffers per Queue
* @adapter: board private structure
......@@ -2062,44 +2085,42 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_buffer *buffer_info;
struct e1000_rx_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;
/* Free all the Rx ring sk_buffs */
/* Free all the Rx netfrags */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_rx_irq) {
dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length,
DMA_FROM_DEVICE);
} else if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
dma_unmap_page(&pdev->dev, buffer_info->dma,
buffer_info->length,
DMA_FROM_DEVICE);
if (adapter->clean_rx == e1000_clean_rx_irq) {
if (buffer_info->dma)
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (buffer_info->rxbuf.data) {
e1000_free_frag(buffer_info->rxbuf.data);
buffer_info->rxbuf.data = NULL;
}
} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
if (buffer_info->dma)
dma_unmap_page(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (buffer_info->rxbuf.page) {
put_page(buffer_info->rxbuf.page);
buffer_info->rxbuf.page = NULL;
}
}
buffer_info->dma = 0;
if (buffer_info->page) {
put_page(buffer_info->page);
buffer_info->page = NULL;
}
if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
}
/* there also may be some cached data from a chained receive */
if (rx_ring->rx_skb_top) {
dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
}
napi_free_frags(&adapter->napi);
rx_ring->rx_skb_top = NULL;
size = sizeof(struct e1000_buffer) * rx_ring->count;
size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
memset(rx_ring->buffer_info, 0, size);
/* Zero out the descriptor ring */
......@@ -2678,7 +2699,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
__be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
struct e1000_tx_buffer *buffer_info;
unsigned int i;
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
......@@ -2750,7 +2771,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
__be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
struct e1000_tx_buffer *buffer_info;
unsigned int i;
u8 css;
u32 cmd_len = E1000_TXD_CMD_DEXT;
......@@ -2809,7 +2830,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
{
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
struct e1000_buffer *buffer_info;
struct e1000_tx_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
unsigned int f, bytecount, segs;
......@@ -2955,7 +2976,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_tx_desc *tx_desc = NULL;
struct e1000_buffer *buffer_info;
struct e1000_tx_buffer *buffer_info;
u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
unsigned int i;
......@@ -3373,7 +3394,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
struct my_u { __le64 a; __le64 b; };
struct my_u *u = (struct my_u *)tx_desc;
const char *type;
......@@ -3415,7 +3436,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
struct my_u { __le64 a; __le64 b; };
struct my_u *u = (struct my_u *)rx_desc;
const char *type;
......@@ -3429,7 +3450,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
i, le64_to_cpu(u->a), le64_to_cpu(u->b),
(u64)buffer_info->dma, buffer_info->skb, type);
(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
} /* for */
/* dump the descriptor caches */
......@@ -3811,7 +3832,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct e1000_buffer *buffer_info;
struct e1000_tx_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes=0, total_tx_packets=0;
......@@ -3949,12 +3970,12 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
}
/**
* e1000_consume_page - helper function
* e1000_consume_page - helper function for jumbo Rx path
**/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
u16 length)
{
bi->page = NULL;
bi->rxbuf.page = NULL;
skb->len += length;
skb->data_len += length;
skb->truesize += PAGE_SIZE;
......@@ -3980,6 +4001,113 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
napi_gro_receive(&adapter->napi, skb);
}
/**
* e1000_tbi_adjust_stats
* @hw: Struct containing variables accessed by shared code
* @frame_len: The length of the frame in question
* @mac_addr: The Ethernet destination address of the frame in question
*
* Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
*/
static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
struct e1000_hw_stats *stats,
u32 frame_len, const u8 *mac_addr)
{
u64 carry_bit;
/* First adjust the frame length. */
frame_len--;
/* We need to adjust the statistics counters, since the hardware
* counters overcount this packet as a CRC error and undercount
* the packet as a good packet
*/
/* This packet should not be counted as a CRC error. */
stats->crcerrs--;
/* This packet does count as a Good Packet Received. */
stats->gprc++;
/* Adjust the Good Octets received counters */
carry_bit = 0x80000000 & stats->gorcl;
stats->gorcl += frame_len;
/* If the high bit of Gorcl (the low 32 bits of the Good Octets
* Received Count) was one before the addition,
* AND it is zero after, then we lost the carry out,
* need to add one to Gorch (Good Octets Received Count High).
* This could be simplified if all environments supported
* 64-bit integers.
*/
if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
stats->gorch++;
/* Is this a broadcast or multicast? Check broadcast first,
* since the test for a multicast frame will test positive on
* a broadcast frame.
*/
if (is_broadcast_ether_addr(mac_addr))
stats->bprc++;
else if (is_multicast_ether_addr(mac_addr))
stats->mprc++;
if (frame_len == hw->max_frame_size) {
/* In this case, the hardware has overcounted the number of
* oversize frames.
*/
if (stats->roc > 0)
stats->roc--;
}
/* Adjust the bin counters when the extra byte put the frame in the
* wrong bin. Remember that the frame_len was adjusted above.
*/
if (frame_len == 64) {
stats->prc64++;
stats->prc127--;
} else if (frame_len == 127) {
stats->prc127++;
stats->prc255--;
} else if (frame_len == 255) {
stats->prc255++;
stats->prc511--;
} else if (frame_len == 511) {
stats->prc511++;
stats->prc1023--;
} else if (frame_len == 1023) {
stats->prc1023++;
stats->prc1522--;
} else if (frame_len == 1522) {
stats->prc1522++;
}
}
static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
u8 status, u8 errors,
u32 length, const u8 *data)
{
struct e1000_hw *hw = &adapter->hw;
u8 last_byte = *(data + length - 1);
if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
unsigned long irq_flags;
spin_lock_irqsave(&adapter->stats_lock, irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
return true;
}
return false;
}
static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
unsigned int bufsz)
{
struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
if (unlikely(!skb))
adapter->alloc_rx_buff_failed++;
return skb;
}
/**
* e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
......@@ -3994,12 +4122,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc, *next_rxd;
struct e1000_buffer *buffer_info, *next_buffer;
unsigned long irq_flags;
struct e1000_rx_buffer *buffer_info, *next_buffer;
u32 length;
unsigned int i;
int cleaned_count = 0;
......@@ -4020,8 +4146,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
rmb(); /* read descriptor and rx_buffer_info after status DD */
status = rx_desc->status;
skb = buffer_info->skb;
buffer_info->skb = NULL;
if (++i == rx_ring->count) i = 0;
next_rxd = E1000_RX_DESC(*rx_ring, i);
......@@ -4032,7 +4156,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
dma_unmap_page(&pdev->dev, buffer_info->dma,
buffer_info->length, DMA_FROM_DEVICE);
adapter->rx_buffer_len, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
......@@ -4040,25 +4164,15 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* errors is only valid for DD + EOP descriptors */
if (unlikely((status & E1000_RXD_STAT_EOP) &&
(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
u8 *mapped;
u8 last_byte;
mapped = page_address(buffer_info->page);
last_byte = *(mapped + length - 1);
if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock,
irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
length, mapped);
spin_unlock_irqrestore(&adapter->stats_lock,
irq_flags);
u8 *mapped = page_address(buffer_info->rxbuf.page);
if (e1000_tbi_should_accept(adapter, status,
rx_desc->errors,
length, mapped)) {
length--;
} else if (netdev->features & NETIF_F_RXALL) {
goto process_skb;
} else {
if (netdev->features & NETIF_F_RXALL)
goto process_skb;
/* recycle both page and skb */
buffer_info->skb = skb;
/* an error means any chain goes out the window
* too
*/
......@@ -4075,16 +4189,18 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* this descriptor is only the beginning (or middle) */
if (!rxtop) {
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
rxtop = napi_get_frags(&adapter->napi);
if (!rxtop)
break;
skb_fill_page_desc(rxtop, 0,
buffer_info->rxbuf.page,
0, length);
} else {
/* this is the middle of a chain */
skb_fill_page_desc(rxtop,
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the skb, only consumed the page */
buffer_info->skb = skb;
buffer_info->rxbuf.page, 0, length);
}
e1000_consume_page(buffer_info, rxtop, length);
goto next_desc;
......@@ -4093,32 +4209,51 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* end of the chain */
skb_fill_page_desc(rxtop,
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
* page
*/
buffer_info->skb = skb;
buffer_info->rxbuf.page, 0, length);
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
struct page *p;
/* no chain, got EOP, this buf is the packet
* copybreak to save the put_page/alloc_page
*/
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
p = buffer_info->rxbuf.page;
if (length <= copybreak) {
u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page);
if (likely(!(netdev->features & NETIF_F_RXFCS)))
length -= 4;
skb = e1000_alloc_rx_skb(adapter,
length);
if (!skb)
break;
vaddr = kmap_atomic(p);
memcpy(skb_tail_pointer(skb), vaddr,
length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
* buffer_info->page
* buffer_info->rxbuf.page
*/
skb_put(skb, length);
e1000_rx_checksum(adapter,
status | rx_desc->errors << 24,
le16_to_cpu(rx_desc->csum), skb);
total_rx_bytes += skb->len;
total_rx_packets++;
e1000_receive_skb(adapter, status,
rx_desc->special, skb);
goto next_desc;
} else {
skb_fill_page_desc(skb, 0,
buffer_info->page, 0,
skb = napi_get_frags(&adapter->napi);
if (!skb) {
adapter->alloc_rx_buff_failed++;
break;
}
skb_fill_page_desc(skb, 0, p, 0,
length);
e1000_consume_page(buffer_info, skb,
length);
......@@ -4137,14 +4272,14 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
pskb_trim(skb, skb->len - 4);
total_rx_packets++;
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
e_err(drv, "pskb_may_pull failed.\n");
dev_kfree_skb(skb);
goto next_desc;
if (status & E1000_RXD_STAT_VP) {
__le16 vlan = rx_desc->special;
u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
e1000_receive_skb(adapter, status, rx_desc->special, skb);
napi_gro_frags(&adapter->napi);
next_desc:
rx_desc->status = 0;
......@@ -4175,25 +4310,25 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
static void e1000_check_copybreak(struct net_device *netdev,
struct e1000_buffer *buffer_info,
u32 length, struct sk_buff **skb)
static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
struct e1000_rx_buffer *buffer_info,
u32 length, const void *data)
{
struct sk_buff *new_skb;
struct sk_buff *skb;
if (length > copybreak)
return;
return NULL;
new_skb = netdev_alloc_skb_ip_align(netdev, length);
if (!new_skb)
return;
skb = e1000_alloc_rx_skb(adapter, length);
if (!skb)
return NULL;
dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
length, DMA_FROM_DEVICE);
skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
(*skb)->data - NET_IP_ALIGN,
length + NET_IP_ALIGN);
/* save the skb in buffer_info as good */
buffer_info->skb = *skb;
*skb = new_skb;
memcpy(skb_put(skb, length), data, length);
return skb;
}
/**
......@@ -4207,12 +4342,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc, *next_rxd;
struct e1000_buffer *buffer_info, *next_buffer;
unsigned long flags;
struct e1000_rx_buffer *buffer_info, *next_buffer;
u32 length;
unsigned int i;
int cleaned_count = 0;
......@@ -4225,6 +4358,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
while (rx_desc->status & E1000_RXD_STAT_DD) {
struct sk_buff *skb;
u8 *data;
u8 status;
if (*work_done >= work_to_do)
......@@ -4233,10 +4367,27 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
rmb(); /* read descriptor and rx_buffer_info after status DD */
status = rx_desc->status;
skb = buffer_info->skb;
buffer_info->skb = NULL;
length = le16_to_cpu(rx_desc->length);
data = buffer_info->rxbuf.data;
prefetch(data);
skb = e1000_copybreak(adapter, buffer_info, length, data);
if (!skb) {
unsigned int frag_len = e1000_frag_len(adapter);
skb = build_skb(data - E1000_HEADROOM, frag_len);
if (!skb) {
adapter->alloc_rx_buff_failed++;
break;
}
prefetch(skb->data - NET_IP_ALIGN);
skb_reserve(skb, E1000_HEADROOM);
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
buffer_info->dma = 0;
buffer_info->rxbuf.data = NULL;
}
if (++i == rx_ring->count) i = 0;
next_rxd = E1000_RX_DESC(*rx_ring, i);
......@@ -4246,11 +4397,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
* packet, if thats the case we need to toss it. In fact, we
* to toss every packet with the EOP bit clear and the next
......@@ -4262,29 +4409,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (adapter->discarding) {
/* All receives must fit into a single buffer */
e_dbg("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
dev_kfree_skb(skb);
if (status & E1000_RXD_STAT_EOP)
adapter->discarding = false;
goto next_desc;
}
if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
u8 last_byte = *(skb->data + length - 1);
if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
length, skb->data);
spin_unlock_irqrestore(&adapter->stats_lock,
flags);
if (e1000_tbi_should_accept(adapter, status,
rx_desc->errors,
length, data)) {
length--;
} else if (netdev->features & NETIF_F_RXALL) {
goto process_skb;
} else {
if (netdev->features & NETIF_F_RXALL)
goto process_skb;
/* recycle */
buffer_info->skb = skb;
dev_kfree_skb(skb);
goto next_desc;
}
}
......@@ -4299,9 +4439,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
*/
length -= 4;
e1000_check_copybreak(netdev, buffer_info, length, &skb);
skb_put(skb, length);
if (buffer_info->rxbuf.data == NULL)
skb_put(skb, length);
else /* copybreak skb */
skb_trim(skb, length);
/* Receive Checksum Offload */
e1000_rx_checksum(adapter,
......@@ -4347,38 +4488,19 @@ static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring, int cleaned_count)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
struct e1000_rx_buffer *buffer_info;
unsigned int i;
unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
skb = buffer_info->skb;
if (skb) {
skb_trim(skb, 0);
goto check_page;
}
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
if (unlikely(!skb)) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
break;
}
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
check_page:
/* allocate a new page if necessary */
if (!buffer_info->page) {
buffer_info->page = alloc_page(GFP_ATOMIC);
if (unlikely(!buffer_info->page)) {
if (!buffer_info->rxbuf.page) {
buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
if (unlikely(!buffer_info->rxbuf.page)) {
adapter->alloc_rx_buff_failed++;
break;
}
......@@ -4386,17 +4508,15 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
buffer_info->length,
buffer_info->rxbuf.page, 0,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
put_page(buffer_info->page);
dev_kfree_skb(skb);
buffer_info->page = NULL;
buffer_info->skb = NULL;
put_page(buffer_info->rxbuf.page);
buffer_info->rxbuf.page = NULL;
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
break; /* while !buffer_info->skb */
break;
}
}
......@@ -4432,11 +4552,9 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
int cleaned_count)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
struct e1000_rx_buffer *buffer_info;
unsigned int i;
unsigned int bufsz = adapter->rx_buffer_len;
......@@ -4444,57 +4562,52 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
skb = buffer_info->skb;
if (skb) {
skb_trim(skb, 0);
goto map_skb;
}
void *data;
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
if (unlikely(!skb)) {
if (buffer_info->rxbuf.data)
goto skip;
data = e1000_alloc_frag(adapter);
if (!data) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
break;
}
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
if (!e1000_check_64k_bound(adapter, data, bufsz)) {
void *olddata = data;
e_err(rx_err, "skb align check failed: %u bytes at "
"%p\n", bufsz, skb->data);
"%p\n", bufsz, data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
data = e1000_alloc_frag(adapter);
/* Failed allocation, critical failure */
if (!skb) {
dev_kfree_skb(oldskb);
if (!data) {
e1000_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break;
}
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
if (!e1000_check_64k_bound(adapter, data, bufsz)) {
/* give up */
dev_kfree_skb(skb);
dev_kfree_skb(oldskb);
e1000_free_frag(data);
e1000_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break; /* while !buffer_info->skb */
break;
}
/* Use new allocation */
dev_kfree_skb(oldskb);
e1000_free_frag(olddata);
}
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
buffer_info->length,
data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_kfree_skb(skb);
buffer_info->skb = NULL;
e1000_free_frag(data);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
break; /* while !buffer_info->skb */
break;
}
/* XXX if it was allocated cleanly it will never map to a
......@@ -4508,17 +4621,20 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
e_err(rx_err, "dma align check failed: %u bytes at "
"%p\n", adapter->rx_buffer_len,
(void *)(unsigned long)buffer_info->dma);
dev_kfree_skb(skb);
buffer_info->skb = NULL;
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
e1000_free_frag(data);
buffer_info->rxbuf.data = NULL;
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
break; /* while !buffer_info->skb */
break;
}
buffer_info->rxbuf.data = data;
skip:
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
......
......@@ -386,119 +386,87 @@ struct ixgbe_q_vector {
char name[IFNAMSIZ + 9];
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */
#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock;
atomic_t state;
#endif /* CONFIG_NET_RX_BUSY_POLL */
/* for dynamic allocation of rings associated with this q_vector */
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
#ifdef CONFIG_NET_RX_BUSY_POLL
enum ixgbe_qv_state_t {
IXGBE_QV_STATE_IDLE = 0,
IXGBE_QV_STATE_NAPI,
IXGBE_QV_STATE_POLL,
IXGBE_QV_STATE_DISABLE
};
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
spin_lock_init(&q_vector->lock);
q_vector->state = IXGBE_QV_STATE_IDLE;
/* reset state to idle */
atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}
/* called from the device poll routine to get ownership of a q_vector */
static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = true;
spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBE_QV_LOCKED) {
WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
rc = false;
int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
IXGBE_QV_STATE_NAPI);
#ifdef BP_EXTENDED_STATS
if (rc != IXGBE_QV_STATE_IDLE)
q_vector->tx.ring->stats.yields++;
#endif
} else {
/* we don't care if someone yielded */
q_vector->state = IXGBE_QV_STATE_NAPI;
}
spin_unlock_bh(&q_vector->lock);
return rc;
return rc == IXGBE_QV_STATE_IDLE;
}
/* returns true if someone tried to get the qv while napi had it */
static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = false;
spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
IXGBE_QV_STATE_NAPI_YIELD));
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
/* will reset state to idle, unless QV is disabled */
q_vector->state &= IXGBE_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);
/* flush any outstanding Rx frames */
if (q_vector->napi.gro_list)
napi_gro_flush(&q_vector->napi, false);
/* reset state to idle */
atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}
/* called from ixgbe_low_latency_poll() */
static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
{
int rc = true;
spin_lock_bh(&q_vector->lock);
if ((q_vector->state & IXGBE_QV_LOCKED)) {
q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
rc = false;
int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
IXGBE_QV_STATE_POLL);
#ifdef BP_EXTENDED_STATS
q_vector->rx.ring->stats.yields++;
if (rc != IXGBE_QV_STATE_IDLE)
q_vector->tx.ring->stats.yields++;
#endif
} else {
/* preserve yield marks */
q_vector->state |= IXGBE_QV_STATE_POLL;
}
spin_unlock_bh(&q_vector->lock);
return rc;
return rc == IXGBE_QV_STATE_IDLE;
}
/* returns true if someone tried to get the qv while it was locked */
static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
{
int rc = false;
spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI));
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
/* will reset state to idle, unless QV is disabled */
q_vector->state &= IXGBE_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL);
/* reset state to idle */
atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}
/* true if a socket is polling, even if it did not get the lock */
static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
return q_vector->state & IXGBE_QV_USER_PEND;
return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL;
}
/* false if QV is currently owned */
static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
{
int rc = true;
spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBE_QV_OWNED)
rc = false;
q_vector->state |= IXGBE_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
IXGBE_QV_STATE_DISABLE);
return rc == IXGBE_QV_STATE_IDLE;
}
#else /* CONFIG_NET_RX_BUSY_POLL */
......
......@@ -1303,7 +1303,7 @@ static const struct ixgbe_reg_test reg_test_82599[] = {
{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0 }
{ .reg = 0 }
};
/* default 82598 register test */
......@@ -1331,7 +1331,7 @@ static const struct ixgbe_reg_test reg_test_82598[] = {
{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0 }
{ .reg = 0 }
};
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
......
......@@ -807,6 +807,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ixgbe_poll, 64);
napi_hash_add(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
/* initialize busy poll */
atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);
#endif
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
......
......@@ -440,7 +440,7 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
{IXGBE_TXDCTL(0), "TXDCTL"},
/* List Terminator */
{}
{ .name = NULL }
};
......@@ -2077,9 +2077,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
if (cleaned_count)
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
return total_rx_packets;
}
......@@ -5186,15 +5183,15 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int orig_node = dev_to_node(dev);
int numa_node = -1;
int ring_node = -1;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
if (tx_ring->q_vector)
numa_node = tx_ring->q_vector->numa_node;
ring_node = tx_ring->q_vector->numa_node;
tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
if (!tx_ring->tx_buffer_info)
tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
......@@ -5206,7 +5203,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
set_dev_node(dev, numa_node);
set_dev_node(dev, ring_node);
tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size,
&tx_ring->dma,
......@@ -5270,15 +5267,15 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
int numa_node = -1;
int ring_node = -1;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
if (rx_ring->q_vector)
numa_node = rx_ring->q_vector->numa_node;
ring_node = rx_ring->q_vector->numa_node;
rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
......@@ -5290,7 +5287,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
set_dev_node(dev, numa_node);
set_dev_node(dev, ring_node);
rx_ring->desc = dma_alloc_coherent(dev,
rx_ring->size,
&rx_ring->dma,
......
......@@ -523,7 +523,7 @@ static const struct ixgbevf_reg_test reg_test_vf[] = {
{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
{ 0, 0, 0, 0 }
{ .reg = 0 }
};
static const u32 register_test_patterns[] = {
......