Commit 0fe13151 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-09-12

This series contains updates to e1000, ixgbe and ixgbevf.

Mark provides two fixes to reduce compile warnings produced by ixgbe
and ixgbevf.
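
The fix, visible in the ixgbe/ixgbevf ethtool and ixgbe_main hunks below, is
to terminate the register test tables with designated initializers instead of
positional zeros, which (presumably) silences missing-field-initializer style
warnings without changing behaviour:

	/* before: every field spelled out positionally */
	{ 0, 0, 0, 0 }

	/* after: name one member, the rest are implicitly zeroed */
	{ .reg = 0 }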

Alex provides two patches for ixgbe.  The first removes the receive buffer
allocation at the end of ixgbe_clean_rx_irq(), to avoid the extra latency
introduced by the MMIO write.  The second patch addresses several issues in
the current ixgbe implementation of busy poll sockets.  It was possible for
frames to be delivered out of order if they were held in GRO, so this is
addressed by flushing the GRO buffers before releasing the q_vector back to
the idle state.  We also had to take a spinlock when changing the state to
and from idle; to resolve this, the state value is replaced with an atomic,
using atomic_cmpxchg to claim the vector from idle and a simple atomic set
to return it to idle once we are done with it.  As a result, only acquiring
the vector requires a locked operation; releasing it requires none.
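
A minimal sketch of the resulting acquire/release pattern, condensed from the
ixgbe_qv_lock_napi()/ixgbe_qv_unlock_napi() helpers in the ixgbe.h hunk below
(BP_EXTENDED_STATS accounting omitted):

	/* claim the vector: succeeds only if it was idle */
	static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
	{
		int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
					IXGBE_QV_STATE_NAPI);
		return rc == IXGBE_QV_STATE_IDLE;
	}

	/* release the vector: flush GRO first so frames cannot be delivered
	 * out of order, then a plain atomic store is enough to go back to idle
	 */
	static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
	{
		if (q_vector->napi.gro_list)
			napi_gro_flush(&q_vector->napi, false);

		atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
	}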

Florian Westphal provides several patches for e1000 which do some cleanup
and updating of the driver.  He moved e1000_tbi_adjust_stats() so that the
function could be made static, and added a helper function for the TBI
workaround that was duplicated in two different Rx clean functions.  He also
added an e1000_rx_buffer struct for use on receive, since transmit and
receive have different requirements, and updated e1000 to use the
napi_gro_frags API.
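
The e1000_main.c side of the napi_gro_frags conversion is not part of the
hunks shown below, so purely as an illustration, the generic receive pattern
that API implies looks roughly like this (hypothetical fragment, not the
actual e1000 code; page/offset/length stand for the DMA buffer the hardware
filled):

	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;		/* allocation failed: drop or recycle */

	/* attach the received page as a fragment instead of copying */
	skb_fill_page_desc(skb, 0, page, offset, length);
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);	/* hand the frame to GRO */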
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 54996b52 de591c78
@@ -148,16 +148,23 @@ struct e1000_adapter;
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer
  */
-struct e1000_buffer {
+struct e1000_tx_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
-	struct page *page;
 	unsigned long time_stamp;
 	u16 length;
 	u16 next_to_watch;
-	unsigned int segs;
+	bool mapped_as_page;
+	unsigned short segs;
 	unsigned int bytecount;
-	u16 mapped_as_page;
+};
+
+struct e1000_rx_buffer {
+	union {
+		struct page *page; /* jumbo: alloc_page */
+		u8 *data; /* else, netdev_alloc_frag */
+	} rxbuf;
+	dma_addr_t dma;
 };
 
 struct e1000_tx_ring {
@@ -174,7 +181,7 @@ struct e1000_tx_ring {
 	/* next descriptor to check for DD status bit */
 	unsigned int next_to_clean;
 	/* array of buffer information structs */
-	struct e1000_buffer *buffer_info;
+	struct e1000_tx_buffer *buffer_info;
 
 	u16 tdh;
 	u16 tdt;
@@ -195,7 +202,7 @@ struct e1000_rx_ring {
 	/* next descriptor to check for DD status bit */
 	unsigned int next_to_clean;
 	/* array of buffer information structs */
-	struct e1000_buffer *buffer_info;
+	struct e1000_rx_buffer *buffer_info;
 	struct sk_buff *rx_skb_top;
 
 	/* cpu for rx queue */
...
@@ -968,10 +968,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
 			if (rxdr->buffer_info[i].dma)
 				dma_unmap_single(&pdev->dev,
 						 rxdr->buffer_info[i].dma,
-						 rxdr->buffer_info[i].length,
+						 E1000_RXBUFFER_2048,
 						 DMA_FROM_DEVICE);
-			if (rxdr->buffer_info[i].skb)
-				dev_kfree_skb(rxdr->buffer_info[i].skb);
+			kfree(rxdr->buffer_info[i].rxbuf.data);
 		}
 	}
@@ -1006,7 +1005,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	if (!txdr->count)
 		txdr->count = E1000_DEFAULT_TXD;
 
-	txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer),
+	txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer),
 				    GFP_KERNEL);
 	if (!txdr->buffer_info) {
 		ret_val = 1;
@@ -1065,7 +1064,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	if (!rxdr->count)
 		rxdr->count = E1000_DEFAULT_RXD;
 
-	rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
+	rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer),
 				    GFP_KERNEL);
 	if (!rxdr->buffer_info) {
 		ret_val = 5;
@@ -1095,25 +1094,25 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	for (i = 0; i < rxdr->count; i++) {
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
-		struct sk_buff *skb;
+		u8 *buf;
 
-		skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
-		if (!skb) {
+		buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN,
+			      GFP_KERNEL);
+		if (!buf) {
 			ret_val = 7;
 			goto err_nomem;
 		}
-		skb_reserve(skb, NET_IP_ALIGN);
-		rxdr->buffer_info[i].skb = skb;
-		rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
+		rxdr->buffer_info[i].rxbuf.data = buf;
 		rxdr->buffer_info[i].dma =
-			dma_map_single(&pdev->dev, skb->data,
+			dma_map_single(&pdev->dev,
+				       buf + NET_SKB_PAD + NET_IP_ALIGN,
 				       E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
 		if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
 			ret_val = 8;
 			goto err_nomem;
 		}
 		rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
-		memset(skb->data, 0x00, skb->len);
 	}
 
 	return 0;
@@ -1386,13 +1385,13 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb,
 	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
 }
 
-static int e1000_check_lbtest_frame(struct sk_buff *skb,
+static int e1000_check_lbtest_frame(const unsigned char *data,
 				    unsigned int frame_size)
 {
 	frame_size &= ~1;
-	if (skb->data[3] == 0xFF) {
-		if (skb->data[frame_size / 2 + 10] == 0xBE &&
-		    skb->data[frame_size / 2 + 12] == 0xAF) {
+	if (*(data + 3) == 0xFF) {
+		if ((*(data + frame_size / 2 + 10) == 0xBE) &&
+		    (*(data + frame_size / 2 + 12) == 0xAF)) {
 			return 0;
 		}
 	}
@@ -1440,11 +1439,12 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 		do { /* receive the sent packets */
 			dma_sync_single_for_cpu(&pdev->dev,
 						rxdr->buffer_info[l].dma,
-						rxdr->buffer_info[l].length,
+						E1000_RXBUFFER_2048,
 						DMA_FROM_DEVICE);
 
 			ret_val = e1000_check_lbtest_frame(
-					rxdr->buffer_info[l].skb,
+					rxdr->buffer_info[l].rxbuf.data +
+					NET_SKB_PAD + NET_IP_ALIGN,
 					1024);
 			if (!ret_val)
 				good_cnt++;
...
@@ -4836,84 +4836,6 @@ void e1000_update_adaptive(struct e1000_hw *hw)
 	}
 }
 
-/**
- * e1000_tbi_adjust_stats
- * @hw: Struct containing variables accessed by shared code
- * @frame_len: The length of the frame in question
- * @mac_addr: The Ethernet destination address of the frame in question
- *
- * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
- */
-void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
-			    u32 frame_len, u8 *mac_addr)
-{
-	u64 carry_bit;
-
-	/* First adjust the frame length. */
-	frame_len--;
-	/* We need to adjust the statistics counters, since the hardware
-	 * counters overcount this packet as a CRC error and undercount
-	 * the packet as a good packet
-	 */
-	/* This packet should not be counted as a CRC error. */
-	stats->crcerrs--;
-	/* This packet does count as a Good Packet Received. */
-	stats->gprc++;
-
-	/* Adjust the Good Octets received counters */
-	carry_bit = 0x80000000 & stats->gorcl;
-	stats->gorcl += frame_len;
-	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
-	 * Received Count) was one before the addition,
-	 * AND it is zero after, then we lost the carry out,
-	 * need to add one to Gorch (Good Octets Received Count High).
-	 * This could be simplified if all environments supported
-	 * 64-bit integers.
-	 */
-	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
-		stats->gorch++;
-	/* Is this a broadcast or multicast? Check broadcast first,
-	 * since the test for a multicast frame will test positive on
-	 * a broadcast frame.
-	 */
-	if (is_broadcast_ether_addr(mac_addr))
-		/* Broadcast packet */
-		stats->bprc++;
-	else if (is_multicast_ether_addr(mac_addr))
-		/* Multicast packet */
-		stats->mprc++;
-
-	if (frame_len == hw->max_frame_size) {
-		/* In this case, the hardware has overcounted the number of
-		 * oversize frames.
-		 */
-		if (stats->roc > 0)
-			stats->roc--;
-	}
-
-	/* Adjust the bin counters when the extra byte put the frame in the
-	 * wrong bin. Remember that the frame_len was adjusted above.
-	 */
-	if (frame_len == 64) {
-		stats->prc64++;
-		stats->prc127--;
-	} else if (frame_len == 127) {
-		stats->prc127++;
-		stats->prc255--;
-	} else if (frame_len == 255) {
-		stats->prc255++;
-		stats->prc511--;
-	} else if (frame_len == 511) {
-		stats->prc511++;
-		stats->prc1023--;
-	} else if (frame_len == 1023) {
-		stats->prc1023++;
-		stats->prc1522--;
-	} else if (frame_len == 1522) {
-		stats->prc1522++;
-	}
-}
-
 /**
  * e1000_get_bus_info
  * @hw: Struct containing variables accessed by shared code
...
@@ -393,8 +393,6 @@ s32 e1000_blink_led_start(struct e1000_hw *hw);
 /* Everything else */
 void e1000_reset_adaptive(struct e1000_hw *hw);
 void e1000_update_adaptive(struct e1000_hw *hw);
-void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
-			    u32 frame_len, u8 * mac_addr);
 void e1000_get_bus_info(struct e1000_hw *hw);
 void e1000_pci_set_mwi(struct e1000_hw *hw);
 void e1000_pci_clear_mwi(struct e1000_hw *hw);
...
@@ -386,119 +386,87 @@ struct ixgbe_q_vector {
 	char name[IFNAMSIZ + 9];
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-	unsigned int state;
-#define IXGBE_QV_STATE_IDLE 0
-#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */
-#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
-#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
-#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
-#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
-	spinlock_t lock;
+	atomic_t state;
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 	/* for dynamic allocation of rings associated with this q_vector */
 	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
+enum ixgbe_qv_state_t {
+	IXGBE_QV_STATE_IDLE = 0,
+	IXGBE_QV_STATE_NAPI,
+	IXGBE_QV_STATE_POLL,
+	IXGBE_QV_STATE_DISABLE
+};
+
 static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 {
-	spin_lock_init(&q_vector->lock);
-	q_vector->state = IXGBE_QV_STATE_IDLE;
+	/* reset state to idle */
+	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
 }
 
 /* called from the device poll routine to get ownership of a q_vector */
 static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 {
-	int rc = true;
-	spin_lock_bh(&q_vector->lock);
-	if (q_vector->state & IXGBE_QV_LOCKED) {
-		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
-		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
-		rc = false;
+	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
+				IXGBE_QV_STATE_NAPI);
 #ifdef BP_EXTENDED_STATS
+	if (rc != IXGBE_QV_STATE_IDLE)
 		q_vector->tx.ring->stats.yields++;
 #endif
-	} else {
-		/* we don't care if someone yielded */
-		q_vector->state = IXGBE_QV_STATE_NAPI;
-	}
-	spin_unlock_bh(&q_vector->lock);
-	return rc;
+
+	return rc == IXGBE_QV_STATE_IDLE;
 }
 
 /* returns true is someone tried to get the qv while napi had it */
-static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
+static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
 {
-	int rc = false;
-	spin_lock_bh(&q_vector->lock);
-	WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
-				   IXGBE_QV_STATE_NAPI_YIELD));
-
-	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
-		rc = true;
-	/* will reset state to idle, unless QV is disabled */
-	q_vector->state &= IXGBE_QV_STATE_DISABLED;
-	spin_unlock_bh(&q_vector->lock);
-	return rc;
+	WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);
+
+	/* flush any outstanding Rx frames */
+	if (q_vector->napi.gro_list)
+		napi_gro_flush(&q_vector->napi, false);
+
+	/* reset state to idle */
+	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
 }
 
 /* called from ixgbe_low_latency_poll() */
 static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
 {
-	int rc = true;
-	spin_lock_bh(&q_vector->lock);
-	if ((q_vector->state & IXGBE_QV_LOCKED)) {
-		q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
-		rc = false;
+	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
+				IXGBE_QV_STATE_POLL);
 #ifdef BP_EXTENDED_STATS
-		q_vector->rx.ring->stats.yields++;
+	if (rc != IXGBE_QV_STATE_IDLE)
+		q_vector->tx.ring->stats.yields++;
 #endif
-	} else {
-		/* preserve yield marks */
-		q_vector->state |= IXGBE_QV_STATE_POLL;
-	}
-	spin_unlock_bh(&q_vector->lock);
-	return rc;
+	return rc == IXGBE_QV_STATE_IDLE;
 }
 
 /* returns true if someone tried to get the qv while it was locked */
-static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
+static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 {
-	int rc = false;
-	spin_lock_bh(&q_vector->lock);
-	WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI));
-
-	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
-		rc = true;
-	/* will reset state to idle, unless QV is disabled */
-	q_vector->state &= IXGBE_QV_STATE_DISABLED;
-	spin_unlock_bh(&q_vector->lock);
-	return rc;
+	WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL);
+
+	/* reset state to idle */
+	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
 }
 
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
-	WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
-	return q_vector->state & IXGBE_QV_USER_PEND;
+	return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL;
 }
 
 /* false if QV is currently owned */
 static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
 {
-	int rc = true;
-	spin_lock_bh(&q_vector->lock);
-	if (q_vector->state & IXGBE_QV_OWNED)
-		rc = false;
-	q_vector->state |= IXGBE_QV_STATE_DISABLED;
-	spin_unlock_bh(&q_vector->lock);
-	return rc;
+	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
+				IXGBE_QV_STATE_DISABLE);
+
+	return rc == IXGBE_QV_STATE_IDLE;
 }
 
 #else /* CONFIG_NET_RX_BUSY_POLL */
...
@@ -1303,7 +1303,7 @@ static const struct ixgbe_reg_test reg_test_82599[] = {
 	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
 	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-	{ 0, 0, 0, 0 }
+	{ .reg = 0 }
 };
 
 /* default 82598 register test */
@@ -1331,7 +1331,7 @@ static const struct ixgbe_reg_test reg_test_82598[] = {
 	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
 	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-	{ 0, 0, 0, 0 }
+	{ .reg = 0 }
 };
 
 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
...
@@ -807,6 +807,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 		       ixgbe_poll, 64);
 	napi_hash_add(&q_vector->napi);
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	/* initialize busy poll */
+	atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);
+
+#endif
 	/* tie q_vector and adapter together */
 	adapter->q_vector[v_idx] = q_vector;
 	q_vector->adapter = adapter;
...
@@ -440,7 +440,7 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
 	{IXGBE_TXDCTL(0), "TXDCTL"},
 
 	/* List Terminator */
-	{}
+	{ .name = NULL }
 };
@@ -2077,9 +2077,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	q_vector->rx.total_packets += total_rx_packets;
 	q_vector->rx.total_bytes += total_rx_bytes;
 
-	if (cleaned_count)
-		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
-
 	return total_rx_packets;
 }
@@ -5186,15 +5183,15 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 {
 	struct device *dev = tx_ring->dev;
 	int orig_node = dev_to_node(dev);
-	int numa_node = -1;
+	int ring_node = -1;
 	int size;
 
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 
 	if (tx_ring->q_vector)
-		numa_node = tx_ring->q_vector->numa_node;
+		ring_node = tx_ring->q_vector->numa_node;
 
-	tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
+	tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
 	if (!tx_ring->tx_buffer_info)
 		tx_ring->tx_buffer_info = vzalloc(size);
 	if (!tx_ring->tx_buffer_info)
@@ -5206,7 +5203,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	set_dev_node(dev, numa_node);
+	set_dev_node(dev, ring_node);
 	tx_ring->desc = dma_alloc_coherent(dev,
 					   tx_ring->size,
 					   &tx_ring->dma,
@@ -5270,15 +5267,15 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 {
 	struct device *dev = rx_ring->dev;
 	int orig_node = dev_to_node(dev);
-	int numa_node = -1;
+	int ring_node = -1;
 	int size;
 
 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 
 	if (rx_ring->q_vector)
-		numa_node = rx_ring->q_vector->numa_node;
+		ring_node = rx_ring->q_vector->numa_node;
 
-	rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
+	rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
 	if (!rx_ring->rx_buffer_info)
 		rx_ring->rx_buffer_info = vzalloc(size);
 	if (!rx_ring->rx_buffer_info)
@@ -5290,7 +5287,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	set_dev_node(dev, numa_node);
+	set_dev_node(dev, ring_node);
 	rx_ring->desc = dma_alloc_coherent(dev,
 					   rx_ring->size,
 					   &rx_ring->dma,
...
@@ -523,7 +523,7 @@ static const struct ixgbevf_reg_test reg_test_vf[] = {
 	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
 	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
-	{ 0, 0, 0, 0 }
+	{ .reg = 0 }
 };
 
 static const u32 register_test_patterns[] = {
...