Commit 13d5231c authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2019-09-11

This series contains fixes to ixgbe.

Alex fixes up the adaptive ITR scheme for ixgbe, which could produce an
ITR value that was either 0 or something less than 10. Values that low
cause problems for hardware features, like RSC, that do not function
well with them (a short arithmetic sketch follows below).

Ilya Maximets fixes the ixgbe driver to limit the number of transmit
descriptors it cleans to the number of descriptors actually in use in
the transmit ring, so that the driver does not try to "double" clean
the same descriptors (a toy model of the bounded loop follows below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents af38d07e bf280c03
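
For context on Alex's fix: in the low-link-speed path, ixgbe_update_itr()
computes the ITR increment as DIV_ROUND_UP(avg_wire_size,
IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * IXGBE_ITR_ADAPTIVE_MIN_INC, and 8064 is
precisely the largest avg_wire_size for which that increment stays at 126
microseconds, the scheme's maximum. Below is a minimal user-space sketch of
that arithmetic, not driver code; the macro values are assumed to match the
driver's definitions in ixgbe.h.

#include <stdio.h>

/* Mirrors the kernel macro: ceil(n / d) for positive integers. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Assumed to match the adaptive-ITR macros in ixgbe.h. */
#define IXGBE_ITR_ADAPTIVE_MIN_INC	2
#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126

/* The low-link-speed ITR increment from ixgbe_update_itr(). */
static unsigned int itr_inc(unsigned int avg_wire_size)
{
	return DIV_ROUND_UP(avg_wire_size, IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
	       IXGBE_ITR_ADAPTIVE_MIN_INC;
}

int main(void)
{
	/* Uncapped, a large average wire size pushes the increment past
	 * the 126us maximum, which is where the too-low wrapped values
	 * described above come from. */
	printf("inc(12000) = %u\n", itr_inc(12000));	/* 188 */

	/* Capped at 8064, the increment is exactly
	 * DIV_ROUND_UP(8064, 128) * 2 = 63 * 2 = 126. */
	printf("inc(8064)  = %u\n", itr_inc(8064));	/* 126 */
	return 0;
}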
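And for Ilya's fix, the shape of the change is easiest to see in isolation:
the old cleanup loop ran until its work-limit budget was exhausted, trusting
the DD bit, which can still be set from a descriptor's previous use, so the
same entries could be completed twice; the new loop is bounded by the
next_to_clean/next_to_use pair itself. The toy model below (plain C with a
hypothetical clean_ring() helper; it omits the DD-bit check, unmapping, and
stats that the real ixgbe_clean_xdp_tx_irq() performs) shows the bounded
walk with wraparound.

#include <stdio.h>

#define RING_SIZE 8	/* toy ring; real Tx rings are much larger */

/* Hypothetical model of the fixed loop: visit only the descriptors
 * between next_to_clean (ntc) and next_to_use (ntu), wrapping at the
 * ring boundary, so no descriptor can be "cleaned" twice. */
static unsigned int clean_ring(unsigned int ntc, unsigned int ntu)
{
	unsigned int cleaned = 0;

	while (ntc != ntu) {
		/* the real driver checks IXGBE_TXD_STAT_DD here and
		 * unmaps/counts the buffer before moving on */
		cleaned++;
		if (++ntc == RING_SIZE)
			ntc = 0;
	}
	return cleaned;
}

int main(void)
{
	/* Three descriptors in flight, wrapping past the end of the
	 * ring: indices 6, 7, 0 are cleaned and the loop stops at 1. */
	printf("cleaned %u descriptors\n", clean_ring(6, 1));
	return 0;
}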
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2621,7 +2621,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
 		/* 16K ints/sec to 9.2K ints/sec */
 		avg_wire_size *= 15;
 		avg_wire_size += 11452;
-	} else if (avg_wire_size <= 1980) {
+	} else if (avg_wire_size < 1968) {
 		/* 9.2K ints/sec to 8K ints/sec */
 		avg_wire_size *= 5;
 		avg_wire_size += 22420;
@@ -2654,6 +2654,8 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
 	case IXGBE_LINK_SPEED_1GB_FULL:
 	case IXGBE_LINK_SPEED_10_FULL:
+		if (avg_wire_size > 8064)
+			avg_wire_size = 8064;
 		itr += DIV_ROUND_UP(avg_wire_size,
 				    IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
 		       IXGBE_ITR_ADAPTIVE_MIN_INC;
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -633,19 +633,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
 bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 			    struct ixgbe_ring *tx_ring, int napi_budget)
 {
+	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
 	unsigned int total_packets = 0, total_bytes = 0;
-	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
-	unsigned int budget = q_vector->tx.work_limit;
 	struct xdp_umem *umem = tx_ring->xsk_umem;
 	union ixgbe_adv_tx_desc *tx_desc;
 	struct ixgbe_tx_buffer *tx_bi;
-	bool xmit_done;
+	u32 xsk_frames = 0;
 
-	tx_bi = &tx_ring->tx_buffer_info[i];
-	tx_desc = IXGBE_TX_DESC(tx_ring, i);
-	i -= tx_ring->count;
+	tx_bi = &tx_ring->tx_buffer_info[ntc];
+	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
 
-	do {
+	while (ntc != ntu) {
 		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
 			break;
 
@@ -661,22 +659,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 
 		tx_bi++;
 		tx_desc++;
-		i++;
-		if (unlikely(!i)) {
-			i -= tx_ring->count;
+		ntc++;
+		if (unlikely(ntc == tx_ring->count)) {
+			ntc = 0;
 			tx_bi = tx_ring->tx_buffer_info;
 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 		}
 
 		/* issue prefetch for next Tx descriptor */
 		prefetch(tx_desc);
+	}
 
-		/* update budget accounting */
-		budget--;
-	} while (likely(budget));
-
-	i += tx_ring->count;
-	tx_ring->next_to_clean = i;
+	tx_ring->next_to_clean = ntc;
 
 	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->stats.bytes += total_bytes;
@@ -688,8 +682,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 	if (xsk_frames)
 		xsk_umem_complete_tx(umem, xsk_frames);
 
-	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
-	return budget > 0 && xmit_done;
+	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }
 
 int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)