Commit 4bfeadfc authored by David S. Miller

Merge branch 'sfc-fix-bugs-introduced-by-XDP-patches'

Edward Cree says:

====================
sfc: fix bugs introduced by XDP patches

Two fixes for bugs introduced by the XDP support in the sfc driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 258a980d 11a14dc8
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1472,6 +1472,12 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 	n_xdp_tx = num_possible_cpus();
 	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
 
+	vec_count = pci_msix_vec_count(efx->pci_dev);
+	if (vec_count < 0)
+		return vec_count;
+
+	max_channels = min_t(unsigned int, vec_count, max_channels);
+
 	/* Check resources.
 	 * We need a channel per event queue, plus a VI per tx queue.
 	 * This may be more pessimistic than it needs to be.
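The hunk above queries the MSI-X vector count up front and clamps max_channels before any channel arithmetic depends on it. Below is a minimal standalone sketch of that clamp-then-derive ordering; the query function and all numbers are made up for illustration and merely stand in for pci_msix_vec_count() and the driver's real bounds:

#include <stdio.h>

/* Hypothetical stand-in for pci_msix_vec_count(); negative values
 * model PCI error codes, as in the real API. */
static int fake_msix_vec_count(void)
{
	return 32;	/* pretend the device exposes 32 MSI-X vectors */
}

int main(void)
{
	unsigned int max_channels = 64;	/* assumed driver-side upper bound */
	int vec_count = fake_msix_vec_count();

	if (vec_count < 0)
		return 1;	/* propagate the error, as the hunk does */

	/* Clamp before any channel counts are derived from max_channels. */
	if ((unsigned int)vec_count < max_channels)
		max_channels = (unsigned int)vec_count;

	printf("usable channels: %u\n", max_channels);
	return 0;
}

Clamping first means every later min() and subtraction works from a bound the hardware can actually satisfy, instead of discovering the shortage after the XDP queues have already been budgeted.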
@@ -1493,11 +1499,6 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 			  n_xdp_tx, n_xdp_ev);
 	}
 
-	n_channels = min(n_channels, max_channels);
-
-	vec_count = pci_msix_vec_count(efx->pci_dev);
-	if (vec_count < 0)
-		return vec_count;
 	if (vec_count < n_channels) {
 		netif_err(efx, drv, efx->net_dev,
 			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
@@ -1507,11 +1508,9 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 		n_channels = vec_count;
 	}
-	n_channels = min(n_channels, max_channels);
+	efx->n_channels = n_channels;
 
 	/* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
 	if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
 		n_channels--;
-	efx->n_channels = n_channels;
 
 	/* Ignore XDP tx channels when creating rx channels. */
 	n_channels -= efx->n_xdp_channels;
@@ -1531,11 +1530,10 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 		efx->n_rx_channels = n_channels;
 	}
 
-	if (efx->n_xdp_channels)
-		efx->xdp_channel_offset = efx->tx_channel_offset +
-					  efx->n_tx_channels;
-	else
-		efx->xdp_channel_offset = efx->n_channels;
+	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
+	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
+
+	efx->xdp_channel_offset = n_channels;
 
 	netif_dbg(efx, drv, efx->net_dev,
 		  "Allocating %u RX channels\n",
@@ -1550,6 +1548,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 static int efx_probe_interrupts(struct efx_nic *efx)
 {
 	unsigned int extra_channels = 0;
+	unsigned int rss_spread;
 	unsigned int i, j;
 	int rc;
 
@@ -1631,8 +1630,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
 		if (!efx->extra_channel_type[i])
 			continue;
-		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
-		    efx->n_channels <= extra_channels) {
+		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
 			efx->extra_channel_type[i]->handle_no_channel(efx);
 		} else {
 			--j;
@@ -1643,16 +1641,17 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		}
 	}
 
+	rss_spread = efx->n_rx_channels;
 	/* RSS might be usable on VFs even if it is disabled on the PF */
 #ifdef CONFIG_SFC_SRIOV
 	if (efx->type->sriov_wanted) {
-		efx->rss_spread = ((efx->n_rx_channels > 1 ||
+		efx->rss_spread = ((rss_spread > 1 ||
 				    !efx->type->sriov_wanted(efx)) ?
-				   efx->n_rx_channels : efx_vf_size(efx));
+				   rss_spread : efx_vf_size(efx));
 		return 0;
 	}
 #endif
-	efx->rss_spread = efx->n_rx_channels;
+	efx->rss_spread = rss_spread;
 
 	return 0;
 }
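The new rss_spread local snapshots the RX channel count once and reuses it in both the SR-IOV and plain paths. The decision itself is a single ternary; here is a toy sketch with assumed values, where a constant stands in for efx_vf_size() and the SR-IOV state is hard-coded:

#include <stdio.h>

int main(void)
{
	/* Assumed values: one RX channel on the PF, SR-IOV active,
	 * and a stand-in constant for efx_vf_size(). */
	unsigned int rss_spread = 1;
	int sriov_wanted = 1;
	unsigned int vf_size = 8;

	/* Same ternary as the hunk: spread over the VF size only when
	 * the PF itself has a single RX queue and VFs are wanted. */
	unsigned int spread = (rss_spread > 1 || !sriov_wanted) ?
			      rss_spread : vf_size;

	printf("rss_spread = %u\n", spread);	/* prints 8 here */
	return 0;
}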
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1533,9 +1533,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
 
 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
 {
-	return efx_channel_is_xdp_tx(channel) ||
-	       (channel->type && channel->type->want_txqs &&
-		channel->type->want_txqs(channel));
+	return true;
 }
 
 static inline struct efx_tx_queue *
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -96,11 +96,12 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
 
 void efx_rx_config_page_split(struct efx_nic *efx)
 {
-	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
+	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+				      XDP_PACKET_HEADROOM,
 				      EFX_RX_BUF_ALIGNMENT);
 	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
 		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
-		(efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
+		 efx->rx_page_buf_step);
 	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
 		efx->rx_bufs_per_page;
 	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
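This fix folds XDP_PACKET_HEADROOM into the aligned per-buffer step rather than bolting it on afterwards, so the headroom is counted exactly once when the page is divided up. A worked sketch of the arithmetic with assumed sizes (rx_dma_len, the alignment, and the page-state size are placeholders for the real MTU-dependent values; XDP_PACKET_HEADROOM really is 256 in the kernel):

#include <stdio.h>

#define XDP_PACKET_HEADROOM	256	/* kernel-wide value */
#define PAGE_SIZE		4096
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Assumed figures; real ones depend on MTU, platform alignment,
	 * and sizeof(struct efx_rx_page_state). */
	unsigned int rx_dma_len = 1500;
	unsigned int rx_ip_align = 2;
	unsigned int buf_align = 64;	/* stand-in for EFX_RX_BUF_ALIGNMENT */
	unsigned int page_state = 16;

	/* New scheme: the headroom is part of the aligned step itself. */
	unsigned int step = ALIGN(rx_dma_len + rx_ip_align +
				  XDP_PACKET_HEADROOM, buf_align);
	unsigned int bufs = (PAGE_SIZE - page_state) / step;

	printf("step %u bytes -> %u buffer(s) per page\n", step, bufs);
	return 0;
}

With these numbers the step comes out to 1792 bytes and two buffers fit per 4 KiB page, headroom included.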
@@ -190,14 +191,13 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
 	page_offset = sizeof(struct efx_rx_page_state);
 
 	do {
-		page_offset += XDP_PACKET_HEADROOM;
-		dma_addr += XDP_PACKET_HEADROOM;
-
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
-		rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
+		rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+				   XDP_PACKET_HEADROOM;
 		rx_buf->page = page;
-		rx_buf->page_offset = page_offset + efx->rx_ip_align;
+		rx_buf->page_offset = page_offset + efx->rx_ip_align +
+				      XDP_PACKET_HEADROOM;
 		rx_buf->len = efx->rx_dma_len;
 		rx_buf->flags = 0;
 		++rx_queue->added_count;
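With the headroom inside rx_page_buf_step, each buffer's DMA address and page offset need only the one-time XDP_PACKET_HEADROOM shift shown above; the old per-iteration `page_offset +=` / `dma_addr +=` lines, which let the headroom accumulate across buffers, are gone. A sketch of two buffers laid out in one page, reusing the assumed numbers from the previous sketch:

#include <stdio.h>

#define XDP_PACKET_HEADROOM	256
/* Same assumed numbers as the previous sketch. */
#define PAGE_STATE_SIZE		16
#define RX_IP_ALIGN		2
#define STEP			1792	/* rx_page_buf_step from above */

int main(void)
{
	unsigned int page_offset = PAGE_STATE_SIZE;
	unsigned int i;

	/* Each buffer's payload sits past the page state, the XDP
	 * headroom, and the IP alignment pad; the refill loop then
	 * advances by the full step, so nothing accumulates. */
	for (i = 0; i < 2; i++) {
		printf("buf %u: payload at page offset %u\n",
		       i, page_offset + RX_IP_ALIGN + XDP_PACKET_HEADROOM);
		page_offset += STEP;
	}
	return 0;
}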