Commit 272baeeb authored by Ben Hutchings

sfc: Properly distinguish RX buffer and DMA lengths

Replace efx_nic::rx_buffer_len with efx_nic::rx_dma_len, the maximum
RX DMA length.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
parent 80c2e716
...@@ -639,12 +639,11 @@ static void efx_start_datapath(struct efx_nic *efx) ...@@ -639,12 +639,11 @@ static void efx_start_datapath(struct efx_nic *efx)
* support the current MTU, including padding for header * support the current MTU, including padding for header
* alignment and overruns. * alignment and overruns.
*/ */
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_hash_size + efx->type->rx_buffer_padding);
efx->type->rx_buffer_padding); efx->rx_buffer_order = get_order(sizeof(struct efx_rx_page_state) +
efx->rx_buffer_order = get_order(efx->rx_buffer_len + EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
sizeof(struct efx_rx_page_state));
/* We must keep at least one descriptor in a TX ring empty. /* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly * We could avoid this when the queue size does not exactly
......
...@@ -669,8 +669,7 @@ struct vfdi_status; ...@@ -669,8 +669,7 @@ struct vfdi_status;
* @n_channels: Number of channels in use * @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX * @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length, including start alignment but excluding * @rx_dma_len: Current maximum RX DMA length
* any metadata
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_hash_key: Toeplitz hash key for RSS * @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS * @rx_indir_table: Indirection table for RSS
...@@ -786,7 +785,7 @@ struct efx_nic { ...@@ -786,7 +785,7 @@ struct efx_nic {
unsigned rss_spread; unsigned rss_spread;
unsigned tx_channel_offset; unsigned tx_channel_offset;
unsigned n_tx_channels; unsigned n_tx_channels;
unsigned int rx_buffer_len; unsigned int rx_dma_len;
unsigned int rx_buffer_order; unsigned int rx_buffer_order;
u8 rx_hash_key[40]; u8 rx_hash_key[40];
u32 rx_indir_table[128]; u32 rx_indir_table[128];
......
...@@ -27,8 +27,9 @@ ...@@ -27,8 +27,9 @@
/* Number of RX descriptors pushed at once. */ /* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8 #define EFX_RX_BATCH 8
/* Maximum size of a buffer sharing a page */ /* Maximum length for an RX descriptor sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state)) #define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state) \
- EFX_PAGE_IP_ALIGN)
/* Size of buffer allocated for skb header area. */ /* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u #define EFX_SKB_HEADERS 64u
...@@ -52,10 +53,6 @@ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, ...@@ -52,10 +53,6 @@ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
{ {
return buf->page_offset + efx->type->rx_buffer_hash_size; return buf->page_offset + efx->type->rx_buffer_hash_size;
} }
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
return PAGE_SIZE << efx->rx_buffer_order;
}
static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{ {
...@@ -105,7 +102,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) ...@@ -105,7 +102,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
if (unlikely(page == NULL)) if (unlikely(page == NULL))
return -ENOMEM; return -ENOMEM;
dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0, dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
efx_rx_buf_size(efx), PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) { if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
__free_pages(page, efx->rx_buffer_order); __free_pages(page, efx->rx_buffer_order);
...@@ -124,12 +121,12 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) ...@@ -124,12 +121,12 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->page = page; rx_buf->page = page;
rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0; rx_buf->flags = 0;
++rx_queue->added_count; ++rx_queue->added_count;
++state->refcnt; ++state->refcnt;
if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) { if ((~count & 1) && (efx->rx_dma_len <= EFX_RX_HALF_PAGE)) {
/* Use the second half of the page */ /* Use the second half of the page */
get_page(page); get_page(page);
dma_addr += (PAGE_SIZE >> 1); dma_addr += (PAGE_SIZE >> 1);
...@@ -153,7 +150,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, ...@@ -153,7 +150,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
if (--state->refcnt == 0) { if (--state->refcnt == 0) {
dma_unmap_page(&efx->pci_dev->dev, dma_unmap_page(&efx->pci_dev->dev,
state->dma_addr, state->dma_addr,
efx_rx_buf_size(efx), PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
} else if (used_len) { } else if (used_len) {
dma_sync_single_for_cpu(&efx->pci_dev->dev, dma_sync_single_for_cpu(&efx->pci_dev->dev,
...@@ -221,7 +218,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel, ...@@ -221,7 +218,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
rx_buf->flags = 0; rx_buf->flags = 0;
if (efx->rx_buffer_len <= EFX_RX_HALF_PAGE && if (efx->rx_dma_len <= EFX_RX_HALF_PAGE &&
page_count(rx_buf->page) == 1) page_count(rx_buf->page) == 1)
efx_resurrect_rx_buffer(rx_queue, rx_buf); efx_resurrect_rx_buffer(rx_queue, rx_buf);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment