Commit 0c26b677 authored by Santiago Leon's avatar Santiago Leon Committed by David S. Miller

ibmveth: Add optional flush of rx buffer

On some machines we can improve the bandwidth by ensuring rx buffers are
not in the cache. Add a module option that is disabled by default that flushes
rx buffers on insertion.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6e8ab30e
@@ -127,6 +127,10 @@ module_param(rx_copybreak, uint, 0644);
 MODULE_PARM_DESC(rx_copybreak,
 	"Maximum size of packet that is copied to a new buffer on receive");
static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
 struct ibmveth_stat {
 	char name[ETH_GSTRING_LEN];
 	int offset;
@@ -234,6 +238,14 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 	return 0;
 }
/*
 * Flush a receive buffer out of the data cache, one cache line at a
 * time, using the PowerPC dcbfl (data cache block flush line)
 * instruction.  Called before handing rx buffers to the hypervisor
 * (and after copying out of them) when the rx_flush module parameter
 * is enabled.
 *
 * @addr:   start of the buffer to flush
 * @length: number of bytes to flush; rounded up to whole cache lines
 */
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long off = 0;

	while (off < length) {
		/* dcbfl flushes the cache line at addr + off */
		asm("dcbfl %0,%1" :: "b" (addr), "r" (off));
		off += SMP_CACHE_BYTES;
	}
}
 /* replenish the buffers for a pool. note that we don't need to
  * skb_reserve these since they are used for incoming...
  */
@@ -286,6 +298,12 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
 		desc.fields.address = dma_addr;
if (rx_flush) {
unsigned int len = min(pool->buff_size,
adapter->netdev->mtu +
IBMVETH_BUFF_OH);
ibmveth_flush_buffer(skb->data, len);
}
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 		if (lpar_rc != H_SUCCESS)
@@ -1095,6 +1113,9 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 			skb_copy_to_linear_data(new_skb,
 						skb->data + offset,
 						length);
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
 			skb = new_skb;
 			ibmveth_rxq_recycle_buffer(adapter);
 		} else {
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment