Commit a995560a authored by Bert Kenward, committed by David S. Miller

sfc: use new performance based event queue init

Rather than explicitly specifying flags, we can now specify a desired
performance target to the firmware, i.e. higher throughput or lower latency.
For now we use the default "auto" configuration.
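
Illustrative note, not part of this change: with the V2 interface the same
flags word could in principle request an explicit target rather than leaving
the choice to firmware. A minimal sketch, assuming the TYPE enum values
(MANUAL/LOW_LATENCY/THROUGHPUT/AUTO) defined alongside MC_CMD_INIT_EVQ_V2 in
mcdi_pcol.h:

	/* Sketch only: ask the firmware for a latency-tuned event queue
	 * instead of the "auto" default used by this patch.
	 */
	MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
			      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
			      INIT_EVQ_V2_IN_FLAG_TYPE,
			      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY);

Either way, the INIT_EVQ_V2 response reports which individual flags the
firmware actually applied for the requested type.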
Signed-off-by: Bert Kenward <bkenward@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ca889a05
@@ -2541,13 +2541,12 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
 static int efx_ef10_ev_init(struct efx_channel *channel)
 {
 	MCDI_DECLARE_BUF(inbuf,
-			 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
-						EFX_BUF_SIZE));
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+			 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+						   EFX_BUF_SIZE));
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
 	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
 	struct efx_nic *efx = channel->efx;
 	struct efx_ef10_nic_data *nic_data;
-	bool supports_rx_merge;
 	size_t inlen, outlen;
 	unsigned int enabled, implemented;
 	dma_addr_t dma_addr;
@@ -2555,9 +2554,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
 	int i;
 
 	nic_data = efx->nic_data;
-	supports_rx_merge =
-		!!(nic_data->datapath_caps &
-		   1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
 
 	/* Fill event queue with all ones (i.e. empty events) */
 	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
@@ -2566,11 +2562,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
 	/* INIT_EVQ expects index in vector table, not absolute */
 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
-	MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
-			      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
-			      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
-			      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
-			      INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
 		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
@@ -2579,6 +2570,27 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
 		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
 
+	if (nic_data->datapath_caps2 &
+	    1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
+		/* Use the new generic approach to specifying event queue
+		 * configuration, requesting lower latency or higher throughput.
+		 * The options that actually get used appear in the output.
+		 */
+		MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
+				      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
+				      INIT_EVQ_V2_IN_FLAG_TYPE,
+				      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
+	} else {
+		bool cut_thru = !(nic_data->datapath_caps &
+			1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+
+		MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+				      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+				      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+				      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+				      INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
+	}
+
 	dma_addr = channel->eventq.buf.dma_addr;
 	for (i = 0; i < entries; ++i) {
 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
@@ -2589,6 +2601,13 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
 			  outbuf, sizeof(outbuf), &outlen);
+
+	if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Channel %d using event queue flags %08x\n",
+			  channel->channel,
+			  MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+
 	/* IRQ return is ignored */
 	if (channel->channel || rc)
 		return rc;