Commit b1daa4d1 authored by David S. Miller

Merge branch 'sfc-more-code-refactoring'

Alex Maftei says:

====================
sfc: more code refactoring

Splitting more of the driver code into different files, which will
later be used in another driver for a new product.

This is a continuation of my previous patch series.
Another series and a stand-alone patch will follow after this.

This series in particular covers MCDI (management controller
driver interface) code.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 43ad352d b69f7a3e
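
The recurring pattern in the diff below is to lift hardware-agnostic MCDI wrappers (queue init/fini, VI allocation, MCDI logging attributes) out of ef10.c and efx.c into the shared mcdi_functions.c, mcdi_port_common.c and efx_common.c, and to point the per-hardware method table at the shared helpers. A minimal illustrative sketch of that indirection, not part of the commit: the efx_mcdi_* prototypes are taken from the diff, while the trimmed-down method-table struct is hypothetical and only stands in for the real efx_nic_type.

/* Sketch only -- not from the commit.  Helper prototypes mirror the diff;
 * the struct below is a hypothetical stand-in for struct efx_nic_type.
 */
struct efx_nic;
struct efx_channel;

/* Shared MCDI helpers, now in mcdi_functions.c, usable by any driver that
 * speaks MCDI (EF10 today, the new product's driver later).
 */
int efx_mcdi_ev_probe(struct efx_channel *channel);
void efx_mcdi_ev_fini(struct efx_channel *channel);
void efx_mcdi_ev_remove(struct efx_channel *channel);

/* Each NIC flavour publishes a method table.  Hooks that are pure MCDI now
 * point at the shared helpers, while genuinely hardware-specific hooks
 * (e.g. efx_ef10_ev_init) stay in ef10.c.
 */
struct efx_nic_type_sketch {
	int (*ev_probe)(struct efx_channel *channel);
	void (*ev_fini)(struct efx_channel *channel);
	void (*ev_remove)(struct efx_channel *channel);
};

static const struct efx_nic_type_sketch ef10_type_sketch = {
	.ev_probe  = efx_mcdi_ev_probe,		/* shared MCDI implementation */
	.ev_fini   = efx_mcdi_ev_fini,		/* shared MCDI implementation */
	.ev_remove = efx_mcdi_ev_remove,	/* shared MCDI implementation */
};
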
@@ -3,8 +3,8 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
	farch.o siena.o ef10.o \
	tx.o tx_common.o rx.o rx_common.o \
	selftest.o ethtool.o ptp.o tx_tso.o \
-	mcdi.o mcdi_port.o \
-	mcdi_mon.o
+	mcdi.o mcdi_port.o mcdi_port_common.o \
+	mcdi_functions.o mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
......
@@ -11,6 +11,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port_common.h"
+#include "mcdi_functions.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
@@ -834,22 +835,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
	return rc;
}
static int efx_ef10_free_vis(struct efx_nic *efx)
{
MCDI_DECLARE_BUF_ERR(outbuf);
size_t outlen;
int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
/* -EALREADY means nothing to free, so ignore */
if (rc == -EALREADY)
rc = 0;
if (rc)
efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
rc);
return rc;
}
#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
@@ -1092,7 +1077,7 @@ static void efx_ef10_remove(struct efx_nic *efx)
	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

-	rc = efx_ef10_free_vis(efx);
+	rc = efx_mcdi_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
@@ -1263,28 +1248,10 @@ static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc != 0)
return rc;
-	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
-		return -EIO;
+	return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base,
+				  &nic_data->n_allocated_vis);
netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
return 0;
}

/* Note that the failure path of this function does not free
@@ -1366,7 +1333,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
	}

	/* In case the last attached driver failed to free VIs, do it now */
-	rc = efx_ef10_free_vis(efx);
+	rc = efx_mcdi_free_vis(efx);
	if (rc != 0)
		return rc;
@@ -1387,7 +1354,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
		efx->max_tx_channels =
			nic_data->n_allocated_vis / EFX_TXQ_TYPES;
-		efx_ef10_free_vis(efx);
+		efx_mcdi_free_vis(efx);
		return -EAGAIN;
	}
@@ -2411,20 +2378,15 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
-	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
-						       EFX_BUF_SIZE));
	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
-	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct efx_ef10_nic_data *nic_data;
	bool tso_v2 = false;
-	size_t inlen;
-	dma_addr_t dma_addr;
	efx_qword_t *txd;
	int rc;
-	int i;

-	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+	nic_data = efx->nic_data;

	/* Only attempt to enable TX timestamping if we have the license for it,
	 * otherwise TXQ init will fail
@@ -2451,51 +2413,9 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
			  channel->channel);
	}

-	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
-	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
-	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+	rc = efx_mcdi_tx_init(tx_queue, tso_v2);
+	if (rc)
+		goto fail;
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
tx_queue->queue, entries, (u64)dma_addr);
for (i = 0; i < entries; ++i) {
MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
dma_addr += EFX_BUF_SIZE;
}
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
do {
MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
/* This flag was removed from mcdi_pcol.h for
* the non-_EXT version of INIT_TXQ. However,
* firmware still honours it.
*/
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
tx_queue->timestamping);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
NULL, 0, NULL);
if (rc == -ENOSPC && tso_v2) {
/* Retry without TSOv2 if we're short on contexts. */
tso_v2 = false;
netif_warn(efx, probe, efx->net_dev,
"TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
} else if (rc) {
efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
MC_CMD_INIT_TXQ_EXT_IN_LEN,
NULL, 0, rc);
goto fail;
}
} while (rc);
	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
@@ -2533,35 +2453,6 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
		    tx_queue->queue);
}
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = tx_queue->efx;
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
tx_queue->queue);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
goto fail;
return;
fail:
efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
outbuf, outlen, rc);
}
static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
{
efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
@@ -3076,91 +2967,6 @@ static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
	return efx_ef10_rx_push_shared_rss_config(efx, NULL);
}
static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
{
return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
(rx_queue->ptr_mask + 1) *
sizeof(efx_qword_t),
GFP_KERNEL);
}
static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
{
MCDI_DECLARE_BUF(inbuf,
MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t inlen;
dma_addr_t dma_addr;
int rc;
int i;
BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
rx_queue->scatter_n = 0;
rx_queue->scatter_len = 0;
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
for (i = 0; i < entries; ++i) {
MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
dma_addr += EFX_BUF_SIZE;
}
inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
NULL, 0, NULL);
if (rc)
netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
efx_rx_queue_index(rx_queue));
}
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = rx_queue->efx;
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
goto fail;
return;
fail:
efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
outbuf, outlen, rc);
}
static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
{
efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -3232,106 +3038,20 @@ efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
	/* nothing to do */
}
static int efx_ef10_ev_probe(struct efx_channel *channel)
{
return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
(channel->eventq_mask + 1) *
sizeof(efx_qword_t),
GFP_KERNEL);
}
static void efx_ef10_ev_fini(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = channel->efx;
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
goto fail;
return;
fail:
efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
outbuf, outlen, rc);
}
static int efx_ef10_ev_init(struct efx_channel *channel)
{
-	MCDI_DECLARE_BUF(inbuf,
-			 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
-						   EFX_BUF_SIZE));
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
-	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	struct efx_ef10_nic_data *nic_data;
-	size_t inlen, outlen;
	unsigned int enabled, implemented;
-	dma_addr_t dma_addr;
+	bool use_v2, cut_thru;
	int rc;
-	int i;

	nic_data = efx->nic_data;

-	/* Fill event queue with all ones (i.e. empty events) */
-	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+	use_v2 = nic_data->datapath_caps2 &
+		 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
+	cut_thru = !(nic_data->datapath_caps &
+		     1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+	rc = efx_mcdi_ev_init(channel, cut_thru, use_v2);
-	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
/* INIT_EVQ expects index in vector table, not absolute */
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
if (nic_data->datapath_caps2 &
1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
/* Use the new generic approach to specifying event queue
* configuration, requesting lower latency or higher throughput.
* The options that actually get used appear in the output.
*/
MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
INIT_EVQ_V2_IN_FLAG_TYPE,
MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
} else {
bool cut_thru = !(nic_data->datapath_caps &
1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
INIT_EVQ_IN_FLAG_RX_MERGE, 1,
INIT_EVQ_IN_FLAG_TX_MERGE, 1,
INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
}
dma_addr = channel->eventq.buf.dma_addr;
for (i = 0; i < entries; ++i) {
MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
dma_addr += EFX_BUF_SIZE;
}
inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
netif_dbg(efx, drv, efx->net_dev,
"Channel %d using event queue flags %08x\n",
channel->channel,
MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
	/* IRQ return is ignored */
	if (channel->channel || rc)
@@ -3389,15 +3109,10 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
	return 0;

fail:
-	efx_ef10_ev_fini(channel);
+	efx_mcdi_ev_fini(channel);
	return rc;
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
{
efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
					   unsigned int rx_queue_label)
{
@@ -3979,9 +3694,9 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
-				efx_ef10_rx_fini(rx_queue);
+				efx_mcdi_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
-				efx_ef10_tx_fini(tx_queue);
+				efx_mcdi_tx_fini(tx_queue);
		}

		wait_event_timeout(efx->flush_wq,
@@ -6653,20 +6368,20 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
-	.tx_remove = efx_ef10_tx_remove,
+	.tx_remove = efx_mcdi_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.tx_limit_len = efx_ef10_tx_limit_len,
	.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
	.rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
-	.rx_probe = efx_ef10_rx_probe,
-	.rx_init = efx_ef10_rx_init,
-	.rx_remove = efx_ef10_rx_remove,
+	.rx_probe = efx_mcdi_rx_probe,
+	.rx_init = efx_mcdi_rx_init,
+	.rx_remove = efx_mcdi_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
-	.ev_probe = efx_ef10_ev_probe,
+	.ev_probe = efx_mcdi_ev_probe,
	.ev_init = efx_ef10_ev_init,
-	.ev_fini = efx_ef10_ev_fini,
-	.ev_remove = efx_ef10_ev_remove,
+	.ev_fini = efx_mcdi_ev_fini,
+	.ev_remove = efx_mcdi_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
@@ -6762,7 +6477,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
-	.tx_remove = efx_ef10_tx_remove,
+	.tx_remove = efx_mcdi_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.tx_limit_len = efx_ef10_tx_limit_len,
	.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
@@ -6770,15 +6485,15 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
	.rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config,
	.rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config,
	.rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts,
-	.rx_probe = efx_ef10_rx_probe,
-	.rx_init = efx_ef10_rx_init,
-	.rx_remove = efx_ef10_rx_remove,
+	.rx_probe = efx_mcdi_rx_probe,
+	.rx_init = efx_mcdi_rx_init,
+	.rx_remove = efx_mcdi_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
-	.ev_probe = efx_ef10_ev_probe,
+	.ev_probe = efx_mcdi_ev_probe,
	.ev_init = efx_ef10_ev_init,
-	.ev_fini = efx_ef10_ev_fini,
-	.ev_remove = efx_ef10_ev_remove,
+	.ev_fini = efx_mcdi_ev_fini,
+	.ev_remove = efx_mcdi_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
......
@@ -134,21 +134,6 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
 *
 **************************************************************************/
void efx_link_set_advertising(struct efx_nic *efx,
const unsigned long *advertising)
{
memcpy(efx->link_advertising, advertising,
sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
efx->link_advertising[0] |= ADVERTISED_Autoneg;
if (advertising[0] & ADVERTISED_Pause)
efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
else
efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
if (advertising[0] & ADVERTISED_Asym_Pause)
efx->wanted_fc ^= EFX_FC_TX;
}
/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
 * force the Autoneg bit on.
 */
@@ -1051,28 +1036,6 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
#ifdef CONFIG_SFC_MCDI_LOGGING
static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
}
static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool enable = count > 0 && *buf != '0';
mcdi->logging_enabled = enable;
return count;
}
static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
#endif
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
@@ -1132,21 +1095,11 @@ static int efx_register_netdev(struct efx_nic *efx)
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

-#ifdef CONFIG_SFC_MCDI_LOGGING
-	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+	efx_init_mcdi_logging(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to init net dev attributes\n");
goto fail_attr_mcdi_logging;
}
#endif
	return 0;
#ifdef CONFIG_SFC_MCDI_LOGGING
fail_attr_mcdi_logging:
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
#endif
fail_registered:
	rtnl_lock();
	efx_dissociate(efx);
@@ -1167,9 +1120,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
-#ifdef CONFIG_SFC_MCDI_LOGGING
-		device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
-#endif
+		efx_fini_mcdi_logging(efx);
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
......
@@ -1039,7 +1039,7 @@ void efx_stop_channels(struct efx_nic *efx)
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
-	int rc;
+	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
@@ -1060,7 +1060,9 @@ void efx_stop_channels(struct efx_nic *efx)
		}
	}

-	rc = efx->type->fini_dmaq(efx);
+	if (efx->type->fini_dmaq)
+		rc = efx->type->fini_dmaq(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
......
@@ -141,9 +141,11 @@ void efx_destroy_reset_workqueue(void)
 */
void efx_mac_reconfigure(struct efx_nic *efx)
{
-	down_read(&efx->filter_sem);
-	efx->type->reconfigure_mac(efx);
-	up_read(&efx->filter_sem);
+	if (efx->type->reconfigure_mac) {
+		down_read(&efx->filter_sem);
+		efx->type->reconfigure_mac(efx);
+		up_read(&efx->filter_sem);
+	}
}

/* Asynchronous work item for changing MAC promiscuity and multicast
@@ -296,7 +298,8 @@ static void efx_start_datapath(struct efx_nic *efx)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
-	if (efx->rx_scatter != old_rx_scatter)
+	if ((efx->rx_scatter != old_rx_scatter) &&
+	    efx->type->filter_update_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
@@ -405,11 +408,13 @@ void efx_start_all(struct efx_nic *efx)
	efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

-	efx->type->start_stats(efx);
-	efx->type->pull_stats(efx);
-	spin_lock_bh(&efx->stats_lock);
-	efx->type->update_stats(efx, NULL, NULL);
-	spin_unlock_bh(&efx->stats_lock);
+	if (efx->type->start_stats) {
+		efx->type->start_stats(efx);
+		efx->type->pull_stats(efx);
+		spin_lock_bh(&efx->stats_lock);
+		efx->type->update_stats(efx, NULL, NULL);
+		spin_unlock_bh(&efx->stats_lock);
+	}
}
/* Quiesce the hardware and software data path, and regular activity
@@ -425,14 +430,17 @@ void efx_stop_all(struct efx_nic *efx)
	if (!efx->port_enabled)
		return;

-	/* update stats before we go down so we can accurately count
-	 * rx_nodesc_drops
-	 */
-	efx->type->pull_stats(efx);
-	spin_lock_bh(&efx->stats_lock);
-	efx->type->update_stats(efx, NULL, NULL);
-	spin_unlock_bh(&efx->stats_lock);
-	efx->type->stop_stats(efx);
+	if (efx->type->update_stats) {
+		/* update stats before we go down so we can accurately count
+		 * rx_nodesc_drops
+		 */
+		efx->type->pull_stats(efx);
+		spin_lock_bh(&efx->stats_lock);
+		efx->type->update_stats(efx, NULL, NULL);
+		spin_unlock_bh(&efx->stats_lock);
+		efx->type->stop_stats(efx);
+	}

	efx_stop_port(efx);

	/* Stop the kernel transmit interface. This is only valid if
@@ -456,7 +464,7 @@ void efx_stop_all(struct efx_nic *efx)
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
-	int rc;
+	int rc = 0;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

@@ -467,7 +475,8 @@ int __efx_reconfigure_port(struct efx_nic *efx)
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

-	rc = efx->type->reconfigure_port(efx);
+	if (efx->type->reconfigure_port)
+		rc = efx->type->reconfigure_port(efx);
	if (rc)
		efx->phy_mode = phy_mode;

@@ -997,3 +1006,42 @@ void efx_fini_io(struct efx_nic *efx, int bar)
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}
#ifdef CONFIG_SFC_MCDI_LOGGING
static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
}
static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool enable = count > 0 && *buf != '0';
mcdi->logging_enabled = enable;
return count;
}
static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
void efx_init_mcdi_logging(struct efx_nic *efx)
{
int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
if (rc) {
netif_warn(efx, drv, efx->net_dev,
"failed to init net dev attributes\n");
}
}
void efx_fini_mcdi_logging(struct efx_nic *efx)
{
device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
}
#endif
@@ -55,6 +55,14 @@ static inline int efx_check_disabled(struct efx_nic *efx)
	return 0;
}
#ifdef CONFIG_SFC_MCDI_LOGGING
void efx_init_mcdi_logging(struct efx_nic *efx);
void efx_fini_mcdi_logging(struct efx_nic *efx);
#else
static inline void efx_init_mcdi_logging(struct efx_nic *efx) {}
static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
#endif
void efx_mac_reconfigure(struct efx_nic *efx);
void efx_link_status_changed(struct efx_nic *efx);
......
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "mcdi_functions.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
int efx_mcdi_free_vis(struct efx_nic *efx)
{
MCDI_DECLARE_BUF_ERR(outbuf);
size_t outlen;
int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
/* -EALREADY means nothing to free, so ignore */
if (rc == -EALREADY)
rc = 0;
if (rc)
efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
rc);
return rc;
}
int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
unsigned int max_vis, unsigned int *vi_base,
unsigned int *allocated_vis)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc != 0)
return rc;
if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
return -EIO;
netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
if (vi_base)
*vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
if (allocated_vis)
*allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
return 0;
}
int efx_mcdi_ev_probe(struct efx_channel *channel)
{
return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
(channel->eventq_mask + 1) *
sizeof(efx_qword_t),
GFP_KERNEL);
}
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
{
MCDI_DECLARE_BUF(inbuf,
MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
EFX_BUF_SIZE));
MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
size_t inlen, outlen;
dma_addr_t dma_addr;
int rc, i;
nic_data = efx->nic_data;
/* Fill event queue with all ones (i.e. empty events) */
memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
/* INIT_EVQ expects index in vector table, not absolute */
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
if (v2) {
/* Use the new generic approach to specifying event queue
* configuration, requesting lower latency or higher throughput.
* The options that actually get used appear in the output.
*/
MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
INIT_EVQ_V2_IN_FLAG_TYPE,
MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
} else {
MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
INIT_EVQ_IN_FLAG_RX_MERGE, 1,
INIT_EVQ_IN_FLAG_TX_MERGE, 1,
INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
}
dma_addr = channel->eventq.buf.dma_addr;
for (i = 0; i < entries; ++i) {
MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
dma_addr += EFX_BUF_SIZE;
}
inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
netif_dbg(efx, drv, efx->net_dev,
"Channel %d using event queue flags %08x\n",
channel->channel,
MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
return rc;
}
void efx_mcdi_ev_remove(struct efx_channel *channel)
{
efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}
void efx_mcdi_ev_fini(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = channel->efx;
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
goto fail;
return;
fail:
efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
outbuf, outlen, rc);
}
int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
struct efx_ef10_nic_data *nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc, i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
nic_data = efx->nic_data;
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
tx_queue->queue, entries, (u64)dma_addr);
for (i = 0; i < entries; ++i) {
MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
dma_addr += EFX_BUF_SIZE;
}
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
do {
MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
/* This flag was removed from mcdi_pcol.h for
* the non-_EXT version of INIT_TXQ. However,
* firmware still honours it.
*/
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
tx_queue->timestamping);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
NULL, 0, NULL);
if (rc == -ENOSPC && tso_v2) {
/* Retry without TSOv2 if we're short on contexts. */
tso_v2 = false;
netif_warn(efx, probe, efx->net_dev,
"TSOv2 context not available to segment in "
"hardware. TCP performance may be reduced.\n"
);
} else if (rc) {
efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
MC_CMD_INIT_TXQ_EXT_IN_LEN,
NULL, 0, rc);
goto fail;
}
} while (rc);
return 0;
fail:
return rc;
}
void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
{
efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = tx_queue->efx;
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
tx_queue->queue);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
goto fail;
return;
fail:
efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
outbuf, outlen, rc);
}
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
{
return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
(rx_queue->ptr_mask + 1) *
sizeof(efx_qword_t),
GFP_KERNEL);
}
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
{
MCDI_DECLARE_BUF(inbuf,
MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc;
int i;
BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
rx_queue->scatter_n = 0;
rx_queue->scatter_len = 0;
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
for (i = 0; i < entries; ++i) {
MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
dma_addr += EFX_BUF_SIZE;
}
inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
NULL, 0, NULL);
if (rc)
netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
efx_rx_queue_index(rx_queue));
}
void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
{
efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}
void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = rx_queue->efx;
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
goto fail;
return;
fail:
efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
outbuf, outlen, rc);
}
@@ -23,7 +23,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2);
void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
-int efx_mcdi_rx_init(struct efx_rx_queue *rx_queue, bool want_outer_classes);
+void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
......
@@ -16,91 +16,6 @@
#include "selftest.h"
#include "mcdi_port_common.h"
int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
rc = -EIO;
goto fail;
}
cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
cfg->supported_cap =
MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
sizeof(cfg->name));
cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
sizeof(cfg->revision));
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
u32 flags, u32 loopback_mode,
u32 loopback_speed)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
return rc;
}
int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
size_t outlen;
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
rc = -EIO;
goto fail;
}
*loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static int efx_mcdi_mdio_read(struct net_device *net_dev,
			      int prtad, int devad, u16 addr)
{
@@ -154,246 +69,6 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
	return 0;
}
void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
#define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
linkset)
bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
switch (media) {
case MC_CMD_MEDIA_KX4:
SET_BIT(Backplane);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
SET_BIT(1000baseKX_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
SET_BIT(10000baseKX4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
SET_BIT(40000baseKR4_Full);
break;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
SET_BIT(FIBRE);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
SET_BIT(10000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
SET_BIT(40000baseCR4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
SET_BIT(100000baseCR4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
SET_BIT(25000baseCR_Full);
if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
SET_BIT(50000baseCR2_Full);
break;
case MC_CMD_MEDIA_BASE_T:
SET_BIT(TP);
if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
SET_BIT(10baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
SET_BIT(10baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
SET_BIT(100baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
SET_BIT(100baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
SET_BIT(1000baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
SET_BIT(10000baseT_Full);
break;
}
if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
SET_BIT(Pause);
if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
SET_BIT(Asym_Pause);
if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
SET_BIT(Autoneg);
#undef SET_BIT
}
u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
u32 result = 0;
#define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
linkset)
if (TEST_BIT(10baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
if (TEST_BIT(10baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
if (TEST_BIT(100baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
if (TEST_BIT(100baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
if (TEST_BIT(100000baseCR4_Full))
result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
if (TEST_BIT(25000baseCR_Full))
result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
if (TEST_BIT(50000baseCR2_Full))
result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
if (TEST_BIT(Pause))
result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
if (TEST_BIT(Asym_Pause))
result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
if (TEST_BIT(Autoneg))
result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
#undef TEST_BIT
return result;
}
u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
enum efx_phy_mode mode, supported;
u32 flags;
/* TODO: Advertise the capabilities supported by this PHY */
supported = 0;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
supported |= PHY_MODE_TX_DISABLED;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
supported |= PHY_MODE_LOW_POWER;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
supported |= PHY_MODE_OFF;
mode = efx->phy_mode & supported;
flags = 0;
if (mode & PHY_MODE_TX_DISABLED)
flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
if (mode & PHY_MODE_LOW_POWER)
flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
if (mode & PHY_MODE_OFF)
flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
return flags;
}
u8 mcdi_to_ethtool_media(u32 media)
{
switch (media) {
case MC_CMD_MEDIA_XAUI:
case MC_CMD_MEDIA_CX4:
case MC_CMD_MEDIA_KX4:
return PORT_OTHER;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
return PORT_FIBRE;
case MC_CMD_MEDIA_BASE_T:
return PORT_TP;
default:
return PORT_OTHER;
}
}
void efx_mcdi_phy_decode_link(struct efx_nic *efx,
struct efx_link_state *link_state,
u32 speed, u32 flags, u32 fcntl)
{
switch (fcntl) {
case MC_CMD_FCNTL_AUTO:
WARN_ON(1); /* This is not a link mode */
link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
break;
case MC_CMD_FCNTL_BIDIR:
link_state->fc = EFX_FC_TX | EFX_FC_RX;
break;
case MC_CMD_FCNTL_RESPOND:
link_state->fc = EFX_FC_RX;
break;
default:
WARN_ON(1);
/* Fall through */
case MC_CMD_FCNTL_OFF:
link_state->fc = 0;
break;
}
link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
link_state->speed = speed;
}
/* The semantics of the ethtool FEC mode bitmask are not well defined,
* particularly the meaning of combinations of bits. Which means we get to
* define our own semantics, as follows:
* OFF overrides any other bits, and means "disable all FEC" (with the
* exception of 25G KR4/CR4, where it is not possible to reject it if AN
* partner requests it).
* AUTO on its own means use cable requirements and link partner autoneg with
* fw-default preferences for the cable type.
* AUTO and either RS or BASER means use the specified FEC type if cable and
* link partner support it, otherwise autoneg/fw-default.
* RS or BASER alone means use the specified FEC type if cable and link partner
* support it and either requests it, otherwise no FEC.
* Both RS and BASER (whether AUTO or not) means use FEC if cable and link
* partner support it, preferring RS to BASER.
*/
u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
{
u32 ret = 0;
if (ethtool_cap & ETHTOOL_FEC_OFF)
return 0;
if (ethtool_cap & ETHTOOL_FEC_AUTO)
ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
if (ethtool_cap & ETHTOOL_FEC_RS)
ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
if (ethtool_cap & ETHTOOL_FEC_BASER)
ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
(1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
return ret;
}
/* Invert ethtool_fec_caps_to_mcdi. There are two combinations that function
* can never produce, (baser xor rs) and neither req; the implementation below
* maps both of those to AUTO. This should never matter, and it's not clear
* what a better mapping would be anyway.
*/
u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
{
bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
: caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
: caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
if (!baser && !rs)
return ETHTOOL_FEC_OFF;
return (rs_req ? ETHTOOL_FEC_RS : 0) |
(baser_req ? ETHTOOL_FEC_BASER : 0) |
(baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
}
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
	struct efx_mcdi_phy_data *phy_data;
@@ -513,58 +188,6 @@ int efx_mcdi_port_reconfigure(struct efx_nic *efx)
				 efx->loopback_mode, 0);
}
/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
* supported by the link partner. Warn the user if this isn't the case
*/
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
/* The link partner capabilities are only relevant if the
* link supports flow control autonegotiation */
if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
return;
/* If flow control autoneg is supported and enabled, then fine */
if (efx->wanted_fc & EFX_FC_AUTO)
return;
rmtadv = 0;
if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
rmtadv |= ADVERTISED_Pause;
if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
rmtadv |= ADVERTISED_Asym_Pause;
if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
netif_err(efx, link, efx->net_dev,
"warning: link partner doesn't support pause frames");
}
bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
efx->link_state.up = false;
else
efx_mcdi_phy_decode_link(
efx, &efx->link_state,
MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
return !efx_link_state_equal(&efx->link_state, &old_state);
}
static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
	struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -652,58 +275,6 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
	return 0;
}
int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
struct ethtool_fecparam *fec)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
u32 caps, active, speed; /* MCDI format */
bool is_25g = false;
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
return -EOPNOTSUPP;
/* behaviour for 25G/50G links depends on 25G BASER bit */
speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
is_25g = speed == 25000 || speed == 50000;
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
/* BASER is never supported on 100G */
if (speed == 100000)
fec->fec &= ~ETHTOOL_FEC_BASER;
active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
switch (active) {
case MC_CMD_FEC_NONE:
fec->active_fec = ETHTOOL_FEC_OFF;
break;
case MC_CMD_FEC_BASER:
fec->active_fec = ETHTOOL_FEC_BASER;
break;
case MC_CMD_FEC_RS:
fec->active_fec = ETHTOOL_FEC_RS;
break;
default:
netif_warn(efx, hw, efx->net_dev,
"Firmware reports unrecognised FEC_TYPE %u\n",
active);
/* We don't know what firmware has picked. AUTO is as good a
* "can't happen" value as any other.
*/
fec->active_fec = ETHTOOL_FEC_AUTO;
break;
}
return 0;
}
static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
				     const struct ethtool_fecparam *fec)
{
@@ -731,27 +302,6 @@ static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
	return 0;
}
int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
return -EIO;
if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
return -EINVAL;
return 0;
}
static const char *const mcdi_sft9001_cable_diag_names[] = {
	"cable.pairA.length",
	"cable.pairB.length",
@@ -1334,17 +884,3 @@ void efx_mcdi_port_remove(struct efx_nic *efx)
	efx->phy_op->remove(efx);
	efx_nic_free_buffer(efx, &efx->stats_buffer);
}
/* Get physical port number (EF10 only; on Siena it is same as PF number) */
int efx_mcdi_port_get_number(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return rc;
return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
}
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "mcdi_port_common.h"
int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
rc = -EIO;
goto fail;
}
cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
cfg->supported_cap =
MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
sizeof(cfg->name));
cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
sizeof(cfg->revision));
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
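/* Illustrative sketch only, not part of this patch: a hypothetical caller
 * (example_read_phy_cfg) showing the expected calling convention for
 * efx_mcdi_get_phy_cfg(). The real probe path in the driver does more;
 * this assumes <linux/slab.h> is available for kzalloc().
 */
static int example_read_phy_cfg(struct efx_nic *efx)
{
	struct efx_mcdi_phy_data *cfg;
	int rc;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	rc = efx_mcdi_get_phy_cfg(efx, cfg);	/* fills name, caps, media, ... */
	if (rc) {
		kfree(cfg);
		return rc;
	}

	efx->phy_data = cfg;
	return 0;
}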
void efx_link_set_advertising(struct efx_nic *efx,
const unsigned long *advertising)
{
memcpy(efx->link_advertising, advertising,
sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
efx->link_advertising[0] |= ADVERTISED_Autoneg;
if (advertising[0] & ADVERTISED_Pause)
efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
else
efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
if (advertising[0] & ADVERTISED_Asym_Pause)
efx->wanted_fc ^= EFX_FC_TX;
}
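/* For reference, the pause configuration that results from each advertised
 * combination (derived directly from the logic above):
 *
 *	Pause	Asym_Pause	wanted_fc (TX/RX bits)
 *	  0	    0		neither
 *	  1	    0		EFX_FC_TX | EFX_FC_RX
 *	  1	    1		EFX_FC_RX
 *	  0	    1		EFX_FC_TX
 */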
int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
u32 flags, u32 loopback_mode, u32 loopback_speed)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
return rc;
}
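/* The capability and flag words passed to efx_mcdi_set_link() are raw MCDI
 * bitmasks; callers typically build them with ethtool_linkset_to_mcdi_cap()
 * and efx_get_mcdi_phy_flags() below rather than open-coding the bits.
 */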
int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
size_t outlen;
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
rc = -EIO;
goto fail;
}
*loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
#define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
linkset)
bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
switch (media) {
case MC_CMD_MEDIA_KX4:
SET_BIT(Backplane);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
SET_BIT(1000baseKX_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
SET_BIT(10000baseKX4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
SET_BIT(40000baseKR4_Full);
break;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
SET_BIT(FIBRE);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
SET_BIT(10000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
SET_BIT(40000baseCR4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
SET_BIT(100000baseCR4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
SET_BIT(25000baseCR_Full);
if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
SET_BIT(50000baseCR2_Full);
break;
case MC_CMD_MEDIA_BASE_T:
SET_BIT(TP);
if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
SET_BIT(10baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
SET_BIT(10baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
SET_BIT(100baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
SET_BIT(100baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
SET_BIT(1000baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
SET_BIT(10000baseT_Full);
break;
}
if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
SET_BIT(Pause);
if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
SET_BIT(Asym_Pause);
if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
SET_BIT(Autoneg);
#undef SET_BIT
}
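/* Minimal usage sketch (hypothetical helper, not part of this patch):
 * translate the PHY's supported capability word into the ethtool link-mode
 * bitmap that get_link_ksettings() reports.
 */
static void example_fill_supported_modes(struct efx_nic *efx,
					 struct ethtool_link_ksettings *cmd)
{
	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;

	mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
				cmd->link_modes.supported);
}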
u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
u32 result = 0;
#define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
linkset)
if (TEST_BIT(10baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
if (TEST_BIT(10baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
if (TEST_BIT(100baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
if (TEST_BIT(100baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
if (TEST_BIT(100000baseCR4_Full))
result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
if (TEST_BIT(25000baseCR_Full))
result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
if (TEST_BIT(50000baseCR2_Full))
result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
if (TEST_BIT(Pause))
result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
if (TEST_BIT(Asym_Pause))
result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
if (TEST_BIT(Autoneg))
result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
#undef TEST_BIT
return result;
}
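/* Note that this mapping is deliberately lossy in one direction: e.g.
 * 1000baseT_Full and 1000baseKX_Full both collapse onto
 * MC_CMD_PHY_CAP_1000FDX_LBN here, and mcdi_to_ethtool_linkset() above
 * picks whichever ethtool mode matches the reported media type when
 * converting back.
 */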
u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
enum efx_phy_mode mode, supported;
u32 flags;
/* TODO: Advertise the capabilities supported by this PHY */
supported = 0;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
supported |= PHY_MODE_TX_DISABLED;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
supported |= PHY_MODE_LOW_POWER;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
supported |= PHY_MODE_OFF;
mode = efx->phy_mode & supported;
flags = 0;
if (mode & PHY_MODE_TX_DISABLED)
flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
if (mode & PHY_MODE_LOW_POWER)
flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
if (mode & PHY_MODE_OFF)
flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
return flags;
}
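/* Putting the helpers together - an illustrative sketch (hypothetical
 * function name) of pushing a new advertising mask to the MC, roughly what
 * the driver's set_link_ksettings path does:
 */
static int example_push_advertising(struct efx_nic *efx,
				    const unsigned long *advertising)
{
	int rc;

	rc = efx_mcdi_set_link(efx, ethtool_linkset_to_mcdi_cap(advertising),
			       efx_get_mcdi_phy_flags(efx),
			       efx->loopback_mode, 0);
	if (rc)
		return rc;

	efx_link_set_advertising(efx, advertising);
	return 0;
}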
u8 mcdi_to_ethtool_media(u32 media)
{
switch (media) {
case MC_CMD_MEDIA_XAUI:
case MC_CMD_MEDIA_CX4:
case MC_CMD_MEDIA_KX4:
return PORT_OTHER;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
return PORT_FIBRE;
case MC_CMD_MEDIA_BASE_T:
return PORT_TP;
default:
return PORT_OTHER;
}
}
void efx_mcdi_phy_decode_link(struct efx_nic *efx,
struct efx_link_state *link_state,
u32 speed, u32 flags, u32 fcntl)
{
switch (fcntl) {
case MC_CMD_FCNTL_AUTO:
WARN_ON(1); /* This is not a link mode */
link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
break;
case MC_CMD_FCNTL_BIDIR:
link_state->fc = EFX_FC_TX | EFX_FC_RX;
break;
case MC_CMD_FCNTL_RESPOND:
link_state->fc = EFX_FC_RX;
break;
default:
WARN_ON(1);
/* Fall through */
case MC_CMD_FCNTL_OFF:
link_state->fc = 0;
break;
}
link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
link_state->speed = speed;
}
/* The semantics of the ethtool FEC mode bitmask are not well defined,
* particularly the meaning of combinations of bits. Which means we get to
* define our own semantics, as follows:
* OFF overrides any other bits, and means "disable all FEC" (with the
* exception of 25G KR4/CR4, where it is not possible to reject it if AN
* partner requests it).
* AUTO on its own means use cable requirements and link partner autoneg with
* fw-default preferences for the cable type.
* AUTO and either RS or BASER means use the specified FEC type if cable and
* link partner support it, otherwise autoneg/fw-default.
* RS or BASER alone means use the specified FEC type if cable and link partner
* support it and either requests it, otherwise no FEC.
* Both RS and BASER (whether AUTO or not) means use FEC if cable and link
* partner support it, preferring RS to BASER.
*/
u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
{
u32 ret = 0;
if (ethtool_cap & ETHTOOL_FEC_OFF)
return 0;
if (ethtool_cap & ETHTOOL_FEC_AUTO)
ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
if (ethtool_cap & ETHTOOL_FEC_RS)
ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
if (ethtool_cap & ETHTOOL_FEC_BASER)
ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
(1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
return ret;
}
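/* Worked example of the rules above: a request of "auto rs"
 * (ETHTOOL_FEC_AUTO | ETHTOOL_FEC_RS) sets all three FEC ability bits
 * (BASER, 25G BASER and RS) via the AUTO clause, plus RS_FEC_REQUESTED via
 * the RS clause - so RS is preferred, with firmware free to fall back if
 * the cable or link partner cannot use it.
 */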
/* Invert ethtool_fec_caps_to_mcdi. There are two combinations that function
* can never produce, (baser xor rs) and neither req; the implementation below
* maps both of those to AUTO. This should never matter, and it's not clear
* what a better mapping would be anyway.
*/
u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
{
bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
: caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
: caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
if (!baser && !rs)
return ETHTOOL_FEC_OFF;
return (rs_req ? ETHTOOL_FEC_RS : 0) |
(baser_req ? ETHTOOL_FEC_BASER : 0) |
(baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
}
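/* Worked example: caps with only RS_FEC and RS_FEC_REQUESTED set decode to
 * ETHTOOL_FEC_RS (abilities and requests agree, so no AUTO bit); caps with
 * RS_FEC set but RS_FEC_REQUESTED clear decode to ETHTOOL_FEC_AUTO, since
 * ability without a request maps onto the "autoneg/fw-default" behaviour
 * described above.
 */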
/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
* supported by the link partner. Warn the user if this isn't the case
*/
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
/* The link partner capabilities are only relevant if the
* link supports flow control autonegotiation
*/
if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
return;
/* If flow control autoneg is supported and enabled, then fine */
if (efx->wanted_fc & EFX_FC_AUTO)
return;
rmtadv = 0;
if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
rmtadv |= ADVERTISED_Pause;
if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
rmtadv |= ADVERTISED_Asym_Pause;
if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
netif_err(efx, link, efx->net_dev,
"warning: link partner doesn't support pause frames");
}
bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
efx->link_state.up = false;
else
efx_mcdi_phy_decode_link(
efx, &efx->link_state,
MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
return !efx_link_state_equal(&efx->link_state, &old_state);
}
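/* Illustrative sketch (hypothetical helper): how a periodic monitor might
 * use the poll function. This assumes efx_link_status_changed() may be
 * called with the MAC lock held, as elsewhere in the driver.
 */
static void example_monitor_link(struct efx_nic *efx)
{
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);
}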
int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
u32 caps, active, speed; /* MCDI format */
bool is_25g = false;
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
return -EOPNOTSUPP;
/* behaviour for 25G/50G links depends on 25G BASER bit */
speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
is_25g = speed == 25000 || speed == 50000;
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
/* BASER is never supported on 100G */
if (speed == 100000)
fec->fec &= ~ETHTOOL_FEC_BASER;
active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
switch (active) {
case MC_CMD_FEC_NONE:
fec->active_fec = ETHTOOL_FEC_OFF;
break;
case MC_CMD_FEC_BASER:
fec->active_fec = ETHTOOL_FEC_BASER;
break;
case MC_CMD_FEC_RS:
fec->active_fec = ETHTOOL_FEC_RS;
break;
default:
netif_warn(efx, hw, efx->net_dev,
"Firmware reports unrecognised FEC_TYPE %u\n",
active);
/* We don't know what firmware has picked. AUTO is as good a
* "can't happen" value as any other.
*/
fec->active_fec = ETHTOOL_FEC_AUTO;
break;
}
return 0;
}
int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
return -EIO;
if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
return -EINVAL;
return 0;
}
/* Get physical port number (EF10 only; on Siena it is same as PF number) */
int efx_mcdi_port_get_number(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return rc;
return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
}