Commit 1a676d2b authored by David S. Miller

Merge branch 'sfc-next'

Shradha Shah says:

====================
sfc: Enable EF10 VFs, set up vswitching and bind the SFC driver to the VFs

This set of patches paves the way for the EF10 SR-IOV driver,
starting with some cleanup code.
NIC-specific SR-IOV functions are moved to their own header,
and the netdev_ops are made generic instead of NIC-specific.

Next comes the patch to enable VFs using sriov_configure.
The VEB vswitching hierarchy is set up next, followed by patches
that prepare the sfc driver to bind to the enabled VFs.
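
For context, a minimal sketch (illustrative names only, not code from
this series) of the plumbing that sriov_configure relies on: writing N
to the device's sriov_numvfs sysfs attribute makes the PCI core call
the driver's sriov_configure() hook with num_vfs = N, and writing 0
disables the VFs again.

	#include <linux/pci.h>

	/* Illustrative only -- not part of this series.
	 *   echo <N> > /sys/bus/pci/devices/<BDF>/sriov_numvfs
	 * ends up calling this hook via the PCI core.
	 */
	static int example_sriov_configure(struct pci_dev *dev, int num_vfs)
	{
		if (num_vfs == 0) {
			pci_disable_sriov(dev);	/* tear the VFs back down */
			return 0;
		}
		/* Return the number of VFs enabled, or a negative error. */
		return pci_enable_sriov(dev, num_vfs) ?: num_vfs;
	}

	static struct pci_driver example_pci_driver = {
		.name		 = "example",
		.sriov_configure = example_sriov_configure,
	};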

This is followed by a patch to support the use of shared RSS
contexts, which makes VFs use shared RSS contexts in all cases.
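
As a rough guide to the sizing policy (an illustrative helper distilled
from efx_ef10_alloc_rss_context() in the diff below, not a literal
excerpt): an exclusive context spreads over the full RSS spread, while a
shared context is capped at a power of two no larger than the 64-entry
shared limit.

	/* Illustrative only -- see efx_ef10_alloc_rss_context() below.
	 * How many queues an RSS context spreads over, depending on
	 * whether it is exclusive to this function or shared.
	 */
	static unsigned int example_rss_context_size(unsigned int rss_spread,
						     bool exclusive)
	{
		if (exclusive)
			return rss_spread;
		return min(rounddown_pow_of_two(rss_spread),
			   EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
	}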

The series ends with a patch that binds the sfc driver to the
enabled VFs, creating network interfaces corresponding to the
VFs.

Coming up soon are the patches for set_vf_mac, set_vf_config,
set_vf_vlan, vf_spoofchk, etc.

These patches have been tested with and without CONFIG_SFC_SRIOV.
With CONFIG_SFC_SRIOV=y, enabling VFs via sriov_configure was also
tested; the enabled VFs bind to the installed sfc driver
successfully and create network interfaces.
With CONFIG_SFC_SRIOV=n, enabling VFs via sriov_configure returns
the correct error message:
"Function not implemented".
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f926204b 6f7f8aa6
......@@ -3,6 +3,6 @@ sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_port.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
obj-$(CONFIG_SFC) += sfc.o
......@@ -15,6 +15,7 @@
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
......@@ -30,6 +31,9 @@ enum {
/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
/* The filter table(s) are managed by firmware and we have write-only
* access. When removing filters we must identify them to the
......@@ -77,7 +81,6 @@ struct efx_ef10_filter_table {
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
......@@ -92,7 +95,28 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
int bar;
bar = efx->type->mem_bar;
return resource_size(&efx->pci_dev->resource[bar]);
}
static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
return 0;
}
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
......@@ -117,6 +141,13 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
/* record the DPCPU firmware IDs to determine VEB vswitching support.
*/
nic_data->rx_dpcpu_fw_id =
MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
nic_data->tx_dpcpu_fw_id =
MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
if (!(nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
netif_err(efx, drv, efx->net_dev,
......@@ -178,7 +209,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->max_channels =
min_t(unsigned int,
EFX_MAX_CHANNELS,
resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
efx_ef10_mem_map_size(efx) /
(EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
if (WARN_ON(efx->max_channels == 0))
return -EIO;
......@@ -209,6 +240,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
/* In case we're recovering from a crash (kexec), we want to
* cancel any outstanding request by the previous user of this
* function. We send a special message using the least
......@@ -230,6 +263,10 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
rc = efx_ef10_get_pf_index(efx);
if (rc)
goto fail3;
rc = efx_ef10_init_datapath_caps(efx);
if (rc < 0)
goto fail3;
......@@ -251,10 +288,22 @@ static int efx_ef10_probe(struct efx_nic *efx)
goto fail3;
efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
/* Check whether firmware supports bug 35388 workaround */
/* Check whether firmware supports bug 35388 workaround.
* First try to enable it, then if we get EPERM, just
* ask if it's already enabled
*/
rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
if (rc == 0)
nic_data->workaround_35388 = true;
else if (rc == -EPERM) {
unsigned int enabled;
rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
if (rc)
goto fail3;
nic_data->workaround_35388 = enabled &
MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
}
else if (rc != -ENOSYS && rc != -ENOENT)
goto fail3;
netif_dbg(efx, probe, efx->net_dev,
......@@ -262,7 +311,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
nic_data->workaround_35388 ? "en" : "dis");
rc = efx_mcdi_mon_probe(efx);
if (rc)
if (rc && rc != -EPERM)
goto fail3;
efx_ptp_probe(efx, NULL);
......@@ -279,6 +328,23 @@ static int efx_ef10_probe(struct efx_nic *efx)
return rc;
}
static int efx_ef10_probe_pf(struct efx_nic *efx)
{
return efx_ef10_probe(efx);
}
#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
return efx_ef10_probe(efx);
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
return 0;
}
#endif
static int efx_ef10_free_vis(struct efx_nic *efx)
{
MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
......@@ -687,7 +753,9 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
efx_ef10_rx_push_rss_config(efx);
/* don't fail init if RSS setup doesn't work */
efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
return 0;
}
......@@ -1044,6 +1112,14 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
}
}
static void efx_ef10_get_wol_vf(struct efx_nic *efx,
struct ethtool_wolinfo *wol) {}
static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
{
return -EOPNOTSUPP;
}
static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
wol->supported = 0;
......@@ -1123,6 +1199,10 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
efx_ef10_reset_mc_allocations(efx);
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
......@@ -1237,6 +1317,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t inlen, outlen;
dma_addr_t dma_addr;
efx_qword_t *txd;
......@@ -1251,7 +1332,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
......@@ -1378,19 +1459,33 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
bool exclusive, unsigned *context_size)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
u32 alloc_type = exclusive ?
MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
unsigned rss_spread = exclusive ?
efx->rss_spread :
min(rounddown_pow_of_two(efx->rss_spread),
EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
if (!exclusive && rss_spread == 1) {
*context = EFX_EF10_RSS_CONTEXT_INVALID;
if (context_size)
*context_size = 1;
return 0;
}
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
EVB_PORT_ID_ASSIGNED);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
EFX_MAX_CHANNELS);
nic_data->vport_id);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
......@@ -1402,6 +1497,9 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
if (context_size)
*context_size = rss_spread;
return 0;
}
......@@ -1418,7 +1516,8 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
WARN_ON(rc != 0);
}
static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
const u32 *rx_indir_table)
{
MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
......@@ -1432,7 +1531,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
MCDI_PTR(tablebuf,
RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
(u8) efx->rx_indir_table[i];
(u8) rx_indir_table[i];
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
sizeof(tablebuf), NULL, 0, NULL);
......@@ -1460,27 +1559,119 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
unsigned *context_size)
{
u32 new_rx_rss_context;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
false, context_size);
netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
if (rc != 0)
return rc;
if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
if (rc != 0)
goto fail;
nic_data->rx_rss_context = new_rx_rss_context;
nic_data->rx_rss_context_exclusive = false;
efx_set_default_rx_indir_table(efx);
return 0;
}
static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
const u32 *rx_indir_table)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
u32 new_rx_rss_context;
if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
!nic_data->rx_rss_context_exclusive) {
rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
true, NULL);
if (rc == -EOPNOTSUPP)
return rc;
else if (rc != 0)
goto fail1;
} else {
new_rx_rss_context = nic_data->rx_rss_context;
}
rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
rx_indir_table);
if (rc != 0)
goto fail;
goto fail2;
return;
if (nic_data->rx_rss_context != new_rx_rss_context)
efx_ef10_rx_free_indir_table(efx);
nic_data->rx_rss_context = new_rx_rss_context;
nic_data->rx_rss_context_exclusive = true;
if (rx_indir_table != efx->rx_indir_table)
memcpy(efx->rx_indir_table, rx_indir_table,
sizeof(efx->rx_indir_table));
return 0;
fail:
fail2:
if (new_rx_rss_context != nic_data->rx_rss_context)
efx_ef10_free_rss_context(efx, new_rx_rss_context);
fail1:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table)
{
int rc;
if (efx->rss_spread == 1)
return 0;
rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
if (rc == -ENOBUFS && !user) {
unsigned context_size;
bool mismatch = false;
size_t i;
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
i++)
mismatch = rx_indir_table[i] !=
ethtool_rxfh_indir_default(i, efx->rss_spread);
rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
if (rc == 0) {
if (context_size != efx->rss_spread)
netif_warn(efx, probe, efx->net_dev,
"Could not allocate an exclusive RSS"
" context; allocated a shared one of"
" different size."
" Wanted %u, got %u.\n",
efx->rss_spread, context_size);
else if (mismatch)
netif_warn(efx, probe, efx->net_dev,
"Could not allocate an exclusive RSS"
" context; allocated a shared one but"
" could not apply custom"
" indirection.\n");
else
netif_info(efx, probe, efx->net_dev,
"Could not allocate an exclusive RSS"
" context; allocated a shared one.\n");
}
}
return rc;
}
static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table
__attribute__ ((unused)))
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
if (user)
return -EOPNOTSUPP;
if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
return 0;
return efx_ef10_rx_push_shared_rss_config(efx, NULL);
}
static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
......@@ -1500,6 +1691,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t inlen, outlen;
dma_addr_t dma_addr;
int rc;
......@@ -1517,7 +1709,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
......@@ -2286,11 +2478,12 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
match_fields);
}
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
......@@ -3494,6 +3687,9 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
u32 host_time) {}
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
bool temp)
{
......@@ -3571,6 +3767,12 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
return 0;
}
static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
struct hwtstamp_config *init)
{
return -EOPNOTSUPP;
}
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
struct hwtstamp_config *init)
{
......@@ -3607,9 +3809,109 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
}
}
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.is_vf = true,
.mem_bar = EFX_MEM_VF_BAR,
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe_vf,
.remove = efx_ef10_remove,
.dimension_resources = efx_ef10_dimension_resources,
.init = efx_ef10_init_nic,
.fini = efx_port_dummy_op_void,
.map_reset_reason = efx_mcdi_map_reset_reason,
.map_reset_flags = efx_ef10_map_reset_flags,
.reset = efx_ef10_reset,
.probe_port = efx_mcdi_port_probe,
.remove_port = efx_mcdi_port_remove,
.fini_dmaq = efx_ef10_fini_dmaq,
.prepare_flr = efx_ef10_prepare_flr,
.finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats,
.start_stats = efx_port_dummy_op_void,
.pull_stats = efx_port_dummy_op_void,
.stop_stats = efx_port_dummy_op_void,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
.reconfigure_mac = efx_ef10_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
.reconfigure_port = efx_mcdi_port_reconfigure,
.get_wol = efx_ef10_get_wol_vf,
.set_wol = efx_ef10_set_wol_vf,
.resume_wol = efx_port_dummy_op_void,
.mcdi_request = efx_ef10_mcdi_request,
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
.mcdi_read_response = efx_ef10_mcdi_read_response,
.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
.irq_enable_master = efx_port_dummy_op_void,
.irq_test_generate = efx_ef10_irq_test_generate,
.irq_disable_non_ev = efx_port_dummy_op_void,
.irq_handle_msi = efx_ef10_msi_interrupt,
.irq_handle_legacy = efx_ef10_legacy_interrupt,
.tx_probe = efx_ef10_tx_probe,
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
.ev_probe = efx_ef10_ev_probe,
.ev_init = efx_ef10_ev_init,
.ev_fini = efx_ef10_ev_fini,
.ev_remove = efx_ef10_ev_remove,
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_ef10_filter_table_restore,
.filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
.filter_insert = efx_ef10_filter_insert,
.filter_remove_safe = efx_ef10_filter_remove_safe,
.filter_get_safe = efx_ef10_filter_get_safe,
.filter_clear_rx = efx_ef10_filter_clear_rx,
.filter_count_rx_used = efx_ef10_filter_count_rx_used,
.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
.filter_rfs_insert = efx_ef10_filter_rfs_insert,
.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
.mtd_probe = efx_port_dummy_op_int,
#endif
.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
#ifdef CONFIG_SFC_SRIOV
.vswitching_probe = efx_ef10_vswitching_probe_vf,
.vswitching_restore = efx_ef10_vswitching_restore_vf,
.vswitching_remove = efx_ef10_vswitching_remove_vf,
#endif
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
.can_rx_scatter = true,
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
.is_vf = false,
.mem_bar = EFX_MEM_BAR,
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe,
.probe = efx_ef10_probe_pf,
.remove = efx_ef10_remove,
.dimension_resources = efx_ef10_dimension_resources,
.init = efx_ef10_init_nic,
......@@ -3650,7 +3952,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
.rx_push_rss_config = efx_ef10_rx_push_rss_config,
.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
......@@ -3689,11 +3991,22 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_ef10_sriov_configure,
.sriov_init = efx_ef10_sriov_init,
.sriov_fini = efx_ef10_sriov_fini,
.sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
.sriov_wanted = efx_ef10_sriov_wanted,
.sriov_reset = efx_ef10_sriov_reset,
.sriov_flr = efx_ef10_sriov_flr,
.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
.sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
.vswitching_probe = efx_ef10_vswitching_probe_pf,
.vswitching_restore = efx_ef10_vswitching_restore_pf,
.vswitching_remove = efx_ef10_vswitching_remove_pf,
#endif
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
......
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/pci.h>
#include <linux/module.h>
#include "net_driver.h"
#include "ef10_sriov.h"
#include "efx.h"
#include "nic.h"
#include "mcdi_pcol.h"
static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
unsigned int vf_fn)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
EVB_PORT_ASSIGN_IN_PF, nic_data->pf_index,
EVB_PORT_ASSIGN_IN_VF, vf_fn);
return efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
static int efx_ef10_vport_add_mac(struct efx_nic *efx,
unsigned int port_id, u8 *mac)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
sizeof(inbuf), NULL, 0, NULL);
}
static int efx_ef10_vport_del_mac(struct efx_nic *efx,
unsigned int port_id, u8 *mac)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
sizeof(inbuf), NULL, 0, NULL);
}
static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
unsigned int vswitch_type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE, vswitch_type);
MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 0);
MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS,
VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0);
return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
static int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_FREE_IN_LEN);
MCDI_SET_DWORD(inbuf, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, port_id);
return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_FREE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
static int efx_ef10_vport_alloc(struct efx_nic *efx,
unsigned int port_id_in,
unsigned int vport_type,
unsigned int *port_id_out)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
size_t outlen;
int rc;
EFX_WARN_ON_PARANOID(!port_id_out);
MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id_in);
MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE, vport_type);
MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS, 0);
MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_FLAGS,
VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_VPORT_ALLOC_OUT_LEN)
return -EIO;
*port_id_out = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
return 0;
}
static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_FREE_IN_LEN);
MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id);
return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int i;
if (!nic_data->vf)
return;
for (i = 0; i < efx->vf_count; i++) {
struct ef10_vf *vf = nic_data->vf + i;
if (vf->vport_assigned) {
efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i);
vf->vport_assigned = 0;
}
if (!is_zero_ether_addr(vf->mac)) {
efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
eth_zero_addr(vf->mac);
}
if (vf->vport_id) {
efx_ef10_vport_free(efx, vf->vport_id);
vf->vport_id = 0;
}
}
}
static void efx_ef10_sriov_free_vf_vswitching(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
efx_ef10_sriov_free_vf_vports(efx);
kfree(nic_data->vf);
nic_data->vf = NULL;
}
static int efx_ef10_sriov_assign_vf_vport(struct efx_nic *efx,
unsigned int vf_i)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct ef10_vf *vf = nic_data->vf + vf_i;
int rc;
if (WARN_ON_ONCE(!nic_data->vf))
return -EOPNOTSUPP;
rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
&vf->vport_id);
if (rc)
return rc;
rc = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
if (rc) {
eth_zero_addr(vf->mac);
return rc;
}
rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
if (rc)
return rc;
vf->vport_assigned = 1;
return 0;
}
static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int i;
int rc;
nic_data->vf = kcalloc(efx->vf_count, sizeof(struct ef10_vf),
GFP_KERNEL);
if (!nic_data->vf)
return -ENOMEM;
for (i = 0; i < efx->vf_count; i++) {
random_ether_addr(nic_data->vf[i].mac);
rc = efx_ef10_sriov_assign_vf_vport(efx, i);
if (rc)
goto fail;
}
return 0;
fail:
efx_ef10_sriov_free_vf_vports(efx);
kfree(nic_data->vf);
nic_data->vf = NULL;
return rc;
}
static int efx_ef10_sriov_restore_vf_vswitching(struct efx_nic *efx)
{
unsigned int i;
int rc;
for (i = 0; i < efx->vf_count; i++) {
rc = efx_ef10_sriov_assign_vf_vport(efx, i);
if (rc)
goto fail;
}
return 0;
fail:
efx_ef10_sriov_free_vf_vswitching(efx);
return rc;
}
/* On top of the default firmware vswitch setup, create a VEB vswitch and
* expansion vport for use by this function.
*/
int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
int rc;
if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
/* vswitch not needed as we have no VFs */
efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
return 0;
}
rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED,
MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
if (rc)
goto fail1;
rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
&nic_data->vport_id);
if (rc)
goto fail2;
rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
if (rc)
goto fail3;
ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
if (rc)
goto fail4;
return 0;
fail4:
efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
eth_zero_addr(nic_data->vport_mac);
fail3:
efx_ef10_vport_free(efx, nic_data->vport_id);
nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
fail2:
efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
fail1:
return rc;
}
int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
}
int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
if (!nic_data->must_probe_vswitching)
return 0;
rc = efx_ef10_vswitching_probe_pf(efx);
if (rc)
goto fail;
rc = efx_ef10_sriov_restore_vf_vswitching(efx);
if (rc)
goto fail;
nic_data->must_probe_vswitching = false;
fail:
return rc;
}
int efx_ef10_vswitching_restore_vf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
if (!nic_data->must_probe_vswitching)
return 0;
rc = efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
if (rc)
return rc;
nic_data->must_probe_vswitching = false;
return 0;
}
void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
efx_ef10_sriov_free_vf_vswitching(efx);
efx_ef10_vadaptor_free(efx, nic_data->vport_id);
if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
return; /* No vswitch was ever created */
if (!is_zero_ether_addr(nic_data->vport_mac)) {
efx_ef10_vport_del_mac(efx, nic_data->vport_id,
efx->net_dev->dev_addr);
eth_zero_addr(nic_data->vport_mac);
}
efx_ef10_vport_free(efx, nic_data->vport_id);
nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
efx_ef10_vswitch_free(efx, nic_data->vport_id);
}
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
{
efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
}
static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
{
int rc = 0;
struct pci_dev *dev = efx->pci_dev;
efx->vf_count = num_vfs;
rc = efx_ef10_sriov_alloc_vf_vswitching(efx);
if (rc)
goto fail1;
rc = pci_enable_sriov(dev, num_vfs);
if (rc)
goto fail2;
return 0;
fail2:
efx_ef10_sriov_free_vf_vswitching(efx);
fail1:
efx->vf_count = 0;
netif_err(efx, probe, efx->net_dev,
"Failed to enable SRIOV VFs\n");
return rc;
}
static int efx_ef10_pci_sriov_disable(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
pci_disable_sriov(dev);
efx_ef10_sriov_free_vf_vswitching(efx);
efx->vf_count = 0;
return 0;
}
int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
{
if (num_vfs == 0)
return efx_ef10_pci_sriov_disable(efx);
else
return efx_ef10_pci_sriov_enable(efx, num_vfs);
}
int efx_ef10_sriov_init(struct efx_nic *efx)
{
return 0;
}
void efx_ef10_sriov_fini(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
if (!nic_data->vf)
return;
rc = efx_ef10_pci_sriov_disable(efx);
if (rc)
netif_dbg(efx, drv, efx->net_dev,
"Disabling SRIOV was not successful rc=%d\n", rc);
else
netif_dbg(efx, drv, efx->net_dev, "SRIOV disabled\n");
}
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF10_SRIOV_H
#define EF10_SRIOV_H
#include "net_driver.h"
/**
* struct ef10_vf - PF's store of VF data
* @vport_id: vport ID for the VF
* @vport_assigned: record whether the vport is currently assigned to the VF
* @mac: MAC address for the VF, zero when address is removed from the vport
*/
struct ef10_vf {
unsigned int vport_id;
unsigned int vport_assigned;
u8 mac[ETH_ALEN];
};
static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx)
{
return false;
}
int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs);
int efx_ef10_sriov_init(struct efx_nic *efx);
static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
void efx_ef10_sriov_fini(struct efx_nic *efx);
static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {}
static inline int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf,
u8 *mac)
{
return -EOPNOTSUPP;
}
static inline int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
u16 vlan, u8 qos)
{
return -EOPNOTSUPP;
}
static inline int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
bool spoofchk)
{
return -EOPNOTSUPP;
}
static inline int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf,
struct ifla_vf_info *ivf)
{
return -EOPNOTSUPP;
}
int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
#endif /* EF10_SRIOV_H */
......@@ -26,6 +26,7 @@
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "sriov.h"
#include "mcdi.h"
#include "workarounds.h"
......@@ -1045,7 +1046,7 @@ static int efx_init_port(struct efx_nic *efx)
/* Ensure the PHY advertises the correct flow control settings */
rc = efx->phy_op->reconfigure(efx);
if (rc)
if (rc && rc != -EPERM)
goto fail2;
mutex_unlock(&efx->mac_lock);
......@@ -1200,10 +1201,12 @@ static int efx_init_io(struct efx_nic *efx)
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
unsigned int mem_map_size = efx->type->mem_map_size(efx);
int rc;
int rc, bar;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
bar = efx->type->mem_bar;
rc = pci_enable_device(pci_dev);
if (rc) {
netif_err(efx, probe, efx->net_dev,
......@@ -1234,8 +1237,8 @@ static int efx_init_io(struct efx_nic *efx)
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
netif_err(efx, probe, efx->net_dev,
"request for memory BAR failed\n");
......@@ -1258,7 +1261,7 @@ static int efx_init_io(struct efx_nic *efx)
return 0;
fail4:
pci_release_region(efx->pci_dev, EFX_MEM_BAR);
pci_release_region(efx->pci_dev, bar);
fail3:
efx->membase_phys = 0;
fail2:
......@@ -1269,6 +1272,8 @@ static int efx_init_io(struct efx_nic *efx)
static void efx_fini_io(struct efx_nic *efx)
{
int bar;
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
......@@ -1277,13 +1282,23 @@ static void efx_fini_io(struct efx_nic *efx)
}
if (efx->membase_phys) {
pci_release_region(efx->pci_dev, EFX_MEM_BAR);
bar = efx->type->mem_bar;
pci_release_region(efx->pci_dev, bar);
efx->membase_phys = 0;
}
pci_disable_device(efx->pci_dev);
}
void efx_set_default_rx_indir_table(struct efx_nic *efx)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
efx->rx_indir_table[i] =
ethtool_rxfh_indir_default(i, efx->rss_spread);
}
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
cpumask_var_t thread_mask;
......@@ -1314,15 +1329,19 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
/* If RSS is requested for the PF *and* VFs then we can't write RSS
* table entries that are inaccessible to VFs
*/
if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
count > efx_vf_size(efx)) {
netif_warn(efx, probe, efx->net_dev,
"Reducing number of RSS channels from %u to %u for "
"VF support. Increase vf-msix-limit to use more "
"channels on the PF.\n",
count, efx_vf_size(efx));
count = efx_vf_size(efx);
#ifdef CONFIG_SFC_SRIOV
if (efx->type->sriov_wanted) {
if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
count > efx_vf_size(efx)) {
netif_warn(efx, probe, efx->net_dev,
"Reducing number of RSS channels from %u to %u for "
"VF support. Increase vf-msix-limit to use more "
"channels on the PF.\n",
count, efx_vf_size(efx));
count = efx_vf_size(efx);
}
}
#endif
return count;
}
......@@ -1426,10 +1445,15 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* RSS might be usable on VFs even if it is disabled on the PF */
efx->rss_spread = ((efx->n_rx_channels > 1 ||
!efx->type->sriov_wanted(efx)) ?
efx->n_rx_channels : efx_vf_size(efx));
#ifdef CONFIG_SFC_SRIOV
if (efx->type->sriov_wanted) {
efx->rss_spread = ((efx->n_rx_channels > 1 ||
!efx->type->sriov_wanted(efx)) ?
efx->n_rx_channels : efx_vf_size(efx));
return 0;
}
#endif
efx->rss_spread = efx->n_rx_channels;
return 0;
}
......@@ -1593,7 +1617,6 @@ static void efx_set_channels(struct efx_nic *efx)
static int efx_probe_nic(struct efx_nic *efx)
{
size_t i;
int rc;
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
......@@ -1616,10 +1639,9 @@ static int efx_probe_nic(struct efx_nic *efx)
goto fail2;
if (efx->n_channels > 1)
netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
efx->rx_indir_table[i] =
ethtool_rxfh_indir_default(i, efx->rss_spread);
netdev_rss_key_fill(&efx->rx_hash_key,
sizeof(efx->rx_hash_key));
efx_set_default_rx_indir_table(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
......@@ -1712,21 +1734,33 @@ static int efx_probe_all(struct efx_nic *efx)
}
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
#ifdef CONFIG_SFC_SRIOV
rc = efx->type->vswitching_probe(efx);
if (rc) /* not fatal; the PF will still work fine */
netif_warn(efx, probe, efx->net_dev,
"failed to setup vswitching rc=%d;"
" VFs may not function\n", rc);
#endif
rc = efx_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
goto fail3;
goto fail4;
}
rc = efx_probe_channels(efx);
if (rc)
goto fail4;
goto fail5;
return 0;
fail4:
fail5:
efx_remove_filters(efx);
fail4:
#ifdef CONFIG_SFC_SRIOV
efx->type->vswitching_remove(efx);
#endif
fail3:
efx_remove_port(efx);
fail2:
......@@ -1816,6 +1850,9 @@ static void efx_remove_all(struct efx_nic *efx)
{
efx_remove_channels(efx);
efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
efx->type->vswitching_remove(efx);
#endif
efx_remove_port(efx);
efx_remove_nic(efx);
}
......@@ -2168,7 +2205,8 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
}
ether_addr_copy(net_dev->dev_addr, new_addr);
efx->type->sriov_mac_address_changed(efx);
if (efx->type->sriov_mac_address_changed)
efx->type->sriov_mac_address_changed(efx);
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
......@@ -2199,7 +2237,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
static const struct net_device_ops efx_farch_netdev_ops = {
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
......@@ -2212,10 +2250,10 @@ static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_SFC_SRIOV
.ndo_set_vf_mac = efx_siena_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
.ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
.ndo_get_vf_config = efx_siena_sriov_get_vf_config,
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
.ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
.ndo_get_vf_config = efx_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
......@@ -2229,29 +2267,6 @@ static const struct net_device_ops efx_farch_netdev_ops = {
#endif
};
static const struct net_device_ops efx_ef10_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = efx_busy_poll,
#endif
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
};
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
......@@ -2264,8 +2279,7 @@ static int efx_netdev_event(struct notifier_block *this,
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
......@@ -2292,12 +2306,9 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
net_dev->netdev_ops = &efx_ef10_netdev_ops;
net_dev->netdev_ops = &efx_netdev_ops;
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
} else {
net_dev->netdev_ops = &efx_farch_netdev_ops;
}
net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
......@@ -2426,7 +2437,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
rc = efx->phy_op->init(efx);
if (rc)
goto fail;
if (efx->phy_op->reconfigure(efx))
rc = efx->phy_op->reconfigure(efx);
if (rc && rc != -EPERM)
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
}
......@@ -2434,8 +2446,18 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
#ifdef CONFIG_SFC_SRIOV
rc = efx->type->vswitching_restore(efx);
if (rc) /* not fatal; the PF will still work fine */
netif_warn(efx, probe, efx->net_dev,
"failed to restore vswitching rc=%d;"
" VFs may not function\n", rc);
#endif
efx_restore_filters(efx);
efx->type->sriov_reset(efx);
if (efx->type->sriov_reset)
efx->type->sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
......@@ -2655,6 +2677,8 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{0} /* end of list */
......@@ -2826,7 +2850,9 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_disable_interrupts(efx);
rtnl_unlock();
efx->type->sriov_fini(efx);
if (efx->type->sriov_fini)
efx->type->sriov_fini(efx);
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
......@@ -3008,7 +3034,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
efx_probe_vpd_strings(efx);
if (!efx->type->is_vf)
efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
......@@ -3023,10 +3050,12 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail4;
rc = efx->type->sriov_init(efx);
if (rc)
netif_err(efx, probe, efx->net_dev,
"SR-IOV can't be enabled rc %d\n", rc);
if (efx->type->sriov_init) {
rc = efx->type->sriov_init(efx);
if (rc)
netif_err(efx, probe, efx->net_dev,
"SR-IOV can't be enabled rc %d\n", rc);
}
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
......@@ -3058,6 +3087,26 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return rc;
}
/* efx_pci_sriov_configure returns the actual number of Virtual Functions
* enabled on success
*/
#ifdef CONFIG_SFC_SRIOV
static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
int rc;
struct efx_nic *efx = pci_get_drvdata(dev);
if (efx->type->sriov_configure) {
rc = efx->type->sriov_configure(efx, num_vfs);
if (rc)
return rc;
else
return num_vfs;
} else
return -EOPNOTSUPP;
}
#endif
static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
......@@ -3280,6 +3329,9 @@ static struct pci_driver efx_pci_driver = {
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
.err_handler = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_pci_sriov_configure,
#endif
};
/**************************************************************************
......@@ -3302,9 +3354,11 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
#ifdef CONFIG_SFC_SRIOV
rc = efx_init_sriov();
if (rc)
goto err_sriov;
#endif
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
......@@ -3321,8 +3375,10 @@ static int __init efx_init_module(void)
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
err_sriov:
#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
......@@ -3334,7 +3390,9 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
}
......
......@@ -15,7 +15,9 @@
#include "filter.h"
/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
/* All VFs use BAR 0/1 for memory */
#define EFX_MEM_BAR 2
#define EFX_MEM_VF_BAR 0
/* TX */
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
......@@ -32,6 +34,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
/* RX */
void efx_set_default_rx_indir_table(struct efx_nic *efx);
void efx_rx_config_page_split(struct efx_nic *efx);
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
......@@ -220,6 +223,13 @@ static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif
#ifdef CONFIG_SFC_SRIOV
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
return 1 << efx->vi_scale;
}
#endif
static inline void efx_schedule_channel(struct efx_channel *channel)
{
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
......
......@@ -1109,9 +1109,8 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
return -EOPNOTSUPP;
if (!indir)
return 0;
memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
efx->type->rx_push_rss_config(efx);
return 0;
return efx->type->rx_push_rss_config(efx, true, indir);
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
......
......@@ -477,16 +477,29 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
*
**************************************************************************
*/
static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table)
{
(void) efx;
(void) user;
(void) rx_indir_table;
return -ENOSYS;
}
static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table)
{
efx_oword_t temp;
(void) user;
/* Set hash key for IPv4 */
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
memcpy(efx->rx_indir_table, rx_indir_table,
sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
return 0;
}
/**************************************************************************
......@@ -2507,7 +2520,7 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
falcon_b0_rx_push_rss_config(efx);
falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
......@@ -2687,6 +2700,8 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
.is_vf = false,
.mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
......@@ -2729,7 +2744,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.rx_push_rss_config = efx_port_dummy_op_void,
.rx_push_rss_config = dummy_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
......@@ -2766,11 +2781,6 @@ const struct efx_nic_type falcon_a1_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
.sriov_init = efx_falcon_sriov_init,
.sriov_fini = efx_falcon_sriov_fini,
.sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
.sriov_wanted = efx_falcon_sriov_wanted,
.sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_A1,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
......@@ -2788,6 +2798,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
};
const struct efx_nic_type falcon_b0_nic_type = {
.is_vf = false,
.mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
......@@ -2867,11 +2879,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
.sriov_init = efx_falcon_sriov_init,
.sriov_fini = efx_falcon_sriov_fini,
.sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
.sriov_wanted = efx_falcon_sriov_wanted,
.sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_B0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
......
......@@ -20,6 +20,8 @@
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"
......@@ -1198,13 +1200,17 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_tx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_tx_flush_done(efx, event);
#endif
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_rx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_rx_flush_done(efx, event);
#endif
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
......@@ -1242,8 +1248,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else
}
#ifdef CONFIG_SFC_SRIOV
else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
......@@ -1252,8 +1261,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else
}
#ifdef CONFIG_SFC_SRIOV
else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
......@@ -1317,9 +1329,11 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_farch_handle_driver_event(channel, &event);
break;
#ifdef CONFIG_SFC_SRIOV
case FSE_CZ_EV_CODE_USER_EV:
efx_siena_sriov_event(channel, &event);
break;
#endif
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
......@@ -1685,28 +1699,32 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
if (efx->type->sriov_wanted(efx)) {
unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
nic_data->vf_buftbl_base = buftbl_min;
vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
vi_count = max(vi_count, EFX_VI_BASE);
buftbl_free = (sram_lim_qw - buftbl_min -
vi_count * vi_dc_entries);
entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
efx_vf_size(efx));
vf_limit = min(buftbl_free / entries_per_vf,
(1024U - EFX_VI_BASE) >> efx->vi_scale);
if (efx->vf_count > vf_limit) {
netif_err(efx, probe, efx->net_dev,
"Reducing VF count from from %d to %d\n",
efx->vf_count, vf_limit);
efx->vf_count = vf_limit;
if (efx->type->sriov_wanted) {
if (efx->type->sriov_wanted(efx)) {
unsigned vi_dc_entries, buftbl_free;
unsigned entries_per_vf, vf_limit;
nic_data->vf_buftbl_base = buftbl_min;
vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
vi_count = max(vi_count, EFX_VI_BASE);
buftbl_free = (sram_lim_qw - buftbl_min -
vi_count * vi_dc_entries);
entries_per_vf = ((vi_dc_entries +
EFX_VF_BUFTBL_PER_VI) *
efx_vf_size(efx));
vf_limit = min(buftbl_free / entries_per_vf,
(1024U - EFX_VI_BASE) >> efx->vi_scale);
if (efx->vf_count > vf_limit) {
netif_err(efx, probe, efx->net_dev,
"Reducing VF count from from %d to %d\n",
efx->vf_count, vf_limit);
efx->vf_count = vf_limit;
}
vi_count += efx->vf_count * efx_vf_size(efx);
}
vi_count += efx->vf_count * efx_vf_size(efx);
}
#endif
......
......@@ -1035,7 +1035,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
/* MAC stats are gather lazily. We can ignore this. */
break;
case MCDI_EVENT_CODE_FLR:
efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
if (efx->type->sriov_flr)
efx->type->sriov_flr(efx,
MCDI_EVENT_FIELD(*event, FLR_VF));
break;
case MCDI_EVENT_CODE_PTP_RX:
case MCDI_EVENT_CODE_PTP_FAULT:
......@@ -1081,9 +1083,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
MCDI_DECLARE_BUF(outbuf,
max(MC_CMD_GET_VERSION_OUT_LEN,
MC_CMD_GET_CAPABILITIES_OUT_LEN));
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
size_t outlength;
const __le16 *ver_words;
size_t offset;
......@@ -1108,19 +1108,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
* single version. Report which variants are running.
*/
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
offset += snprintf(
buf + offset, len - offset, " rx? tx?");
else
offset += snprintf(
buf + offset, len - offset, " rx%x tx%x",
MCDI_WORD(outbuf,
GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
MCDI_WORD(outbuf,
GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
struct efx_ef10_nic_data *nic_data = efx->nic_data;
offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
nic_data->rx_dpcpu_fw_id,
nic_data->tx_dpcpu_fw_id);
/* It's theoretically possible for the string to exceed 31
* characters, though in practice the first three version
......@@ -1150,10 +1142,26 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
* specified will fail with EPERM, and we have to tell the MC we don't
* care what firmware we get.
*/
if (rc == -EPERM) {
netif_dbg(efx, probe, efx->net_dev,
"efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
MC_CMD_FW_DONT_CARE);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
sizeof(inbuf), outbuf, sizeof(outbuf),
&outlen);
}
if (rc) {
efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
outbuf, outlen, rc);
goto fail;
}
if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
rc = -EIO;
goto fail;
......@@ -1178,16 +1186,6 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
if (driver_operating &&
(efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
netif_err(efx, probe, efx->net_dev,
"This driver version only supports one function per port\n");
return -ENODEV;
}
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
......@@ -1385,6 +1383,9 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
return rc;
}
/* Returns 1 if an assertion was read, 0 if no assertion had fired,
* negative on error.
*/
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
......@@ -1406,6 +1407,8 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
outbuf, sizeof(outbuf), &outlen);
if (rc == -EPERM)
return 0;
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
if (rc) {
......@@ -1443,24 +1446,31 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
index));
return 0;
return 1;
}
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
static int efx_mcdi_exit_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
int rc;
/* If the MC is running debug firmware, it might now be
* waiting for a debugger to attach, but we just want it to
* reboot. We set a flag that makes the command a no-op if it
* has already done so. We don't know what return code to
* expect (0 or -EIO), so ignore it.
* has already done so.
* The MCDI will thus return either 0 or -EIO.
*/
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
NULL, 0, NULL);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
NULL, 0, NULL);
if (rc == -EIO)
rc = 0;
if (rc)
efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
NULL, 0, rc);
return rc;
}
int efx_mcdi_handle_assertion(struct efx_nic *efx)
......@@ -1468,12 +1478,10 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
int rc;
rc = efx_mcdi_read_assertion(efx);
if (rc)
if (rc <= 0)
return rc;
efx_mcdi_exit_assertion(efx);
return 0;
return efx_mcdi_exit_assertion(efx);
}
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
......@@ -1688,6 +1696,36 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
NULL, 0, NULL);
}
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
unsigned int *enabled_out)
{
MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
size_t outlen;
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
rc = -EIO;
goto fail;
}
if (impl_out)
*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
if (enabled_out)
*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
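For illustration only (not part of this patch), a caller could use the new helper to record whether the bug 35388 workaround is enabled; the wrapper function name is an assumption, and the bit definition comes from the mcdi_pcol.h hunk below. Note that impl_out may be passed as NULL, since the helper checks it before writing.
/* Hypothetical sketch: query the MC workaround bitmask and record whether
 * the bug 35388 workaround is enabled for this function.
 */
static int example_check_workarounds(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int enabled;
	int rc;

	rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
	if (rc)
		return rc;

	nic_data->workaround_35388 =
		!!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388);
	return 0;
}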
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
......
......@@ -339,6 +339,8 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
unsigned int *enabled_out);
#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);
......
......@@ -1875,6 +1875,8 @@
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
#define MC_CMD_FW_LOW_LATENCY 0x1
/* enum: Only this option is allowed for non-admin functions */
#define MC_CMD_FW_DONT_CARE 0xffffffff
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
......@@ -4086,6 +4088,27 @@
#define LICENSED_APP_ID_ID_WIDTH 32
/***********************************/
/* MC_CMD_GET_WORKAROUNDS
* Read the list of all implemented and all currently enabled workarounds. The
* enums here must correspond with those in MC_CMD_WORKAROUND.
*/
#define MC_CMD_GET_WORKAROUNDS 0x59
/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
/* Each workaround is represented by a single bit according to the enums below.
*/
#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
/* enum: Bug 17230 work around. */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
/* enum: Bug 35388 work around (unsafe EVQ writes). */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
/* enum: Bug35017 workaround (A64 tables must be identity map) */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
/***********************************/
/* MC_CMD_READ_REGS
* Get a dump of the MCPU registers
......
......@@ -793,7 +793,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
struct efx_vf;
struct vfdi_status;
/**
......@@ -909,7 +908,6 @@ struct vfdi_status;
* completed (either success or failure). Not used when MCDI is used to
* flush receive queues.
* @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
* @vf: Array of &struct efx_vf objects.
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
......@@ -1053,7 +1051,6 @@ struct efx_nic {
wait_queue_head_t flush_wq;
#ifdef CONFIG_SFC_SRIOV
struct efx_vf *vf;
unsigned vf_count;
unsigned vf_init_count;
unsigned vi_scale;
......@@ -1092,6 +1089,7 @@ struct efx_mtd_partition {
/**
* struct efx_nic_type - Efx device type definition
* @mem_bar: Get the memory BAR
* @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
......@@ -1226,6 +1224,8 @@ struct efx_mtd_partition {
* @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
bool is_vf;
unsigned int mem_bar;
unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
......@@ -1277,7 +1277,8 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
void (*rx_push_rss_config)(struct efx_nic *efx);
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
const u32 *rx_indir_table);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
......@@ -1330,11 +1331,23 @@ struct efx_nic_type {
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
void (*sriov_mac_address_changed)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
void (*sriov_reset)(struct efx_nic *efx);
void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
u8 qos);
int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
bool spoofchk);
int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
struct ifla_vf_info *ivi);
int (*vswitching_probe)(struct efx_nic *efx);
int (*vswitching_restore)(struct efx_nic *efx);
void (*vswitching_remove)(struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
......
......@@ -381,6 +381,7 @@ enum {
* @efx: Pointer back to main interface structure
* @wol_filter_id: Wake-on-LAN packet filter id
* @stats: Hardware statistics
* @vf: Array of &struct siena_vf objects
* @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
* @vfdi_status: Common VFDI status page to be dmad to VF address space.
* @local_addr_list: List of local addresses. Protected by %local_lock.
......@@ -394,6 +395,7 @@ struct siena_nic_data {
int wol_filter_id;
u64 stats[SIENA_STAT_COUNT];
#ifdef CONFIG_SFC_SRIOV
struct siena_vf *vf;
struct efx_channel *vfdi_channel;
unsigned vf_buftbl_base;
struct efx_buffer vfdi_status;
......@@ -483,12 +485,21 @@ enum {
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
* @rx_rss_context: Firmware handle for our RSS context
* @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
* @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
* @vport_id: The function's vport ID, only relevant for PFs
* @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
* @pf_index: The number for this PF, or the parent PF if this is a VF
#ifdef CONFIG_SFC_SRIOV
* @vf: Pointer to VF data structure
#endif
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
......@@ -503,126 +514,25 @@ struct efx_ef10_nic_data {
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
bool must_restore_piobufs;
u32 rx_rss_context;
bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool must_check_datapath_caps;
u32 datapath_caps;
};
/*
* On the SFC9000 family each port is associated with 1 PCI physical
* function (PF) handled by sfc and a configurable number of virtual
* functions (VFs) that may be handled by some other driver, often in
* a VM guest. The queue pointer registers are mapped in both PF and
* VF BARs such that an 8K region provides access to a single RX, TX
* and event queue (collectively a Virtual Interface, VI or VNIC).
*
* The PF has access to all 1024 VIs while VFs are mapped to VIs
* according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
* in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
* The number of VIs and the VI_SCALE value are configurable but must
* be established at boot time by firmware.
*/
/* Maximum VI_SCALE parameter supported by Siena */
#define EFX_VI_SCALE_MAX 6
/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
* so this is the smallest allowed value. */
#define EFX_VI_BASE 128U
/* Maximum number of VFs allowed */
#define EFX_VF_COUNT_MAX 127
/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
#define EFX_MAX_VF_EVQ_SIZE 8192UL
/* The number of buffer table entries reserved for each VI on a VF */
#define EFX_VF_BUFTBL_PER_VI \
((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
sizeof(efx_qword_t) / EFX_BUF_SIZE)
unsigned int rx_dpcpu_fw_id;
unsigned int tx_dpcpu_fw_id;
unsigned int vport_id;
bool must_probe_vswitching;
unsigned int pf_index;
#ifdef CONFIG_SFC_SRIOV
/* SIENA */
static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
{
return efx->vf_count != 0;
}
static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
return efx->vf_init_count != 0;
}
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
return 1 << efx->vi_scale;
}
struct ef10_vf *vf;
#endif
u8 vport_mac[ETH_ALEN];
};
int efx_init_sriov(void);
void efx_siena_sriov_probe(struct efx_nic *efx);
int efx_siena_sriov_init(struct efx_nic *efx);
void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
void efx_siena_sriov_reset(struct efx_nic *efx);
void efx_siena_sriov_fini(struct efx_nic *efx);
void efx_fini_sriov(void);
/* EF10 */
static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
#else
/* SIENA */
static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
static inline int efx_init_sriov(void) { return 0; }
static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
efx_qword_t *event) {}
static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
efx_qword_t *event) {}
static inline void efx_siena_sriov_event(struct efx_channel *channel,
efx_qword_t *event) {}
static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
unsigned dmaq) {}
static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
static inline void efx_fini_sriov(void) {}
/* EF10 */
static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
#endif
/* FALCON */
static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
u16 vlan, u8 qos);
int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivf);
int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
bool spoofchk);
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
......@@ -654,6 +564,7 @@ extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
/**************************************************************************
*
......
......@@ -25,6 +25,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "selftest.h"
#include "siena_sriov.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
......@@ -306,7 +307,9 @@ static int siena_probe_nic(struct efx_nic *efx)
if (rc)
goto fail5;
#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_probe(efx);
#endif
efx_ptp_defer_probe_with_channel(efx);
return 0;
......@@ -321,7 +324,8 @@ static int siena_probe_nic(struct efx_nic *efx)
return rc;
}
static void siena_rx_push_rss_config(struct efx_nic *efx)
static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table)
{
efx_oword_t temp;
......@@ -343,7 +347,11 @@ static void siena_rx_push_rss_config(struct efx_nic *efx)
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
memcpy(efx->rx_indir_table, rx_indir_table,
sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
return 0;
}
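For illustration only (not part of this hunk): with the changed hook signature, a generic caller pushing the driver's current indirection table through the per-type method looks roughly like the sketch below; the wrapper name is hypothetical.
/* Sketch: push the existing RSS indirection table via the generic hook,
 * using the new (bool user, const u32 *table) signature.
 */
static int example_push_default_rss(struct efx_nic *efx)
{
	return efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
}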
/* This call performs hardware-specific global initialisation, such as
......@@ -386,7 +394,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
siena_rx_push_rss_config(efx);
siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
......@@ -909,6 +917,8 @@ static int siena_mtd_probe(struct efx_nic *efx)
*/
const struct efx_nic_type siena_a0_nic_type = {
.is_vf = false,
.mem_bar = EFX_MEM_BAR,
.mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
......@@ -996,11 +1006,22 @@ const struct efx_nic_type siena_a0_nic_type = {
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
.ptp_set_ts_config = siena_ptp_set_ts_config,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_siena_sriov_configure,
.sriov_init = efx_siena_sriov_init,
.sriov_fini = efx_siena_sriov_fini,
.sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
.sriov_wanted = efx_siena_sriov_wanted,
.sriov_reset = efx_siena_sriov_reset,
.sriov_flr = efx_siena_sriov_flr,
.sriov_set_vf_mac = efx_siena_sriov_set_vf_mac,
.sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
.sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
.sriov_get_vf_config = efx_siena_sriov_get_vf_config,
.vswitching_probe = efx_port_dummy_op_int,
.vswitching_restore = efx_port_dummy_op_int,
.vswitching_remove = efx_port_dummy_op_void,
#endif
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
......
......@@ -16,6 +16,7 @@
#include "filter.h"
#include "mcdi_pcol.h"
#include "farch_regs.h"
#include "siena_sriov.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
......@@ -38,7 +39,7 @@ enum efx_vf_tx_filter_mode {
};
/**
* struct efx_vf - Back-end resource and protocol state for a PCI VF
* struct siena_vf - Back-end resource and protocol state for a PCI VF
* @efx: The Efx NIC owning this VF
* @pci_rid: The PCI requester ID for this VF
* @pci_name: The PCI name (formatted address) of this VF
......@@ -83,7 +84,7 @@ enum efx_vf_tx_filter_mode {
* @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
* @reset_work: Work item to schedule a VF reset.
*/
struct efx_vf {
struct siena_vf {
struct efx_nic *efx;
unsigned int pci_rid;
char pci_name[13]; /* dddd:bb:dd.f */
......@@ -189,7 +190,7 @@ MODULE_PARM_DESC(max_vfs,
*/
static struct workqueue_struct *vfdi_workqueue;
static unsigned abs_index(struct efx_vf *vf, unsigned index)
static unsigned abs_index(struct siena_vf *vf, unsigned index)
{
return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
......@@ -299,7 +300,7 @@ static int efx_siena_sriov_memcpy(struct efx_nic *efx,
/* The TX filter is entirely controlled by this driver, and is modified
* underneath the feet of the VF
*/
static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
......@@ -343,7 +344,7 @@ static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
}
/* The RX filter is managed here on behalf of the VF driver */
static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
......@@ -382,7 +383,7 @@ static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
}
}
static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
......@@ -397,7 +398,7 @@ static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
* local_page_list, either by acquiring local_lock or by running from
* efx_siena_sriov_peer_work()
*/
static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
......@@ -509,8 +510,9 @@ static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
* Optionally set VF index and VI index within the VF.
*/
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
struct efx_vf **vf_out, unsigned *rel_index_out)
struct siena_vf **vf_out, unsigned *rel_index_out)
{
struct siena_nic_data *nic_data = efx->nic_data;
unsigned vf_i;
if (abs_index < EFX_VI_BASE)
......@@ -520,13 +522,13 @@ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
return true;
if (vf_out)
*vf_out = efx->vf + vf_i;
*vf_out = nic_data->vf + vf_i;
if (rel_index_out)
*rel_index_out = abs_index % efx_vf_size(efx);
return false;
}
static int efx_vfdi_init_evq(struct efx_vf *vf)
static int efx_vfdi_init_evq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
......@@ -567,7 +569,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_init_rxq(struct efx_vf *vf)
static int efx_vfdi_init_rxq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
......@@ -608,7 +610,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_init_txq(struct efx_vf *vf)
static int efx_vfdi_init_txq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
......@@ -655,7 +657,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf)
}
/* Returns true when efx_vfdi_fini_all_queues should wake */
static bool efx_vfdi_flush_wake(struct efx_vf *vf)
static bool efx_vfdi_flush_wake(struct siena_vf *vf)
{
/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
smp_mb();
......@@ -664,7 +666,7 @@ static bool efx_vfdi_flush_wake(struct efx_vf *vf)
atomic_read(&vf->rxq_retry_count);
}
static void efx_vfdi_flush_clear(struct efx_vf *vf)
static void efx_vfdi_flush_clear(struct siena_vf *vf)
{
memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
vf->txq_count = 0;
......@@ -674,7 +676,7 @@ static void efx_vfdi_flush_clear(struct efx_vf *vf)
atomic_set(&vf->rxq_retry_count, 0);
}
static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
efx_oword_t reg;
......@@ -757,7 +759,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
static int efx_vfdi_insert_filter(struct efx_vf *vf)
static int efx_vfdi_insert_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
......@@ -789,7 +791,7 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
......@@ -801,7 +803,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_set_status_page(struct efx_vf *vf)
static int efx_vfdi_set_status_page(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
......@@ -846,7 +848,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_clear_status_page(struct efx_vf *vf)
static int efx_vfdi_clear_status_page(struct siena_vf *vf)
{
mutex_lock(&vf->status_lock);
vf->status_addr = 0;
......@@ -855,7 +857,7 @@ static int efx_vfdi_clear_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
......@@ -870,7 +872,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
static void efx_siena_sriov_vfdi(struct work_struct *work)
{
struct efx_vf *vf = container_of(work, struct efx_vf, req);
struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
struct efx_memcpy_req copy[2];
......@@ -936,7 +938,8 @@ static void efx_siena_sriov_vfdi(struct work_struct *work)
* event ring in guest memory with VFDI reset events, then (re-initialise) the
* event queue to raise an interrupt. The guest driver will then recover.
*/
static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
struct efx_buffer *buffer)
{
struct efx_nic *efx = vf->efx;
......@@ -1006,7 +1009,7 @@ static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
{
struct efx_vf *vf = container_of(work, struct efx_vf, req);
struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
......@@ -1077,7 +1080,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_work);
struct efx_nic *efx = nic_data->efx;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
struct efx_vf *vf;
struct siena_vf *vf;
struct efx_local_addr *local_addr;
struct vfdi_endpoint *peer;
struct efx_endpoint_page *epp;
......@@ -1099,7 +1102,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
peer_count = 1;
for (pos = 0; pos < efx->vf_count; ++pos) {
vf = efx->vf + pos;
vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
......@@ -1155,7 +1158,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
/* Finally, push the pages */
for (pos = 0; pos < efx->vf_count; ++pos) {
vf = efx->vf + pos;
vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->status_addr)
......@@ -1190,14 +1193,16 @@ static void efx_siena_sriov_free_local(struct efx_nic *efx)
static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
{
unsigned index;
struct efx_vf *vf;
struct siena_vf *vf;
struct siena_nic_data *nic_data = efx->nic_data;
efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
if (!efx->vf)
nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
GFP_KERNEL);
if (!nic_data->vf)
return -ENOMEM;
for (index = 0; index < efx->vf_count; ++index) {
vf = efx->vf + index;
vf = nic_data->vf + index;
vf->efx = efx;
vf->index = index;
......@@ -1216,11 +1221,12 @@ static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
{
struct efx_vf *vf;
struct siena_nic_data *nic_data = efx->nic_data;
struct siena_vf *vf;
unsigned int pos;
for (pos = 0; pos < efx->vf_count; ++pos) {
vf = efx->vf + pos;
vf = nic_data->vf + pos;
efx_nic_free_buffer(efx, &vf->buf);
kfree(vf->peer_page_addrs);
......@@ -1237,7 +1243,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
struct siena_nic_data *nic_data = efx->nic_data;
unsigned index, devfn, sriov, buftbl_base;
u16 offset, stride;
struct efx_vf *vf;
struct siena_vf *vf;
int rc;
sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
......@@ -1250,7 +1256,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
buftbl_base = nic_data->vf_buftbl_base;
devfn = pci_dev->devfn + offset;
for (index = 0; index < efx->vf_count; ++index) {
vf = efx->vf + index;
vf = nic_data->vf + index;
/* Reserve buffer entries */
vf->buftbl_base = buftbl_base;
......@@ -1350,7 +1356,7 @@ int efx_siena_sriov_init(struct efx_nic *efx)
fail_vfs:
cancel_work_sync(&nic_data->peer_work);
efx_siena_sriov_free_local(efx);
kfree(efx->vf);
kfree(nic_data->vf);
fail_alloc:
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
......@@ -1361,7 +1367,7 @@ int efx_siena_sriov_init(struct efx_nic *efx)
void efx_siena_sriov_fini(struct efx_nic *efx)
{
struct efx_vf *vf;
struct siena_vf *vf;
unsigned int pos;
struct siena_nic_data *nic_data = efx->nic_data;
......@@ -1377,7 +1383,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Flush all reconfiguration work */
for (pos = 0; pos < efx->vf_count; ++pos) {
vf = efx->vf + pos;
vf = nic_data->vf + pos;
cancel_work_sync(&vf->req);
cancel_work_sync(&vf->reset_work);
}
......@@ -1388,7 +1394,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Tear down back-end state */
efx_siena_sriov_vfs_fini(efx);
efx_siena_sriov_free_local(efx);
kfree(efx->vf);
kfree(nic_data->vf);
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
efx_siena_sriov_cmd(efx, false, NULL, NULL);
}
......@@ -1396,7 +1402,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_vf *vf;
struct siena_vf *vf;
unsigned qid, seq, type, data;
qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
......@@ -1452,11 +1458,12 @@ void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
struct efx_vf *vf;
struct siena_nic_data *nic_data = efx->nic_data;
struct siena_vf *vf;
if (vf_i > efx->vf_init_count)
return;
vf = efx->vf + vf_i;
vf = nic_data->vf + vf_i;
netif_info(efx, hw, efx->net_dev,
"FLR on VF %s\n", vf->pci_name);
......@@ -1481,7 +1488,7 @@ void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_vf *vf;
struct siena_vf *vf;
unsigned queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
......@@ -1500,7 +1507,7 @@ void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_vf *vf;
struct siena_vf *vf;
unsigned ev_failed, queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
......@@ -1525,7 +1532,7 @@ void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
/* Called from napi. Schedule the reset work item */
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
struct efx_vf *vf;
struct siena_vf *vf;
unsigned int rel;
if (map_vi_index(efx, dmaq, &vf, &rel))
......@@ -1541,9 +1548,10 @@ void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
/* Reset all VFs */
void efx_siena_sriov_reset(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
unsigned int vf_i;
struct efx_buffer buf;
struct efx_vf *vf;
struct siena_vf *vf;
ASSERT_RTNL();
......@@ -1557,7 +1565,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
vf = efx->vf + vf_i;
vf = nic_data->vf + vf_i;
efx_siena_sriov_reset_vf(vf, &buf);
}
......@@ -1573,7 +1581,6 @@ int efx_init_sriov(void)
vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
if (!vfdi_workqueue)
return -ENOMEM;
return 0;
}
......@@ -1582,14 +1589,14 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
struct siena_nic_data *nic_data = efx->nic_data;
struct siena_vf *vf;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
vf = efx->vf + vf_i;
vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
ether_addr_copy(vf->addr.mac_addr, mac);
......@@ -1599,16 +1606,16 @@ int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
return 0;
}
int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
u16 vlan, u8 qos)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
struct siena_nic_data *nic_data = efx->nic_data;
struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
vf = efx->vf + vf_i;
vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
......@@ -1619,16 +1626,16 @@ int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
return 0;
}
int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
bool spoofchk)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
struct siena_nic_data *nic_data = efx->nic_data;
struct siena_vf *vf;
int rc;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
vf = efx->vf + vf_i;
vf = nic_data->vf + vf_i;
mutex_lock(&vf->txq_lock);
if (vf->txq_count == 0) {
......@@ -1643,16 +1650,16 @@ int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
return rc;
}
int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
struct ifla_vf_info *ivi)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
struct siena_nic_data *nic_data = efx->nic_data;
struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
vf = efx->vf + vf_i;
vf = nic_data->vf + vf_i;
ivi->vf = vf_i;
ether_addr_copy(ivi->mac, vf->addr.mac_addr);
......@@ -1666,3 +1673,12 @@ int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
return 0;
}
bool efx_siena_sriov_wanted(struct efx_nic *efx)
{
return efx->vf_count != 0;
}
int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs)
{
return 0;
}
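For illustration only: the per-type sriov_configure hook above is intended to be reached from a generic PCI sriov_configure entry point. The sketch below shows the dispatch pattern; the function name, drvdata layout and return convention are assumptions, not code from this series.
/* Hypothetical sketch of a generic PCI sriov_configure dispatcher. */
static int example_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	struct efx_nic *efx = pci_get_drvdata(dev);

	if (!efx->type->sriov_configure)
		return -ENOSYS;	/* "Function not implemented" */

	return efx->type->sriov_configure(efx, num_vfs);
}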
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef SIENA_SRIOV_H
#define SIENA_SRIOV_H
#include "net_driver.h"
/* On the SFC9000 family each port is associated with 1 PCI physical
* function (PF) handled by sfc and a configurable number of virtual
* functions (VFs) that may be handled by some other driver, often in
* a VM guest. The queue pointer registers are mapped in both PF and
* VF BARs such that an 8K region provides access to a single RX, TX
* and event queue (collectively a Virtual Interface, VI or VNIC).
*
* The PF has access to all 1024 VIs while VFs are mapped to VIs
* according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
* in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
* The number of VIs and the VI_SCALE value are configurable but must
* be established at boot time by firmware.
*/
/* Maximum VI_SCALE parameter supported by Siena */
#define EFX_VI_SCALE_MAX 6
/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
* so this is the smallest allowed value.
*/
#define EFX_VI_BASE 128U
/* Maximum number of VFs allowed */
#define EFX_VF_COUNT_MAX 127
/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
#define EFX_MAX_VF_EVQ_SIZE 8192UL
/* The number of buffer table entries reserved for each VI on a VF */
#define EFX_VF_BUFTBL_PER_VI \
((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
sizeof(efx_qword_t) / EFX_BUF_SIZE)
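For illustration only: the VI_BASE/VI_SCALE mapping described above can be written as a small helper mirroring abs_index() in siena_sriov.c. With vi_scale = 2, for example, VF 3 owns VIs 140..143 (128 + 3 * 4).
/* Sketch (not in the patch): absolute VI number of queue 'index' within
 * VF 'vf_i', per the mapping in the comment above.
 */
static inline unsigned int example_abs_vi(struct efx_nic *efx,
					  unsigned int vf_i,
					  unsigned int index)
{
	return EFX_VI_BASE + (vf_i << efx->vi_scale) + index;
}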
int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs);
int efx_siena_sriov_init(struct efx_nic *efx);
void efx_siena_sriov_fini(struct efx_nic *efx);
void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
bool efx_siena_sriov_wanted(struct efx_nic *efx);
void efx_siena_sriov_reset(struct efx_nic *efx);
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
u16 vlan, u8 qos);
int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
bool spoofchk);
int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf,
struct ifla_vf_info *ivf);
#ifdef CONFIG_SFC_SRIOV
static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
return efx->vf_init_count != 0;
}
#else /* !CONFIG_SFC_SRIOV */
static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
return false;
}
#endif /* CONFIG_SFC_SRIOV */
void efx_siena_sriov_probe(struct efx_nic *efx);
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
#endif /* SIENA_SRIOV_H */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2014-2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/module.h>
#include "net_driver.h"
#include "nic.h"
#include "sriov.h"
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->sriov_set_vf_mac)
return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
else
return -EOPNOTSUPP;
}
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
u8 qos)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->sriov_set_vf_vlan) {
if ((vlan & ~VLAN_VID_MASK) ||
(qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
return -EINVAL;
return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
} else {
return -EOPNOTSUPP;
}
}
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->sriov_set_vf_spoofchk)
return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
else
return -EOPNOTSUPP;
}
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (efx->type->sriov_get_vf_config)
return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
else
return -EOPNOTSUPP;
}
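For illustration only: these generic wrappers are meant to be wired into the shared net_device_ops so that the ndo callbacks no longer reference a specific NIC type. The ops table below is a sketch of that wiring, not the driver's actual definition.
/* Sketch of how the generic SR-IOV wrappers could populate net_device_ops. */
static const struct net_device_ops example_netdev_ops = {
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
};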
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2014-2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_SRIOV_H
#define EFX_SRIOV_H
#include "net_driver.h"
#ifdef CONFIG_SFC_SRIOV
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
u8 qos);
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk);
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi);
#endif /* CONFIG_SFC_SRIOV */
#endif /* EFX_SRIOV_H */