Commit dbafb0c4 authored by David S. Miller

Merge branch 'liquidio-enhanced-ethtool-set-channels-feature'

Intiyaz Basha says:

====================
liquidio: enhanced ethtool --set-channels feature

For the ethtool --set-channels feature, the liquidio driver currently
limits the max combined value to the queue count configured at driver
load time, where the combined count is the total count of input and output
queues. This limitation is applicable only when SR-IOV is enabled, that
is, when VFs are created for the PF. If SR-IOV is not enabled, the driver
can configure up to the maximum supported queue count (64).

This series of patches enhances the driver to accept up to the maximum
supported queue count for ethtool --set-channels.
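
As an illustration (the interface name eth0 is hypothetical), the new
limit can be exercised with the standard ethtool channel commands:

  ethtool --show-channels eth0              # reports max/current "combined" counts
  ethtool --set-channels eth0 combined 64   # request 64 queues (SR-IOV disabled)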

Changes in V2:
  Only patch #6 was changed to fix these Sparse warnings reported by the
  kbuild test robot:
    lio_ethtool.c:848:5: warning: symbol 'lio_23xx_reconfigure_queue_count'
                         was not declared. Should it be static?
    lio_ethtool.c:877:22: warning: incorrect type in assignment (different
                          base types)
    lio_ethtool.c:878:22: warning: incorrect type in assignment (different
                          base types)
    lio_ethtool.c:879:22: warning: incorrect type in assignment (different
                          base types)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3ac305c3 c33c9973
...
@@ -1245,7 +1245,7 @@ static void cn23xx_setup_reg_address(struct octeon_device *oct)
         CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
 }
 
-static int cn23xx_sriov_config(struct octeon_device *oct)
+int cn23xx_sriov_config(struct octeon_device *oct)
 {
     struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
     u32 max_rings, total_rings, max_vfs, rings_per_vf;
...
@@ -1269,8 +1269,8 @@ static int cn23xx_sriov_config(struct octeon_device *oct)
         break;
     }
 
-    if (max_rings <= num_present_cpus())
-        num_pf_rings = 1;
+    if (oct->sriov_info.num_pf_rings)
+        num_pf_rings = oct->sriov_info.num_pf_rings;
     else
         num_pf_rings = num_present_cpus();
...
...
@@ -61,6 +61,8 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
 
 void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
 
+int cn23xx_sriov_config(struct octeon_device *oct);
+
 int cn23xx_fw_loaded(struct octeon_device *oct);
 
 void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
...
...
@@ -29,6 +29,162 @@
 /* OOM task polling interval */
 #define LIO_OOM_POLL_INTERVAL_MS 250
 
+#define OCTNIC_MAX_SG MAX_SKB_FRAGS
+
+/**
+ * \brief Callback for getting interface configuration
+ * @param status status of request
+ * @param buf pointer to resp structure
+ */
+void lio_if_cfg_callback(struct octeon_device *oct,
+                         u32 status __attribute__((unused)), void *buf)
+{
+    struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+    struct liquidio_if_cfg_context *ctx;
+    struct liquidio_if_cfg_resp *resp;
+
+    resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+    ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+    oct = lio_get_device(ctx->octeon_id);
+    if (resp->status)
+        dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
+                CVM_CAST64(resp->status));
+    WRITE_ONCE(ctx->cond, 1);
+
+    snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
+             resp->cfg_info.liquidio_firmware_version);
+
+    /* This barrier is required to be sure that the response has been
+     * written fully before waking up the handler
+     */
+    wmb();
+
+    wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Delete gather lists
+ * @param lio per-network private data
+ */
+void lio_delete_glists(struct lio *lio)
+{
+    struct octnic_gather *g;
+    int i;
+
+    kfree(lio->glist_lock);
+    lio->glist_lock = NULL;
+
+    if (!lio->glist)
+        return;
+
+    for (i = 0; i < lio->oct_dev->num_iqs; i++) {
+        do {
+            g = (struct octnic_gather *)
+                lio_list_delete_head(&lio->glist[i]);
+            kfree(g);
+        } while (g);
+
+        if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+            lio->glists_dma_base && lio->glists_dma_base[i]) {
+            lio_dma_free(lio->oct_dev,
+                         lio->glist_entry_size * lio->tx_qsize,
+                         lio->glists_virt_base[i],
+                         lio->glists_dma_base[i]);
+        }
+    }
+
+    kfree(lio->glists_virt_base);
+    lio->glists_virt_base = NULL;
+
+    kfree(lio->glists_dma_base);
+    lio->glists_dma_base = NULL;
+
+    kfree(lio->glist);
+    lio->glist = NULL;
+}
+
+/**
+ * \brief Setup gather lists
+ * @param lio per-network private data
+ */
+int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
+{
+    struct octnic_gather *g;
+    int i, j;
+
+    lio->glist_lock =
+        kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
+    if (!lio->glist_lock)
+        return -ENOMEM;
+
+    lio->glist =
+        kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
+    if (!lio->glist) {
+        kfree(lio->glist_lock);
+        lio->glist_lock = NULL;
+
+        return -ENOMEM;
+    }
+
+    lio->glist_entry_size =
+        ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+    /* allocate memory to store virtual and dma base address of
+     * per glist consistent memory
+     */
+    lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+                                    GFP_KERNEL);
+    lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+                                   GFP_KERNEL);
+    if (!lio->glists_virt_base || !lio->glists_dma_base) {
+        lio_delete_glists(lio);
+        return -ENOMEM;
+    }
+
+    for (i = 0; i < num_iqs; i++) {
+        int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+        spin_lock_init(&lio->glist_lock[i]);
+
+        INIT_LIST_HEAD(&lio->glist[i]);
+
+        lio->glists_virt_base[i] =
+            lio_dma_alloc(oct,
+                          lio->glist_entry_size * lio->tx_qsize,
+                          &lio->glists_dma_base[i]);
+        if (!lio->glists_virt_base[i]) {
+            lio_delete_glists(lio);
+            return -ENOMEM;
+        }
+
+        for (j = 0; j < lio->tx_qsize; j++) {
+            g = kzalloc_node(sizeof(*g), GFP_KERNEL,
+                             numa_node);
+            if (!g)
+                g = kzalloc(sizeof(*g), GFP_KERNEL);
+            if (!g)
+                break;
+
+            g->sg = lio->glists_virt_base[i] +
+                    (j * lio->glist_entry_size);
+
+            g->sg_dma_ptr = lio->glists_dma_base[i] +
+                            (j * lio->glist_entry_size);
+
+            list_add_tail(&g->list, &lio->glist[i]);
+        }
+
+        if (j != lio->tx_qsize) {
+            lio_delete_glists(lio);
+            return -ENOMEM;
+        }
+    }
+
+    return 0;
+}
+
 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
 {
     struct lio *lio = GET_LIO(netdev);
...
@@ -880,8 +1036,8 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
     int num_ioq_vectors;
     int irqret, err;
 
+    oct->num_msix_irqs = num_ioqs;
     if (oct->msix_on) {
-        oct->num_msix_irqs = num_ioqs;
         if (OCTEON_CN23XX_PF(oct)) {
             num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
...
...
@@ -361,7 +361,14 @@ lio_ethtool_get_channels(struct net_device *dev,
         rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
         tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
     } else if (OCTEON_CN23XX_PF(oct)) {
-        max_combined = lio->linfo.num_txpciq;
+        if (oct->sriov_info.sriov_enabled) {
+            max_combined = lio->linfo.num_txpciq;
+        } else {
+            struct octeon_config *conf23_pf =
+                CHIP_CONF(oct, cn23xx_pf);
+
+            max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
+        }
         combined_count = oct->num_iqs;
     } else if (OCTEON_CN23XX_VF(oct)) {
         u64 reg_val = 0ULL;
...
@@ -425,9 +432,15 @@ lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
     kfree(oct->irq_name_storage);
     oct->irq_name_storage = NULL;
 
+    if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
+        dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
+        return -1;
+    }
+
     if (octeon_setup_interrupt(oct, num_ioqs)) {
         dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
-        return 1;
+        return -1;
     }
 
     /* Enable Octeon device interrupts */
...
@@ -457,7 +470,16 @@ lio_ethtool_set_channels(struct net_device *dev,
     combined_count = channel->combined_count;
 
     if (OCTEON_CN23XX_PF(oct)) {
-        max_combined = channel->max_combined;
+        if (oct->sriov_info.sriov_enabled) {
+            max_combined = lio->linfo.num_txpciq;
+        } else {
+            struct octeon_config *conf23_pf =
+                CHIP_CONF(oct,
+                          cn23xx_pf);
+
+            max_combined =
+                CFG_GET_IQ_MAX_Q(conf23_pf);
+        }
     } else if (OCTEON_CN23XX_VF(oct)) {
         u64 reg_val = 0ULL;
         u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
...
@@ -485,7 +507,6 @@ lio_ethtool_set_channels(struct net_device *dev,
     if (lio_reset_queues(dev, combined_count))
         return -EINVAL;
 
-    lio_irq_reallocate_irqs(oct, combined_count);
     if (stopped)
         dev->netdev_ops->ndo_open(dev);
 
...
@@ -824,12 +845,120 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
     ering->rx_jumbo_max_pending = 0;
 }
 
+static int lio_23xx_reconfigure_queue_count(struct lio *lio)
+{
+    struct octeon_device *oct = lio->oct_dev;
+    struct liquidio_if_cfg_context *ctx;
+    u32 resp_size, ctx_size, data_size;
+    struct liquidio_if_cfg_resp *resp;
+    struct octeon_soft_command *sc;
+    union oct_nic_if_cfg if_cfg;
+    struct lio_version *vdata;
+    u32 ifidx_or_pfnum;
+    int retval;
+    int j;
+
+    resp_size = sizeof(struct liquidio_if_cfg_resp);
+    ctx_size = sizeof(struct liquidio_if_cfg_context);
+    data_size = sizeof(struct lio_version);
+    sc = (struct octeon_soft_command *)
+        octeon_alloc_soft_command(oct, data_size,
+                                  resp_size, ctx_size);
+    if (!sc) {
+        dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
+                __func__);
+        return -1;
+    }
+
+    resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+    ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+    vdata = (struct lio_version *)sc->virtdptr;
+
+    vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+    vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+    vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
+
+    ifidx_or_pfnum = oct->pf_num;
+    WRITE_ONCE(ctx->cond, 0);
+    ctx->octeon_id = lio_get_device_id(oct);
+    init_waitqueue_head(&ctx->wc);
+
+    if_cfg.u64 = 0;
+    if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
+    if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
+    if_cfg.s.base_queue = oct->sriov_info.pf_srn;
+    if_cfg.s.gmx_port_id = oct->pf_num;
+
+    sc->iq_no = 0;
+    octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+                                OPCODE_NIC_QCOUNT_UPDATE, 0,
+                                if_cfg.u64, 0);
+    sc->callback = lio_if_cfg_callback;
+    sc->callback_arg = sc;
+    sc->wait_time = LIO_IFCFG_WAIT_TIME;
+
+    retval = octeon_send_soft_command(oct, sc);
+    if (retval == IQ_SEND_FAILED) {
+        dev_err(&oct->pci_dev->dev,
+                "iq/oq config failed status: %x\n",
+                retval);
+        goto qcount_update_fail;
+    }
+
+    if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+        dev_err(&oct->pci_dev->dev, "Wait interrupted\n");
+        return -1;
+    }
+
+    retval = resp->status;
+    if (retval) {
+        dev_err(&oct->pci_dev->dev, "iq/oq config failed\n");
+        goto qcount_update_fail;
+    }
+
+    octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+                        (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+    lio->ifidx = ifidx_or_pfnum;
+    lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
+    lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
+    for (j = 0; j < lio->linfo.num_rxpciq; j++) {
+        lio->linfo.rxpciq[j].u64 =
+            resp->cfg_info.linfo.rxpciq[j].u64;
+    }
+
+    for (j = 0; j < lio->linfo.num_txpciq; j++) {
+        lio->linfo.txpciq[j].u64 =
+            resp->cfg_info.linfo.txpciq[j].u64;
+    }
+
+    lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+    lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+    lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+
+    lio->txq = lio->linfo.txpciq[0].s.q_no;
+    lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+
+    octeon_free_soft_command(oct, sc);
+    dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
+             lio->linfo.num_rxpciq);
+
+    return 0;
+
+qcount_update_fail:
+    octeon_free_soft_command(oct, sc);
+
+    return -1;
+}
+
 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
 {
     struct lio *lio = GET_LIO(netdev);
     struct octeon_device *oct = lio->oct_dev;
+    int i, queue_count_update = 0;
     struct napi_struct *napi, *n;
-    int i, update = 0;
+    int ret;
+
+    schedule_timeout_uninterruptible(msecs_to_jiffies(100));
 
     if (wait_for_pending_requests(oct))
         dev_err(&oct->pci_dev->dev, "There were pending requests\n");
...
@@ -838,7 +967,7 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
         dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
 
     if (octeon_set_io_queues_off(oct)) {
-        dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
+        dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
         return -1;
     }
...
@@ -851,9 +980,40 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
         netif_napi_del(napi);
 
     if (num_qs != oct->num_iqs) {
-        netif_set_real_num_rx_queues(netdev, num_qs);
-        netif_set_real_num_tx_queues(netdev, num_qs);
-        update = 1;
+        ret = netif_set_real_num_rx_queues(netdev, num_qs);
+        if (ret) {
+            dev_err(&oct->pci_dev->dev,
+                    "Setting real number rx failed\n");
+            return ret;
+        }
+
+        ret = netif_set_real_num_tx_queues(netdev, num_qs);
+        if (ret) {
+            dev_err(&oct->pci_dev->dev,
+                    "Setting real number tx failed\n");
+            return ret;
+        }
+
+        /* The value of queue_count_update decides whether it is the
+         * queue count or the descriptor count that is being
+         * re-configured.
+         */
+        queue_count_update = 1;
+    }
+
+    /* Re-configuration of queues can happen in two scenarios, SRIOV enabled
+     * and SRIOV disabled. Few things like recreating queue zero, resetting
+     * glists and IRQs are required for both. For the latter, some more
+     * steps like updating sriov_info for the octeon device need to be done.
+     */
+    if (queue_count_update) {
+        lio_delete_glists(lio);
+
+        /* Delete mbox for PF which is SRIOV disabled because sriov_info
+         * will be now changed.
+         */
+        if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
+            oct->fn_list.free_mbox(oct);
     }
 
     for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
...
@@ -868,24 +1028,91 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
         octeon_delete_instr_queue(oct, i);
     }
 
+    if (queue_count_update) {
+        /* For PF re-configure sriov related information */
+        if ((OCTEON_CN23XX_PF(oct)) &&
+            !oct->sriov_info.sriov_enabled) {
+            oct->sriov_info.num_pf_rings = num_qs;
+            if (cn23xx_sriov_config(oct)) {
+                dev_err(&oct->pci_dev->dev,
+                        "Queue reset aborted: SRIOV config failed\n");
+                return -1;
+            }
+
+            num_qs = oct->sriov_info.num_pf_rings;
+        }
+    }
+
     if (oct->fn_list.setup_device_regs(oct)) {
         dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
         return -1;
     }
 
-    if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
-        dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n");
-        return -1;
-    }
+    /* The following are needed in case of queue count re-configuration and
+     * not for descriptor count re-configuration.
+     */
+    if (queue_count_update) {
+        if (octeon_setup_instr_queues(oct))
+            return -1;
+
+        if (octeon_setup_output_queues(oct))
+            return -1;
+
+        /* Recreating mbox for PF that is SRIOV disabled */
+        if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
+            if (oct->fn_list.setup_mbox(oct)) {
+                dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
+                return -1;
+            }
+        }
+
+        /* Deleting and recreating IRQs whether the interface is SRIOV
+         * enabled or disabled.
+         */
+        if (lio_irq_reallocate_irqs(oct, num_qs)) {
+            dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
+            return -1;
+        }
 
-    /* Enable the input and output queues for this Octeon device */
-    if (oct->fn_list.enable_io_queues(oct)) {
-        dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues");
-        return -1;
-    }
+        /* Enable the input and output queues for this Octeon device */
+        if (oct->fn_list.enable_io_queues(oct)) {
+            dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
+            return -1;
+        }
+
+        for (i = 0; i < oct->num_oqs; i++)
+            writel(oct->droq[i]->max_count,
+                   oct->droq[i]->pkts_credit_reg);
+
+        /* Informing firmware about the new queue count. It is required
+         * for firmware to allocate more number of queues than those at
+         * load time.
+         */
+        if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
+            if (lio_23xx_reconfigure_queue_count(lio))
+                return -1;
+        }
+    }
+
+    /* Once firmware is aware of the new value, queues can be recreated */
+    if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
+        dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
+        return -1;
+    }
 
-    if (update && lio_send_queue_count_update(netdev, num_qs))
-        return -1;
+    if (queue_count_update) {
+        if (lio_setup_glists(oct, lio, num_qs)) {
+            dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
+            return -1;
+        }
+
+        /* Send firmware the information about new number of queues
+         * if the interface is a VF or a PF that is SRIOV enabled.
+         */
+        if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
+            if (lio_send_queue_count_update(netdev, num_qs))
+                return -1;
+    }
 
     return 0;
 }
...
@@ -930,7 +1157,7 @@ static int lio_ethtool_set_ringparam(struct net_device *netdev,
         CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
                                     rx_count);
 
-    if (lio_reset_queues(netdev, lio->linfo.num_txpciq))
+    if (lio_reset_queues(netdev, oct->num_iqs))
         goto err_lio_reset_queues;
 
     if (stopped)
...
...
@@ -138,33 +138,10 @@ union tx_info {
  * by this structure in the NIC module.
  */
 
-#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
-
 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
 #define OCTNIC_GSO_MAX_SIZE \
     (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
 
-/** Structure of a node in list of gather components maintained by
- * NIC driver for each network device.
- */
-struct octnic_gather {
-    /** List manipulation. Next and prev pointers. */
-    struct list_head list;
-
-    /** Size of the gather component at sg in bytes. */
-    int sg_size;
-
-    /** Number of bytes that sg was adjusted to make it 8B-aligned. */
-    int adjust;
-
-    /** Gather component that can accommodate max sized fragment list
-     * received from the IP layer.
-     */
-    struct octeon_sg_entry *sg;
-
-    dma_addr_t sg_dma_ptr;
-};
-
 struct handshake {
     struct completion init;
     struct completion started;
...
@@ -520,7 +497,7 @@ static void liquidio_deinit_pci(void)
  */
 static inline int check_txq_status(struct lio *lio)
 {
-    int numqs = lio->netdev->num_tx_queues;
+    int numqs = lio->netdev->real_num_tx_queues;
     int ret_val = 0;
     int q, iq;
...
@@ -541,148 +518,6 @@ static inline int check_txq_status(struct lio *lio)
     return ret_val;
 }
 
-/**
- * Remove the node at the head of the list. The list would be empty at
- * the end of this call if there are no more nodes in the list.
- */
-static inline struct list_head *list_delete_head(struct list_head *root)
-{
-    struct list_head *node;
-
-    if ((root->prev == root) && (root->next == root))
-        node = NULL;
-    else
-        node = root->next;
-
-    if (node)
-        list_del(node);
-
-    return node;
-}
-
-/**
- * \brief Delete gather lists
- * @param lio per-network private data
- */
-static void delete_glists(struct lio *lio)
-{
-    struct octnic_gather *g;
-    int i;
-
-    kfree(lio->glist_lock);
-    lio->glist_lock = NULL;
-
-    if (!lio->glist)
-        return;
-
-    for (i = 0; i < lio->linfo.num_txpciq; i++) {
-        do {
-            g = (struct octnic_gather *)
-                list_delete_head(&lio->glist[i]);
-            if (g)
-                kfree(g);
-        } while (g);
-
-        if (lio->glists_virt_base && lio->glists_virt_base[i] &&
-            lio->glists_dma_base && lio->glists_dma_base[i]) {
-            lio_dma_free(lio->oct_dev,
-                         lio->glist_entry_size * lio->tx_qsize,
-                         lio->glists_virt_base[i],
-                         lio->glists_dma_base[i]);
-        }
-    }
-
-    kfree(lio->glists_virt_base);
-    lio->glists_virt_base = NULL;
-
-    kfree(lio->glists_dma_base);
-    lio->glists_dma_base = NULL;
-
-    kfree(lio->glist);
-    lio->glist = NULL;
-}
-
-/**
- * \brief Setup gather lists
- * @param lio per-network private data
- */
-static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
-{
-    int i, j;
-    struct octnic_gather *g;
-
-    lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
-                              GFP_KERNEL);
-    if (!lio->glist_lock)
-        return -ENOMEM;
-
-    lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
-                         GFP_KERNEL);
-    if (!lio->glist) {
-        kfree(lio->glist_lock);
-        lio->glist_lock = NULL;
-        return -ENOMEM;
-    }
-
-    lio->glist_entry_size =
-        ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
-
-    /* allocate memory to store virtual and dma base address of
-     * per glist consistent memory
-     */
-    lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
-                                    GFP_KERNEL);
-    lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
-                                   GFP_KERNEL);
-    if (!lio->glists_virt_base || !lio->glists_dma_base) {
-        delete_glists(lio);
-        return -ENOMEM;
-    }
-
-    for (i = 0; i < num_iqs; i++) {
-        int numa_node = dev_to_node(&oct->pci_dev->dev);
-
-        spin_lock_init(&lio->glist_lock[i]);
-
-        INIT_LIST_HEAD(&lio->glist[i]);
-
-        lio->glists_virt_base[i] =
-            lio_dma_alloc(oct,
-                          lio->glist_entry_size * lio->tx_qsize,
-                          &lio->glists_dma_base[i]);
-        if (!lio->glists_virt_base[i]) {
-            delete_glists(lio);
-            return -ENOMEM;
-        }
-
-        for (j = 0; j < lio->tx_qsize; j++) {
-            g = kzalloc_node(sizeof(*g), GFP_KERNEL,
-                             numa_node);
-            if (!g)
-                g = kzalloc(sizeof(*g), GFP_KERNEL);
-            if (!g)
-                break;
-
-            g->sg = lio->glists_virt_base[i] +
-                    (j * lio->glist_entry_size);
-
-            g->sg_dma_ptr = lio->glists_dma_base[i] +
-                            (j * lio->glist_entry_size);
-
-            list_add_tail(&g->list, &lio->glist[i]);
-        }
-
-        if (j != lio->tx_qsize) {
-            delete_glists(lio);
-            return -ENOMEM;
-        }
-    }
-
-    return 0;
-}
-
 /**
  * \brief Print link information
  * @param netdev network device
...
@@ -1471,7 +1306,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 
     cleanup_rx_oom_poll_fn(netdev);
 
-    delete_glists(lio);
+    lio_delete_glists(lio);
 
     free_netdev(netdev);
...
@@ -1686,7 +1521,7 @@ static void free_netsgbuf(void *buf)
             i++;
         }
 
-    iq = skb_iq(lio, skb);
+    iq = skb_iq(lio->oct_dev, skb);
     spin_lock(&lio->glist_lock[iq]);
     list_add_tail(&g->list, &lio->glist[iq]);
     spin_unlock(&lio->glist_lock[iq]);
...
@@ -1729,7 +1564,7 @@ static void free_netsgbuf_with_resp(void *buf)
             i++;
         }
 
-    iq = skb_iq(lio, skb);
+    iq = skb_iq(lio->oct_dev, skb);
     spin_lock(&lio->glist_lock[iq]);
     list_add_tail(&g->list, &lio->glist[iq]);
...
@@ -1941,39 +1776,6 @@ static int load_firmware(struct octeon_device *oct)
     return ret;
 }
 
-/**
- * \brief Callback for getting interface configuration
- * @param status status of request
- * @param buf pointer to resp structure
- */
-static void if_cfg_callback(struct octeon_device *oct,
-                            u32 status __attribute__((unused)),
-                            void *buf)
-{
-    struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
-    struct liquidio_if_cfg_resp *resp;
-    struct liquidio_if_cfg_context *ctx;
-
-    resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
-    ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
-    oct = lio_get_device(ctx->octeon_id);
-    if (resp->status)
-        dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
-                CVM_CAST64(resp->status), status);
-    WRITE_ONCE(ctx->cond, 1);
-
-    snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
-             resp->cfg_info.liquidio_firmware_version);
-
-    /* This barrier is required to be sure that the response has been
-     * written fully before waking up the handler
-     */
-    wmb();
-
-    wake_up_interruptible(&ctx->wc);
-}
-
 /**
  * \brief Poll routine for checking transmit queue status
  * @param work work_struct data structure
...
@@ -2049,11 +1851,6 @@ static int liquidio_open(struct net_device *netdev)
 
     ifstate_set(lio, LIO_IFSTATE_RUNNING);
 
-    /* Ready for link status updates */
-    lio->intf_open = 1;
-    netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
-
     if (OCTEON_CN23XX_PF(oct)) {
         if (!oct->msix_on)
             if (setup_tx_poll_fn(netdev))
...
@@ -2063,7 +1860,12 @@ static int liquidio_open(struct net_device *netdev)
             return -1;
     }
 
-    start_txqs(netdev);
+    netif_tx_start_all_queues(netdev);
+
+    /* Ready for link status updates */
+    lio->intf_open = 1;
+    netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
 
     /* tell Octeon to start forwarding packets to host */
     send_rx_ctrl_cmd(lio, 1);
...
@@ -2086,11 +1888,15 @@ static int liquidio_stop(struct net_device *netdev)
 
     ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 
-    netif_tx_disable(netdev);
+    /* Stop any link updates */
+    lio->intf_open = 0;
+
+    stop_txqs(netdev);
 
     /* Inform that netif carrier is down */
     netif_carrier_off(netdev);
+    netif_tx_disable(netdev);
 
-    lio->intf_open = 0;
     lio->linfo.link.s.link_up = 0;
     lio->link_changes++;
...
@@ -2530,7 +2336,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
     lio = GET_LIO(netdev);
     oct = lio->oct_dev;
 
-    q_idx = skb_iq(lio, skb);
+    q_idx = skb_iq(oct, skb);
     tag = q_idx;
     iq_no = lio->linfo.txpciq[q_idx].s.q_no;
...
@@ -2623,7 +2429,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
         spin_lock(&lio->glist_lock[q_idx]);
         g = (struct octnic_gather *)
-            list_delete_head(&lio->glist[q_idx]);
+            lio_list_delete_head(&lio->glist[q_idx]);
         spin_unlock(&lio->glist_lock[q_idx]);
 
         if (!g) {
...
@@ -3496,6 +3302,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
     struct liquidio_if_cfg_resp *resp;
     struct octdev_props *props;
     int retval, num_iqueues, num_oqueues;
+    int max_num_queues = 0;
     union oct_nic_if_cfg if_cfg;
     unsigned int base_queue;
     unsigned int gmx_port_id;
...
@@ -3576,9 +3383,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                                     OPCODE_NIC_IF_CFG, 0,
                                     if_cfg.u64, 0);
 
-        sc->callback = if_cfg_callback;
+        sc->callback = lio_if_cfg_callback;
         sc->callback_arg = sc;
-        sc->wait_time = 3000;
+        sc->wait_time = LIO_IFCFG_WAIT_TIME;
 
         retval = octeon_send_soft_command(octeon_dev, sc);
         if (retval == IQ_SEND_FAILED) {
...
@@ -3632,11 +3439,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                     resp->cfg_info.oqmask);
             goto setup_nic_dev_fail;
         }
+
+        if (OCTEON_CN6XXX(octeon_dev)) {
+            max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
+                                                        cn6xxx));
+        } else if (OCTEON_CN23XX_PF(octeon_dev)) {
+            max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
+                                                        cn23xx_pf));
+        }
+
         dev_dbg(&octeon_dev->pci_dev->dev,
-                "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
+                "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
                 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
-                num_iqueues, num_oqueues);
-        netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
+                num_iqueues, num_oqueues, max_num_queues);
+        netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
 
         if (!netdev) {
             dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
...
@@ -3651,6 +3467,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
         netdev->netdev_ops = &lionetdevops;
         SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
 
+        retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
+        if (retval) {
+            dev_err(&octeon_dev->pci_dev->dev,
+                    "setting real number rx failed\n");
+            goto setup_nic_dev_fail;
+        }
+
+        retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
+        if (retval) {
+            dev_err(&octeon_dev->pci_dev->dev,
+                    "setting real number tx failed\n");
+            goto setup_nic_dev_fail;
+        }
+
         lio = GET_LIO(netdev);
 
         memset(lio, 0, sizeof(struct lio));
...
@@ -3772,7 +3602,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
         lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
         lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
 
-        if (setup_glists(octeon_dev, lio, num_iqueues)) {
+        if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
            dev_err(&octeon_dev->pci_dev->dev,
                    "Gather list allocation failed\n");
            goto setup_nic_dev_fail;
...
@@ -4271,7 +4101,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
     }
     atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
 
-    if (octeon_allocate_ioq_vector(octeon_dev)) {
+    if (octeon_allocate_ioq_vector
+        (octeon_dev,
+         octeon_dev->sriov_info.num_pf_rings)) {
         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
         return 1;
     }
...
...
@@ -69,30 +69,10 @@ union tx_info {
     } s;
 };
 
-#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
-
 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
 #define OCTNIC_GSO_MAX_SIZE \
     (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
 
-struct octnic_gather {
-    /* List manipulation. Next and prev pointers. */
-    struct list_head list;
-
-    /* Size of the gather component at sg in bytes. */
-    int sg_size;
-
-    /* Number of bytes that sg was adjusted to make it 8B-aligned. */
-    int adjust;
-
-    /* Gather component that can accommodate max sized fragment list
-     * received from the IP layer.
-     */
-    struct octeon_sg_entry *sg;
-    dma_addr_t sg_dma_ptr;
-};
-
 static int
 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void liquidio_vf_remove(struct pci_dev *pdev);
...
@@ -284,142 +264,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
     .err_handler = &liquidio_vf_err_handler, /* For AER */
 };
 
-/**
- * Remove the node at the head of the list. The list would be empty at
- * the end of this call if there are no more nodes in the list.
- */
-static struct list_head *list_delete_head(struct list_head *root)
-{
-    struct list_head *node;
-
-    if ((root->prev == root) && (root->next == root))
-        node = NULL;
-    else
-        node = root->next;
-
-    if (node)
-        list_del(node);
-
-    return node;
-}
-
-/**
- * \brief Delete gather lists
- * @param lio per-network private data
- */
-static void delete_glists(struct lio *lio)
-{
-    struct octnic_gather *g;
-    int i;
-
-    kfree(lio->glist_lock);
-    lio->glist_lock = NULL;
-
-    if (!lio->glist)
-        return;
-
-    for (i = 0; i < lio->linfo.num_txpciq; i++) {
-        do {
-            g = (struct octnic_gather *)
-                list_delete_head(&lio->glist[i]);
-            kfree(g);
-        } while (g);
-
-        if (lio->glists_virt_base && lio->glists_virt_base[i] &&
-            lio->glists_dma_base && lio->glists_dma_base[i]) {
-            lio_dma_free(lio->oct_dev,
-                         lio->glist_entry_size * lio->tx_qsize,
-                         lio->glists_virt_base[i],
-                         lio->glists_dma_base[i]);
-        }
-    }
-
-    kfree(lio->glists_virt_base);
-    lio->glists_virt_base = NULL;
-
-    kfree(lio->glists_dma_base);
-    lio->glists_dma_base = NULL;
-
-    kfree(lio->glist);
-    lio->glist = NULL;
-}
-
-/**
- * \brief Setup gather lists
- * @param lio per-network private data
- */
-static int setup_glists(struct lio *lio, int num_iqs)
-{
-    struct octnic_gather *g;
-    int i, j;
-
-    lio->glist_lock =
-        kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
-    if (!lio->glist_lock)
-        return -ENOMEM;
-
-    lio->glist =
-        kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
-    if (!lio->glist) {
-        kfree(lio->glist_lock);
-        lio->glist_lock = NULL;
-        return -ENOMEM;
-    }
-
-    lio->glist_entry_size =
-        ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
-
-    /* allocate memory to store virtual and dma base address of
-     * per glist consistent memory
-     */
-    lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
-                                    GFP_KERNEL);
-    lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
-                                   GFP_KERNEL);
-    if (!lio->glists_virt_base || !lio->glists_dma_base) {
-        delete_glists(lio);
-        return -ENOMEM;
-    }
-
-    for (i = 0; i < num_iqs; i++) {
-        spin_lock_init(&lio->glist_lock[i]);
-
-        INIT_LIST_HEAD(&lio->glist[i]);
-
-        lio->glists_virt_base[i] =
-            lio_dma_alloc(lio->oct_dev,
-                          lio->glist_entry_size * lio->tx_qsize,
-                          &lio->glists_dma_base[i]);
-        if (!lio->glists_virt_base[i]) {
-            delete_glists(lio);
-            return -ENOMEM;
-        }
-
-        for (j = 0; j < lio->tx_qsize; j++) {
-            g = kzalloc(sizeof(*g), GFP_KERNEL);
-            if (!g)
-                break;
-
-            g->sg = lio->glists_virt_base[i] +
-                    (j * lio->glist_entry_size);
-
-            g->sg_dma_ptr = lio->glists_dma_base[i] +
-                            (j * lio->glist_entry_size);
-
-            list_add_tail(&g->list, &lio->glist[i]);
-        }
-
-        if (j != lio->tx_qsize) {
-            delete_glists(lio);
-            return -ENOMEM;
-        }
-    }
-
-    return 0;
-}
-
 /**
  * \brief Print link information
  * @param netdev network device
...
@@ -856,7 +700,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 
     cleanup_link_status_change_wq(netdev);
 
-    delete_glists(lio);
+    lio_delete_glists(lio);
 
     free_netdev(netdev);
...
@@ -1005,7 +849,7 @@ static void free_netsgbuf(void *buf)
             i++;
         }
 
-    iq = skb_iq(lio, skb);
+    iq = skb_iq(lio->oct_dev, skb);
     spin_lock(&lio->glist_lock[iq]);
     list_add_tail(&g->list, &lio->glist[iq]);
...
@@ -1049,7 +893,7 @@ static void free_netsgbuf_with_resp(void *buf)
             i++;
         }
 
-    iq = skb_iq(lio, skb);
+    iq = skb_iq(lio->oct_dev, skb);
     spin_lock(&lio->glist_lock[iq]);
     list_add_tail(&g->list, &lio->glist[iq]);
...
@@ -1058,38 +902,6 @@ static void free_netsgbuf_with_resp(void *buf)
     /* Don't free the skb yet */
 }
 
-/**
- * \brief Callback for getting interface configuration
- * @param status status of request
- * @param buf pointer to resp structure
- */
-static void if_cfg_callback(struct octeon_device *oct,
-                            u32 status __attribute__((unused)), void *buf)
-{
-    struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
-    struct liquidio_if_cfg_context *ctx;
-    struct liquidio_if_cfg_resp *resp;
-
-    resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
-    ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
-    oct = lio_get_device(ctx->octeon_id);
-    if (resp->status)
-        dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
-                CVM_CAST64(resp->status));
-    WRITE_ONCE(ctx->cond, 1);
-
-    snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
-             resp->cfg_info.liquidio_firmware_version);
-
-    /* This barrier is required to be sure that the response has been
-     * written fully before waking up the handler
-     */
-    wmb();
-
-    wake_up_interruptible(&ctx->wc);
-}
-
 /**
  * \brief Net device open for LiquidIO
  * @param netdev network device
...
@@ -1595,7 +1407,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
     lio = GET_LIO(netdev);
     oct = lio->oct_dev;
 
-    q_idx = skb_iq(lio, skb);
+    q_idx = skb_iq(lio->oct_dev, skb);
     tag = q_idx;
     iq_no = lio->linfo.txpciq[q_idx].s.q_no;
...
@@ -1676,8 +1488,8 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
         int i, frags;
 
         spin_lock(&lio->glist_lock[q_idx]);
-        g = (struct octnic_gather *)list_delete_head(
-            &lio->glist[q_idx]);
+        g = (struct octnic_gather *)
+            lio_list_delete_head(&lio->glist[q_idx]);
         spin_unlock(&lio->glist_lock[q_idx]);
 
         if (!g) {
...
@@ -2171,7 +1983,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                                     OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
                                     0);
 
-        sc->callback = if_cfg_callback;
+        sc->callback = lio_if_cfg_callback;
         sc->callback_arg = sc;
         sc->wait_time = 5000;
...
@@ -2336,7 +2148,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
         lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
         lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
 
-        if (setup_glists(lio, num_iqueues)) {
+        if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
            dev_err(&octeon_dev->pci_dev->dev,
                    "Gather list allocation failed\n");
            goto setup_nic_dev_fail;
...
@@ -2527,7 +2339,7 @@ static int octeon_device_init(struct octeon_device *oct)
     }
     atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
 
-    if (octeon_allocate_ioq_vector(oct)) {
+    if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
         dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
         return 1;
     }
...
...
@@ -84,6 +84,7 @@ enum octeon_tag_type {
 #define OPCODE_NIC_IF_CFG              0x09
 #define OPCODE_NIC_VF_DRV_NOTICE       0x0A
 #define OPCODE_NIC_INTRMOD_PARAMS      0x0B
+#define OPCODE_NIC_QCOUNT_UPDATE       0x12
 #define OPCODE_NIC_SET_TRUSTED_VF      0x13
 #define OPCODE_NIC_SYNC_OCTEON_TIME    0x14
 #define VF_DRV_LOADED                  1
...
...
@@ -824,23 +824,18 @@ int octeon_deregister_device(struct octeon_device *oct)
 }
 
 int
-octeon_allocate_ioq_vector(struct octeon_device *oct)
+octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
 {
-    int i, num_ioqs = 0;
     struct octeon_ioq_vector *ioq_vector;
     int cpu_num;
     int size;
-
-    if (OCTEON_CN23XX_PF(oct))
-        num_ioqs = oct->sriov_info.num_pf_rings;
-    else if (OCTEON_CN23XX_VF(oct))
-        num_ioqs = oct->sriov_info.rings_per_vf;
+    int i;
 
     size = sizeof(struct octeon_ioq_vector) * num_ioqs;
 
     oct->ioq_vector = vzalloc(size);
     if (!oct->ioq_vector)
-        return 1;
+        return -1;
     for (i = 0; i < num_ioqs; i++) {
         ioq_vector = &oct->ioq_vector[i];
         ioq_vector->oct_dev = oct;
...
@@ -856,6 +851,7 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
         else
             ioq_vector->ioq_num = i;
     }
+
     return 0;
 }
...
...
@@ -867,7 +867,7 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
 struct octeon_config *octeon_get_conf(struct octeon_device *oct);
 
 void octeon_free_ioq_vector(struct octeon_device *oct);
-int octeon_allocate_ioq_vector(struct octeon_device *oct);
+int octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs);
 void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
 
 /* LiquidIO driver pivate flags */
...
...
@@ -47,6 +47,29 @@ struct liquidio_if_cfg_resp {
     u64 status;
 };
 
+#define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */
+
+/* Structure of a node in list of gather components maintained by
+ * NIC driver for each network device.
+ */
+struct octnic_gather {
+    /* List manipulation. Next and prev pointers. */
+    struct list_head list;
+
+    /* Size of the gather component at sg in bytes. */
+    int sg_size;
+
+    /* Number of bytes that sg was adjusted to make it 8B-aligned. */
+    int adjust;
+
+    /* Gather component that can accommodate max sized fragment list
+     * received from the IP layer.
+     */
+    struct octeon_sg_entry *sg;
+    dma_addr_t sg_dma_ptr;
+};
+
 struct oct_nic_stats_resp {
     u64 rh;
     struct oct_link_stats stats;
...
@@ -199,6 +222,14 @@ int lio_wait_for_clean_oq(struct octeon_device *oct);
  */
 void liquidio_set_ethtool_ops(struct net_device *netdev);
 
+void lio_if_cfg_callback(struct octeon_device *oct,
+                         u32 status __attribute__((unused)),
+                         void *buf);
+
+void lio_delete_glists(struct lio *lio);
+
+int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);
+
 /**
  * \brief Net device change_mtu
  * @param netdev network device
...
@@ -517,7 +548,7 @@ static inline void stop_txqs(struct net_device *netdev)
 {
     int i;
 
-    for (i = 0; i < netdev->num_tx_queues; i++)
+    for (i = 0; i < netdev->real_num_tx_queues; i++)
         netif_stop_subqueue(netdev, i);
 }
 
...
@@ -530,7 +561,7 @@ static inline void wake_txqs(struct net_device *netdev)
     struct lio *lio = GET_LIO(netdev);
     int i, qno;
 
-    for (i = 0; i < netdev->num_tx_queues; i++) {
+    for (i = 0; i < netdev->real_num_tx_queues; i++) {
         qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;
 
         if (__netif_subqueue_stopped(netdev, i)) {
...
@@ -551,14 +582,33 @@ static inline void start_txqs(struct net_device *netdev)
     int i;
 
     if (lio->linfo.link.s.link_up) {
-        for (i = 0; i < netdev->num_tx_queues; i++)
+        for (i = 0; i < netdev->real_num_tx_queues; i++)
             netif_start_subqueue(netdev, i);
     }
 }
 
-static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
+static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
+{
+    return skb->queue_mapping % oct->num_iqs;
+}
+
+/**
+ * Remove the node at the head of the list. The list would be empty at
+ * the end of this call if there are no more nodes in the list.
+ */
+static inline struct list_head *lio_list_delete_head(struct list_head *root)
 {
-    return skb->queue_mapping % lio->linfo.num_txpciq;
+    struct list_head *node;
+
+    if (root->prev == root && root->next == root)
+        node = NULL;
+    else
+        node = root->next;
+
+    if (node)
+        list_del(node);
+
+    return node;
 }
 
 #endif