Commit 869cec99 authored by David S. Miller

Merge branch 'liquidio-adding-support-for-ethtool-set-channels-feature'

Intiyaz Basha says:

====================
liquidio: adding support for ethtool --set-channels feature

Code reorganization is required for adding ethtool --set-channels feature.
First three patches are for code reorganization.  The last patch is for
adding this feature.
====================
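For context: once applied, the feature is exercised from userspace through
ethtool's channel interface (the interface name below is illustrative, not
part of the patch):

    ethtool -l eth0               # query current and maximum channel counts
    ethtool -L eth0 combined 8    # request 8 combined rx/tx queue pairs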
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 463910e2 a82457f1
@@ -275,6 +275,11 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
		break;

+	case OCTNET_CMD_QUEUE_COUNT_CTL:
+		netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
+			   nctrl->ncmd.s.param1);
+		break;
+
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
@@ -689,7 +694,8 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
-int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
+int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
+			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
@@ -717,7 +723,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
-	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
@@ -761,7 +767,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
	}

	/* set up IQs. */
-	for (q = 0; q < lio->linfo.num_txpciq; q++) {
+	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
			octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
@@ -788,3 +794,298 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
	return 0;
}
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
struct octeon_device *oct = droq->oct_dev;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
if (droq->ops.poll_mode) {
droq->ops.napi_fn(droq);
} else {
if (ret & MSIX_PO_INT) {
if (OCTEON_CN23XX_VF(oct))
dev_err(&oct->pci_dev->dev,
"should not come here should not get rx when poll mode = 0 for vf\n");
tasklet_schedule(&oct_priv->droq_tasklet);
return 1;
}
/* this will be flushed periodically by check iq db */
if (ret & MSIX_PI_INT)
return 0;
}
return 0;
}
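A note on the two flag bits tested above: MSIX_PO_INT indicates pending
packet-output (receive) work and MSIX_PI_INT pending packet-input (transmit)
work. Only the output side is scheduled immediately; as the in-line comment
says, pending input-queue doorbells are flushed periodically instead.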
irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
struct octeon_device *oct = ioq_vector->oct_dev;
struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
u64 ret;
ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
liquidio_schedule_msix_droq_pkt_handler(droq, ret);
return IRQ_HANDLED;
}
/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct octeon_droq *droq;
u64 oq_no;
if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
oq_no++) {
if (!(oct->droq_intr & BIT_ULL(oq_no)))
continue;
droq = oct->droq[oq_no];
if (droq->ops.poll_mode) {
droq->ops.napi_fn(droq);
oct_priv->napi_mask |= (1 << oq_no);
} else {
tasklet_schedule(&oct_priv->droq_tasklet);
}
}
}
}
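The poll_mode/napi_fn split used by the two scheduling paths above is
established when each DROQ is registered. A minimal sketch of the
registration side, using field and callback names found elsewhere in this
driver (illustration only, not part of the patch):

    struct octeon_droq_ops droq_ops;

    memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
    droq_ops.fptr = liquidio_push_packet;          /* per-packet rx callback */
    droq_ops.farg = (void *)netdev;                /* argument handed back to fptr */
    droq_ops.poll_mode = 1;                        /* defer rx work to NAPI */
    droq_ops.napi_fn = liquidio_napi_drv_callback; /* schedules the queue's NAPI */
    octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);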
/**
* \brief Interrupt handler for octeon
* @param irq unused
* @param dev octeon device
*/
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
void *dev)
{
struct octeon_device *oct = (struct octeon_device *)dev;
irqreturn_t ret;
/* Disable our interrupts for the duration of ISR */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
ret = oct->fn_list.process_interrupt_regs(oct);
if (ret == IRQ_HANDLED)
liquidio_schedule_droq_pkt_handlers(oct);
/* Re-enable our interrupts */
if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
return ret;
}
/**
* \brief Setup interrupt for octeon device
* @param oct octeon device
*
* Enable interrupt in Octeon device as given in the PCI interrupt mask.
*/
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
struct msix_entry *msix_entries;
char *queue_irq_names = NULL;
int i, num_interrupts = 0;
int num_alloc_ioq_vectors;
char *aux_irq_name = NULL;
int num_ioq_vectors;
int irqret, err;
oct->num_msix_irqs = num_ioqs;
if (oct->msix_on) {
if (OCTEON_CN23XX_PF(oct)) {
num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
/* one non ioq interrupt for handling
* sli_mac_pf_int_sum
*/
oct->num_msix_irqs += 1;
} else if (OCTEON_CN23XX_VF(oct)) {
num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
}
/* allocate storage for the names assigned to each irq */
oct->irq_name_storage =
kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
if (!oct->irq_name_storage) {
dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
return -ENOMEM;
}
queue_irq_names = oct->irq_name_storage;
if (OCTEON_CN23XX_PF(oct))
aux_irq_name = &queue_irq_names
[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
oct->msix_entries = kcalloc(oct->num_msix_irqs,
sizeof(struct msix_entry),
GFP_KERNEL);
if (!oct->msix_entries) {
dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
return -ENOMEM;
}
msix_entries = (struct msix_entry *)oct->msix_entries;
/* Assumption: the PF's MSI-X vector entries start at pf_srn (not 0)
 * and the last one is trs. If that layout changes, change this code.
 */
if (OCTEON_CN23XX_PF(oct)) {
for (i = 0; i < oct->num_msix_irqs - 1; i++)
msix_entries[i].entry =
oct->sriov_info.pf_srn + i;
msix_entries[oct->num_msix_irqs - 1].entry =
oct->sriov_info.trs;
} else if (OCTEON_CN23XX_VF(oct)) {
for (i = 0; i < oct->num_msix_irqs; i++)
msix_entries[i].entry = i;
}
num_alloc_ioq_vectors = pci_enable_msix_range(
oct->pci_dev, msix_entries,
oct->num_msix_irqs,
oct->num_msix_irqs);
if (num_alloc_ioq_vectors < 0) {
dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
kfree(oct->msix_entries);
oct->msix_entries = NULL;
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
return num_alloc_ioq_vectors;
}
dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
num_ioq_vectors = oct->num_msix_irqs;
/** For PF, there is one non-ioq interrupt handler */
if (OCTEON_CN23XX_PF(oct)) {
num_ioq_vectors -= 1;
snprintf(aux_irq_name, INTRNAMSIZ,
"LiquidIO%u-pf%u-aux", oct->octeon_id,
oct->pf_num);
irqret = request_irq(
msix_entries[num_ioq_vectors].vector,
liquidio_legacy_intr_handler, 0,
aux_irq_name, oct);
if (irqret) {
dev_err(&oct->pci_dev->dev,
"Request_irq failed for MSIX interrupt Error: %d\n",
irqret);
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
oct->msix_entries = NULL;
return irqret;
}
}
for (i = 0 ; i < num_ioq_vectors ; i++) {
if (OCTEON_CN23XX_PF(oct))
snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
oct->octeon_id, oct->pf_num, i);
if (OCTEON_CN23XX_VF(oct))
snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
oct->octeon_id, oct->vf_num, i);
irqret = request_irq(msix_entries[i].vector,
liquidio_msix_intr_handler, 0,
&queue_irq_names[IRQ_NAME_OFF(i)],
&oct->ioq_vector[i]);
if (irqret) {
dev_err(&oct->pci_dev->dev,
"Request_irq failed for MSIX interrupt Error: %d\n",
irqret);
/* Free the non-ioq irq vector here. */
free_irq(msix_entries[num_ioq_vectors].vector,
oct);
while (i) {
i--;
/** clearing affinity mask. */
irq_set_affinity_hint(
msix_entries[i].vector,
NULL);
free_irq(msix_entries[i].vector,
&oct->ioq_vector[i]);
}
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
oct->msix_entries = NULL;
return irqret;
}
oct->ioq_vector[i].vector = msix_entries[i].vector;
/* assign the cpu mask for this msix interrupt vector */
irq_set_affinity_hint(msix_entries[i].vector,
&oct->ioq_vector[i].affinity_mask
);
}
dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
oct->octeon_id);
} else {
err = pci_enable_msi(oct->pci_dev);
if (err)
dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
err);
else
oct->flags |= LIO_FLAG_MSI_ENABLED;
/* allocate storage for the names assigned to the irq */
oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
if (!oct->irq_name_storage)
return -ENOMEM;
queue_irq_names = oct->irq_name_storage;
if (OCTEON_CN23XX_PF(oct))
snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
"LiquidIO%u-pf%u-rxtx-%u",
oct->octeon_id, oct->pf_num, 0);
if (OCTEON_CN23XX_VF(oct))
snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
"LiquidIO%u-vf%u-rxtx-%u",
oct->octeon_id, oct->vf_num, 0);
irqret = request_irq(oct->pci_dev->irq,
liquidio_legacy_intr_handler,
IRQF_SHARED,
&queue_irq_names[IRQ_NAME_OFF(0)], oct);
if (irqret) {
if (oct->flags & LIO_FLAG_MSI_ENABLED)
pci_disable_msi(oct->pci_dev);
dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
irqret);
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
return irqret;
}
}
return 0;
}
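For reference, the IRQ_NAME_OFF() indexing used above assumes the name
storage is a single flat buffer of fixed INTRNAMSIZ-byte slots, one per
interrupt; conceptually (the macro body shown is an assumption inferred from
the kcalloc(num_interrupts, INTRNAMSIZ, ...) allocation, not quoted from the
patch):

    /* slot i of irq_name_storage starts at byte i * INTRNAMSIZ */
    #define IRQ_NAME_OFF(i) ((i) * INTRNAMSIZ)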
@@ -31,6 +31,7 @@
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

+static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
static int octnet_get_link_stats(struct net_device *netdev);

struct oct_intrmod_context {
@@ -300,6 +301,35 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}
static int
lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
nctrl.ncmd.s.param1 = num_queues;
nctrl.ncmd.s.param2 = num_queues;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
ret);
return -1;
}
return 0;
}
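When the firmware acknowledges this control command, completion arrives in
liquidio_link_ctrl_cmd_completion(), whose new OCTNET_CMD_QUEUE_COUNT_CTL
case (first hunk of this series) logs the updated queue count from param1.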
static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
@@ -307,6 +337,7 @@ lio_ethtool_get_channels(struct net_device *dev,
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
+	u32 combined_count = 0, max_combined = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
@@ -316,22 +347,137 @@ lio_ethtool_get_channels(struct net_device *dev,
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct)) {
-		max_rx = oct->sriov_info.num_pf_rings;
-		max_tx = oct->sriov_info.num_pf_rings;
-		rx_count = lio->linfo.num_rxpciq;
-		tx_count = lio->linfo.num_txpciq;
+		max_combined = lio->linfo.num_txpciq;
+		combined_count = oct->num_iqs;
	} else if (OCTEON_CN23XX_VF(oct)) {
-		max_tx = oct->sriov_info.rings_per_vf;
-		max_rx = oct->sriov_info.rings_per_vf;
-		rx_count = lio->linfo.num_rxpciq;
-		tx_count = lio->linfo.num_txpciq;
+		u64 reg_val = 0ULL;
+		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
+
+		reg_val = octeon_read_csr64(oct, ctrl);
+		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+		combined_count = oct->num_iqs;
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
+	channel->max_combined = max_combined;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
+	channel->combined_count = combined_count;
+}
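Note that the CN23XX paths now report combined channels only: in ethtool
terms a combined channel is an rx/tx queue pair serviced by one interrupt
vector, which matches how this driver pairs each input queue with an output
queue per MSI-X "rxtx" vector.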
static int
lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
{
struct msix_entry *msix_entries;
int num_msix_irqs = 0;
int i;
if (!oct->msix_on)
return 0;
/* Disable the input and output queues now. No more packets will
* arrive from Octeon.
*/
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
if (oct->msix_on) {
if (OCTEON_CN23XX_PF(oct))
num_msix_irqs = oct->num_msix_irqs - 1;
else if (OCTEON_CN23XX_VF(oct))
num_msix_irqs = oct->num_msix_irqs;
msix_entries = (struct msix_entry *)oct->msix_entries;
for (i = 0; i < num_msix_irqs; i++) {
if (oct->ioq_vector[i].vector) {
/* clear the affinity_cpumask */
irq_set_affinity_hint(msix_entries[i].vector,
NULL);
free_irq(msix_entries[i].vector,
&oct->ioq_vector[i]);
oct->ioq_vector[i].vector = 0;
}
}
/* non-iov vector's argument is oct struct */
if (OCTEON_CN23XX_PF(oct))
free_irq(msix_entries[i].vector, oct);
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
}
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
if (octeon_setup_interrupt(oct, num_ioqs)) {
dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
return 1;
}
/* Enable Octeon device interrupts */
oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
return 0;
}
static int
lio_ethtool_set_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
u32 combined_count, max_combined;
struct lio *lio = GET_LIO(dev);
struct octeon_device *oct = lio->oct_dev;
int stopped = 0;
if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
return -EINVAL;
}
if (!channel->combined_count || channel->other_count ||
channel->rx_count || channel->tx_count)
return -EINVAL;
combined_count = channel->combined_count;
if (OCTEON_CN23XX_PF(oct)) {
max_combined = channel->max_combined;
} else if (OCTEON_CN23XX_VF(oct)) {
u64 reg_val = 0ULL;
u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
reg_val = octeon_read_csr64(oct, ctrl);
reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
} else {
return -EINVAL;
}
if (combined_count > max_combined || combined_count < 1)
return -EINVAL;
if (combined_count == oct->num_iqs)
return 0;
ifstate_set(lio, LIO_IFSTATE_RESETTING);
if (netif_running(dev)) {
dev->netdev_ops->ndo_stop(dev);
stopped = 1;
}
if (lio_reset_queues(dev, combined_count))
return -EINVAL;
lio_irq_reallocate_irqs(oct, combined_count);
if (stopped)
dev->netdev_ops->ndo_open(dev);
ifstate_reset(lio, LIO_IFSTATE_RESETTING);
return 0;
}
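Taken together, lio_ethtool_set_channels() sequences the reconfiguration as:
validate the request (combined channels only, within max_combined, firmware
1.6.1 or newer), mark the interface LIO_IFSTATE_RESETTING, stop it if
running, rebuild the queues with lio_reset_queues(), rebuild the MSI-X
vectors with lio_irq_reallocate_irqs(), then reopen and clear the resetting
state.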
static int lio_get_eeprom_len(struct net_device *netdev)
@@ -664,15 +810,12 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
	ering->rx_jumbo_max_pending = 0;
}

-static int lio_reset_queues(struct net_device *netdev)
+static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;
-	int i;
+	int i, update = 0;

-	dev_dbg(&oct->pci_dev->dev, "%s:%d ifidx %d\n",
-		__func__, __LINE__, lio->ifidx);
-
	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
@@ -693,6 +836,12 @@ static int lio_reset_queues(struct net_device *netdev)
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

+	if (num_qs != oct->num_iqs) {
+		netif_set_real_num_rx_queues(netdev, num_qs);
+		netif_set_real_num_tx_queues(netdev, num_qs);
+		update = 1;
+	}
+
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
@@ -710,7 +859,7 @@ static int lio_reset_queues(struct net_device *netdev)
		return -1;
	}

-	if (liquidio_setup_io_queues(oct, 0)) {
+	if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
		dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n");
		return -1;
	}
@@ -721,6 +870,9 @@ static int lio_reset_queues(struct net_device *netdev)
		return -1;
	}

+	if (update && lio_send_queue_count_update(netdev, num_qs))
+		return -1;
+
	return 0;
}
@@ -764,7 +916,7 @@ static int lio_ethtool_set_ringparam(struct net_device *netdev,
	CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
				    rx_count);

-	if (lio_reset_queues(netdev))
+	if (lio_reset_queues(netdev, lio->linfo.num_txpciq))
		goto err_lio_reset_queues;

	if (stopped)
@@ -1194,7 +1346,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

-	for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
+	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;

		/* packets to network port */
@@ -1236,7 +1388,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
	}

	/* RX */
-	for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
+	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;

		/* packets sent to TCP/IP network stack */
@@ -2705,6 +2857,7 @@ static const struct ethtool_ops lio_ethtool_ops = {
	.get_ringparam = lio_ethtool_get_ringparam,
	.set_ringparam = lio_ethtool_set_ringparam,
	.get_channels = lio_ethtool_get_channels,
+	.set_channels = lio_ethtool_set_channels,
	.set_phys_id = lio_set_phys_id,
	.get_eeprom_len = lio_get_eeprom_len,
	.get_eeprom = lio_get_eeprom,
@@ -2731,6 +2884,7 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
	.get_ringparam = lio_ethtool_get_ringparam,
	.set_ringparam = lio_ethtool_set_ringparam,
	.get_channels = lio_ethtool_get_channels,
+	.set_channels = lio_ethtool_set_channels,
	.get_strings = lio_vf_get_strings,
	.get_ethtool_stats = lio_vf_get_ethtool_stats,
	.get_regs_len = lio_get_regs_len,
......
@@ -107,12 +107,6 @@ struct octnic_gather {
	dma_addr_t sg_dma_ptr;
};

-struct octeon_device_priv {
-	/* Tasklet structures for this device. */
-	struct tasklet_struct droq_tasklet;
-	unsigned long napi_mask;
-};
-
static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
@@ -348,7 +342,7 @@ static void txqs_wake(struct net_device *netdev)
	int i;

	for (i = 0; i < netdev->num_tx_queues; i++) {
-		int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)]
+		int qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs]
			  .s.q_no;

		if (__netif_subqueue_stopped(netdev, i)) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
@@ -648,143 +642,6 @@ static void update_link_status(struct net_device *netdev,
	}
}
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
struct octeon_device *oct = droq->oct_dev;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
if (droq->ops.poll_mode) {
droq->ops.napi_fn(droq);
} else {
if (ret & MSIX_PO_INT) {
dev_err(&oct->pci_dev->dev,
"should not come here should not get rx when poll mode = 0 for vf\n");
tasklet_schedule(&oct_priv->droq_tasklet);
return 1;
}
/* this will be flushed periodically by check iq db */
if (ret & MSIX_PI_INT)
return 0;
}
return 0;
}
static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
struct octeon_device *oct = ioq_vector->oct_dev;
struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
u64 ret;
ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
liquidio_schedule_msix_droq_pkt_handler(droq, ret);
return IRQ_HANDLED;
}
/**
* \brief Setup interrupt for octeon device
* @param oct octeon device
*
* Enable interrupt in Octeon device as given in the PCI interrupt mask.
*/
static int octeon_setup_interrupt(struct octeon_device *oct)
{
struct msix_entry *msix_entries;
char *queue_irq_names = NULL;
int num_alloc_ioq_vectors;
int num_ioq_vectors;
int irqret;
int i;
if (oct->msix_on) {
oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
/* allocate storage for the names assigned to each irq */
oct->irq_name_storage =
kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ,
GFP_KERNEL);
if (!oct->irq_name_storage) {
dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
return -ENOMEM;
}
queue_irq_names = oct->irq_name_storage;
oct->msix_entries = kcalloc(
oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
if (!oct->msix_entries) {
dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
return -ENOMEM;
}
msix_entries = (struct msix_entry *)oct->msix_entries;
for (i = 0; i < oct->num_msix_irqs; i++)
msix_entries[i].entry = i;
num_alloc_ioq_vectors = pci_enable_msix_range(
oct->pci_dev, msix_entries,
oct->num_msix_irqs,
oct->num_msix_irqs);
if (num_alloc_ioq_vectors < 0) {
dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
kfree(oct->msix_entries);
oct->msix_entries = NULL;
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
return num_alloc_ioq_vectors;
}
dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
num_ioq_vectors = oct->num_msix_irqs;
for (i = 0; i < num_ioq_vectors; i++) {
snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
"LiquidIO%u-vf%u-rxtx-%u",
oct->octeon_id, oct->vf_num, i);
irqret = request_irq(msix_entries[i].vector,
liquidio_msix_intr_handler, 0,
&queue_irq_names[IRQ_NAME_OFF(i)],
&oct->ioq_vector[i]);
if (irqret) {
dev_err(&oct->pci_dev->dev,
"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
irqret);
while (i) {
i--;
irq_set_affinity_hint(
msix_entries[i].vector, NULL);
free_irq(msix_entries[i].vector,
&oct->ioq_vector[i]);
}
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
return irqret;
}
oct->ioq_vector[i].vector = msix_entries[i].vector;
/* assign the cpu mask for this msix interrupt vector */
irq_set_affinity_hint(
msix_entries[i].vector,
(&oct->ioq_vector[i].affinity_mask));
}
dev_dbg(&oct->pci_dev->dev,
"OCTEON[%d]: MSI-X enabled\n", oct->octeon_id);
}
return 0;
}
/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
@@ -893,10 +750,14 @@ static void octeon_destroy_resources(struct octeon_device *oct)
	if (oct->msix_on) {
		msix_entries = (struct msix_entry *)oct->msix_entries;
		for (i = 0; i < oct->num_msix_irqs; i++) {
-			irq_set_affinity_hint(msix_entries[i].vector,
-					      NULL);
-			free_irq(msix_entries[i].vector,
-				 &oct->ioq_vector[i]);
+			if (oct->ioq_vector[i].vector) {
+				irq_set_affinity_hint(
+					msix_entries[i].vector,
+					NULL);
+				free_irq(msix_entries[i].vector,
+					 &oct->ioq_vector[i]);
+				oct->ioq_vector[i].vector = 0;
+			}
		}
		pci_disable_msix(oct->pci_dev);
		kfree(oct->msix_entries);
@@ -1129,7 +990,7 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
-		for (j = 0; j < lio->linfo.num_rxpciq; j++)
+		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}
@@ -1217,7 +1078,7 @@ static int check_txq_state(struct lio *lio, struct sk_buff *skb)
	if (netif_is_multiqueue(lio->netdev)) {
		q = skb->queue_mapping;
-		iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
+		iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
	} else {
		iq = lio->txq;
		q = iq;
@@ -1637,7 +1498,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return stats;

-	for (i = 0; i < lio->linfo.num_txpciq; i++) {
+	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
@@ -1653,7 +1514,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
	drop = 0;
	bytes = 0;

-	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
+	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
@@ -2608,7 +2469,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
	/* Copy MAC Address to OS network device structure */
	ether_addr_copy(netdev->dev_addr, mac);

-	if (liquidio_setup_io_queues(octeon_dev, i)) {
+	if (liquidio_setup_io_queues(octeon_dev, i,
+				     lio->linfo.num_txpciq,
+				     lio->linfo.num_rxpciq)) {
		dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
		goto setup_nic_dev_fail;
	}
@@ -2831,7 +2694,7 @@ static int octeon_device_init(struct octeon_device *oct)
		 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);

	/* Setup the interrupt handler and record the INT SUM register address */
-	if (octeon_setup_interrupt(oct))
+	if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
		return 1;

	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
......
@@ -226,6 +226,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_SET_UC_LIST		0x1b
#define OCTNET_CMD_SET_VF_LINKSTATE	0x1c

+#define OCTNET_CMD_QUEUE_COUNT_CTL	0x1f
+
#define OCTNET_CMD_VXLAN_PORT_ADD	0x0
#define OCTNET_CMD_VXLAN_PORT_DEL	0x1
#define OCTNET_CMD_RXCSUM_ENABLE	0x0
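The new 0x1f opcode joins the OCTNET_CMD_* command namespace shared with the
firmware; lio_send_queue_count_update() above carries the requested count in
both param1 and param2 of the control packet.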
......
@@ -35,6 +35,12 @@
#define DRV_NAME "LiquidIO"

+struct octeon_device_priv {
+	/** Tasklet structures for this device. */
+	struct tasklet_struct droq_tasklet;
+	unsigned long napi_mask;
+};
+
/** This structure is used by NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Bytes offset below assume worst-case of a 64-bit system.
......
@@ -167,7 +167,13 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev);
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

-int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx);
+int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
+			     u32 num_iqs, u32 num_oqs);
+
+irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
+				       void *dev);
+
+int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);

/**
 * \brief Register ethtool operations
......