Commit 79cdf17e authored by David S. Miller

Merge branch 'ionic-on-chip-desc'

Shannon Nelson says:

====================
ionic: on-chip descriptors

We start with a couple of house-keeping patches that were originally
presented for 'net', then add support for on-chip descriptor rings
for tx-push, as well as support for rx-push.

I have a patch for the ethtool userland utility that I can send out
once this has been accepted.

v4: added rx-push attributes to ethtool netlink
    converted CMB feature from using a priv-flag to using ethtool tx/rx-push

v3: edited commit message to describe interface-down limitation
    added warn msg if cmb_inuse alloc fails
    removed unnecessary clearing of phy_cmb_pages and cmb_npages
    changed cmb_rings_toggle to use cmb_inuse
    removed unrelated pci_set_drvdata()
    removed unnecessary (u32) cast
    added static inline func for writing CMB descriptors

v2: dropped the rx buffers patch
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8024edf3 40bc471d
......@@ -874,6 +874,7 @@ Kernel response contents:
``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split
``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE
``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode
``ETHTOOL_A_RINGS_RX_PUSH`` u8 flag of RX Push mode
==================================== ====== ===========================
``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
......@@ -883,8 +884,8 @@ separate buffers. The device configuration must make it possible to receive
full memory pages of data, for example because MTU is high enough or through
HW-GRO.
``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable descriptor fast
path to send packets. In ordinary path, driver fills descriptors in DRAM and
``ETHTOOL_A_RINGS_[RX|TX]_PUSH`` flag is used to enable descriptor fast
path to send or receive packets. In ordinary path, driver fills descriptors in DRAM and
notifies NIC hardware. In fast path, driver pushes descriptors to the device
through MMIO writes, thus reducing the latency. However, enabling this feature
may increase the CPU cost. Drivers may enforce additional per-packet
......@@ -906,6 +907,7 @@ Request contents:
``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring
``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE
``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode
``ETHTOOL_A_RINGS_RX_PUSH`` u8 flag of RX Push mode
==================================== ====== ===========================
Kernel checks that requested ring sizes do not exceed limits reported by
......
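To make the tx/rx-push description above concrete, here is a minimal, driver-agnostic sketch of the two descriptor paths. Everything in it is illustrative (my_desc, my_queue, post_tx_desc and the doorbell layout are hypothetical, not kernel API); the MMIO mirror step is what ionic_write_cmb_desc() later in this series does with memcpy_toio().

#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_desc {
	__le64 cmd;
	__le16 len;
	__le16 rsvd[3];
};

struct my_queue {
	struct my_desc *desc_base;	/* ordinary descriptor ring in host DRAM */
	void __iomem *cmb_base;		/* on-chip ring, ioremap_wc()'d, or NULL */
	u64 __iomem *doorbell;		/* assumes a 64-bit doorbell; real layouts differ */
};

static void post_tx_desc(struct my_queue *q, const struct my_desc *d, u16 idx)
{
	/* ordinary path: build the descriptor in host DRAM */
	memcpy(&q->desc_base[idx], d, sizeof(*d));

	/* push fast path: also copy it into device memory over MMIO so
	 * the NIC does not have to DMA-read it back from DRAM
	 */
	if (q->cmb_base)
		memcpy_toio(q->cmb_base + idx * sizeof(*d), d, sizeof(*d));

	/* notify the hardware as usual */
	writeq(idx, q->doorbell);
}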
......@@ -121,7 +121,7 @@ static void ionic_vf_dealloc_locked(struct ionic *ionic)
if (v->stats_pa) {
vfc.stats_pa = 0;
(void)ionic_set_vf_config(ionic, i, &vfc);
ionic_set_vf_config(ionic, i, &vfc);
dma_unmap_single(ionic->dev, v->stats_pa,
sizeof(v->stats), DMA_FROM_DEVICE);
v->stats_pa = 0;
......@@ -169,7 +169,7 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
/* ignore failures from older FW, we just won't get stats */
vfc.stats_pa = cpu_to_le64(v->stats_pa);
(void)ionic_set_vf_config(ionic, i, &vfc);
ionic_set_vf_config(ionic, i, &vfc);
}
out:
......@@ -352,6 +352,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_reset:
ionic_reset(ionic);
err_out_teardown:
ionic_dev_teardown(ionic);
pci_clear_master(pdev);
/* Don't fail the probe for these errors, keep
* the hw interface around for inspection
......@@ -390,6 +391,7 @@ static void ionic_remove(struct pci_dev *pdev)
ionic_port_reset(ionic);
ionic_reset(ionic);
ionic_dev_teardown(ionic);
pci_clear_master(pdev);
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
......
......@@ -92,6 +92,7 @@ int ionic_dev_setup(struct ionic *ionic)
unsigned int num_bars = ionic->num_bars;
struct ionic_dev *idev = &ionic->idev;
struct device *dev = ionic->dev;
int size;
u32 sig;
/* BAR0: dev_cmd and interrupts */
......@@ -133,9 +134,36 @@ int ionic_dev_setup(struct ionic *ionic)
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
/* BAR2: optional controller memory mapping */
bar++;
mutex_init(&idev->cmb_inuse_lock);
if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
idev->cmb_inuse = NULL;
return 0;
}
idev->phy_cmb_pages = bar->bus_addr;
idev->cmb_npages = bar->len / PAGE_SIZE;
size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
if (!idev->cmb_inuse)
dev_warn(dev, "No memory for CMB, disabling\n");
return 0;
}
void ionic_dev_teardown(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
kfree(idev->cmb_inuse);
idev->cmb_inuse = NULL;
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
mutex_destroy(&idev->cmb_inuse_lock);
}
/* Devcmd Interface */
bool ionic_is_fw_running(struct ionic_dev *idev)
{
......@@ -571,6 +599,33 @@ int ionic_db_page_num(struct ionic_lif *lif, int pid)
return (lif->hw_index * lif->dbid_count) + pid;
}
int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
{
struct ionic_dev *idev = &lif->ionic->idev;
int ret;
mutex_lock(&idev->cmb_inuse_lock);
ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
mutex_unlock(&idev->cmb_inuse_lock);
if (ret < 0)
return ret;
*pgid = ret;
*pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
return 0;
}
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
{
struct ionic_dev *idev = &lif->ionic->idev;
mutex_lock(&idev->cmb_inuse_lock);
bitmap_release_region(idev->cmb_inuse, pgid, order);
mutex_unlock(&idev->cmb_inuse_lock);
}
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size)
......@@ -679,6 +734,18 @@ void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
cur->desc = base + (i * q->desc_size);
}
void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
{
struct ionic_desc_info *cur;
unsigned int i;
q->cmb_base = base;
q->cmb_base_pa = base_pa;
for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
cur->cmb_desc = base + (i * q->desc_size);
}
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
struct ionic_desc_info *cur;
......
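For scale: assuming 4 KiB pages and the 8 MiB controller-memory BAR (IONIC_BAR2_SIZE, defined later in this series), cmb_npages works out to 2048, so the cmb_inuse bitmap costs BITS_TO_LONGS(2048) = 32 longs, i.e. 256 bytes on a 64-bit host, and ionic_get_cmb() carves naturally aligned power-of-two page runs out of it with bitmap_find_free_region().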
......@@ -159,6 +159,11 @@ struct ionic_dev {
struct ionic_intr __iomem *intr_ctrl;
u64 __iomem *intr_status;
struct mutex cmb_inuse_lock; /* for cmb_inuse */
unsigned long *cmb_inuse;
dma_addr_t phy_cmb_pages;
u32 cmb_npages;
u32 port_info_sz;
struct ionic_port_info *port_info;
dma_addr_t port_info_pa;
......@@ -203,6 +208,7 @@ struct ionic_desc_info {
struct ionic_rxq_desc *rxq_desc;
struct ionic_admin_cmd *adminq_desc;
};
void __iomem *cmb_desc;
union {
void *sg_desc;
struct ionic_txq_sg_desc *txq_sg_desc;
......@@ -241,12 +247,14 @@ struct ionic_queue {
struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq;
};
void __iomem *cmb_base;
union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_rxq_sg_desc *rxq_sgl;
};
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
unsigned int desc_size;
unsigned int sg_desc_size;
......@@ -309,6 +317,7 @@ static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
void ionic_init_devinfo(struct ionic *ionic);
int ionic_dev_setup(struct ionic *ionic);
void ionic_dev_teardown(struct ionic *ionic);
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
u8 ionic_dev_cmd_status(struct ionic_dev *idev);
......@@ -344,6 +353,9 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
int ionic_db_page_num(struct ionic_lif *lif, int pid);
int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size);
......@@ -360,6 +372,7 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid);
void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg);
......
......@@ -511,6 +511,87 @@ static int ionic_set_coalesce(struct net_device *netdev,
return 0;
}
static int ionic_validate_cmb_config(struct ionic_lif *lif,
struct ionic_queue_params *qparam)
{
int pages_have, pages_required = 0;
unsigned long sz;
if (!lif->ionic->idev.cmb_inuse &&
(qparam->cmb_tx || qparam->cmb_rx)) {
netdev_info(lif->netdev, "CMB rings are not supported on this device\n");
return -EOPNOTSUPP;
}
if (qparam->cmb_tx) {
if (!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_CMB)) {
netdev_info(lif->netdev,
"CMB rings for tx-push are not supported on this device\n");
return -EOPNOTSUPP;
}
sz = sizeof(struct ionic_txq_desc) * qparam->ntxq_descs * qparam->nxqs;
pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
}
if (qparam->cmb_rx) {
if (!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_CMB)) {
netdev_info(lif->netdev,
"CMB rings for rx-push are not supported on this device\n");
return -EOPNOTSUPP;
}
sz = sizeof(struct ionic_rxq_desc) * qparam->nrxq_descs * qparam->nxqs;
pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
}
pages_have = lif->ionic->bars[IONIC_PCI_BAR_CMB].len / PAGE_SIZE;
if (pages_required > pages_have) {
netdev_info(lif->netdev,
"Not enough CMB pages for number of queues and size of descriptor rings, need %d have %d",
pages_required, pages_have);
return -ENOMEM;
}
return pages_required;
}
static int ionic_cmb_rings_toggle(struct ionic_lif *lif, bool cmb_tx, bool cmb_rx)
{
struct ionic_queue_params qparam;
int pages_used;
if (netif_running(lif->netdev)) {
netdev_info(lif->netdev, "Please stop device to toggle CMB for tx/rx-push\n");
return -EBUSY;
}
ionic_init_queue_params(lif, &qparam);
qparam.cmb_tx = cmb_tx;
qparam.cmb_rx = cmb_rx;
pages_used = ionic_validate_cmb_config(lif, &qparam);
if (pages_used < 0)
return pages_used;
if (cmb_tx)
set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
else
clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
if (cmb_rx)
set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
else
clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
if (cmb_tx || cmb_rx)
netdev_info(lif->netdev, "Enabling CMB %s %s rings - %d pages\n",
cmb_tx ? "TX" : "", cmb_rx ? "RX" : "", pages_used);
else
netdev_info(lif->netdev, "Disabling CMB rings\n");
return 0;
}
static void ionic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
......@@ -522,6 +603,8 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->tx_pending = lif->ntxq_descs;
ring->rx_max_pending = IONIC_MAX_RX_DESC;
ring->rx_pending = lif->nrxq_descs;
kernel_ring->tx_push = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
kernel_ring->rx_push = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static int ionic_set_ringparam(struct net_device *netdev,
......@@ -551,9 +634,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
/* if nothing to do return success */
if (ring->tx_pending == lif->ntxq_descs &&
ring->rx_pending == lif->nrxq_descs)
ring->rx_pending == lif->nrxq_descs &&
kernel_ring->tx_push == test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) &&
kernel_ring->rx_push == test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
return 0;
qparam.ntxq_descs = ring->tx_pending;
qparam.nrxq_descs = ring->rx_pending;
qparam.cmb_tx = kernel_ring->tx_push;
qparam.cmb_rx = kernel_ring->rx_push;
err = ionic_validate_cmb_config(lif, &qparam);
if (err < 0)
return err;
if (kernel_ring->tx_push != test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) ||
kernel_ring->rx_push != test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) {
err = ionic_cmb_rings_toggle(lif, kernel_ring->tx_push,
kernel_ring->rx_push);
if (err < 0)
return err;
}
if (ring->tx_pending != lif->ntxq_descs)
netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
lif->ntxq_descs, ring->tx_pending);
......@@ -569,9 +671,6 @@ static int ionic_set_ringparam(struct net_device *netdev,
return 0;
}
qparam.ntxq_descs = ring->tx_pending;
qparam.nrxq_descs = ring->rx_pending;
mutex_lock(&lif->queue_lock);
err = ionic_reconfigure_queues(lif, &qparam);
mutex_unlock(&lif->queue_lock);
......@@ -638,7 +737,7 @@ static int ionic_set_channels(struct net_device *netdev,
lif->nxqs, ch->combined_count);
qparam.nxqs = ch->combined_count;
qparam.intr_split = 0;
qparam.intr_split = false;
} else {
max_cnt /= 2;
if (ch->rx_count > max_cnt)
......@@ -654,9 +753,13 @@ static int ionic_set_channels(struct net_device *netdev,
lif->nxqs, ch->rx_count);
qparam.nxqs = ch->rx_count;
qparam.intr_split = 1;
qparam.intr_split = true;
}
err = ionic_validate_cmb_config(lif, &qparam);
if (err < 0)
return err;
/* if we're not running, just set the values and return */
if (!netif_running(lif->netdev)) {
lif->nxqs = qparam.nxqs;
......@@ -965,6 +1068,8 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
.supported_ring_params = ETHTOOL_RING_USE_TX_PUSH |
ETHTOOL_RING_USE_RX_PUSH,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
......
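A rough worked example of the page budget in ionic_validate_cmb_config(), assuming 4 KiB pages and 16-byte Tx/Rx descriptors: 1024 descriptors per ring is 16 KiB, which ALIGN()s to 4 CMB pages per queue, so 16 queues with tx-push enabled need 64 pages; enabling rx-push at the same ring size doubles that to 128, comfortably within the 2048 pages an 8 MiB CMB BAR provides.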
......@@ -3073,9 +3073,10 @@ union ionic_adminq_comp {
#define IONIC_BARS_MAX 6
#define IONIC_PCI_BAR_DBELL 1
#define IONIC_PCI_BAR_CMB 2
/* BAR0 */
#define IONIC_BAR0_SIZE 0x8000
#define IONIC_BAR2_SIZE 0x800000
#define IONIC_BAR0_DEV_INFO_REGS_OFFSET 0x0000
#define IONIC_BAR0_DEV_CMD_REGS_OFFSET 0x0800
......
......@@ -26,9 +26,12 @@
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
[IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
[IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
[IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
[IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
[IONIC_QTYPE_RXQ] = 2, /* 0 = Base version with CQ+SG support
* 2 = ... with CMB rings
*/
[IONIC_QTYPE_TXQ] = 3, /* 0 = Base version with CQ+SG support
* 1 = ... with Tx SG version 1
* 3 = ... with CMB rings
*/
};
......@@ -149,7 +152,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
mutex_lock(&lif->queue_lock);
err = ionic_start_queues(lif);
if (err && err != -EBUSY) {
netdev_err(lif->netdev,
netdev_err(netdev,
"Failed to start queues: %d\n", err);
set_bit(IONIC_LIF_F_BROKEN, lif->state);
netif_carrier_off(lif->netdev);
......@@ -397,6 +400,15 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->q_base_pa = 0;
}
if (qcq->cmb_q_base) {
iounmap(qcq->cmb_q_base);
ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
qcq->cmb_pgid = 0;
qcq->cmb_order = 0;
qcq->cmb_q_base = NULL;
qcq->cmb_q_base_pa = 0;
}
if (qcq->cq_base) {
dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
qcq->cq_base = NULL;
......@@ -608,6 +620,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
ionic_cq_map(&new->cq, cq_base, cq_base_pa);
ionic_cq_bind(&new->cq, &new->q);
} else {
/* regular DMA q descriptors */
new->q_size = PAGE_SIZE + (num_descs * desc_size);
new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
GFP_KERNEL);
......@@ -620,6 +633,33 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
ionic_q_map(&new->q, q_base, q_base_pa);
if (flags & IONIC_QCQ_F_CMB_RINGS) {
/* on-chip CMB q descriptors */
new->cmb_q_size = num_descs * desc_size;
new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
new->cmb_order);
if (err) {
netdev_err(lif->netdev,
"Cannot allocate queue order %d from cmb: err %d\n",
new->cmb_order, err);
goto err_out_free_q;
}
new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
if (!new->cmb_q_base) {
netdev_err(lif->netdev, "Cannot map queue from cmb\n");
ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
err = -ENOMEM;
goto err_out_free_q;
}
new->cmb_q_base_pa -= idev->phy_cmb_pages;
ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
}
/* cq DMA descriptors */
new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
GFP_KERNEL);
......@@ -658,6 +698,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err_out_free_cq:
dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
if (new->cmb_q_base) {
iounmap(new->cmb_q_base);
ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
}
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
vfree(new->cq.info);
......@@ -739,6 +783,8 @@ static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
qcq->cq.tail_idx = 0;
qcq->cq.done_color = 1;
memset(qcq->q_base, 0, qcq->q_size);
if (qcq->cmb_q_base)
memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
memset(qcq->cq_base, 0, qcq->cq_size);
memset(qcq->sg_base, 0, qcq->sg_size);
}
......@@ -758,6 +804,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
.intr_index = cpu_to_le16(qcq->intr.index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
.ring_base = cpu_to_le64(q->base_pa),
......@@ -766,17 +813,19 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.features = cpu_to_le64(q->features),
},
};
unsigned int intr_index;
int err;
intr_index = qcq->intr.index;
ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
}
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
......@@ -834,6 +883,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
};
int err;
if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
}
dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
......@@ -2010,8 +2064,13 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
flags |= IONIC_QCQ_F_CMB_RINGS;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
flags |= IONIC_QCQ_F_INTR;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
......@@ -2032,6 +2091,9 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
flags |= IONIC_QCQ_F_CMB_RINGS;
num_desc = lif->nrxq_descs;
desc_sz = sizeof(struct ionic_rxq_desc);
comp_sz = sizeof(struct ionic_rxq_comp);
......@@ -2507,7 +2569,7 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
up_write(&ionic->vf_op_lock);
......@@ -2707,6 +2769,55 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_stats = ionic_get_vf_stats,
};
static int ionic_cmb_reconfig(struct ionic_lif *lif,
struct ionic_queue_params *qparam)
{
struct ionic_queue_params start_qparams;
int err = 0;
/* When changing CMB queue parameters, we're using limited
* on-device memory and don't have extra memory to use for
* duplicate allocations, so we free it all first then
* re-allocate with the new parameters.
*/
/* Checkpoint for possible unwind */
ionic_init_queue_params(lif, &start_qparams);
/* Stop and free the queues */
ionic_stop_queues_reconfig(lif);
ionic_txrx_free(lif);
/* Set up new qparams */
ionic_set_queue_params(lif, qparam);
if (netif_running(lif->netdev)) {
/* Alloc and start the new configuration */
err = ionic_txrx_alloc(lif);
if (err) {
dev_warn(lif->ionic->dev,
"CMB reconfig failed, restoring values: %d\n", err);
/* Back out the changes */
ionic_set_queue_params(lif, &start_qparams);
err = ionic_txrx_alloc(lif);
if (err) {
dev_err(lif->ionic->dev,
"CMB restore failed: %d\n", err);
goto errout;
}
}
ionic_start_queues_reconfig(lif);
} else {
/* This was detached in ionic_stop_queues_reconfig() */
netif_device_attach(lif->netdev);
}
errout:
return err;
}
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
/* only swapping the queues, not the napi, flags, or other stuff */
......@@ -2749,6 +2860,11 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
unsigned int flags, i;
int err = 0;
/* Are we changing q params while CMB is on */
if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) ||
(test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx))
return ionic_cmb_reconfig(lif, qparam);
/* allocate temporary qcq arrays to hold new queue structs */
if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
......@@ -2785,6 +2901,16 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
for (i = 0; i < qparam->nxqs; i++) {
/* If missing, short placeholder qcq needed for swap */
if (!lif->txqcqs[i]) {
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
}
flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
......@@ -2804,6 +2930,16 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
comp_sz *= 2;
for (i = 0; i < qparam->nxqs; i++) {
/* If missing, short placeholder qcq needed for swap */
if (!lif->rxqcqs[i]) {
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
}
flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
......@@ -2853,9 +2989,14 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
}
/* clear existing interrupt assignments */
/* Clear existing interrupt assignments. We check for NULL here
* because we're checking the whole array for potential qcqs, not
* just those qcqs that have just been set up.
*/
for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
if (lif->txqcqs[i])
ionic_qcq_intr_free(lif, lif->txqcqs[i]);
if (lif->rxqcqs[i])
ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
}
......
......@@ -59,6 +59,7 @@ struct ionic_rx_stats {
#define IONIC_QCQ_F_TX_STATS BIT(3)
#define IONIC_QCQ_F_RX_STATS BIT(4)
#define IONIC_QCQ_F_NOTIFYQ BIT(5)
#define IONIC_QCQ_F_CMB_RINGS BIT(6)
struct ionic_qcq {
void *q_base;
......@@ -70,6 +71,11 @@ struct ionic_qcq {
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
void __iomem *cmb_q_base;
phys_addr_t cmb_q_base_pa;
u32 cmb_q_size;
u32 cmb_pgid;
u32 cmb_order;
struct dim dim;
struct ionic_queue q;
struct ionic_cq cq;
......@@ -142,6 +148,8 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_BROKEN,
IONIC_LIF_F_TX_DIM_INTR,
IONIC_LIF_F_RX_DIM_INTR,
IONIC_LIF_F_CMB_TX_RINGS,
IONIC_LIF_F_CMB_RX_RINGS,
/* leave this as last */
IONIC_LIF_F_STATE_SIZE
......@@ -245,8 +253,10 @@ struct ionic_queue_params {
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
unsigned int intr_split;
u64 rxq_features;
bool intr_split;
bool cmb_tx;
bool cmb_rx;
};
static inline void ionic_init_queue_params(struct ionic_lif *lif,
......@@ -255,8 +265,34 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
qparam->nxqs = lif->nxqs;
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->rxq_features = lif->rxq_features;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static inline void ionic_set_queue_params(struct ionic_lif *lif,
struct ionic_queue_params *qparam)
{
lif->nxqs = qparam->nxqs;
lif->ntxq_descs = qparam->ntxq_descs;
lif->nrxq_descs = qparam->nrxq_descs;
lif->rxq_features = qparam->rxq_features;
if (qparam->intr_split)
set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
else
clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
if (qparam->cmb_tx)
set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
else
clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
if (qparam->cmb_rx)
set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
else
clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
......
......@@ -388,7 +388,7 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
break;
/* force a check of FW status and break out if FW reset */
(void)ionic_heartbeat_check(lif->ionic);
ionic_heartbeat_check(lif->ionic);
if ((test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
!lif->ionic->idev.fw_status_ready) ||
test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
......@@ -676,7 +676,7 @@ int ionic_port_init(struct ionic *ionic)
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP);
(void)ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
mutex_unlock(&ionic->dev_cmd_lock);
if (err) {
......
......@@ -268,7 +268,7 @@ static u64 ionic_hwstamp_read(struct ionic *ionic,
u32 tick_high_before, tick_high, tick_low;
/* read and discard low part to defeat hw staging of high part */
(void)ioread32(&ionic->idev.hwstamp_regs->tick_low);
ioread32(&ionic->idev.hwstamp_regs->tick_low);
tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high);
......
......@@ -604,14 +604,14 @@ void ionic_rx_filter_sync(struct ionic_lif *lif)
* they can clear room for some new filters
*/
list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
(void)ionic_lif_filter_del(lif, &sync_item->f.cmd);
ionic_lif_filter_del(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
}
list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
(void)ionic_lif_filter_add(lif, &sync_item->f.cmd);
ionic_lif_filter_add(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
......
......@@ -402,6 +402,14 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
void __iomem *cmb_desc,
void *desc)
{
if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
memcpy_toio(cmb_desc, desc, q->desc_size);
}
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
......@@ -480,6 +488,8 @@ void ionic_rx_fill(struct ionic_queue *q)
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->nbufs = nfrags;
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
ionic_rxq_post(q, false, ionic_rx_clean, NULL);
}
......@@ -943,7 +953,8 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
return 0;
}
static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
static void ionic_tx_tso_post(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
......@@ -951,6 +962,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
struct ionic_txq_desc *desc = desc_info->desc;
u8 flags = 0;
u64 cmd;
......@@ -966,6 +978,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
desc->hdr_len = cpu_to_le16(hdrlen);
desc->mss = cpu_to_le16(mss);
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
if (start) {
skb_tx_timestamp(skb);
if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
......@@ -1084,7 +1098,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
ionic_tx_tso_post(q, desc, skb,
ionic_tx_tso_post(q, desc_info, skb,
desc_addr, desc_nsge, desc_len,
hdrlen, mss, outer_csum, vlan_tci, has_vlan,
start, done);
......@@ -1133,6 +1147,8 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
else
......@@ -1170,6 +1186,8 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = 0;
desc->csum_offset = 0;
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
stats->csum_none++;
}
......
......@@ -73,12 +73,14 @@ enum {
* @rx_buf_len: Current length of buffers on the rx ring.
* @tcp_data_split: Scatter packet headers and data to separate buffers
* @tx_push: The flag of tx push mode
* @rx_push: The flag of rx push mode
* @cqe_size: Size of TX/RX completion queue event
*/
struct kernel_ethtool_ringparam {
u32 rx_buf_len;
u8 tcp_data_split;
u8 tx_push;
u8 rx_push;
u32 cqe_size;
};
......@@ -87,11 +89,13 @@ struct kernel_ethtool_ringparam {
* @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
* @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
* @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
* @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
ETHTOOL_RING_USE_TX_PUSH = BIT(2),
ETHTOOL_RING_USE_RX_PUSH = BIT(3),
};
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
......
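For drivers other than ionic, the opt-in contract for the new attribute is small: advertise ETHTOOL_RING_USE_RX_PUSH in supported_ring_params (otherwise the core rejects ETHTOOL_A_RINGS_RX_PUSH with -EOPNOTSUPP, as the rings.c hunk below shows), report the current state from get_ringparam(), and act on kernel_ring->rx_push in set_ringparam(). A minimal sketch with hypothetical names (foo_priv and foo_apply_rx_push are stand-ins, not real code):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
	u32 rx_ring_size;
	bool rx_push_enabled;
};

/* stand-in for whatever queue reprogramming the hardware needs */
static int foo_apply_rx_push(struct foo_priv *priv, bool enable)
{
	priv->rx_push_enabled = enable;
	return 0;
}

static void foo_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	ring->rx_pending = priv->rx_ring_size;
	/* report whether the RX push fast path is currently on */
	kernel_ring->rx_push = priv->rx_push_enabled;
}

static int foo_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	return foo_apply_rx_push(priv, kernel_ring->rx_push);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_ring_params	= ETHTOOL_RING_USE_RX_PUSH,
	.get_ringparam		= foo_get_ringparam,
	.set_ringparam		= foo_set_ringparam,
};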
......@@ -356,6 +356,7 @@ enum {
ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */
ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */
ETHTOOL_A_RINGS_TX_PUSH, /* u8 */
ETHTOOL_A_RINGS_RX_PUSH, /* u8 */
/* add new constants above here */
__ETHTOOL_A_RINGS_CNT,
......
......@@ -413,7 +413,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH + 1];
extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_PUSH + 1];
extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
......
......@@ -56,7 +56,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */
nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */
nla_total_size(sizeof(u32) + /* _RINGS_CQE_SIZE */
nla_total_size(sizeof(u8))); /* _RINGS_TX_PUSH */
nla_total_size(sizeof(u8)) + /* _RINGS_TX_PUSH */
nla_total_size(sizeof(u8))); /* _RINGS_RX_PUSH */
}
static int rings_fill_reply(struct sk_buff *skb,
......@@ -96,7 +97,8 @@ static int rings_fill_reply(struct sk_buff *skb,
kr->tcp_data_split))) ||
(kr->cqe_size &&
(nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push))
return -EMSGSIZE;
return 0;
......@@ -114,6 +116,7 @@ const struct nla_policy ethnl_rings_set_policy[] = {
[ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1),
[ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1),
[ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
[ETHTOOL_A_RINGS_RX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
};
static int
......@@ -147,6 +150,14 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info,
return -EOPNOTSUPP;
}
if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
!(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
NL_SET_ERR_MSG_ATTR(info->extack,
tb[ETHTOOL_A_RINGS_RX_PUSH],
"setting rx push not supported");
return -EOPNOTSUPP;
}
return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}
......@@ -176,6 +187,8 @@ ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
ethnl_update_u8(&kernel_ringparam.tx_push,
tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
ethnl_update_u8(&kernel_ringparam.rx_push,
tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
if (!mod)
return 0;
......