Commit 79cdf17e authored by David S. Miller

Merge branch 'ionic-on-chip-desc'

Shannon Nelson says:

====================
ionic: on-chip descriptors

We start with a couple of house-keeping patches that were originally
presented for 'net', then we add support for on-chip descriptor rings
for tx-push, as well as support for rx-push.

I have a patch for the ethtool userland utility that I can send out
once this has been accepted.

v4: added rx-push attributes to ethtool netlink
    converted CMB feature from using a priv-flag to using ethtool tx/rx-push

v3: edited commit message to describe interface-down limitation
    added warn msg if cmb_inuse alloc fails
    removed unnecessary clearing of phy_cmb_pages and cmb_npages
    changed cmb_rings_toggle to use cmb_inuse
    removed unrelated pci_set_drvdata()
    removed unnecessary (u32) cast
    added static inline func for writing CMB descriptors

v2: dropped the rx buffers patch
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8024edf3 40bc471d
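Before the diffs, a sketch of what tx/rx-push means mechanically: in the
ordinary path the driver writes each descriptor into a DMA-coherent ring in
host DRAM and rings a doorbell so the NIC can DMA-fetch it; with push, the
descriptor bytes are copied straight into on-chip controller memory (CMB)
over MMIO, removing the descriptor fetch from the latency path at some CPU
cost. A minimal illustration, not from this series; example_queue and
example_post_desc are hypothetical names:

#include <linux/io.h>           /* memcpy_toio(), iowrite16() */
#include <linux/types.h>

struct example_queue {
        bool push_enabled;              /* on-chip (CMB) ring in use? */
        void __iomem *cmb_base;         /* BAR-mapped on-chip descriptor ring */
        void *host_base;                /* DMA-coherent descriptor ring in DRAM */
        u16 __iomem *doorbell;          /* producer-index doorbell register */
};

static void example_post_desc(struct example_queue *q, const void *desc,
                              size_t desc_size, u16 idx)
{
        if (q->push_enabled)
                /* push path: descriptor lands in device memory directly */
                memcpy_toio(q->cmb_base + idx * desc_size, desc, desc_size);
        else
                /* ordinary path: stage in DRAM for the device to fetch */
                memcpy(q->host_base + idx * desc_size, desc, desc_size);

        /* either way, tell the device the producer index moved */
        iowrite16(idx, q->doorbell);
}

In the ionic patches below this split shows up as ionic_write_cmb_desc() and
the IONIC_QCQ_F_CMB_RINGS flag.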
Documentation/networking/ethtool-netlink.rst
@@ -874,6 +874,7 @@ Kernel response contents:
``ETHTOOL_A_RINGS_TCP_DATA_SPLIT``   u8     TCP header / data split
``ETHTOOL_A_RINGS_CQE_SIZE``         u32    Size of TX/RX CQE
``ETHTOOL_A_RINGS_TX_PUSH``          u8     flag of TX Push mode
``ETHTOOL_A_RINGS_RX_PUSH``          u8     flag of RX Push mode
==================================== ====== ===========================
``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
@@ -883,8 +884,8 @@ separate buffers. The device configuration must make it possible to receive
full memory pages of data, for example because MTU is high enough or through
HW-GRO.
-``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable descriptor fast
-path to send packets. In ordinary path, driver fills descriptors in DRAM and
``ETHTOOL_A_RINGS_[RX|TX]_PUSH`` flag is used to enable descriptor fast
path to send or receive packets. In ordinary path, driver fills descriptors in DRAM and
notifies NIC hardware. In fast path, driver pushes descriptors to the device
through MMIO writes, thus reducing the latency. However, enabling this feature
may increase the CPU cost. Drivers may enforce additional per-packet
@@ -906,6 +907,7 @@ Request contents:
``ETHTOOL_A_RINGS_RX_BUF_LEN``       u32    size of buffers on the ring
``ETHTOOL_A_RINGS_CQE_SIZE``         u32    Size of TX/RX CQE
``ETHTOOL_A_RINGS_TX_PUSH``          u8     flag of TX Push mode
``ETHTOOL_A_RINGS_RX_PUSH``          u8     flag of RX Push mode
==================================== ====== ===========================
Kernel checks that requested ring sizes do not exceed limits reported by
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -121,7 +121,7 @@ static void ionic_vf_dealloc_locked(struct ionic *ionic)
if (v->stats_pa) {
vfc.stats_pa = 0;
-(void)ionic_set_vf_config(ionic, i, &vfc);
ionic_set_vf_config(ionic, i, &vfc);
dma_unmap_single(ionic->dev, v->stats_pa,
sizeof(v->stats), DMA_FROM_DEVICE);
v->stats_pa = 0;
@@ -169,7 +169,7 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
/* ignore failures from older FW, we just won't get stats */
vfc.stats_pa = cpu_to_le64(v->stats_pa);
-(void)ionic_set_vf_config(ionic, i, &vfc);
ionic_set_vf_config(ionic, i, &vfc);
}
out:
@@ -352,6 +352,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_reset:
ionic_reset(ionic);
err_out_teardown:
ionic_dev_teardown(ionic);
pci_clear_master(pdev);
/* Don't fail the probe for these errors, keep
* the hw interface around for inspection
@@ -390,6 +391,7 @@ static void ionic_remove(struct pci_dev *pdev)
ionic_port_reset(ionic);
ionic_reset(ionic);
ionic_dev_teardown(ionic);
pci_clear_master(pdev);
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -92,6 +92,7 @@ int ionic_dev_setup(struct ionic *ionic)
unsigned int num_bars = ionic->num_bars;
struct ionic_dev *idev = &ionic->idev;
struct device *dev = ionic->dev;
int size;
u32 sig;
/* BAR0: dev_cmd and interrupts */
@@ -133,9 +134,36 @@ int ionic_dev_setup(struct ionic *ionic)
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
/* BAR2: optional controller memory mapping */
bar++;
mutex_init(&idev->cmb_inuse_lock);
if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
idev->cmb_inuse = NULL;
return 0;
}
idev->phy_cmb_pages = bar->bus_addr;
idev->cmb_npages = bar->len / PAGE_SIZE;
size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
if (!idev->cmb_inuse)
dev_warn(dev, "No memory for CMB, disabling\n");
return 0;
}
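As an aside, the BITS_TO_LONGS() sizing plus kzalloc() above is exactly what
the bitmap helpers wrap; an equivalent formulation (for reference only, not
what the patch uses; the helper name is hypothetical) would be:

#include <linux/bitmap.h>       /* bitmap_zalloc(), bitmap_free() */
#include <linux/gfp.h>
#include <linux/types.h>

/* Equivalent to kzalloc(BITS_TO_LONGS(npages) * sizeof(long), GFP_KERNEL);
 * the matching release is bitmap_free() instead of kfree().
 */
static unsigned long *example_cmb_inuse_alloc(u32 npages)
{
        return bitmap_zalloc(npages, GFP_KERNEL);
}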
void ionic_dev_teardown(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
kfree(idev->cmb_inuse);
idev->cmb_inuse = NULL;
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
mutex_destroy(&idev->cmb_inuse_lock);
}
/* Devcmd Interface */
bool ionic_is_fw_running(struct ionic_dev *idev)
{
@@ -571,6 +599,33 @@ int ionic_db_page_num(struct ionic_lif *lif, int pid)
return (lif->hw_index * lif->dbid_count) + pid;
}
int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
{
struct ionic_dev *idev = &lif->ionic->idev;
int ret;
mutex_lock(&idev->cmb_inuse_lock);
ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
mutex_unlock(&idev->cmb_inuse_lock);
if (ret < 0)
return ret;
*pgid = ret;
*pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
return 0;
}
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
{
struct ionic_dev *idev = &lif->ionic->idev;
mutex_lock(&idev->cmb_inuse_lock);
bitmap_release_region(idev->cmb_inuse, pgid, order);
mutex_unlock(&idev->cmb_inuse_lock);
}
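Taken together, these two helpers act as a page-granular allocator over the
CMB BAR: ionic_get_cmb() reserves a naturally aligned run of 2^order pages
(bitmap_find_free_region() guarantees the alignment) and returns the page id
plus its bus address, and ionic_put_cmb() gives the run back. A sketch of the
expected call pattern, roughly what the queue-allocation path later in this
series does; the helper name is hypothetical, error handling is trimmed, and
it assumes the ring size is a whole multiple of PAGE_SIZE:

#include <linux/io.h>           /* ioremap(), iounmap() */
#include <linux/log2.h>         /* order_base_2() */

static int example_alloc_cmb_ring(struct ionic_lif *lif, struct ionic_qcq *qcq,
                                  unsigned int num_descs, size_t desc_size)
{
        int err;

        qcq->cmb_q_size = num_descs * desc_size;
        /* smallest power-of-two page count that covers the ring */
        qcq->cmb_order = order_base_2(qcq->cmb_q_size / PAGE_SIZE);

        err = ionic_get_cmb(lif, &qcq->cmb_pgid, &qcq->cmb_q_base_pa,
                            qcq->cmb_order);
        if (err)
                return err;

        /* map the BAR pages so the CPU can memcpy_toio() descriptors in */
        qcq->cmb_q_base = ioremap(qcq->cmb_q_base_pa, qcq->cmb_q_size);
        if (!qcq->cmb_q_base) {
                ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
                return -ENOMEM;
        }

        return 0;
}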
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size)
@@ -679,6 +734,18 @@ void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
cur->desc = base + (i * q->desc_size);
}
void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
{
struct ionic_desc_info *cur;
unsigned int i;
q->cmb_base = base;
q->cmb_base_pa = base_pa;
for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
cur->cmb_desc = base + (i * q->desc_size);
}
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
struct ionic_desc_info *cur;
drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -159,6 +159,11 @@ struct ionic_dev {
struct ionic_intr __iomem *intr_ctrl;
u64 __iomem *intr_status;
struct mutex cmb_inuse_lock; /* for cmb_inuse */
unsigned long *cmb_inuse;
dma_addr_t phy_cmb_pages;
u32 cmb_npages;
u32 port_info_sz;
struct ionic_port_info *port_info;
dma_addr_t port_info_pa;
@@ -203,6 +208,7 @@ struct ionic_desc_info {
struct ionic_rxq_desc *rxq_desc;
struct ionic_admin_cmd *adminq_desc;
};
void __iomem *cmb_desc;
union {
void *sg_desc;
struct ionic_txq_sg_desc *txq_sg_desc;
@@ -241,12 +247,14 @@ struct ionic_queue {
struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq;
};
void __iomem *cmb_base;
union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_rxq_sg_desc *rxq_sgl;
};
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
unsigned int desc_size;
unsigned int sg_desc_size;
@@ -309,6 +317,7 @@ static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
void ionic_init_devinfo(struct ionic *ionic);
int ionic_dev_setup(struct ionic *ionic);
void ionic_dev_teardown(struct ionic *ionic);
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
u8 ionic_dev_cmd_status(struct ionic_dev *idev);
@@ -344,6 +353,9 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
int ionic_db_page_num(struct ionic_lif *lif, int pid);
int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size);
@@ -360,6 +372,7 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid);
void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg);
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -511,6 +511,87 @@ static int ionic_set_coalesce(struct net_device *netdev,
return 0;
}
static int ionic_validate_cmb_config(struct ionic_lif *lif,
struct ionic_queue_params *qparam)
{
int pages_have, pages_required = 0;
unsigned long sz;
if (!lif->ionic->idev.cmb_inuse &&
(qparam->cmb_tx || qparam->cmb_rx)) {
netdev_info(lif->netdev, "CMB rings are not supported on this device\n");
return -EOPNOTSUPP;
}
if (qparam->cmb_tx) {
if (!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_CMB)) {
netdev_info(lif->netdev,
"CMB rings for tx-push are not supported on this device\n");
return -EOPNOTSUPP;
}
sz = sizeof(struct ionic_txq_desc) * qparam->ntxq_descs * qparam->nxqs;
pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
}
if (qparam->cmb_rx) {
if (!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_CMB)) {
netdev_info(lif->netdev,
"CMB rings for rx-push are not supported on this device\n");
return -EOPNOTSUPP;
}
sz = sizeof(struct ionic_rxq_desc) * qparam->nrxq_descs * qparam->nxqs;
pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
}
pages_have = lif->ionic->bars[IONIC_PCI_BAR_CMB].len / PAGE_SIZE;
if (pages_required > pages_have) {
netdev_info(lif->netdev,
"Not enough CMB pages for number of queues and size of descriptor rings, need %d have %d",
pages_required, pages_have);
return -ENOMEM;
}
return pages_required;
}
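To make the page accounting concrete, a worked example, assuming 4 KiB pages
and a 16-byte struct ionic_txq_desc (verify sizeof() on your build):

/* tx-push on 8 queues of 512 descriptors each:
 *   sz             = 16 B * 512 * 8            = 65536 B (64 KiB)
 *   pages_required = ALIGN(65536, 4096) / 4096 = 16 pages
 *
 * pages_have comes from the CMB BAR length; with the 8 MiB
 * IONIC_BAR2_SIZE (0x800000) that is 2048 pages, so this fits easily.
 */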
static int ionic_cmb_rings_toggle(struct ionic_lif *lif, bool cmb_tx, bool cmb_rx)
{
struct ionic_queue_params qparam;
int pages_used;
if (netif_running(lif->netdev)) {
netdev_info(lif->netdev, "Please stop device to toggle CMB for tx/rx-push\n");
return -EBUSY;
}
ionic_init_queue_params(lif, &qparam);
qparam.cmb_tx = cmb_tx;
qparam.cmb_rx = cmb_rx;
pages_used = ionic_validate_cmb_config(lif, &qparam);
if (pages_used < 0)
return pages_used;
if (cmb_tx)
set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
else
clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
if (cmb_rx)
set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
else
clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
if (cmb_tx || cmb_rx)
netdev_info(lif->netdev, "Enabling CMB %s %s rings - %d pages\n",
cmb_tx ? "TX" : "", cmb_rx ? "RX" : "", pages_used);
else
netdev_info(lif->netdev, "Disabling CMB rings\n");
return 0;
}
static void ionic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -522,6 +603,8 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->tx_pending = lif->ntxq_descs;
ring->rx_max_pending = IONIC_MAX_RX_DESC;
ring->rx_pending = lif->nrxq_descs;
kernel_ring->tx_push = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
kernel_ring->rx_push = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static int ionic_set_ringparam(struct net_device *netdev,
@@ -551,9 +634,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
/* if nothing to do return success */
if (ring->tx_pending == lif->ntxq_descs &&
-ring->rx_pending == lif->nrxq_descs)
ring->rx_pending == lif->nrxq_descs &&
kernel_ring->tx_push == test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) &&
kernel_ring->rx_push == test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
return 0;
qparam.ntxq_descs = ring->tx_pending;
qparam.nrxq_descs = ring->rx_pending;
qparam.cmb_tx = kernel_ring->tx_push;
qparam.cmb_rx = kernel_ring->rx_push;
err = ionic_validate_cmb_config(lif, &qparam);
if (err < 0)
return err;
if (kernel_ring->tx_push != test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) ||
kernel_ring->rx_push != test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) {
err = ionic_cmb_rings_toggle(lif, kernel_ring->tx_push,
kernel_ring->rx_push);
if (err < 0)
return err;
}
if (ring->tx_pending != lif->ntxq_descs)
netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
lif->ntxq_descs, ring->tx_pending);
@@ -569,9 +671,6 @@ static int ionic_set_ringparam(struct net_device *netdev,
return 0;
}
-qparam.ntxq_descs = ring->tx_pending;
-qparam.nrxq_descs = ring->rx_pending;
mutex_lock(&lif->queue_lock);
err = ionic_reconfigure_queues(lif, &qparam);
mutex_unlock(&lif->queue_lock);
@@ -638,7 +737,7 @@ static int ionic_set_channels(struct net_device *netdev,
lif->nxqs, ch->combined_count);
qparam.nxqs = ch->combined_count;
-qparam.intr_split = 0;
qparam.intr_split = false;
} else {
max_cnt /= 2;
if (ch->rx_count > max_cnt)
@@ -654,9 +753,13 @@ static int ionic_set_channels(struct net_device *netdev,
lif->nxqs, ch->rx_count);
qparam.nxqs = ch->rx_count;
-qparam.intr_split = 1;
qparam.intr_split = true;
}
err = ionic_validate_cmb_config(lif, &qparam);
if (err < 0)
return err;
/* if we're not running, just set the values and return */
if (!netif_running(lif->netdev)) {
lif->nxqs = qparam.nxqs;
@@ -965,6 +1068,8 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
.supported_ring_params = ETHTOOL_RING_USE_TX_PUSH |
ETHTOOL_RING_USE_RX_PUSH,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -3073,9 +3073,10 @@ union ionic_adminq_comp {
#define IONIC_BARS_MAX 6
#define IONIC_PCI_BAR_DBELL 1
#define IONIC_PCI_BAR_CMB 2
/* BAR0 */
#define IONIC_BAR0_SIZE 0x8000
#define IONIC_BAR2_SIZE 0x800000
#define IONIC_BAR0_DEV_INFO_REGS_OFFSET 0x0000
#define IONIC_BAR0_DEV_CMD_REGS_OFFSET 0x0800
drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -59,6 +59,7 @@ struct ionic_rx_stats {
#define IONIC_QCQ_F_TX_STATS BIT(3)
#define IONIC_QCQ_F_RX_STATS BIT(4)
#define IONIC_QCQ_F_NOTIFYQ BIT(5)
#define IONIC_QCQ_F_CMB_RINGS BIT(6)
struct ionic_qcq {
void *q_base;
@@ -70,6 +71,11 @@ struct ionic_qcq {
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
void __iomem *cmb_q_base;
phys_addr_t cmb_q_base_pa;
u32 cmb_q_size;
u32 cmb_pgid;
u32 cmb_order;
struct dim dim;
struct ionic_queue q;
struct ionic_cq cq;
@@ -142,6 +148,8 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_BROKEN,
IONIC_LIF_F_TX_DIM_INTR,
IONIC_LIF_F_RX_DIM_INTR,
IONIC_LIF_F_CMB_TX_RINGS,
IONIC_LIF_F_CMB_RX_RINGS,
/* leave this as last */
IONIC_LIF_F_STATE_SIZE
@@ -245,8 +253,10 @@ struct ionic_queue_params {
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
-unsigned int intr_split;
u64 rxq_features;
bool intr_split;
bool cmb_tx;
bool cmb_rx;
};
static inline void ionic_init_queue_params(struct ionic_lif *lif,
@@ -255,8 +265,34 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
qparam->nxqs = lif->nxqs;
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
-qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->rxq_features = lif->rxq_features;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static inline void ionic_set_queue_params(struct ionic_lif *lif,
struct ionic_queue_params *qparam)
{
lif->nxqs = qparam->nxqs;
lif->ntxq_descs = qparam->ntxq_descs;
lif->nrxq_descs = qparam->nrxq_descs;
lif->rxq_features = qparam->rxq_features;
if (qparam->intr_split)
set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
else
clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
if (qparam->cmb_tx)
set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
else
clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
if (qparam->cmb_rx)
set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
else
clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -388,7 +388,7 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
break;
/* force a check of FW status and break out if FW reset */
-(void)ionic_heartbeat_check(lif->ionic);
ionic_heartbeat_check(lif->ionic);
if ((test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
!lif->ionic->idev.fw_status_ready) ||
test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
@@ -676,7 +676,7 @@ int ionic_port_init(struct ionic *ionic)
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP);
-(void)ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
mutex_unlock(&ionic->dev_cmd_lock);
if (err) {
drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -268,7 +268,7 @@ static u64 ionic_hwstamp_read(struct ionic *ionic,
u32 tick_high_before, tick_high, tick_low;
/* read and discard low part to defeat hw staging of high part */
-(void)ioread32(&ionic->idev.hwstamp_regs->tick_low);
ioread32(&ionic->idev.hwstamp_regs->tick_low);
tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high);
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -604,14 +604,14 @@ void ionic_rx_filter_sync(struct ionic_lif *lif)
* they can clear room for some new filters
*/
list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
-(void)ionic_lif_filter_del(lif, &sync_item->f.cmd);
ionic_lif_filter_del(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
}
list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
-(void)ionic_lif_filter_add(lif, &sync_item->f.cmd);
ionic_lif_filter_add(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -402,6 +402,14 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
void __iomem *cmb_desc,
void *desc)
{
if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
memcpy_toio(cmb_desc, desc, q->desc_size);
}
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
@@ -480,6 +488,8 @@ void ionic_rx_fill(struct ionic_queue *q)
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->nbufs = nfrags;
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
ionic_rxq_post(q, false, ionic_rx_clean, NULL);
}
@@ -943,7 +953,8 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
return 0;
}
-static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
static void ionic_tx_tso_post(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
@@ -951,6 +962,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
struct ionic_txq_desc *desc = desc_info->desc;
u8 flags = 0;
u64 cmd;
@@ -966,6 +978,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
desc->hdr_len = cpu_to_le16(hdrlen);
desc->mss = cpu_to_le16(mss);
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
if (start) {
skb_tx_timestamp(skb);
if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
@@ -1084,7 +1098,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
-ionic_tx_tso_post(q, desc, skb,
ionic_tx_tso_post(q, desc_info, skb,
desc_addr, desc_nsge, desc_len,
hdrlen, mss, outer_csum, vlan_tci, has_vlan,
start, done);
@@ -1133,6 +1147,8 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
else
@@ -1170,6 +1186,8 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = 0;
desc->csum_offset = 0;
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
stats->csum_none++;
}
include/linux/ethtool.h
@@ -73,12 +73,14 @@ enum {
* @rx_buf_len: Current length of buffers on the rx ring.
* @tcp_data_split: Scatter packet headers and data to separate buffers
* @tx_push: The flag of tx push mode
* @rx_push: The flag of rx push mode
* @cqe_size: Size of TX/RX completion queue event
*/
struct kernel_ethtool_ringparam {
u32 rx_buf_len;
u8 tcp_data_split;
u8 tx_push;
u8 rx_push;
u32 cqe_size;
};
@@ -87,11 +89,13 @@ struct kernel_ethtool_ringparam {
* @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
* @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
* @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
* @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
ETHTOOL_RING_USE_TX_PUSH = BIT(2),
ETHTOOL_RING_USE_RX_PUSH = BIT(3),
};
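For a driver, the contract around these bits is: advertise them in
ethtool_ops.supported_ring_params (otherwise ethnl_set_rings_validate()
rejects the attribute, as in the rings.c hunk below), report current state in
.get_ringparam(), and apply requests in .set_ringparam(). A condensed sketch
with a hypothetical foo driver; ionic's real version is in the
ionic_ethtool.c hunk above:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
        bool rx_push;                   /* applied on next queue (re)build */
};

static void foo_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ring,
                              struct kernel_ethtool_ringparam *kring,
                              struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(dev);

        /* reported to userspace as ETHTOOL_A_RINGS_RX_PUSH */
        kring->rx_push = priv->rx_push;
}

static int foo_set_ringparam(struct net_device *dev,
                             struct ethtool_ringparam *ring,
                             struct kernel_ethtool_ringparam *kring,
                             struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(dev);

        priv->rx_push = !!kring->rx_push;
        return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
        /* without this bit the core returns -EOPNOTSUPP for RX_PUSH */
        .supported_ring_params  = ETHTOOL_RING_USE_RX_PUSH,
        .get_ringparam          = foo_get_ringparam,
        .set_ringparam          = foo_set_ringparam,
};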
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
include/uapi/linux/ethtool_netlink.h
@@ -356,6 +356,7 @@ enum {
ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */
ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */
ETHTOOL_A_RINGS_TX_PUSH, /* u8 */
ETHTOOL_A_RINGS_RX_PUSH, /* u8 */
/* add new constants above here */
__ETHTOOL_A_RINGS_CNT,
net/ethtool/netlink.h
@@ -413,7 +413,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
-extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH + 1];
extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_PUSH + 1];
extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
net/ethtool/rings.c
@@ -56,7 +56,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */
nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */
nla_total_size(sizeof(u32) + /* _RINGS_CQE_SIZE */
-nla_total_size(sizeof(u8))); /* _RINGS_TX_PUSH */
nla_total_size(sizeof(u8)) + /* _RINGS_TX_PUSH */
nla_total_size(sizeof(u8))); /* _RINGS_RX_PUSH */
}
static int rings_fill_reply(struct sk_buff *skb,
@@ -96,7 +97,8 @@ static int rings_fill_reply(struct sk_buff *skb,
kr->tcp_data_split))) ||
(kr->cqe_size &&
(nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
-nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push))
return -EMSGSIZE;
return 0;
@@ -114,6 +116,7 @@ const struct nla_policy ethnl_rings_set_policy[] = {
[ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1),
[ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1),
[ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
[ETHTOOL_A_RINGS_RX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
};
static int
@@ -147,6 +150,14 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info,
return -EOPNOTSUPP;
}
if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
!(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
NL_SET_ERR_MSG_ATTR(info->extack,
tb[ETHTOOL_A_RINGS_RX_PUSH],
"setting rx push not supported");
return -EOPNOTSUPP;
}
return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}
@@ -176,6 +187,8 @@ ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
ethnl_update_u8(&kernel_ringparam.tx_push,
tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
ethnl_update_u8(&kernel_ringparam.rx_push,
tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
if (!mod)
return 0;