Commit e943d94e authored by David S. Miller

Merge branch 'octeontx2-af-NPA-and-NIX-blocks-initialization'

Sunil Goutham says:

====================
octeontx2-af: NPA and NIX blocks initialization

This patchset is a continuation of the earlier submitted patch series that
adds a new driver for the Resource Virtualization Unit (RVU) admin function
of Marvell's OcteonTX2 SoC:

octeontx2-af: Add RVU Admin Function driver
https://www.spinics.net/lists/netdev/msg528272.html

This patch series adds logic for the following:
- Modified register polling loop to use time_before(jiffies, timeout),
  as suggested by Arnd Bergmann (a minimal sketch of the pattern follows
  this list).
- Support to forward interface link status notifications sent by
  firmware to registered PFs mapped to a CGX::LMAC.
- Support to set CGX LMAC in loopback mode, retrieve stats,
  configure DMAC filters at the CGX level, etc.
- Network pool allocator (NPA) functional block initialization,
  admin queue support, NPALF aura/pool contexts memory allocation, init
  and deinit.
- Network interface controller (NIX) functional block basic init,
  admin queue support, NIXLF RQ/CQ/SQ HW contexts memory allocation,
  init and deinit.
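
For reference, a minimal sketch of the jiffies-based polling pattern from the
first item (the 100 usec budget and the register/mask names are illustrative,
taken from the rvu_poll_reg() change below):

  unsigned long timeout = jiffies + usecs_to_jiffies(100);

  while (time_before(jiffies, timeout)) {
          if (readq(reg) & mask)
                  return 0;
          usleep_range(1, 5);
  }
  return -EBUSY;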
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 53e50a6e 557dd485
...@@ -7,4 +7,4 @@ obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o
...@@ -29,6 +29,7 @@
* @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
* @cmd_lock: Lock to serialize the command interface
* @resp: command response
* @link_info: link related information
* @event_cb: callback for linkchange events
* @cmd_pend: flag set before new command is started
* flag cleared after command response is received
...@@ -40,6 +41,7 @@ struct lmac {
wait_queue_head_t wq_cmd_cmplt;
struct mutex cmd_lock;
u64 resp;
struct cgx_link_user_info link_info;
struct cgx_event_cb event_cb;
bool cmd_pend;
struct cgx *cgx;
...@@ -58,6 +60,12 @@ struct cgx {
static LIST_HEAD(cgx_list);
/* Convert firmware speed encoding to user format (Mbps) */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
...@@ -119,6 +127,177 @@ void *cgx_get_pdata(int cgx_id)
}
EXPORT_SYMBOL(cgx_get_pdata);
/* Ensure the required lock for event queue(where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event(with
* latest link status) can reach the destination before this function returns
* and could make the link status appear wrong.
*/
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo)
{
struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
if (!lmac)
return -ENODEV;
*linfo = lmac->link_info;
return 0;
}
EXPORT_SYMBOL(cgx_get_link_info);
static u64 mac2u64 (u8 *mac_addr)
{
u64 mac = 0;
int index;
for (index = ETH_ALEN - 1; index >= 0; index--)
mac |= ((u64)*mac_addr++) << (8 * index);
return mac;
}
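/* Illustrative example (not part of the patch):
* mac2u64((u8 []){ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }) == 0x001122334455ULL
* i.e. byte 0 of the MAC lands in bits 47:40, which is the layout the DMAC
* CAM entries programmed below expect in their low 48 bits.
*/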
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
u64 cfg;
/* Pack the 6-byte MAC address into the DMAC CAM config word */
cfg = mac2u64(mac_addr);
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
EXPORT_SYMBOL(cgx_lmac_addr_set);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
u64 cfg;
cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
EXPORT_SYMBOL(cgx_lmac_addr_get);
static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
u64 cfg;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}
/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u8 lmac_type;
u64 cfg;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
lmac_type = cgx_get_lmac_type(cgx, lmac_id);
if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
if (enable)
cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
else
cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
} else {
cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
if (enable)
cfg |= CGXX_SPUX_CONTROL1_LBK;
else
cfg &= ~CGXX_SPUX_CONTROL1_LBK;
cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
}
return 0;
}
EXPORT_SYMBOL(cgx_lmac_internal_loopback);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
u64 cfg = 0;
if (!cgx)
return;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
cfg |= CGX_DMAC_BCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
cfg = cgx_read(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
cgx_write(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
cfg = cgx_read(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
cgx_write(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
}
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
EXPORT_SYMBOL(cgx_get_rx_stats);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
struct cgx *cgx = cgxd;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
return 0;
}
EXPORT_SYMBOL(cgx_get_tx_stats);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
if (enable)
cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
else
cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
...@@ -191,36 +370,79 @@ static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
return err;
}
static inline void cgx_link_usertable_init(void)
{
cgx_speed_mbps[CGX_LINK_NONE] = 0;
cgx_speed_mbps[CGX_LINK_10M] = 10;
cgx_speed_mbps[CGX_LINK_100M] = 100;
cgx_speed_mbps[CGX_LINK_1G] = 1000;
cgx_speed_mbps[CGX_LINK_2HG] = 2500;
cgx_speed_mbps[CGX_LINK_5G] = 5000;
cgx_speed_mbps[CGX_LINK_10G] = 10000;
cgx_speed_mbps[CGX_LINK_20G] = 20000;
cgx_speed_mbps[CGX_LINK_25G] = 25000;
cgx_speed_mbps[CGX_LINK_40G] = 40000;
cgx_speed_mbps[CGX_LINK_50G] = 50000;
cgx_speed_mbps[CGX_LINK_100G] = 100000;
cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}
static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
{
char *lmac_string;
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}
/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
struct lmac *lmac)
{
struct cgx_link_user_info *linfo;
struct cgx *cgx = lmac->cgx;
struct cgx_link_event event;
struct device *dev;
int err_type;
dev = &cgx->pdev->dev;
link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
event.cgx_id = cgx->cgx_id;
event.lmac_id = lmac->lmac_id;
/* update the local copy of link status */
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
if (!lmac->event_cb.notify_link_chg) {
dev_dbg(dev, "cgx port %d:%d Link change handler null",
cgx->cgx_id, lmac->lmac_id);
if (err_type != CGX_ERR_NONE) {
dev_err(dev, "cgx port %d:%d Link error %d\n",
cgx->cgx_id, lmac->lmac_id, err_type);
}
dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
cgx->cgx_id, lmac->lmac_id,
linfo->link_up ? "UP" : "DOWN", linfo->speed);
return;
}
...@@ -448,6 +670,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
list_add(&cgx->cgx_list, &cgx_list);
cgx->cgx_id = cgx_get_cgx_cnt() - 1;
cgx_link_usertable_init();
err = cgx_lmac_init(cgx);
if (err)
goto err_release_lmac;
...
...@@ -11,6 +11,7 @@
#ifndef CGX_H
#define CGX_H
#include "mbox.h"
#include "cgx_fw_if.h" #include "cgx_fw_if.h"
/* PCI device IDs */ /* PCI device IDs */
...@@ -24,14 +25,35 @@ ...@@ -24,14 +25,35 @@
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) #define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
/* Registers */ /* Registers */
#define CGXX_CMRX_CFG 0x00
#define CMR_EN BIT_ULL(55)
#define DATA_PKT_TX_EN BIT_ULL(53)
#define DATA_PKT_RX_EN BIT_ULL(54)
#define CGX_LMAC_TYPE_SHIFT 40
#define CGX_LMAC_TYPE_MASK 0xF
#define CGXX_CMRX_INT 0x040
#define FW_CGX_INT BIT_ULL(1)
#define CGXX_CMRX_INT_ENA_W1S 0x058
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LMACS 0x128
#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 0x200
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
#define CGXX_SPUX_CONTROL1 0x10000
#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
...@@ -40,8 +62,22 @@
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
enum LMAC_TYPE {
LMAC_MODE_SGMII = 0,
LMAC_MODE_XAUI = 1,
LMAC_MODE_RXAUI = 2,
LMAC_MODE_10G_R = 3,
LMAC_MODE_40G_R = 4,
LMAC_MODE_QSGMII = 6,
LMAC_MODE_25G_R = 7,
LMAC_MODE_50G_R = 8,
LMAC_MODE_100G_R = 9,
LMAC_MODE_USXGMII = 10,
LMAC_MODE_MAX,
};
struct cgx_link_event {
struct cgx_link_user_info link_uinfo;
u8 cgx_id;
u8 lmac_id;
};
...@@ -62,4 +98,13 @@ int cgx_get_cgx_cnt(void);
int cgx_get_lmac_cnt(void *cgxd);
void *cgx_get_pdata(int cgx_id);
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
#endif /* CGX_H */
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef COMMON_H
#define COMMON_H
#include "rvu_struct.h"
#define OTX2_ALIGN 128 /* Align to cacheline */
#define Q_SIZE_16 0ULL /* 16 entries */
#define Q_SIZE_64 1ULL /* 64 entries */
#define Q_SIZE_256 2ULL
#define Q_SIZE_1K 3ULL
#define Q_SIZE_4K 4ULL
#define Q_SIZE_16K 5ULL
#define Q_SIZE_64K 6ULL
#define Q_SIZE_256K 7ULL
#define Q_SIZE_1M 8ULL /* Million entries */
#define Q_SIZE_MIN Q_SIZE_16
#define Q_SIZE_MAX Q_SIZE_1M
#define Q_COUNT(x) (16ULL << (2 * x))
#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2)
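/* Worked example (illustrative): Q_COUNT(Q_SIZE_256) == 16 << 4 == 256, and
* with the 16-entry base (ilog2(16) == 4) the inverse holds:
* Q_SIZE(256, 4) == (ilog2(256) - 4) / 2 == Q_SIZE_256.
*/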
/* Admin queue info */
/* Since we intend to add only one instruction at a time,
* keep the queue size to its minimum.
*/
#define AQ_SIZE Q_SIZE_16
/* HW head & tail pointer mask */
#define AQ_PTR_MASK 0xFFFFF
struct qmem {
void *base;
dma_addr_t iova;
int alloc_sz;
u8 entry_sz;
u8 align;
u32 qsize;
};
static inline int qmem_alloc(struct device *dev, struct qmem **q,
int qsize, int entry_sz)
{
struct qmem *qmem;
int aligned_addr;
if (!qsize)
return -EINVAL;
*q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
if (!*q)
return -ENOMEM;
qmem = *q;
qmem->entry_sz = entry_sz;
qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
&qmem->iova, GFP_KERNEL);
if (!qmem->base)
return -ENOMEM;
qmem->qsize = qsize;
aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
qmem->align = (aligned_addr - qmem->iova);
qmem->base += qmem->align;
qmem->iova += qmem->align;
return 0;
}
static inline void qmem_free(struct device *dev, struct qmem *qmem)
{
if (!qmem)
return;
if (qmem->base)
dma_free_coherent(dev, qmem->alloc_sz,
qmem->base - qmem->align,
qmem->iova - qmem->align);
devm_kfree(dev, qmem);
}
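/* Illustrative usage (not part of the patch): callers allocate a HW context
* region and program its 128-byte aligned IOVA into the block, e.g. the NIX
* RSS context setup in rvu_nix.c below does roughly:
*
*	qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
*	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
*		    (u64)pfvf->rss_ctx->iova);
*	...
*	qmem_free(rvu->dev, pfvf->rss_ctx);
*/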
struct admin_queue {
struct qmem *inst;
struct qmem *res;
spinlock_t lock; /* Serialize inst enqueue from PFs */
};
/* NPA aura count */
enum npa_aura_sz {
NPA_AURA_SZ_0,
NPA_AURA_SZ_128,
NPA_AURA_SZ_256,
NPA_AURA_SZ_512,
NPA_AURA_SZ_1K,
NPA_AURA_SZ_2K,
NPA_AURA_SZ_4K,
NPA_AURA_SZ_8K,
NPA_AURA_SZ_16K,
NPA_AURA_SZ_32K,
NPA_AURA_SZ_64K,
NPA_AURA_SZ_128K,
NPA_AURA_SZ_256K,
NPA_AURA_SZ_512K,
NPA_AURA_SZ_1M,
NPA_AURA_SZ_MAX,
};
#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6))
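/* Example (illustrative): NPA_AURA_COUNT(NPA_AURA_SZ_128) == 1 << (1 + 6) == 128,
* so each step up in the enum doubles the number of auras.
*/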
/* NPA AQ result structure for init/read/write of aura HW contexts */
struct npa_aq_aura_res {
struct npa_aq_res_s res;
struct npa_aura_s aura_ctx;
struct npa_aura_s ctx_mask;
};
/* NPA AQ result structure for init/read/write of pool HW contexts */
struct npa_aq_pool_res {
struct npa_aq_res_s res;
struct npa_pool_s pool_ctx;
struct npa_pool_s ctx_mask;
};
/* NIX Transmit schedulers */
enum nix_scheduler {
NIX_TXSCH_LVL_SMQ = 0x0,
NIX_TXSCH_LVL_MDQ = 0x0,
NIX_TXSCH_LVL_TL4 = 0x1,
NIX_TXSCH_LVL_TL3 = 0x2,
NIX_TXSCH_LVL_TL2 = 0x3,
NIX_TXSCH_LVL_TL1 = 0x4,
NIX_TXSCH_LVL_CNT = 0x5,
};
/* NIX LSO format indices.
* As of now TSO is the only user, so indices are statically assigned.
*/
#define NIX_LSO_FORMAT_IDX_TSOV4 0
#define NIX_LSO_FORMAT_IDX_TSOV6 1
/* RSS info */
#define MAX_RSS_GROUPS 8
/* Group 0 has to be used in default pkt forwarding MCAM entries
* reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
* filters.
*/
#define DEFAULT_RSS_CONTEXT_GROUP 0
#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
#endif /* COMMON_H */
...@@ -124,21 +124,50 @@ M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
enum {
#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
#undef M
};
/* Mailbox message formats */
#define RVU_DEFAULT_PF_FUNC 0xFFFF
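/* Illustrative expansion (not part of the patch): with the M() definition in
* the enum above, an entry such as M(CGX_LINK_EVENT, 0xC00, ...) becomes
* MBOX_MSG_CGX_LINK_EVENT = 0xC00, the ID used when the AF notifies PFs of
* link changes.
*/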
/* Generic request msg used for those mbox messages which
* don't send any data in the request.
*/
...@@ -208,4 +237,181 @@ struct msix_offset_rsp {
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
};
/* CGX mbox message formats */
struct cgx_stats_rsp {
struct mbox_msghdr hdr;
#define CGX_RX_STATS_COUNT 13
#define CGX_TX_STATS_COUNT 18
u64 rx_stats[CGX_RX_STATS_COUNT];
u64 tx_stats[CGX_TX_STATS_COUNT];
};
/* Structure for requesting the operation for
* setting/getting mac address in the CGX interface
*/
struct cgx_mac_addr_set_or_get {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
};
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
uint64_t lmac_type_id:4;
uint64_t speed:20; /* speed in Mbps */
#define LMACTYPE_STR_LEN 16
char lmac_type[LMACTYPE_STR_LEN];
};
struct cgx_link_info_msg {
struct mbox_msghdr hdr;
struct cgx_link_user_info link_info;
};
/* NPA mbox message formats */
/* NPA mailbox error codes
* Range 301 - 400.
*/
enum npa_af_status {
NPA_AF_ERR_PARAM = -301,
NPA_AF_ERR_AQ_FULL = -302,
NPA_AF_ERR_AQ_ENQUEUE = -303,
NPA_AF_ERR_AF_LF_INVALID = -304,
NPA_AF_ERR_AF_LF_ALLOC = -305,
NPA_AF_ERR_LF_RESET = -306,
};
/* For NPA LF context alloc and init */
struct npa_lf_alloc_req {
struct mbox_msghdr hdr;
int node;
int aura_sz; /* No of auras */
u32 nr_pools; /* No of pools */
};
struct npa_lf_alloc_rsp {
struct mbox_msghdr hdr;
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
u16 qints; /* NPA_AF_CONST::QINTS */
};
/* NPA AQ enqueue msg */
struct npa_aq_enq_req {
struct mbox_msghdr hdr;
u32 aura_id;
u8 ctype;
u8 op;
union {
/* Valid when op == WRITE/INIT and ctype == AURA.
* LF fills the pool_id in aura.pool_addr. AF will translate
* the pool_id to pool context pointer.
*/
struct npa_aura_s aura;
/* Valid when op == WRITE/INIT and ctype == POOL */
struct npa_pool_s pool;
};
/* Mask data when op == WRITE (1=write, 0=don't write) */
union {
/* Valid when op == WRITE and ctype == AURA */
struct npa_aura_s aura_mask;
/* Valid when op == WRITE and ctype == POOL */
struct npa_pool_s pool_mask;
};
};
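/* Illustrative use of the mask (not part of the patch; the ctype/op encodings
* are assumed to mirror the NIX_AQ_* ones used later in this series): to
* rewrite only the aura's pool pointer, set req->aura.pool_addr to the new
* value and set every bit of req->aura_mask.pool_addr, leaving the rest of
* aura_mask zeroed so no other context field is written.
*/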
struct npa_aq_enq_rsp {
struct mbox_msghdr hdr;
union {
/* Valid when op == READ and ctype == AURA */
struct npa_aura_s aura;
/* Valid when op == READ and ctype == POOL */
struct npa_pool_s pool;
};
};
/* Disable all contexts of type 'ctype' */
struct hwctx_disable_req {
struct mbox_msghdr hdr;
u8 ctype;
};
/* NIX mailbox error codes
* Range 401 - 500.
*/
enum nix_af_status {
NIX_AF_ERR_PARAM = -401,
NIX_AF_ERR_AQ_FULL = -402,
NIX_AF_ERR_AQ_ENQUEUE = -403,
NIX_AF_ERR_AF_LF_INVALID = -404,
NIX_AF_ERR_AF_LF_ALLOC = -405,
NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
NIX_AF_ERR_TLX_INVALID = -407,
NIX_AF_ERR_RSS_SIZE_INVALID = -408,
NIX_AF_ERR_RSS_GRPS_INVALID = -409,
NIX_AF_ERR_FRS_INVALID = -410,
NIX_AF_ERR_RX_LINK_INVALID = -411,
NIX_AF_INVAL_TXSCHQ_CFG = -412,
NIX_AF_SMQ_FLUSH_FAILED = -413,
NIX_AF_ERR_LF_RESET = -414,
};
/* For NIX LF context alloc and init */
struct nix_lf_alloc_req {
struct mbox_msghdr hdr;
int node;
u32 rq_cnt; /* No of receive queues */
u32 sq_cnt; /* No of send queues */
u32 cq_cnt; /* No of completion queues */
u8 xqe_sz;
u16 rss_sz;
u8 rss_grps;
u16 npa_func;
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
};
struct nix_lf_alloc_rsp {
struct mbox_msghdr hdr;
u16 sqb_size;
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
u8 mac_addr[ETH_ALEN];
};
/* NIX AQ enqueue msg */
struct nix_aq_enq_req {
struct mbox_msghdr hdr;
u32 qidx;
u8 ctype;
u8 op;
union {
struct nix_rq_ctx_s rq;
struct nix_sq_ctx_s sq;
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
};
union {
struct nix_rq_ctx_s rq_mask;
struct nix_sq_ctx_s sq_mask;
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
};
};
struct nix_aq_enq_rsp {
struct mbox_msghdr hdr;
union {
struct nix_rq_ctx_s rq;
struct nix_sq_ctx_s sq;
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
};
};
#endif /* MBOX_H */
...@@ -47,18 +47,18 @@ MODULE_DEVICE_TABLE(pci, rvu_id_table);
*/
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
unsigned long timeout = jiffies + usecs_to_jiffies(100);
void __iomem *reg;
u64 reg_val;
reg = rvu->afreg_base + ((block << 28) | offset);
while (time_before(jiffies, timeout)) {
reg_val = readq(reg);
if (zero && !(reg_val & mask))
return 0;
if (!zero && (reg_val & mask))
return 0;
usleep_range(1, 5);
timeout--;
}
return -EBUSY;
...@@ -361,6 +361,19 @@ static void rvu_check_block_implemented(struct rvu *rvu)
}
}
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
int err;
if (!block->implemented)
return 0;
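/* Kick the LF reset by writing the LF number with bit 12 set, then poll
* until hardware clears bit 12 to signal completion.
*/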
rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
true);
return err;
}
static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
struct rvu_block *block = &rvu->hw->block[blkaddr];
...@@ -552,6 +565,9 @@ static void rvu_free_hw_resources(struct rvu *rvu)
int id, max_msix;
u64 cfg;
rvu_npa_freemem(rvu);
rvu_nix_freemem(rvu);
/* Free block LF bitmaps */
for (id = 0; id < BLK_COUNT; id++) {
block = &hw->block[id];
...@@ -755,6 +771,54 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
rvu_scan_block(rvu, block);
}
err = rvu_npa_init(rvu);
if (err)
return err;
err = rvu_nix_init(rvu);
if (err)
return err;
return 0;
}
/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
if (!aq)
return;
qmem_free(rvu->dev, aq->inst);
qmem_free(rvu->dev, aq->res);
devm_kfree(rvu->dev, aq);
}
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int qsize, int inst_size, int res_size)
{
struct admin_queue *aq;
int err;
*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
if (!*ad_queue)
return -ENOMEM;
aq = *ad_queue;
/* Alloc memory for instructions i.e AQ */
err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
if (err) {
devm_kfree(rvu->dev, aq);
return err;
}
/* Alloc memory for results */
err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
if (err) {
rvu_aq_free(rvu, aq);
return err;
}
spin_lock_init(&aq->lock);
return 0;
}
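/* Illustrative caller (not part of the patch): a block init path would size
* the AQ from the macros in common.h, roughly:
*
*	err = rvu_aq_alloc(rvu, &block->aq, Q_COUNT(AQ_SIZE),
*			   sizeof(struct npa_aq_inst_s),
*			   sizeof(struct npa_aq_res_s));
*
* with npa_aq_inst_s/npa_aq_res_s being the instruction/result layouts from
* rvu_struct.h; the exact result sizing used by rvu_npa_init() may differ.
*/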
...@@ -1316,6 +1380,63 @@ static void rvu_mbox_handler(struct work_struct *work)
otx2_mbox_msg_send(mbox, pf);
}
static void rvu_mbox_up_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
struct rvu *rvu = mwork->rvu;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
struct otx2_mbox *mbox;
int offset, id;
u16 pf;
mbox = &rvu->mbox_up;
pf = mwork - rvu->mbox_wrk_up;
mdev = &mbox->dev[pf];
rsp_hdr = mdev->mbase + mbox->rx_start;
if (rsp_hdr->num_msgs == 0) {
dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
return;
}
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < rsp_hdr->num_msgs; id++) {
msg = mdev->mbase + offset;
if (msg->id >= MBOX_MSG_MAX) {
dev_err(rvu->dev,
"Mbox msg with unknown ID 0x%x\n", msg->id);
goto end;
}
if (msg->sig != OTX2_MBOX_RSP_SIG) {
dev_err(rvu->dev,
"Mbox msg with wrong signature %x, ID 0x%x\n",
msg->sig, msg->id);
goto end;
}
switch (msg->id) {
case MBOX_MSG_CGX_LINK_EVENT:
break;
default:
if (msg->rc)
dev_err(rvu->dev,
"Mbox msg response has err %d, ID 0x%x\n",
msg->rc, msg->id);
break;
}
end:
offset = mbox->rx_start + msg->next_msgoff;
mdev->msgs_acked++;
}
otx2_mbox_reset(mbox, 0);
}
static int rvu_mbox_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
...@@ -1337,6 +1458,13 @@ static int rvu_mbox_init(struct rvu *rvu)
goto exit;
}
rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
sizeof(struct rvu_work), GFP_KERNEL);
if (!rvu->mbox_wrk_up) {
err = -ENOMEM;
goto exit;
}
/* Map mbox region shared with PFs */
bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
/* Mailbox is a reserved memory (in RAM) region shared between
...@@ -1355,12 +1483,23 @@ static int rvu_mbox_init(struct rvu *rvu)
if (err)
goto exit;
err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
MBOX_DIR_AFPF_UP, hw->total_pfs);
if (err)
goto exit;
for (pf = 0; pf < hw->total_pfs; pf++) {
mwork = &rvu->mbox_wrk[pf];
mwork->rvu = rvu;
INIT_WORK(&mwork->work, rvu_mbox_handler);
}
for (pf = 0; pf < hw->total_pfs; pf++) {
mwork = &rvu->mbox_wrk_up[pf];
mwork->rvu = rvu;
INIT_WORK(&mwork->work, rvu_mbox_up_handler);
}
return 0;
exit:
if (hwbase)
...@@ -1381,6 +1520,7 @@ static void rvu_mbox_destroy(struct rvu *rvu)
iounmap((void __iomem *)rvu->mbox.hwbase);
otx2_mbox_destroy(&rvu->mbox);
otx2_mbox_destroy(&rvu->mbox_up);
}
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
...@@ -1407,6 +1547,12 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
if (hdr->num_msgs)
queue_work(rvu->mbox_wq,
&rvu->mbox_wrk[pf].work);
mbox = &rvu->mbox_up;
mdev = &mbox->dev[pf];
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs)
queue_work(rvu->mbox_wq,
&rvu->mbox_wrk_up[pf].work);
}
}
...
...@@ -12,6 +12,7 @@
#define RVU_H
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h" #include "mbox.h"
/* PCI device IDs */ /* PCI device IDs */
...@@ -42,6 +43,7 @@ struct rsrc_bmap { ...@@ -42,6 +43,7 @@ struct rsrc_bmap {
struct rvu_block { struct rvu_block {
struct rsrc_bmap lf; struct rsrc_bmap lf;
struct admin_queue *aq; /* NIX/NPA AQ */
u16 *fn_map; /* LF to pcifunc mapping */
bool multislot;
bool implemented;
...@@ -70,14 +72,50 @@ struct rvu_pfvf {
struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
u16 *msix_lfmap; /* Vector to block LF mapping */
/* NPA contexts */
struct qmem *aura_ctx;
struct qmem *pool_ctx;
struct qmem *npa_qints_ctx;
unsigned long *aura_bmap;
unsigned long *pool_bmap;
/* NIX contexts */
struct qmem *rq_ctx;
struct qmem *sq_ctx;
struct qmem *cq_ctx;
struct qmem *rss_ctx;
struct qmem *cq_ints_ctx;
struct qmem *nix_qints_ctx;
unsigned long *sq_bmap;
unsigned long *rq_bmap;
unsigned long *cq_bmap;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
};
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
u16 *pfvf_map;
};
struct nix_hw {
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
};
struct rvu_hwinfo {
u8 total_pfs; /* MAX RVU PFs HW supports */
u16 total_vfs; /* Max RVU VFs HW supports */
u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
u8 cgx;
u8 lmac_per_cgx;
u8 cgx_links;
u8 lbk_links;
u8 sdp_links;
struct rvu_block block[BLK_COUNT]; /* Block info */
struct nix_hw *nix0;
};
struct rvu {
...@@ -93,6 +131,8 @@ struct rvu {
/* Mbox */
struct otx2_mbox mbox;
struct rvu_work *mbox_wrk;
struct otx2_mbox mbox_up;
struct rvu_work *mbox_wrk_up;
struct workqueue_struct *mbox_wq;
/* MSI-X */
...@@ -109,6 +149,7 @@ struct rvu {
u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
* every cgx lmac port
*/
unsigned long pf_notify_bmap; /* Flags for PF notification */
void **cgx_idmap; /* cgx id to cgx data map table */
struct work_struct cgx_evh_work;
struct workqueue_struct *cgx_evh_wq;
...@@ -149,10 +190,84 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
/* NPA/NIX AQ APIs */
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
{
*cgx_id = (map >> 4) & 0xF;
*lmac_id = (map & 0xF);
}
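/* Example (illustrative): a map byte of 0x12, as packed by
* cgxlmac_id_to_bmap() in rvu_cgx.c, decodes to cgx_id 1 and lmac_id 2.
*/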
int rvu_cgx_probe(struct rvu *rvu);
void rvu_cgx_wq_destroy(struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp);
int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp);
int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp);
int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NIX APIs */
int rvu_nix_init(struct rvu *rvu);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp);
int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp);
int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
#endif /* RVU_H */
...@@ -20,6 +20,31 @@ struct cgx_evq_entry {
struct cgx_link_event link_event;
};
#define M(_name, _id, _req_type, _rsp_type) \
static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \
{ \
struct _req_type *req; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
&rvu->mbox_up, devid, sizeof(struct _req_type), \
sizeof(struct _rsp_type)); \
if (!req) \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
return req; \
}
MBOX_UP_CGX_MESSAGES
#undef M
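/* Illustrative expansion (not part of the patch): for the single entry in
* MBOX_UP_CGX_MESSAGES this generates otx2_mbox_alloc_msg_CGX_LINK_EVENT(),
* which cgx_notify_pfs() below uses to allocate an up-direction message with
* hdr.id = MBOX_MSG_CGX_LINK_EVENT.
*/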
/* Returns bitmap of mapped PFs */
static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
...@@ -77,6 +102,34 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
return 0;
}
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
struct cgx_evq_entry *qentry;
unsigned long flags;
int err;
qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
if (!qentry)
return -ENOMEM;
/* Lock the event queue before we read the local link status */
spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
&qentry->link_event.link_uinfo);
qentry->link_event.cgx_id = cgx_id;
qentry->link_event.lmac_id = lmac_id;
if (err)
goto skip_add;
list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
/* start worker to process the events */
queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
return 0;
}
/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
...@@ -98,6 +151,41 @@ static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
return 0;
}
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
struct cgx_link_user_info *linfo;
struct cgx_link_info_msg *msg;
unsigned long pfmap;
int err, pfid;
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
do {
pfid = find_first_bit(&pfmap, 16);
clear_bit(pfid, &pfmap);
/* check if notification is enabled */
if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
event->cgx_id, event->lmac_id,
linfo->link_up ? "UP" : "DOWN");
continue;
}
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
if (!msg)
continue;
msg->link_info = *linfo;
otx2_mbox_msg_send(&rvu->mbox_up, pfid);
err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
if (err)
dev_warn(rvu->dev, "notification to pf %d failed\n",
pfid);
} while (pfmap);
}
static void cgx_evhandler_task(struct work_struct *work)
{
struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
...@@ -119,7 +207,8 @@ static void cgx_evhandler_task(struct work_struct *work)
event = &qentry->link_event;
/* process event */
cgx_notify_pfs(event, rvu);
kfree(qentry);
} while (1);
}
...@@ -192,3 +281,232 @@ int rvu_cgx_probe(struct rvu *rvu)
cgx_lmac_event_handler_init(rvu);
return 0;
}
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
return 0;
}
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
return 0;
}
int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
return 0;
}
int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
u8 cgx_idx, lmac;
void *cgxd;
if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
/* Rx stats */
while (stat < CGX_RX_STATS_COUNT) {
err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
if (err)
return err;
rsp->rx_stats[stat] = rx_stat;
stat++;
}
/* Tx stats */
stat = 0;
while (stat < CGX_TX_STATS_COUNT) {
err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
if (err)
return err;
rsp->tx_stats[stat] = tx_stat;
stat++;
}
return 0;
}
int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
return 0;
}
int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
int rc = 0, i;
u64 cfg;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->hdr.rc = rc;
cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
/* copy 48 bit mac address to rsp->mac_addr */
for (i = 0; i < ETH_ALEN; i++)
rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
return 0;
}
int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, true);
return 0;
}
int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, false);
return 0;
}
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
if (en) {
set_bit(pf, &rvu->pf_notify_bmap);
/* Send the current link status to PF */
rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
} else {
clear_bit(pf, &rvu->pf_notify_bmap);
}
return 0;
}
int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
return 0;
}
int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
return 0;
}
int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp)
{
u8 cgx_id, lmac_id;
int pf, err;
pf = rvu_get_pf(req->hdr.pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
&rsp->link_info);
return err;
}
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, en);
}
int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
return 0;
}
int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
if (blkaddr == BLKADDR_NIX0 && hw->nix0)
return hw->nix0;
return NULL;
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
int lvl, u16 pcifunc, u16 schq)
{
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return false;
txsch = &nix_hw->txsch[lvl];
/* Check out of bounds */
if (schq >= txsch->schq.max)
return false;
spin_lock(&rvu->rsrc_lock);
if (txsch->pfvf_map[schq] != pcifunc) {
spin_unlock(&rvu->rsrc_lock);
return false;
}
spin_unlock(&rvu->rsrc_lock);
return true;
}
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
struct nix_lso_format field = {0};
/* IP's Length field */
field.layer = NIX_TXLAYER_OL3;
/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
field.offset = v4 ? 2 : 4;
field.sizem1 = 1; /* i.e 2 bytes */
field.alg = NIX_LSOALG_ADD_PAYLEN;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
/* No ID field in IPv6 header */
if (!v4)
return;
/* IP's ID field */
field.layer = NIX_TXLAYER_OL3;
field.offset = 4;
field.sizem1 = 1; /* i.e 2 bytes */
field.alg = NIX_LSOALG_ADD_SEGNUM;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
}
static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
u64 format, u64 *fidx)
{
struct nix_lso_format field = {0};
/* TCP's sequence number field */
field.layer = NIX_TXLAYER_OL4;
field.offset = 4;
field.sizem1 = 3; /* i.e 4 bytes */
field.alg = NIX_LSOALG_ADD_OFFSET;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
/* TCP's flags field */
field.layer = NIX_TXLAYER_OL4;
field.offset = 12;
field.sizem1 = 0; /* not needed */
field.alg = NIX_LSOALG_TCP_FLAGS;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
}
static void nix_setup_lso(struct rvu *rvu, int blkaddr)
{
u64 cfg, idx, fidx = 0;
/* Enable LSO */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
/* For TSO, set first and middle segment flags to
* mask out PSH, RST & FIN flags in TCP packet
*/
cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
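/* 0xFFF2 == ~(FIN | RST | PSH) over the low 16 flag bits, i.e. only those
* three TCP flags get masked off in first/middle segments.
*/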
rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
/* Configure format fields for TCPv4 segmentation offload */
idx = NIX_LSO_FORMAT_IDX_TSOV4;
nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
/* Set rest of the fields to NOP */
for (; fidx < 8; fidx++) {
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
/* Configure format fields for TCPv6 segmentation offload */
idx = NIX_LSO_FORMAT_IDX_TSOV6;
fidx = 0;
nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
/* Set rest of the fields to NOP */
for (; fidx < 8; fidx++) {
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
}
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
kfree(pfvf->rq_bmap);
kfree(pfvf->sq_bmap);
kfree(pfvf->cq_bmap);
if (pfvf->rq_ctx)
qmem_free(rvu->dev, pfvf->rq_ctx);
if (pfvf->sq_ctx)
qmem_free(rvu->dev, pfvf->sq_ctx);
if (pfvf->cq_ctx)
qmem_free(rvu->dev, pfvf->cq_ctx);
if (pfvf->rss_ctx)
qmem_free(rvu->dev, pfvf->rss_ctx);
if (pfvf->nix_qints_ctx)
qmem_free(rvu->dev, pfvf->nix_qints_ctx);
if (pfvf->cq_ints_ctx)
qmem_free(rvu->dev, pfvf->cq_ints_ctx);
pfvf->rq_bmap = NULL;
pfvf->cq_bmap = NULL;
pfvf->sq_bmap = NULL;
pfvf->rq_ctx = NULL;
pfvf->sq_ctx = NULL;
pfvf->cq_ctx = NULL;
pfvf->rss_ctx = NULL;
pfvf->nix_qints_ctx = NULL;
pfvf->cq_ints_ctx = NULL;
}
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
int rss_sz, int rss_grps, int hwctx_size)
{
int err, grp, num_indices;
/* RSS is not requested for this NIXLF */
if (!rss_sz)
return 0;
num_indices = rss_sz * rss_grps;
/* Alloc NIX RSS HW context memory and config the base */
err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
if (err)
return err;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
(u64)pfvf->rss_ctx->iova);
/* Config full RSS table size, enable RSS and caching */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
BIT_ULL(36) | BIT_ULL(4) |
ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
return 0;
}
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
struct nix_aq_inst_s *inst)
{
struct admin_queue *aq = block->aq;
struct nix_aq_res_s *result;
int timeout = 1000;
u64 reg, head;
result = (struct nix_aq_res_s *)aq->res->base;
/* Get current head pointer where to append this instruction */
reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
head = (reg >> 4) & AQ_PTR_MASK;
memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
(void *)inst, aq->inst->entry_sz);
memset(result, 0, sizeof(*result));
/* sync into memory */
wmb();
/* Ring the doorbell and wait for result */
rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
while (result->compcode == NIX_AQ_COMP_NOTDONE) {
cpu_relax();
udelay(1);
timeout--;
if (!timeout)
return -EBUSY;
}
if (result->compcode != NIX_AQ_COMP_GOOD)
/* TODO: Replace this with some error code */
return -EBUSY;
return 0;
}
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int nixlf, blkaddr, rc = 0;
struct nix_aq_inst_s inst;
struct rvu_block *block;
struct admin_queue *aq;
struct rvu_pfvf *pfvf;
void *ctx, *mask;
bool ena;
u64 cfg;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
aq = block->aq;
if (!aq) {
dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
return NIX_AF_ERR_AQ_ENQUEUE;
}
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
switch (req->ctype) {
case NIX_AQ_CTYPE_RQ:
/* Check if index exceeds max no of queues */
if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
case NIX_AQ_CTYPE_SQ:
if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
case NIX_AQ_CTYPE_CQ:
if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
case NIX_AQ_CTYPE_RSS:
/* Check if RSS is enabled and qidx is within range */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
(req->qidx >= (256UL << (cfg & 0xF))))
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
}
if (rc)
return rc;
/* Check if SQ pointed SMQ belongs to this PF/VF or not */
if (req->ctype == NIX_AQ_CTYPE_SQ &&
req->op != NIX_AQ_INSTOP_WRITE) {
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
pcifunc, req->sq.smq))
return NIX_AF_ERR_AQ_ENQUEUE;
}
memset(&inst, 0, sizeof(struct nix_aq_inst_s));
inst.lf = nixlf;
inst.cindex = req->qidx;
inst.ctype = req->ctype;
inst.op = req->op;
/* Currently we are not supporting enqueuing multiple instructions,
* so always choose first entry in result memory.
*/
inst.res_addr = (u64)aq->res->iova;
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
ctx = aq->res->base + 128;
/* Mask needs to be written at RES_ADDR + 256 */
mask = aq->res->base + 256;
switch (req->op) {
case NIX_AQ_INSTOP_WRITE:
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(mask, &req->rq_mask,
sizeof(struct nix_rq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(mask, &req->sq_mask,
sizeof(struct nix_sq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(mask, &req->cq_mask,
sizeof(struct nix_cq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(mask, &req->rss_mask,
sizeof(struct nix_rsse_s));
/* Fall through */
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
case NIX_AQ_INSTOP_LOCK:
case NIX_AQ_INSTOP_UNLOCK:
break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
return rc;
}
spin_lock(&aq->lock);
/* Submit the instruction to AQ */
rc = nix_aq_enqueue_wait(rvu, block, &inst);
if (rc) {
spin_unlock(&aq->lock);
return rc;
}
/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
if (req->op == NIX_AQ_INSTOP_INIT) {
if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
__set_bit(req->qidx, pfvf->rq_bmap);
if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
__set_bit(req->qidx, pfvf->sq_bmap);
if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
__set_bit(req->qidx, pfvf->cq_bmap);
}
if (req->op == NIX_AQ_INSTOP_WRITE) {
if (req->ctype == NIX_AQ_CTYPE_RQ) {
ena = (req->rq.ena & req->rq_mask.ena) |
(test_bit(req->qidx, pfvf->rq_bmap) &
~req->rq_mask.ena);
if (ena)
__set_bit(req->qidx, pfvf->rq_bmap);
else
__clear_bit(req->qidx, pfvf->rq_bmap);
}
if (req->ctype == NIX_AQ_CTYPE_SQ) {
ena = (req->sq.ena & req->sq_mask.ena) |
(test_bit(req->qidx, pfvf->sq_bmap) &
~req->sq_mask.ena);
if (ena)
__set_bit(req->qidx, pfvf->sq_bmap);
else
__clear_bit(req->qidx, pfvf->sq_bmap);
}
if (req->ctype == NIX_AQ_CTYPE_CQ) {
ena = (req->cq.ena & req->cq_mask.ena) |
(test_bit(req->qidx, pfvf->cq_bmap) &
~req->cq_mask.ena);
if (ena)
__set_bit(req->qidx, pfvf->cq_bmap);
else
__clear_bit(req->qidx, pfvf->cq_bmap);
}
}
if (rsp) {
/* Copy read context into mailbox */
if (req->op == NIX_AQ_INSTOP_READ) {
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(&rsp->rq, ctx,
sizeof(struct nix_rq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(&rsp->sq, ctx,
sizeof(struct nix_sq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(&rsp->cq, ctx,
sizeof(struct nix_cq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(&rsp->rss, ctx,
sizeof(struct nix_rsse_s));
}
}
spin_unlock(&aq->lock);
return 0;
}
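For the INSTOP_WRITE handling above, the driver mirrors the hardware's masked-write semantics in its own RQ/SQ/CQ bitmaps: a queue's tracked enable state takes the request's ena bit only where the write mask selects it, and keeps the previously tracked bit otherwise. A minimal standalone sketch of that merge rule (illustration only, not driver code):

#include <assert.h>
#include <stdbool.h>

static bool merge_ena(bool req_ena, bool mask_ena, bool old_ena)
{
	/* Field is taken from the request only where the mask is set */
	return (req_ena & mask_ena) | (old_ena & ~mask_ena);
}

int main(void)
{
	assert(merge_ena(true,  true,  false) == true);  /* mask set: request enables */
	assert(merge_ena(false, true,  true)  == false); /* mask set: request disables */
	assert(merge_ena(true,  false, false) == false); /* mask clear: keep old state */
	assert(merge_ena(false, false, true)  == true);  /* mask clear: keep old state */
	return 0;
}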
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct nix_aq_enq_req aq_req;
unsigned long *bmap;
int qidx, q_cnt = 0;
int err = 0, rc;
if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
return NIX_AF_ERR_AQ_ENQUEUE;
memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
aq_req.hdr.pcifunc = req->hdr.pcifunc;
if (req->ctype == NIX_AQ_CTYPE_CQ) {
aq_req.cq.ena = 0;
aq_req.cq_mask.ena = 1;
q_cnt = pfvf->cq_ctx->qsize;
bmap = pfvf->cq_bmap;
}
if (req->ctype == NIX_AQ_CTYPE_SQ) {
aq_req.sq.ena = 0;
aq_req.sq_mask.ena = 1;
q_cnt = pfvf->sq_ctx->qsize;
bmap = pfvf->sq_bmap;
}
if (req->ctype == NIX_AQ_CTYPE_RQ) {
aq_req.rq.ena = 0;
aq_req.rq_mask.ena = 1;
q_cnt = pfvf->rq_ctx->qsize;
bmap = pfvf->rq_bmap;
}
aq_req.ctype = req->ctype;
aq_req.op = NIX_AQ_INSTOP_WRITE;
for (qidx = 0; qidx < q_cnt; qidx++) {
if (!test_bit(qidx, bmap))
continue;
aq_req.qidx = qidx;
rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
if (rc) {
err = rc;
dev_err(rvu->dev, "Failed to disable %s:%d context\n",
(req->ctype == NIX_AQ_CTYPE_CQ) ?
"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
"RQ" : "SQ"), qidx);
}
}
return err;
}
int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp)
{
return nix_lf_hwctx_disable(rvu, req);
}
int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp)
{
int nixlf, qints, hwctx_size, err, rc = 0;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
struct rvu_pfvf *pfvf;
u64 cfg, ctx_cfg;
int blkaddr;
if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
return NIX_AF_ERR_PARAM;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
/* If RSS is being enabled, check if the requested config is valid.
* RSS table size should be a power of two; otherwise
* RSS_GRP::OFFSET + adder might go beyond that group, or the
* entire table can't be used.
*/
if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
!is_power_of_2(req->rss_sz)))
return NIX_AF_ERR_RSS_SIZE_INVALID;
if (req->rss_sz &&
(!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
return NIX_AF_ERR_RSS_GRPS_INVALID;
/* Reset this NIX LF */
err = rvu_lf_reset(rvu, block, nixlf);
if (err) {
dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
block->addr - BLKADDR_NIX0, nixlf);
return NIX_AF_ERR_LF_RESET;
}
ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
/* Alloc NIX RQ HW context memory and config the base */
hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
if (err)
goto free_mem;
pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
if (!pfvf->rq_bmap)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
(u64)pfvf->rq_ctx->iova);
/* Set caching and queue count in HW */
cfg = BIT_ULL(36) | (req->rq_cnt - 1);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
/* Alloc NIX SQ HW context memory and config the base */
hwctx_size = 1UL << (ctx_cfg & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
if (err)
goto free_mem;
pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
if (!pfvf->sq_bmap)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
(u64)pfvf->sq_ctx->iova);
cfg = BIT_ULL(36) | (req->sq_cnt - 1);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
/* Alloc NIX CQ HW context memory and config the base */
hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
if (err)
goto free_mem;
pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
if (!pfvf->cq_bmap)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
(u64)pfvf->cq_ctx->iova);
cfg = BIT_ULL(36) | (req->cq_cnt - 1);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
req->rss_sz, req->rss_grps, hwctx_size);
if (err)
goto free_mem;
/* Alloc memory for CQINT's HW contexts */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
qints = (cfg >> 24) & 0xFFF;
hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
if (err)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
(u64)pfvf->cq_ints_ctx->iova);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
/* Alloc memory for QINT's HW contexts */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
qints = (cfg >> 12) & 0xFFF;
hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
if (err)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
(u64)pfvf->nix_qints_ctx->iova);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
/* Enable LMTST for this NIX LF */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
* If the requester sent 'RVU_DEFAULT_PF_FUNC', use this NIX LF's
* own PCIFUNC.
*/
if (req->npa_func == RVU_DEFAULT_PF_FUNC)
cfg = pcifunc;
else
cfg = req->npa_func;
if (req->sso_func == RVU_DEFAULT_PF_FUNC)
cfg |= (u64)pcifunc << 16;
else
cfg |= (u64)req->sso_func << 16;
cfg |= (u64)req->xqe_sz << 33;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
/* Config Rx pkt length, csum checks and apad enable / disable */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
goto exit;
free_mem:
nix_ctx_free(rvu, pfvf);
rc = -ENOMEM;
exit:
/* Set macaddr of this PF/VF */
ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
/* set SQB size info */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
rsp->sqb_size = (cfg >> 34) & 0xFFFF;
rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
return rc;
}
int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
/* Reset this NIX LF */
err = rvu_lf_reset(rvu, block, nixlf);
if (err) {
dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
block->addr - BLKADDR_NIX0, nixlf);
return NIX_AF_ERR_LF_RESET;
}
nix_ctx_free(rvu, pfvf);
return 0;
}
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
struct nix_txsch *txsch;
u64 cfg, reg;
int err, lvl;
/* Get scheduler queue count of each type and alloc
* bitmap for each for alloc/free/attach operations.
*/
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
txsch->lvl = lvl;
switch (lvl) {
case NIX_TXSCH_LVL_SMQ:
reg = NIX_AF_MDQ_CONST;
break;
case NIX_TXSCH_LVL_TL4:
reg = NIX_AF_TL4_CONST;
break;
case NIX_TXSCH_LVL_TL3:
reg = NIX_AF_TL3_CONST;
break;
case NIX_TXSCH_LVL_TL2:
reg = NIX_AF_TL2_CONST;
break;
case NIX_TXSCH_LVL_TL1:
reg = NIX_AF_TL1_CONST;
break;
}
cfg = rvu_read64(rvu, blkaddr, reg);
txsch->schq.max = cfg & 0xFFFF;
err = rvu_alloc_bitmap(&txsch->schq);
if (err)
return err;
/* Allocate memory for scheduler queues to
* PF/VF pcifunc mapping info.
*/
txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
sizeof(u16), GFP_KERNEL);
if (!txsch->pfvf_map)
return -ENOMEM;
}
return 0;
}
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
int idx, err;
u64 status;
/* Start X2P bus calibration */
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
/* Wait for calibration to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_STATUS, BIT_ULL(10), false);
if (err) {
dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
return err;
}
status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
/* Check if CGX devices are ready */
for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
if (status & (BIT_ULL(16 + idx)))
continue;
dev_err(rvu->dev,
"CGX%d didn't respond to NIX X2P calibration\n", idx);
err = -EBUSY;
}
/* Check if LBK is ready */
if (!(status & BIT_ULL(19))) {
dev_err(rvu->dev,
"LBK didn't respond to NIX X2P calibration\n");
err = -EBUSY;
}
/* Clear 'calibrate_x2p' bit */
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
if (err || (status & 0x3FFULL))
dev_err(rvu->dev,
"NIX X2P calibration failed, status 0x%llx\n", status);
if (err)
return err;
return 0;
}
static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
u64 cfg;
int err;
/* Set admin queue endianness */
cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
cfg |= BIT_ULL(1);
rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
cfg &= ~BIT_ULL(1);
rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
cfg &= ~0x3FFEULL;
rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
/* Result structure can be followed by RQ/SQ/CQ context at
* RES + 128bytes and a write mask at RES + 256 bytes, depending on
* operation type. Alloc sufficient result memory for all operations.
*/
err = rvu_aq_alloc(rvu, &block->aq,
Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
if (err)
return err;
rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
rvu_write64(rvu, block->addr,
NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
return 0;
}
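The entry size passed to rvu_aq_alloc() above encodes a fixed layout for each operation's result memory, the same layout rvu_nix_aq_enq_inst() indexes with its +128/+256 offsets. A small sketch of those offsets (illustrative constants, not part of the driver); the NPA AQ set up later in this series uses the identical layout:

enum {
	AQ_RES_OFFSET   = 0,		/* struct nix_aq_res_s (two 64-bit words) */
	AQ_CTX_OFFSET   = 128,		/* RQ/SQ/CQ/RSS context for the operation */
	AQ_MASK_OFFSET  = 256,		/* write mask, used by INSTOP_WRITE only */
	AQ_RES_ENTRY_SZ = 128 + 256,	/* ALIGN(sizeof(struct nix_aq_res_s), 128) + 256 */
};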
int rvu_nix_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
int blkaddr, err;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
if (blkaddr < 0)
return 0;
block = &hw->block[blkaddr];
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
if (err)
return err;
/* Set num of links of each type */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
hw->cgx = (cfg >> 12) & 0xF;
hw->lmac_per_cgx = (cfg >> 8) & 0xF;
hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
hw->lbk_links = 1;
hw->sdp_links = 1;
/* Initialize admin queue */
err = nix_aq_init(rvu, block);
if (err)
return err;
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
/* Configure segmentation offload formats */
nix_setup_lso(rvu, blkaddr);
if (blkaddr == BLKADDR_NIX0) {
hw->nix0 = devm_kzalloc(rvu->dev,
sizeof(struct nix_hw), GFP_KERNEL);
if (!hw->nix0)
return -ENOMEM;
err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
if (err)
return err;
}
return 0;
}
void rvu_nix_freemem(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
int blkaddr, lvl;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
if (blkaddr < 0)
return;
block = &hw->block[blkaddr];
rvu_aq_free(rvu, block->aq);
if (blkaddr == BLKADDR_NIX0) {
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return;
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
kfree(txsch->schq.bmap);
}
}
}
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
struct npa_aq_inst_s *inst)
{
struct admin_queue *aq = block->aq;
struct npa_aq_res_s *result;
int timeout = 1000;
u64 reg, head;
result = (struct npa_aq_res_s *)aq->res->base;
/* Get the current head pointer, where this instruction will be appended */
reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
head = (reg >> 4) & AQ_PTR_MASK;
memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
(void *)inst, aq->inst->entry_sz);
memset(result, 0, sizeof(*result));
/* sync into memory */
wmb();
/* Ring the doorbell and wait for result */
rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
while (result->compcode == NPA_AQ_COMP_NOTDONE) {
cpu_relax();
udelay(1);
timeout--;
if (!timeout)
return -EBUSY;
}
if (result->compcode != NPA_AQ_COMP_GOOD)
/* TODO: Replace this with some error code */
return -EBUSY;
return 0;
}
static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, npalf, rc = 0;
struct npa_aq_inst_s inst;
struct rvu_block *block;
struct admin_queue *aq;
struct rvu_pfvf *pfvf;
void *ctx, *mask;
bool ena;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
return NPA_AF_ERR_AQ_ENQUEUE;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (!pfvf->npalf || blkaddr < 0)
return NPA_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
aq = block->aq;
if (!aq) {
dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
return NPA_AF_ERR_AQ_ENQUEUE;
}
npalf = rvu_get_lf(rvu, block, pcifunc, 0);
if (npalf < 0)
return NPA_AF_ERR_AF_LF_INVALID;
memset(&inst, 0, sizeof(struct npa_aq_inst_s));
inst.cindex = req->aura_id;
inst.lf = npalf;
inst.ctype = req->ctype;
inst.op = req->op;
/* Currently we are not supporting enqueuing multiple instructions,
* so always choose first entry in result memory.
*/
inst.res_addr = (u64)aq->res->iova;
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
ctx = aq->res->base + 128;
/* Mask needs to be written at RES_ADDR + 256 */
mask = aq->res->base + 256;
switch (req->op) {
case NPA_AQ_INSTOP_WRITE:
/* Copy context and write mask */
if (req->ctype == NPA_AQ_CTYPE_AURA) {
memcpy(mask, &req->aura_mask,
sizeof(struct npa_aura_s));
memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
} else {
memcpy(mask, &req->pool_mask,
sizeof(struct npa_pool_s));
memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
}
break;
case NPA_AQ_INSTOP_INIT:
if (req->ctype == NPA_AQ_CTYPE_AURA) {
if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
rc = NPA_AF_ERR_AQ_FULL;
break;
}
/* Set pool's context address */
req->aura.pool_addr = pfvf->pool_ctx->iova +
(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
} else { /* POOL's context */
memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
}
break;
case NPA_AQ_INSTOP_NOP:
case NPA_AQ_INSTOP_READ:
case NPA_AQ_INSTOP_LOCK:
case NPA_AQ_INSTOP_UNLOCK:
break;
default:
rc = NPA_AF_ERR_AQ_FULL;
break;
}
if (rc)
return rc;
spin_lock(&aq->lock);
/* Submit the instruction to AQ */
rc = npa_aq_enqueue_wait(rvu, block, &inst);
if (rc) {
spin_unlock(&aq->lock);
return rc;
}
/* Set aura bitmap if aura hw context is enabled */
if (req->ctype == NPA_AQ_CTYPE_AURA) {
if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
__set_bit(req->aura_id, pfvf->aura_bmap);
if (req->op == NPA_AQ_INSTOP_WRITE) {
ena = (req->aura.ena & req->aura_mask.ena) |
(test_bit(req->aura_id, pfvf->aura_bmap) &
~req->aura_mask.ena);
if (ena)
__set_bit(req->aura_id, pfvf->aura_bmap);
else
__clear_bit(req->aura_id, pfvf->aura_bmap);
}
}
/* Set pool bitmap if pool hw context is enabled */
if (req->ctype == NPA_AQ_CTYPE_POOL) {
if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
__set_bit(req->aura_id, pfvf->pool_bmap);
if (req->op == NPA_AQ_INSTOP_WRITE) {
ena = (req->pool.ena & req->pool_mask.ena) |
(test_bit(req->aura_id, pfvf->pool_bmap) &
~req->pool_mask.ena);
if (ena)
__set_bit(req->aura_id, pfvf->pool_bmap);
else
__clear_bit(req->aura_id, pfvf->pool_bmap);
}
}
spin_unlock(&aq->lock);
if (rsp) {
/* Copy read context into mailbox */
if (req->op == NPA_AQ_INSTOP_READ) {
if (req->ctype == NPA_AQ_CTYPE_AURA)
memcpy(&rsp->aura, ctx,
sizeof(struct npa_aura_s));
else
memcpy(&rsp->pool, ctx,
sizeof(struct npa_pool_s));
}
}
return 0;
}
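One detail of the aura INIT path above worth spelling out: the LF passes aura.pool_addr as a pool index, and the AF rewrites it to the DMA address of that pool's HW context (pool_ctx->iova plus index times the context entry size). A standalone illustration of that translation (not driver code; the base IOVA and the 128-byte pool context size, matching the 16 64-bit words of npa_pool_s, are example values):

#include <assert.h>
#include <stdint.h>

static uint64_t pool_idx_to_iova(uint64_t pool_base_iova, uint32_t entry_sz,
				 uint64_t pool_idx)
{
	/* Same arithmetic as the pool_addr rewrite in rvu_npa_aq_enq_inst() */
	return pool_base_iova + pool_idx * entry_sz;
}

int main(void)
{
	assert(pool_idx_to_iova(0x10000000ULL, 128, 0) == 0x10000000ULL);
	assert(pool_idx_to_iova(0x10000000ULL, 128, 3) == 0x10000180ULL);
	return 0;
}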
static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npa_aq_enq_req aq_req;
unsigned long *bmap;
int id, cnt = 0;
int err = 0, rc;
if (!pfvf->pool_ctx || !pfvf->aura_ctx)
return NPA_AF_ERR_AQ_ENQUEUE;
memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
aq_req.hdr.pcifunc = req->hdr.pcifunc;
if (req->ctype == NPA_AQ_CTYPE_POOL) {
aq_req.pool.ena = 0;
aq_req.pool_mask.ena = 1;
cnt = pfvf->pool_ctx->qsize;
bmap = pfvf->pool_bmap;
} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
aq_req.aura.ena = 0;
aq_req.aura_mask.ena = 1;
cnt = pfvf->aura_ctx->qsize;
bmap = pfvf->aura_bmap;
}
aq_req.ctype = req->ctype;
aq_req.op = NPA_AQ_INSTOP_WRITE;
for (id = 0; id < cnt; id++) {
if (!test_bit(id, bmap))
continue;
aq_req.aura_id = id;
rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
if (rc) {
err = rc;
dev_err(rvu->dev, "Failed to disable %s:%d context\n",
(req->ctype == NPA_AQ_CTYPE_AURA) ?
"Aura" : "Pool", id);
}
}
return err;
}
int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
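From the LF side, the request handled by rvu_mbox_handler_NPA_AQ_ENQ() above would typically be built along the lines of the hedged sketch below. It assumes the struct npa_aq_enq_req layout from this series' mbox.h and only touches fields exercised in this file; the mailbox send itself is outside this diff, so it is left as a comment.

/* Hedged sketch, not part of this patch: init and enable aura 0,
 * backed by pool index 0 (the AF remaps pool_addr to the pool
 * context's IOVA as seen above).
 */
static void example_fill_aura_init_req(struct npa_aq_enq_req *req)
{
	memset(req, 0, sizeof(*req));
	req->aura_id = 0;			/* aura context index */
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_INIT;
	req->aura.ena = 1;
	req->aura.pool_addr = 0;		/* pool index, remapped by AF */
	/* Fill limits/thresholds as needed, then send the message over
	 * the PF->AF mailbox (mailbox plumbing is not part of this diff).
	 */
}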
int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp)
{
return npa_lf_hwctx_disable(rvu, req);
}
static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
kfree(pfvf->aura_bmap);
pfvf->aura_bmap = NULL;
qmem_free(rvu->dev, pfvf->aura_ctx);
pfvf->aura_ctx = NULL;
kfree(pfvf->pool_bmap);
pfvf->pool_bmap = NULL;
qmem_free(rvu->dev, pfvf->pool_ctx);
pfvf->pool_ctx = NULL;
qmem_free(rvu->dev, pfvf->npa_qints_ctx);
pfvf->npa_qints_ctx = NULL;
}
int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp)
{
int npalf, qints, hwctx_size, err, rc = 0;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
struct rvu_pfvf *pfvf;
u64 cfg, ctx_cfg;
int blkaddr;
if (req->aura_sz > NPA_AURA_SZ_MAX ||
req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
return NPA_AF_ERR_PARAM;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (!pfvf->npalf || blkaddr < 0)
return NPA_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
npalf = rvu_get_lf(rvu, block, pcifunc, 0);
if (npalf < 0)
return NPA_AF_ERR_AF_LF_INVALID;
/* Reset this NPA LF */
err = rvu_lf_reset(rvu, block, npalf);
if (err) {
dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
return NPA_AF_ERR_LF_RESET;
}
ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);
/* Alloc memory for aura HW contexts */
hwctx_size = 1UL << (ctx_cfg & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
NPA_AURA_COUNT(req->aura_sz), hwctx_size);
if (err)
goto free_mem;
pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
GFP_KERNEL);
if (!pfvf->aura_bmap)
goto free_mem;
/* Alloc memory for pool HW contexts */
hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
if (err)
goto free_mem;
pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
GFP_KERNEL);
if (!pfvf->pool_bmap)
goto free_mem;
/* Get no of queue interrupts supported */
cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
qints = (cfg >> 28) & 0xFFF;
/* Alloc memory for Qints HW contexts */
hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
if (err)
goto free_mem;
cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
/* Clear way partition mask and set aura offset to '0' */
cfg &= ~(BIT_ULL(34) - 1);
/* Set aura size & enable caching of contexts */
cfg |= (req->aura_sz << 16) | BIT_ULL(34);
rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
/* Configure aura HW context's base */
rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
(u64)pfvf->aura_ctx->iova);
/* Enable caching of qints hw context */
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
(u64)pfvf->npa_qints_ctx->iova);
goto exit;
free_mem:
npa_ctx_free(rvu, pfvf);
rc = -ENOMEM;
exit:
/* set stack page info */
cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
rsp->stack_pg_bytes = cfg & 0xFF;
rsp->qints = (cfg >> 28) & 0xFFF;
return rc;
}
int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
struct rvu_pfvf *pfvf;
int npalf, err;
int blkaddr;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (!pfvf->npalf || blkaddr < 0)
return NPA_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
npalf = rvu_get_lf(rvu, block, pcifunc, 0);
if (npalf < 0)
return NPA_AF_ERR_AF_LF_INVALID;
/* Reset this NPA LF */
err = rvu_lf_reset(rvu, block, npalf);
if (err) {
dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
return NPA_AF_ERR_LF_RESET;
}
npa_ctx_free(rvu, pfvf);
return 0;
}
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
u64 cfg;
int err;
/* Set admin queue endianness */
cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
cfg |= BIT_ULL(1);
rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
cfg &= ~BIT_ULL(1);
rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
cfg &= ~0x03DULL;
rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
/* Result structure can be followed by Aura/Pool context at
* RES + 128bytes and a write mask at RES + 256 bytes, depending on
* operation type. Alloc sufficient result memory for all operations.
*/
err = rvu_aq_alloc(rvu, &block->aq,
Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
if (err)
return err;
rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
rvu_write64(rvu, block->addr,
NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
return 0;
}
int rvu_npa_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
int blkaddr, err;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
if (blkaddr < 0)
return 0;
block = &hw->block[blkaddr];
/* Initialize admin queue */
err = npa_aq_init(rvu, block);
if (err)
return err;
return 0;
}
void rvu_npa_freemem(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
if (blkaddr < 0)
return;
block = &hw->block[blkaddr];
rvu_aq_free(rvu, block->aq);
}
...@@ -71,4 +71,812 @@ enum rvu_pf_int_vec_e {
RVU_PF_INT_VEC_CNT = 0x7,
};
/* NPA admin queue completion enumeration */
enum npa_aq_comp {
NPA_AQ_COMP_NOTDONE = 0x0,
NPA_AQ_COMP_GOOD = 0x1,
NPA_AQ_COMP_SWERR = 0x2,
NPA_AQ_COMP_CTX_POISON = 0x3,
NPA_AQ_COMP_CTX_FAULT = 0x4,
NPA_AQ_COMP_LOCKERR = 0x5,
};
/* NPA admin queue context types */
enum npa_aq_ctype {
NPA_AQ_CTYPE_AURA = 0x0,
NPA_AQ_CTYPE_POOL = 0x1,
};
/* NPA admin queue instruction opcodes */
enum npa_aq_instop {
NPA_AQ_INSTOP_NOP = 0x0,
NPA_AQ_INSTOP_INIT = 0x1,
NPA_AQ_INSTOP_WRITE = 0x2,
NPA_AQ_INSTOP_READ = 0x3,
NPA_AQ_INSTOP_LOCK = 0x4,
NPA_AQ_INSTOP_UNLOCK = 0x5,
};
/* NPA admin queue instruction structure */
struct npa_aq_inst_s {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 doneint : 1; /* W0 */
u64 reserved_44_62 : 19;
u64 cindex : 20;
u64 reserved_17_23 : 7;
u64 lf : 9;
u64 ctype : 4;
u64 op : 4;
#else
u64 op : 4;
u64 ctype : 4;
u64 lf : 9;
u64 reserved_17_23 : 7;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
#endif
u64 res_addr; /* W1 */
};
/* NPA admin queue result structure */
struct npa_aq_res_s {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_17_63 : 47; /* W0 */
u64 doneint : 1;
u64 compcode : 8;
u64 ctype : 4;
u64 op : 4;
#else
u64 op : 4;
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
#endif
u64 reserved_64_127; /* W1 */
};
struct npa_aura_s {
u64 pool_addr; /* W0 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
u64 avg_level : 8;
u64 reserved_118_119 : 2;
u64 shift : 6;
u64 aura_drop : 8;
u64 reserved_98_103 : 6;
u64 bp_ena : 2;
u64 aura_drop_ena : 1;
u64 pool_drop_ena : 1;
u64 reserved_93 : 1;
u64 avg_con : 9;
u64 pool_way_mask : 16;
u64 pool_caching : 1;
u64 reserved_65 : 2;
u64 ena : 1;
#else
u64 ena : 1;
u64 reserved_65 : 2;
u64 pool_caching : 1;
u64 pool_way_mask : 16;
u64 avg_con : 9;
u64 reserved_93 : 1;
u64 pool_drop_ena : 1;
u64 aura_drop_ena : 1;
u64 bp_ena : 2;
u64 reserved_98_103 : 6;
u64 aura_drop : 8;
u64 shift : 6;
u64 reserved_118_119 : 2;
u64 avg_level : 8;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
u64 reserved_189_191 : 3;
u64 nix1_bpid : 9;
u64 reserved_177_179 : 3;
u64 nix0_bpid : 9;
u64 reserved_164_167 : 4;
u64 count : 36;
#else
u64 count : 36;
u64 reserved_164_167 : 4;
u64 nix0_bpid : 9;
u64 reserved_177_179 : 3;
u64 nix1_bpid : 9;
u64 reserved_189_191 : 3;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
u64 reserved_252_255 : 4;
u64 fc_hyst_bits : 4;
u64 fc_stype : 2;
u64 fc_up_crossing : 1;
u64 fc_ena : 1;
u64 reserved_240_243 : 4;
u64 bp : 8;
u64 reserved_228_231 : 4;
u64 limit : 36;
#else
u64 limit : 36;
u64 reserved_228_231 : 4;
u64 bp : 8;
u64 reserved_240_243 : 4;
u64 fc_ena : 1;
u64 fc_up_crossing : 1;
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 reserved_252_255 : 4;
#endif
u64 fc_addr; /* W4 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
u64 reserved_379_383 : 5;
u64 err_qint_idx : 7;
u64 reserved_371 : 1;
u64 thresh_qint_idx : 7;
u64 reserved_363 : 1;
u64 thresh_up : 1;
u64 thresh_int_ena : 1;
u64 thresh_int : 1;
u64 err_int_ena : 8;
u64 err_int : 8;
u64 update_time : 16;
u64 pool_drop : 8;
#else
u64 pool_drop : 8;
u64 update_time : 16;
u64 err_int : 8;
u64 err_int_ena : 8;
u64 thresh_int : 1;
u64 thresh_int_ena : 1;
u64 thresh_up : 1;
u64 reserved_363 : 1;
u64 thresh_qint_idx : 7;
u64 reserved_371 : 1;
u64 err_qint_idx : 7;
u64 reserved_379_383 : 5;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
u64 reserved_420_447 : 28;
u64 thresh : 36;
#else
u64 thresh : 36;
u64 reserved_420_447 : 28;
#endif
u64 reserved_448_511; /* W7 */
};
struct npa_pool_s {
u64 stack_base; /* W0 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
u64 reserved_115_127 : 13;
u64 buf_size : 11;
u64 reserved_100_103 : 4;
u64 buf_offset : 12;
u64 stack_way_mask : 16;
u64 reserved_70_71 : 3;
u64 stack_caching : 1;
u64 reserved_66_67 : 2;
u64 nat_align : 1;
u64 ena : 1;
#else
u64 ena : 1;
u64 nat_align : 1;
u64 reserved_66_67 : 2;
u64 stack_caching : 1;
u64 reserved_70_71 : 3;
u64 stack_way_mask : 16;
u64 buf_offset : 12;
u64 reserved_100_103 : 4;
u64 buf_size : 11;
u64 reserved_115_127 : 13;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
u64 stack_pages : 32;
u64 stack_max_pages : 32;
#else
u64 stack_max_pages : 32;
u64 stack_pages : 32;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
u64 reserved_240_255 : 16;
u64 op_pc : 48;
#else
u64 op_pc : 48;
u64 reserved_240_255 : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
u64 reserved_316_319 : 4;
u64 update_time : 16;
u64 reserved_297_299 : 3;
u64 fc_up_crossing : 1;
u64 fc_hyst_bits : 4;
u64 fc_stype : 2;
u64 fc_ena : 1;
u64 avg_con : 9;
u64 avg_level : 8;
u64 reserved_270_271 : 2;
u64 shift : 6;
u64 reserved_260_263 : 4;
u64 stack_offset : 4;
#else
u64 stack_offset : 4;
u64 reserved_260_263 : 4;
u64 shift : 6;
u64 reserved_270_271 : 2;
u64 avg_level : 8;
u64 avg_con : 9;
u64 fc_ena : 1;
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 fc_up_crossing : 1;
u64 reserved_297_299 : 3;
u64 update_time : 16;
u64 reserved_316_319 : 4;
#endif
u64 fc_addr; /* W5 */
u64 ptr_start; /* W6 */
u64 ptr_end; /* W7 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
u64 reserved_571_575 : 5;
u64 err_qint_idx : 7;
u64 reserved_563 : 1;
u64 thresh_qint_idx : 7;
u64 reserved_555 : 1;
u64 thresh_up : 1;
u64 thresh_int_ena : 1;
u64 thresh_int : 1;
u64 err_int_ena : 8;
u64 err_int : 8;
u64 reserved_512_535 : 24;
#else
u64 reserved_512_535 : 24;
u64 err_int : 8;
u64 err_int_ena : 8;
u64 thresh_int : 1;
u64 thresh_int_ena : 1;
u64 thresh_up : 1;
u64 reserved_555 : 1;
u64 thresh_qint_idx : 7;
u64 reserved_563 : 1;
u64 err_qint_idx : 7;
u64 reserved_571_575 : 5;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
u64 reserved_612_639 : 28;
u64 thresh : 36;
#else
u64 thresh : 36;
u64 reserved_612_639 : 28;
#endif
u64 reserved_640_703; /* W10 */
u64 reserved_704_767; /* W11 */
u64 reserved_768_831; /* W12 */
u64 reserved_832_895; /* W13 */
u64 reserved_896_959; /* W14 */
u64 reserved_960_1023; /* W15 */
};
/* NIX admin queue completion status */
enum nix_aq_comp {
NIX_AQ_COMP_NOTDONE = 0x0,
NIX_AQ_COMP_GOOD = 0x1,
NIX_AQ_COMP_SWERR = 0x2,
NIX_AQ_COMP_CTX_POISON = 0x3,
NIX_AQ_COMP_CTX_FAULT = 0x4,
NIX_AQ_COMP_LOCKERR = 0x5,
NIX_AQ_COMP_SQB_ALLOC_FAIL = 0x6,
};
/* NIX admin queue context types */
enum nix_aq_ctype {
NIX_AQ_CTYPE_RQ = 0x0,
NIX_AQ_CTYPE_SQ = 0x1,
NIX_AQ_CTYPE_CQ = 0x2,
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
};
/* NIX admin queue instruction opcodes */
enum nix_aq_instop {
NIX_AQ_INSTOP_NOP = 0x0,
NIX_AQ_INSTOP_INIT = 0x1,
NIX_AQ_INSTOP_WRITE = 0x2,
NIX_AQ_INSTOP_READ = 0x3,
NIX_AQ_INSTOP_LOCK = 0x4,
NIX_AQ_INSTOP_UNLOCK = 0x5,
};
/* NIX admin queue instruction structure */
struct nix_aq_inst_s {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 doneint : 1; /* W0 */
u64 reserved_44_62 : 19;
u64 cindex : 20;
u64 reserved_15_23 : 9;
u64 lf : 7;
u64 ctype : 4;
u64 op : 4;
#else
u64 op : 4;
u64 ctype : 4;
u64 lf : 7;
u64 reserved_15_23 : 9;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
#endif
u64 res_addr; /* W1 */
};
/* NIX admin queue result structure */
struct nix_aq_res_s {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_17_63 : 47; /* W0 */
u64 doneint : 1;
u64 compcode : 8;
u64 ctype : 4;
u64 op : 4;
#else
u64 op : 4;
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
#endif
u64 reserved_64_127; /* W1 */
};
/* NIX Completion queue context structure */
struct nix_cq_ctx_s {
u64 base;
#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
u64 wrptr : 20;
u64 avg_con : 9;
u64 cint_idx : 7;
u64 cq_err : 1;
u64 qint_idx : 7;
u64 rsvd_81_83 : 3;
u64 bpid : 9;
u64 rsvd_69_71 : 3;
u64 bp_ena : 1;
u64 rsvd_64_67 : 4;
#else
u64 rsvd_64_67 : 4;
u64 bp_ena : 1;
u64 rsvd_69_71 : 3;
u64 bpid : 9;
u64 rsvd_81_83 : 3;
u64 qint_idx : 7;
u64 cq_err : 1;
u64 cint_idx : 7;
u64 avg_con : 9;
u64 wrptr : 20;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
u64 update_time : 16;
u64 avg_level : 8;
u64 head : 20;
u64 tail : 20;
#else
u64 tail : 20;
u64 head : 20;
u64 avg_level : 8;
u64 update_time : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
u64 cq_err_int_ena : 8;
u64 cq_err_int : 8;
u64 qsize : 4;
u64 rsvd_233_235 : 3;
u64 caching : 1;
u64 substream : 20;
u64 rsvd_210_211 : 2;
u64 ena : 1;
u64 drop_ena : 1;
u64 drop : 8;
u64 dp : 8;
#else
u64 dp : 8;
u64 drop : 8;
u64 drop_ena : 1;
u64 ena : 1;
u64 rsvd_210_211 : 2;
u64 substream : 20;
u64 caching : 1;
u64 rsvd_233_235 : 3;
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
#endif
};
/* NIX Receive queue context structure */
struct nix_rq_ctx_s {
#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
u64 wqe_aura : 20;
u64 substream : 20;
u64 cq : 20;
u64 ena_wqwd : 1;
u64 ipsech_ena : 1;
u64 sso_ena : 1;
u64 ena : 1;
#else
u64 ena : 1;
u64 sso_ena : 1;
u64 ipsech_ena : 1;
u64 ena_wqwd : 1;
u64 cq : 20;
u64 substream : 20;
u64 wqe_aura : 20;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
u64 rsvd_127_122 : 6;
u64 lpb_drop_ena : 1;
u64 spb_drop_ena : 1;
u64 xqe_drop_ena : 1;
u64 wqe_caching : 1;
u64 pb_caching : 2;
u64 sso_tt : 2;
u64 sso_grp : 10;
u64 lpb_aura : 20;
u64 spb_aura : 20;
#else
u64 spb_aura : 20;
u64 lpb_aura : 20;
u64 sso_grp : 10;
u64 sso_tt : 2;
u64 pb_caching : 2;
u64 wqe_caching : 1;
u64 xqe_drop_ena : 1;
u64 spb_drop_ena : 1;
u64 lpb_drop_ena : 1;
u64 rsvd_127_122 : 6;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
u64 xqe_hdr_split : 1;
u64 xqe_imm_copy : 1;
u64 rsvd_189_184 : 6;
u64 xqe_imm_size : 6;
u64 later_skip : 6;
u64 rsvd_171 : 1;
u64 first_skip : 7;
u64 lpb_sizem1 : 12;
u64 spb_ena : 1;
u64 rsvd_150_148 : 3;
u64 wqe_skip : 2;
u64 spb_sizem1 : 6;
u64 rsvd_139_128 : 12;
#else
u64 rsvd_139_128 : 12;
u64 spb_sizem1 : 6;
u64 wqe_skip : 2;
u64 rsvd_150_148 : 3;
u64 spb_ena : 1;
u64 lpb_sizem1 : 12;
u64 first_skip : 7;
u64 rsvd_171 : 1;
u64 later_skip : 6;
u64 xqe_imm_size : 6;
u64 rsvd_189_184 : 6;
u64 xqe_imm_copy : 1;
u64 xqe_hdr_split : 1;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
u64 spb_pool_pass : 8;
u64 spb_pool_drop : 8;
u64 spb_aura_pass : 8;
u64 spb_aura_drop : 8;
u64 wqe_pool_pass : 8;
u64 wqe_pool_drop : 8;
u64 xqe_pass : 8;
u64 xqe_drop : 8;
#else
u64 xqe_drop : 8;
u64 xqe_pass : 8;
u64 wqe_pool_drop : 8;
u64 wqe_pool_pass : 8;
u64 spb_aura_drop : 8;
u64 spb_aura_pass : 8;
u64 spb_pool_drop : 8;
u64 spb_pool_pass : 8;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
u64 rsvd_319_315 : 5;
u64 qint_idx : 7;
u64 rq_int_ena : 8;
u64 rq_int : 8;
u64 rsvd_291_288 : 4;
u64 lpb_pool_pass : 8;
u64 lpb_pool_drop : 8;
u64 lpb_aura_pass : 8;
u64 lpb_aura_drop : 8;
#else
u64 lpb_aura_drop : 8;
u64 lpb_aura_pass : 8;
u64 lpb_pool_drop : 8;
u64 lpb_pool_pass : 8;
u64 rsvd_291_288 : 4;
u64 rq_int : 8;
u64 rq_int_ena : 8;
u64 qint_idx : 7;
u64 rsvd_319_315 : 5;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
u64 rsvd_383_366 : 18;
u64 flow_tagw : 6;
u64 bad_utag : 8;
u64 good_utag : 8;
u64 ltag : 24;
#else
u64 ltag : 24;
u64 good_utag : 8;
u64 bad_utag : 8;
u64 flow_tagw : 6;
u64 rsvd_383_366 : 18;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
u64 rsvd_447_432 : 16;
u64 octs : 48;
#else
u64 octs : 48;
u64 rsvd_447_432 : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */
u64 rsvd_511_496 : 16;
u64 pkts : 48;
#else
u64 pkts : 48;
u64 rsvd_511_496 : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
u64 rsvd_575_560 : 16;
u64 drop_octs : 48;
#else
u64 drop_octs : 48;
u64 rsvd_575_560 : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
u64 rsvd_639_624 : 16;
u64 drop_pkts : 48;
#else
u64 drop_pkts : 48;
u64 rsvd_639_624 : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
u64 rsvd_703_688 : 16;
u64 re_pkts : 48;
#else
u64 re_pkts : 48;
u64 rsvd_703_688 : 16;
#endif
u64 rsvd_767_704; /* W11 */
u64 rsvd_831_768; /* W12 */
u64 rsvd_895_832; /* W13 */
u64 rsvd_959_896; /* W14 */
u64 rsvd_1023_960; /* W15 */
};
/* NIX sqe sizes */
enum nix_maxsqesz {
NIX_MAXSQESZ_W16 = 0x0,
NIX_MAXSQESZ_W8 = 0x1,
};
/* NIX SQB caching type */
enum nix_stype {
NIX_STYPE_STF = 0x0,
NIX_STYPE_STT = 0x1,
NIX_STYPE_STP = 0x2,
};
/* NIX Send queue context structure */
struct nix_sq_ctx_s {
#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
u64 sqe_way_mask : 16;
u64 cq : 20;
u64 sdp_mcast : 1;
u64 substream : 20;
u64 qint_idx : 6;
u64 ena : 1;
#else
u64 ena : 1;
u64 qint_idx : 6;
u64 substream : 20;
u64 sdp_mcast : 1;
u64 cq : 20;
u64 sqe_way_mask : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
u64 sqb_count : 16;
u64 default_chan : 12;
u64 smq_rr_quantum : 24;
u64 sso_ena : 1;
u64 xoff : 1;
u64 cq_ena : 1;
u64 smq : 9;
#else
u64 smq : 9;
u64 cq_ena : 1;
u64 xoff : 1;
u64 sso_ena : 1;
u64 smq_rr_quantum : 24;
u64 default_chan : 12;
u64 sqb_count : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
u64 rsvd_191 : 1;
u64 sqe_stype : 2;
u64 sq_int_ena : 8;
u64 sq_int : 8;
u64 sqb_aura : 20;
u64 smq_rr_count : 25;
#else
u64 smq_rr_count : 25;
u64 sqb_aura : 20;
u64 sq_int : 8;
u64 sq_int_ena : 8;
u64 sqe_stype : 2;
u64 rsvd_191 : 1;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
u64 rsvd_255_253 : 3;
u64 smq_next_sq_vld : 1;
u64 smq_pend : 1;
u64 smenq_next_sqb_vld : 1;
u64 head_offset : 6;
u64 smenq_offset : 6;
u64 tail_offset : 6;
u64 smq_lso_segnum : 8;
u64 smq_next_sq : 20;
u64 mnq_dis : 1;
u64 lmt_dis : 1;
u64 cq_limit : 8;
u64 max_sqe_size : 2;
#else
u64 max_sqe_size : 2;
u64 cq_limit : 8;
u64 lmt_dis : 1;
u64 mnq_dis : 1;
u64 smq_next_sq : 20;
u64 smq_lso_segnum : 8;
u64 tail_offset : 6;
u64 smenq_offset : 6;
u64 head_offset : 6;
u64 smenq_next_sqb_vld : 1;
u64 smq_pend : 1;
u64 smq_next_sq_vld : 1;
u64 rsvd_255_253 : 3;
#endif
u64 next_sqb : 64;/* W4 */
u64 tail_sqb : 64;/* W5 */
u64 smenq_sqb : 64;/* W6 */
u64 smenq_next_sqb : 64;/* W7 */
u64 head_sqb : 64;/* W8 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
u64 rsvd_639_630 : 10;
u64 vfi_lso_vld : 1;
u64 vfi_lso_vlan1_ins_ena : 1;
u64 vfi_lso_vlan0_ins_ena : 1;
u64 vfi_lso_mps : 14;
u64 vfi_lso_sb : 8;
u64 vfi_lso_sizem1 : 3;
u64 vfi_lso_total : 18;
u64 rsvd_583_576 : 8;
#else
u64 rsvd_583_576 : 8;
u64 vfi_lso_total : 18;
u64 vfi_lso_sizem1 : 3;
u64 vfi_lso_sb : 8;
u64 vfi_lso_mps : 14;
u64 vfi_lso_vlan0_ins_ena : 1;
u64 vfi_lso_vlan1_ins_ena : 1;
u64 vfi_lso_vld : 1;
u64 rsvd_639_630 : 10;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
u64 rsvd_703_658 : 46;
u64 scm_lso_rem : 18;
#else
u64 scm_lso_rem : 18;
u64 rsvd_703_658 : 46;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */
u64 rsvd_767_752 : 16;
u64 octs : 48;
#else
u64 octs : 48;
u64 rsvd_767_752 : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */
u64 rsvd_831_816 : 16;
u64 pkts : 48;
#else
u64 pkts : 48;
u64 rsvd_831_816 : 16;
#endif
u64 rsvd_895_832 : 64;/* W13 */
#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */
u64 rsvd_959_944 : 16;
u64 dropped_octs : 48;
#else
u64 dropped_octs : 48;
u64 rsvd_959_944 : 16;
#endif
#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */
u64 rsvd_1023_1008 : 16;
u64 dropped_pkts : 48;
#else
u64 dropped_pkts : 48;
u64 rsvd_1023_1008 : 16;
#endif
};
/* NIX Receive side scaling entry structure*/
struct nix_rsse_s {
#if defined(__BIG_ENDIAN_BITFIELD)
uint32_t reserved_20_31 : 12;
uint32_t rq : 20;
#else
uint32_t rq : 20;
uint32_t reserved_20_31 : 12;
#endif
};
/* NIX receive multicast/mirror entry structure */
struct nix_rx_mce_s {
#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
uint64_t next : 16;
uint64_t pf_func : 16;
uint64_t rsvd_31_24 : 8;
uint64_t index : 20;
uint64_t eol : 1;
uint64_t rsvd_2 : 1;
uint64_t op : 2;
#else
uint64_t op : 2;
uint64_t rsvd_2 : 1;
uint64_t eol : 1;
uint64_t index : 20;
uint64_t rsvd_31_24 : 8;
uint64_t pf_func : 16;
uint64_t next : 16;
#endif
};
enum nix_lsoalg {
NIX_LSOALG_NOP,
NIX_LSOALG_ADD_SEGNUM,
NIX_LSOALG_ADD_PAYLEN,
NIX_LSOALG_ADD_OFFSET,
NIX_LSOALG_TCP_FLAGS,
};
enum nix_txlayer {
NIX_TXLAYER_OL3,
NIX_TXLAYER_OL4,
NIX_TXLAYER_IL3,
NIX_TXLAYER_IL4,
};
struct nix_lso_format {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 rsvd_19_63 : 45;
u64 alg : 3;
u64 rsvd_14_15 : 2;
u64 sizem1 : 2;
u64 rsvd_10_11 : 2;
u64 layer : 2;
u64 offset : 8;
#else
u64 offset : 8;
u64 layer : 2;
u64 rsvd_10_11 : 2;
u64 sizem1 : 2;
u64 rsvd_14_15 : 2;
u64 alg : 3;
u64 rsvd_19_63 : 45;
#endif
};
#endif /* RVU_STRUCT_H */