Commit e943d94e authored by David S. Miller

Merge branch 'octeontx2-af-NPA-and-NIX-blocks-initialization'

Sunil Goutham says:

====================
octeontx2-af: NPA and NIX blocks initialization

This patchset is a continuation of the earlier submitted patch series
that adds a new driver for Marvell's OcteonTX2 SOC's
resource virtualization unit (RVU) admin function:

octeontx2-af: Add RVU Admin Function driver
https://www.spinics.net/lists/netdev/msg528272.html

This patch series adds logic for the following.
- Modified register polling loop to use time_before(jiffies, timeout),
  as suggested by Arnd Bergmann.
- Support to forward interface link status notifications sent by
  firmware to registered PFs mapped to a CGX::LMAC.
- Support to set CGX LMAC in loopback mode, retrieve stats,
  configure DMAC filters at CGX level etc.
- Network pool allocator (NPA) functional block initialization,
  admin queue support, NPALF aura/pool contexts memory allocation, init
  and deinit.
- Network interface controller (NIX) functional block basic init,
  admin queue support, NIXLF RQ/CQ/SQ HW contexts memory allocation,
  init and deinit.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 53e50a6e 557dd485
@@ -7,4 +7,4 @@ obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
-octeontx2_af-y := cgx.o rvu.o rvu_cgx.o
+octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o
@@ -29,6 +29,7 @@
* @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
* @cmd_lock: Lock to serialize the command interface
* @resp: command response
* @link_info: link related information
* @event_cb: callback for linkchange events
* @cmd_pend: flag set before new command is started
* flag cleared after command response is received
@@ -40,6 +41,7 @@ struct lmac {
wait_queue_head_t wq_cmd_cmplt;
struct mutex cmd_lock;
u64 resp;
struct cgx_link_user_info link_info;
struct cgx_event_cb event_cb;
bool cmd_pend;
struct cgx *cgx;
@@ -58,6 +60,12 @@ struct cgx {
static LIST_HEAD(cgx_list);
/* Convert firmware speed encoding to user format (Mbps) */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
@@ -119,6 +127,177 @@ void *cgx_get_pdata(int cgx_id)
}
EXPORT_SYMBOL(cgx_get_pdata);
/* Ensure the required lock for the event queue (where asynchronous events
 * are posted) is acquired before calling this API. Otherwise an
 * asynchronous event (with the latest link status) can reach the
 * destination before this function returns and make the link status
 * appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo)
{
struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
if (!lmac)
return -ENODEV;
*linfo = lmac->link_info;
return 0;
}
EXPORT_SYMBOL(cgx_get_link_info);
static u64 mac2u64(u8 *mac_addr)
{
u64 mac = 0;
int index;
for (index = ETH_ALEN - 1; index >= 0; index--)
mac |= ((u64)*mac_addr++) << (8 * index);
return mac;
}
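/* For example, mac2u64() turns 00:11:22:33:44:55 into 0x001122334455, the
 * byte order expected by the low 48 bits of the DMAC CAM registers written
 * in cgx_lmac_addr_set() below.
 */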
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
u64 cfg;
/* Pack the 6-byte MAC address into the low 48 bits */
cfg = mac2u64(mac_addr);
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
EXPORT_SYMBOL(cgx_lmac_addr_set);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
u64 cfg;
cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
EXPORT_SYMBOL(cgx_lmac_addr_get);
static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
u64 cfg;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}
/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u8 lmac_type;
u64 cfg;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
lmac_type = cgx_get_lmac_type(cgx, lmac_id);
if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
if (enable)
cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
else
cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
} else {
cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
if (enable)
cfg |= CGXX_SPUX_CONTROL1_LBK;
else
cfg &= ~CGXX_SPUX_CONTROL1_LBK;
cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
}
return 0;
}
EXPORT_SYMBOL(cgx_lmac_internal_loopback);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
u64 cfg = 0;
if (!cgx)
return;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
cfg |= CGX_DMAC_BCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
cfg = cgx_read(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
cgx_write(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
cfg = cgx_read(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
cgx_write(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
}
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
EXPORT_SYMBOL(cgx_get_rx_stats);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
struct cgx *cgx = cgxd;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
return 0;
}
EXPORT_SYMBOL(cgx_get_tx_stats);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
if (enable)
cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
else
cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
@@ -191,36 +370,79 @@ static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
return err;
}
static inline void cgx_link_usertable_init(void)
{
cgx_speed_mbps[CGX_LINK_NONE] = 0;
cgx_speed_mbps[CGX_LINK_10M] = 10;
cgx_speed_mbps[CGX_LINK_100M] = 100;
cgx_speed_mbps[CGX_LINK_1G] = 1000;
cgx_speed_mbps[CGX_LINK_2HG] = 2500;
cgx_speed_mbps[CGX_LINK_5G] = 5000;
cgx_speed_mbps[CGX_LINK_10G] = 10000;
cgx_speed_mbps[CGX_LINK_20G] = 20000;
cgx_speed_mbps[CGX_LINK_25G] = 25000;
cgx_speed_mbps[CGX_LINK_40G] = 40000;
cgx_speed_mbps[CGX_LINK_50G] = 50000;
cgx_speed_mbps[CGX_LINK_100G] = 100000;
cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}
static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
{
char *lmac_string;
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}
/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
struct lmac *lmac)
{
struct cgx_link_user_info *linfo;
struct cgx *cgx = lmac->cgx;
struct cgx_link_event event;
struct device *dev;
int err_type;
dev = &cgx->pdev->dev;
-event.lstat.link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
-event.lstat.full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
-event.lstat.speed = FIELD_GET(RESP_LINKSTAT_SPEED, lstat);
-event.lstat.err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
+err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
event.cgx_id = cgx->cgx_id;
event.lmac_id = lmac->lmac_id;
/* update the local copy of link status */
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
if (!lmac->event_cb.notify_link_chg) {
dev_dbg(dev, "cgx port %d:%d Link change handler null",
cgx->cgx_id, lmac->lmac_id);
-if (event.lstat.err_type != CGX_ERR_NONE) {
+if (err_type != CGX_ERR_NONE) {
dev_err(dev, "cgx port %d:%d Link error %d\n",
-cgx->cgx_id, lmac->lmac_id,
-event.lstat.err_type);
+cgx->cgx_id, lmac->lmac_id, err_type);
}
dev_info(dev, "cgx port %d:%d Link status %s, speed %x\n",
dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
cgx->cgx_id, lmac->lmac_id,
event.lstat.link_up ? "UP" : "DOWN",
event.lstat.speed);
linfo->link_up ? "UP" : "DOWN", linfo->speed);
return;
}
@@ -448,6 +670,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
list_add(&cgx->cgx_list, &cgx_list);
cgx->cgx_id = cgx_get_cgx_cnt() - 1;
cgx_link_usertable_init();
err = cgx_lmac_init(cgx);
if (err)
goto err_release_lmac;
@@ -11,6 +11,7 @@
#ifndef CGX_H
#define CGX_H
#include "mbox.h"
#include "cgx_fw_if.h"
/* PCI device IDs */
@@ -24,14 +25,35 @@
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
/* Registers */
#define CGXX_CMRX_CFG 0x00
#define CMR_EN BIT_ULL(55)
#define DATA_PKT_TX_EN BIT_ULL(53)
#define DATA_PKT_RX_EN BIT_ULL(54)
#define CGX_LMAC_TYPE_SHIFT 40
#define CGX_LMAC_TYPE_MASK 0xF
#define CGXX_CMRX_INT 0x040
#define FW_CGX_INT BIT_ULL(1)
#define CGXX_CMRX_INT_ENA_W1S 0x058
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LMACS 0x128
#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 0x200
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
#define CGXX_SPUX_CONTROL1 0x10000
#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
@@ -40,8 +62,22 @@
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
enum LMAC_TYPE {
LMAC_MODE_SGMII = 0,
LMAC_MODE_XAUI = 1,
LMAC_MODE_RXAUI = 2,
LMAC_MODE_10G_R = 3,
LMAC_MODE_40G_R = 4,
LMAC_MODE_QSGMII = 6,
LMAC_MODE_25G_R = 7,
LMAC_MODE_50G_R = 8,
LMAC_MODE_100G_R = 9,
LMAC_MODE_USXGMII = 10,
LMAC_MODE_MAX,
};
struct cgx_link_event {
-struct cgx_lnk_sts lstat;
+struct cgx_link_user_info link_uinfo;
u8 cgx_id;
u8 lmac_id;
};
@@ -62,4 +98,13 @@ int cgx_get_cgx_cnt(void);
int cgx_get_lmac_cnt(void *cgxd);
void *cgx_get_pdata(int cgx_id);
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
#endif /* CGX_H */
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef COMMON_H
#define COMMON_H
#include "rvu_struct.h"
#define OTX2_ALIGN 128 /* Align to cacheline */
#define Q_SIZE_16 0ULL /* 16 entries */
#define Q_SIZE_64 1ULL /* 64 entries */
#define Q_SIZE_256 2ULL
#define Q_SIZE_1K 3ULL
#define Q_SIZE_4K 4ULL
#define Q_SIZE_16K 5ULL
#define Q_SIZE_64K 6ULL
#define Q_SIZE_256K 7ULL
#define Q_SIZE_1M 8ULL /* Million entries */
#define Q_SIZE_MIN Q_SIZE_16
#define Q_SIZE_MAX Q_SIZE_1M
#define Q_COUNT(x)		(16ULL << (2 * (x)))
#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2)
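/* Worked example: Q_SIZE_256 encodes as 2, so Q_COUNT(Q_SIZE_256) is
 * 16 << 4 = 256 entries; conversely Q_SIZE(256, 4) = (8 - 4) / 2 = 2,
 * where n = 4 because the minimum queue holds 16 = 1 << 4 entries.
 */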
/* Admin queue info */
/* Since we intend to add only one instruction at a time,
* keep queue size to its minimum.
*/
#define AQ_SIZE Q_SIZE_16
/* HW head & tail pointer mask */
#define AQ_PTR_MASK 0xFFFFF
struct qmem {
void *base;
dma_addr_t iova;
int alloc_sz;
u8 entry_sz;
u8 align;
u32 qsize;
};
static inline int qmem_alloc(struct device *dev, struct qmem **q,
int qsize, int entry_sz)
{
struct qmem *qmem;
u64 aligned_addr;
if (!qsize)
return -EINVAL;
*q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
if (!*q)
return -ENOMEM;
qmem = *q;
qmem->entry_sz = entry_sz;
qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
&qmem->iova, GFP_KERNEL);
if (!qmem->base)
return -ENOMEM;
qmem->qsize = qsize;
aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
qmem->align = (aligned_addr - qmem->iova);
qmem->base += qmem->align;
qmem->iova += qmem->align;
return 0;
}
static inline void qmem_free(struct device *dev, struct qmem *qmem)
{
if (!qmem)
return;
if (qmem->base)
dma_free_coherent(dev, qmem->alloc_sz,
qmem->base - qmem->align,
qmem->iova - qmem->align);
devm_kfree(dev, qmem);
}
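/* A minimal usage sketch (the 16-entry/128-byte sizes are illustrative);
 * on return q->base and q->iova are already adjusted to the OTX2_ALIGN
 * boundary:
 *
 *	struct qmem *q;
 *
 *	if (qmem_alloc(dev, &q, 16, 128))
 *		return -ENOMEM;
 *	qmem_free(dev, q);
 */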
struct admin_queue {
struct qmem *inst;
struct qmem *res;
spinlock_t lock; /* Serialize inst enqueue from PFs */
};
/* NPA aura count */
enum npa_aura_sz {
NPA_AURA_SZ_0,
NPA_AURA_SZ_128,
NPA_AURA_SZ_256,
NPA_AURA_SZ_512,
NPA_AURA_SZ_1K,
NPA_AURA_SZ_2K,
NPA_AURA_SZ_4K,
NPA_AURA_SZ_8K,
NPA_AURA_SZ_16K,
NPA_AURA_SZ_32K,
NPA_AURA_SZ_64K,
NPA_AURA_SZ_128K,
NPA_AURA_SZ_256K,
NPA_AURA_SZ_512K,
NPA_AURA_SZ_1M,
NPA_AURA_SZ_MAX,
};
#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6))
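/* Example: NPA_AURA_COUNT(NPA_AURA_SZ_128) = 1 << (1 + 6) = 128 auras,
 * and NPA_AURA_COUNT(NPA_AURA_SZ_1M) = 1 << 20.
 */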
/* NPA AQ result structure for init/read/write of aura HW contexts */
struct npa_aq_aura_res {
struct npa_aq_res_s res;
struct npa_aura_s aura_ctx;
struct npa_aura_s ctx_mask;
};
/* NPA AQ result structure for init/read/write of pool HW contexts */
struct npa_aq_pool_res {
struct npa_aq_res_s res;
struct npa_pool_s pool_ctx;
struct npa_pool_s ctx_mask;
};
/* NIX Transmit schedulers */
enum nix_scheduler {
NIX_TXSCH_LVL_SMQ = 0x0,
NIX_TXSCH_LVL_MDQ = 0x0,
NIX_TXSCH_LVL_TL4 = 0x1,
NIX_TXSCH_LVL_TL3 = 0x2,
NIX_TXSCH_LVL_TL2 = 0x3,
NIX_TXSCH_LVL_TL1 = 0x4,
NIX_TXSCH_LVL_CNT = 0x5,
};
/* NIX LSO format indices.
* As of now TSO is the only user, so indices are statically assigned.
*/
#define NIX_LSO_FORMAT_IDX_TSOV4 0
#define NIX_LSO_FORMAT_IDX_TSOV6 1
/* RSS info */
#define MAX_RSS_GROUPS 8
/* Group 0 has to be used in default pkt forwarding MCAM entries
* reserved for NIXLFs. Groups 1-7 can be used for RSS with ntuple
* filters.
*/
#define DEFAULT_RSS_CONTEXT_GROUP 0
#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << max addr bits */
#endif /* COMMON_H */
@@ -124,21 +124,50 @@ M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
enum {
#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
#undef M
};
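/* For example, M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) above expands
 * here to "MBOX_MSG_CGX_STATS = 0x202,", keeping message IDs and their
 * request/response formats in a single table.
 */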
/* Mailbox message formats */
#define RVU_DEFAULT_PF_FUNC 0xFFFF
/* Generic request msg used for those mbox messages which
* don't send any data in the request.
*/
@@ -208,4 +237,181 @@ struct msix_offset_rsp {
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
};
/* CGX mbox message formats */
struct cgx_stats_rsp {
struct mbox_msghdr hdr;
#define CGX_RX_STATS_COUNT 13
#define CGX_TX_STATS_COUNT 18
u64 rx_stats[CGX_RX_STATS_COUNT];
u64 tx_stats[CGX_TX_STATS_COUNT];
};
/* Structure for requesting the operation for
* setting/getting mac address in the CGX interface
*/
struct cgx_mac_addr_set_or_get {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
};
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
uint64_t lmac_type_id:4;
uint64_t speed:20; /* speed in Mbps */
#define LMACTYPE_STR_LEN 16
char lmac_type[LMACTYPE_STR_LEN];
};
struct cgx_link_info_msg {
struct mbox_msghdr hdr;
struct cgx_link_user_info link_info;
};
/* NPA mbox message formats */
/* NPA mailbox error codes
* Range 301 - 400.
*/
enum npa_af_status {
NPA_AF_ERR_PARAM = -301,
NPA_AF_ERR_AQ_FULL = -302,
NPA_AF_ERR_AQ_ENQUEUE = -303,
NPA_AF_ERR_AF_LF_INVALID = -304,
NPA_AF_ERR_AF_LF_ALLOC = -305,
NPA_AF_ERR_LF_RESET = -306,
};
/* For NPA LF context alloc and init */
struct npa_lf_alloc_req {
struct mbox_msghdr hdr;
int node;
int aura_sz; /* No of auras, encoded as enum npa_aura_sz */
u32 nr_pools; /* No of pools */
};
struct npa_lf_alloc_rsp {
struct mbox_msghdr hdr;
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
u16 qints; /* NPA_AF_CONST::QINTS */
};
/* NPA AQ enqueue msg */
struct npa_aq_enq_req {
struct mbox_msghdr hdr;
u32 aura_id;
u8 ctype;
u8 op;
union {
/* Valid when op == WRITE/INIT and ctype == AURA.
* LF fills the pool_id in aura.pool_addr. AF will translate
* the pool_id to pool context pointer.
*/
struct npa_aura_s aura;
/* Valid when op == WRITE/INIT and ctype == POOL */
struct npa_pool_s pool;
};
/* Mask data when op == WRITE (1=write, 0=don't write) */
union {
/* Valid when op == WRITE and ctype == AURA */
struct npa_aura_s aura_mask;
/* Valid when op == WRITE and ctype == POOL */
struct npa_pool_s pool_mask;
};
};
struct npa_aq_enq_rsp {
struct mbox_msghdr hdr;
union {
/* Valid when op == READ and ctype == AURA */
struct npa_aura_s aura;
/* Valid when op == READ and ctype == POOL */
struct npa_pool_s pool;
};
};
/* Disable all contexts of type 'ctype' */
struct hwctx_disable_req {
struct mbox_msghdr hdr;
u8 ctype;
};
/* NIX mailbox error codes
* Range 401 - 500.
*/
enum nix_af_status {
NIX_AF_ERR_PARAM = -401,
NIX_AF_ERR_AQ_FULL = -402,
NIX_AF_ERR_AQ_ENQUEUE = -403,
NIX_AF_ERR_AF_LF_INVALID = -404,
NIX_AF_ERR_AF_LF_ALLOC = -405,
NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
NIX_AF_ERR_TLX_INVALID = -407,
NIX_AF_ERR_RSS_SIZE_INVALID = -408,
NIX_AF_ERR_RSS_GRPS_INVALID = -409,
NIX_AF_ERR_FRS_INVALID = -410,
NIX_AF_ERR_RX_LINK_INVALID = -411,
NIX_AF_INVAL_TXSCHQ_CFG = -412,
NIX_AF_SMQ_FLUSH_FAILED = -413,
NIX_AF_ERR_LF_RESET = -414,
};
/* For NIX LF context alloc and init */
struct nix_lf_alloc_req {
struct mbox_msghdr hdr;
int node;
u32 rq_cnt; /* No of receive queues */
u32 sq_cnt; /* No of send queues */
u32 cq_cnt; /* No of completion queues */
u8 xqe_sz;
u16 rss_sz;
u8 rss_grps;
u16 npa_func;
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
};
struct nix_lf_alloc_rsp {
struct mbox_msghdr hdr;
u16 sqb_size;
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
u8 mac_addr[ETH_ALEN];
};
/* NIX AQ enqueue msg */
struct nix_aq_enq_req {
struct mbox_msghdr hdr;
u32 qidx;
u8 ctype;
u8 op;
union {
struct nix_rq_ctx_s rq;
struct nix_sq_ctx_s sq;
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
};
union {
struct nix_rq_ctx_s rq_mask;
struct nix_sq_ctx_s sq_mask;
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
};
};
struct nix_aq_enq_rsp {
struct mbox_msghdr hdr;
union {
struct nix_rq_ctx_s rq;
struct nix_sq_ctx_s sq;
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
};
};
#endif /* MBOX_H */
@@ -47,18 +47,18 @@ MODULE_DEVICE_TABLE(pci, rvu_id_table);
*/
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
+unsigned long timeout = jiffies + usecs_to_jiffies(100);
void __iomem *reg;
-int timeout = 100;
u64 reg_val;
reg = rvu->afreg_base + ((block << 28) | offset);
-while (timeout) {
+while (time_before(jiffies, timeout)) {
reg_val = readq(reg);
if (zero && !(reg_val & mask))
return 0;
if (!zero && (reg_val & mask))
return 0;
-usleep_range(1, 2);
+usleep_range(1, 5);
-timeout--;
}
return -EBUSY;
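/* Note: with the jiffies-based deadline, time_before() stays correct
 * across jiffies wraparound, and the poll duration no longer depends on
 * how long each readq()/usleep_range() iteration happens to take.
 */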
@@ -361,6 +361,19 @@ static void rvu_check_block_implemented(struct rvu *rvu)
}
}
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
int err;
if (!block->implemented)
return 0;
rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
true);
return err;
}
static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
struct rvu_block *block = &rvu->hw->block[blkaddr];
@@ -552,6 +565,9 @@ static void rvu_free_hw_resources(struct rvu *rvu)
int id, max_msix;
u64 cfg;
rvu_npa_freemem(rvu);
rvu_nix_freemem(rvu);
/* Free block LF bitmaps */
for (id = 0; id < BLK_COUNT; id++) {
block = &hw->block[id];
@@ -755,6 +771,54 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
rvu_scan_block(rvu, block);
}
err = rvu_npa_init(rvu);
if (err)
return err;
err = rvu_nix_init(rvu);
if (err)
return err;
return 0;
}
/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
if (!aq)
return;
qmem_free(rvu->dev, aq->inst);
qmem_free(rvu->dev, aq->res);
devm_kfree(rvu->dev, aq);
}
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int qsize, int inst_size, int res_size)
{
struct admin_queue *aq;
int err;
*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
if (!*ad_queue)
return -ENOMEM;
aq = *ad_queue;
/* Alloc memory for instructions, i.e. the AQ */
err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
if (err) {
devm_kfree(rvu->dev, aq);
return err;
}
/* Alloc memory for results */
err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
if (err) {
rvu_aq_free(rvu, aq);
return err;
}
spin_lock_init(&aq->lock);
return 0;
}
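/* A usage sketch for the NPA AQ (assuming the npa_aq_inst_s and
 * npa_aq_res_s layouts from rvu_struct.h):
 *
 *	err = rvu_aq_alloc(rvu, &block->aq, Q_COUNT(AQ_SIZE),
 *			   sizeof(struct npa_aq_inst_s),
 *			   sizeof(struct npa_aq_res_s));
 */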
@@ -1316,6 +1380,63 @@ static void rvu_mbox_handler(struct work_struct *work)
otx2_mbox_msg_send(mbox, pf);
}
static void rvu_mbox_up_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
struct rvu *rvu = mwork->rvu;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
struct otx2_mbox *mbox;
int offset, id;
u16 pf;
mbox = &rvu->mbox_up;
pf = mwork - rvu->mbox_wrk_up;
mdev = &mbox->dev[pf];
rsp_hdr = mdev->mbase + mbox->rx_start;
if (rsp_hdr->num_msgs == 0) {
dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
return;
}
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < rsp_hdr->num_msgs; id++) {
msg = mdev->mbase + offset;
if (msg->id >= MBOX_MSG_MAX) {
dev_err(rvu->dev,
"Mbox msg with unknown ID 0x%x\n", msg->id);
goto end;
}
if (msg->sig != OTX2_MBOX_RSP_SIG) {
dev_err(rvu->dev,
"Mbox msg with wrong signature %x, ID 0x%x\n",
msg->sig, msg->id);
goto end;
}
switch (msg->id) {
case MBOX_MSG_CGX_LINK_EVENT:
break;
default:
if (msg->rc)
dev_err(rvu->dev,
"Mbox msg response has err %d, ID 0x%x\n",
msg->rc, msg->id);
break;
}
end:
offset = mbox->rx_start + msg->next_msgoff;
mdev->msgs_acked++;
}
otx2_mbox_reset(mbox, 0);
}
static int rvu_mbox_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1337,6 +1458,13 @@ static int rvu_mbox_init(struct rvu *rvu)
goto exit;
}
rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
sizeof(struct rvu_work), GFP_KERNEL);
if (!rvu->mbox_wrk_up) {
err = -ENOMEM;
goto exit;
}
/* Map mbox region shared with PFs */
bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
/* Mailbox is a reserved memory (in RAM) region shared between
@@ -1355,12 +1483,23 @@ static int rvu_mbox_init(struct rvu *rvu)
if (err)
goto exit;
err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
MBOX_DIR_AFPF_UP, hw->total_pfs);
if (err)
goto exit;
for (pf = 0; pf < hw->total_pfs; pf++) {
mwork = &rvu->mbox_wrk[pf];
mwork->rvu = rvu;
INIT_WORK(&mwork->work, rvu_mbox_handler);
}
for (pf = 0; pf < hw->total_pfs; pf++) {
mwork = &rvu->mbox_wrk_up[pf];
mwork->rvu = rvu;
INIT_WORK(&mwork->work, rvu_mbox_up_handler);
}
return 0;
exit:
if (hwbase)
@@ -1381,6 +1520,7 @@ static void rvu_mbox_destroy(struct rvu *rvu)
iounmap((void __iomem *)rvu->mbox.hwbase);
otx2_mbox_destroy(&rvu->mbox);
otx2_mbox_destroy(&rvu->mbox_up);
}
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
@@ -1407,6 +1547,12 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
if (hdr->num_msgs)
queue_work(rvu->mbox_wq,
&rvu->mbox_wrk[pf].work);
mbox = &rvu->mbox_up;
mdev = &mbox->dev[pf];
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs)
queue_work(rvu->mbox_wq,
&rvu->mbox_wrk_up[pf].work);
}
}
@@ -12,6 +12,7 @@
#define RVU_H
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"
/* PCI device IDs */
@@ -41,7 +42,8 @@ struct rsrc_bmap {
};
struct rvu_block {
struct rsrc_bmap lf;
struct admin_queue *aq; /* NIX/NPA AQ */
u16 *fn_map; /* LF to pcifunc mapping */
bool multislot;
bool implemented;
@@ -70,14 +72,50 @@ struct rvu_pfvf {
struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
u16 *msix_lfmap; /* Vector to block LF mapping */
/* NPA contexts */
struct qmem *aura_ctx;
struct qmem *pool_ctx;
struct qmem *npa_qints_ctx;
unsigned long *aura_bmap;
unsigned long *pool_bmap;
/* NIX contexts */
struct qmem *rq_ctx;
struct qmem *sq_ctx;
struct qmem *cq_ctx;
struct qmem *rss_ctx;
struct qmem *cq_ints_ctx;
struct qmem *nix_qints_ctx;
unsigned long *sq_bmap;
unsigned long *rq_bmap;
unsigned long *cq_bmap;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
};
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
u16 *pfvf_map;
};
struct nix_hw {
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
};
struct rvu_hwinfo {
u8 total_pfs; /* MAX RVU PFs HW supports */
u16 total_vfs; /* Max RVU VFs HW supports */
u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
u8 cgx;
u8 lmac_per_cgx;
u8 cgx_links;
u8 lbk_links;
u8 sdp_links;
struct rvu_block block[BLK_COUNT]; /* Block info */
struct nix_hw *nix0;
};
struct rvu {
@@ -93,6 +131,8 @@ struct rvu {
/* Mbox */
struct otx2_mbox mbox;
struct rvu_work *mbox_wrk;
struct otx2_mbox mbox_up;
struct rvu_work *mbox_wrk_up;
struct workqueue_struct *mbox_wq;
/* MSI-X */
@@ -109,6 +149,7 @@ struct rvu {
u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
* every cgx lmac port
*/
unsigned long pf_notify_bmap; /* Flags for PF notification */
void **cgx_idmap; /* cgx id to cgx data map table */
struct work_struct cgx_evh_work;
struct workqueue_struct *cgx_evh_wq;
@@ -149,10 +190,84 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
/* NPA/NIX AQ APIs */
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
{
*cgx_id = (map >> 4) & 0xF;
*lmac_id = (map & 0xF);
}
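/* Example: a map value of 0x12 decodes to cgx_id = 1, lmac_id = 2; the
 * encoding side is cgxlmac_id_to_bmap() in rvu_cgx.c.
 */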
int rvu_cgx_probe(struct rvu *rvu);
void rvu_cgx_wq_destroy(struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp);
int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp);
int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp);
int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NIX APIs */
int rvu_nix_init(struct rvu *rvu);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp);
int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp);
int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
#endif /* RVU_H */
@@ -20,6 +31,31 @@ struct cgx_evq_entry {
struct cgx_link_event link_event;
};
#define M(_name, _id, _req_type, _rsp_type) \
static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \
{ \
struct _req_type *req; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
&rvu->mbox_up, devid, sizeof(struct _req_type), \
sizeof(struct _rsp_type)); \
if (!req) \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
return req; \
}
MBOX_UP_CGX_MESSAGES
#undef M
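/* The M() expansion above emits one allocator per AF-initiated message,
 * e.g. otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid), which is used by
 * cgx_notify_pfs() below.
 */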
/* Returns bitmap of mapped PFs */
static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
@@ -77,6 +102,34 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
return 0;
}
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
struct cgx_evq_entry *qentry;
unsigned long flags;
int err;
qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
if (!qentry)
return -ENOMEM;
/* Lock the event queue before we read the local link status */
spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
&qentry->link_event.link_uinfo);
qentry->link_event.cgx_id = cgx_id;
qentry->link_event.lmac_id = lmac_id;
if (err) {
kfree(qentry);
goto skip_add;
}
list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
/* start worker to process the events */
queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
return 0;
}
/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
@@ -98,6 +151,41 @@ static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
return 0;
}
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
struct cgx_link_user_info *linfo;
struct cgx_link_info_msg *msg;
unsigned long pfmap;
int err, pfid;
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
do {
pfid = find_first_bit(&pfmap, 16);
clear_bit(pfid, &pfmap);
/* check if notification is enabled */
if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
event->cgx_id, event->lmac_id,
linfo->link_up ? "UP" : "DOWN");
continue;
}
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
if (!msg)
continue;
msg->link_info = *linfo;
otx2_mbox_msg_send(&rvu->mbox_up, pfid);
err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
if (err)
dev_warn(rvu->dev, "notification to pf %d failed\n",
pfid);
} while (pfmap);
}
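/* Example: if PFs 1 and 3 are mapped to the LMAC, pfmap = 0xA and the
 * loop above notifies PF 1 then PF 3, logging instead of notifying any PF
 * that has not enabled link events via CGX_START_LINKEVENTS.
 */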
static void cgx_evhandler_task(struct work_struct *work)
{
struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
@@ -119,7 +207,8 @@ static void cgx_evhandler_task(struct work_struct *work)
event = &qentry->link_event;
-/* Do nothing for now */
+/* process event */
cgx_notify_pfs(event, rvu);
kfree(qentry);
} while (1);
}
@@ -192,3 +281,232 @@ int rvu_cgx_probe(struct rvu *rvu)
cgx_lmac_event_handler_init(rvu);
return 0;
}
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs;
 * if received from any other PF/VF, simply ACK, nothing to do.
 */
if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
return 0;
}
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
return 0;
}
int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
return 0;
}
int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
u8 cgx_idx, lmac;
void *cgxd;
if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
/* Rx stats */
while (stat < CGX_RX_STATS_COUNT) {
err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
if (err)
return err;
rsp->rx_stats[stat] = rx_stat;
stat++;
}
/* Tx stats */
stat = 0;
while (stat < CGX_TX_STATS_COUNT) {
err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
if (err)
return err;
rsp->tx_stats[stat] = tx_stat;
stat++;
}
return 0;
}
int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
return 0;
}
int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
int rc = 0, i;
u64 cfg;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->hdr.rc = rc;
cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
/* copy the 48-bit MAC address to rsp->mac_addr */
for (i = 0; i < ETH_ALEN; i++)
rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
return 0;
}
int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs;
 * if received from any other PF/VF, simply ACK, nothing to do.
 */
if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, true);
return 0;
}
int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs;
 * if received from any other PF/VF, simply ACK, nothing to do.
 */
if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, false);
return 0;
}
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs;
 * if received from any other PF/VF, simply ACK, nothing to do.
 */
if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
if (en) {
set_bit(pf, &rvu->pf_notify_bmap);
/* Send the current link status to PF */
rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
} else {
clear_bit(pf, &rvu->pf_notify_bmap);
}
return 0;
}
int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
return 0;
}
int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
return 0;
}
int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp)
{
u8 cgx_id, lmac_id;
int pf, err;
pf = rvu_get_pf(req->hdr.pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
&rsp->link_info);
return err;
}
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PFs that are mapped to CGX LMACs;
 * if received from any other PF/VF, simply ACK, nothing to do.
 */
if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, en);
}
int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
return 0;
}
int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
return 0;
}