Commit 08e8b91c authored by David S. Miller

Merge branch 'octeontx2-vf-Add-network-driver-for-virtual-function'

Sunil Goutham says:

====================
octeontx2-vf: Add network driver for virtual function

This patch series adds a network driver for the virtual functions of
OcteonTX2 SOC's resource virtualization unit (RVU).

Changes from v3:
   * Removed missed out EXPORT symbols in VF driver.

Changes from v2:
   * Removed Copyright license text.
   * Removed wrapper fn()s around mutex_lock and unlock.
   * Got rid of using macro with 'return'.
   * Removed __weak fn()s.
        - Suggested by Leon Romanovsky and Andrew Lunn

Changes from v1:
   * Removed driver version and fixed authorship
   * Removed driver version and fixed authorship in the already
     upstreamed AF, PF drivers.
   * Removed unnecessary checks in sriov_enable and xmit fn()s.
   * Removed WQ_MEM_RECLAIM flag while creating workqueue.
   * Added lock in tx_timeout task.
   * Added 'supported_coalesce_params' in ethtool ops.
   * Minor other cleanups.
        - Suggested by Jakub Kicinski
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4e8386fc 4c3212f5
...@@ -33,3 +33,9 @@ config OCTEONTX2_PF ...@@ -33,3 +33,9 @@ config OCTEONTX2_PF
depends on PCI depends on PCI
help help
This driver supports Marvell's OcteonTX2 NIC physical function. This driver supports Marvell's OcteonTX2 NIC physical function.
config OCTEONTX2_VF
tristate "Marvell OcteonTX2 NIC Virtual Function driver"
depends on OCTEONTX2_PF
help
This driver supports Marvell's OcteonTX2 NIC virtual function.
...@@ -21,7 +21,6 @@ ...@@ -21,7 +21,6 @@
#define DRV_NAME "octeontx2-af" #define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
#define DRV_VERSION "1.0"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
...@@ -46,10 +45,9 @@ static const struct pci_device_id rvu_id_table[] = { ...@@ -46,10 +45,9 @@ static const struct pci_device_id rvu_id_table[] = {
{ 0, } /* end of table */ { 0, } /* end of table */
}; };
MODULE_AUTHOR("Marvell International Ltd."); MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING); MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table); MODULE_DEVICE_TABLE(pci, rvu_id_table);
static char *mkex_profile; /* MKEX profile name */ static char *mkex_profile; /* MKEX profile name */
......
...@@ -4,7 +4,9 @@ ...@@ -4,7 +4,9 @@
# #
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
...@@ -49,15 +49,15 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf) ...@@ -49,15 +49,15 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
if (!netif_running(pfvf->netdev)) if (!netif_running(pfvf->netdev))
return; return;
otx2_mbox_lock(&pfvf->mbox); mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox); req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
if (!req) { if (!req) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return; return;
} }
otx2_sync_mbox_msg(&pfvf->mbox); otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
} }
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
...@@ -128,6 +128,7 @@ void otx2_get_stats64(struct net_device *netdev, ...@@ -128,6 +128,7 @@ void otx2_get_stats64(struct net_device *netdev,
stats->tx_packets = dev_stats->tx_frames; stats->tx_packets = dev_stats->tx_frames;
stats->tx_dropped = dev_stats->tx_drops; stats->tx_dropped = dev_stats->tx_drops;
} }
EXPORT_SYMBOL(otx2_get_stats64);
/* Sync MAC address with RVU AF */ /* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac) static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
...@@ -135,17 +136,17 @@ static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac) ...@@ -135,17 +136,17 @@ static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
struct nix_set_mac_addr *req; struct nix_set_mac_addr *req;
int err; int err;
otx2_mbox_lock(&pfvf->mbox); mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox); req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
if (!req) { if (!req) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
ether_addr_copy(req->mac_addr, mac); ether_addr_copy(req->mac_addr, mac);
err = otx2_sync_mbox_msg(&pfvf->mbox); err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return err; return err;
} }
...@@ -157,27 +158,27 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf, ...@@ -157,27 +158,27 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
struct msg_req *req; struct msg_req *req;
int err; int err;
otx2_mbox_lock(&pfvf->mbox); mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox); req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
if (!req) { if (!req) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
err = otx2_sync_mbox_msg(&pfvf->mbox); err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) { if (err) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return err; return err;
} }
msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(msghdr)) { if (IS_ERR(msghdr)) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return PTR_ERR(msghdr); return PTR_ERR(msghdr);
} }
rsp = (struct nix_get_mac_addr_rsp *)msghdr; rsp = (struct nix_get_mac_addr_rsp *)msghdr;
ether_addr_copy(netdev->dev_addr, rsp->mac_addr); ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return 0; return 0;
} }
...@@ -197,26 +198,25 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) ...@@ -197,26 +198,25 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
return 0; return 0;
} }
EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{ {
struct nix_frs_cfg *req; struct nix_frs_cfg *req;
int err; int err;
otx2_mbox_lock(&pfvf->mbox); mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) { if (!req) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
/* SMQ config limits maximum pkt size that can be transmitted */
req->update_smq = true;
pfvf->max_frs = mtu + OTX2_ETH_HLEN; pfvf->max_frs = mtu + OTX2_ETH_HLEN;
req->maxlen = pfvf->max_frs; req->maxlen = pfvf->max_frs;
err = otx2_sync_mbox_msg(&pfvf->mbox); err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return err; return err;
} }
...@@ -225,7 +225,10 @@ int otx2_config_pause_frm(struct otx2_nic *pfvf) ...@@ -225,7 +225,10 @@ int otx2_config_pause_frm(struct otx2_nic *pfvf)
struct cgx_pause_frm_cfg *req; struct cgx_pause_frm_cfg *req;
int err; int err;
otx2_mbox_lock(&pfvf->mbox); if (is_otx2_lbkvf(pfvf->pdev))
return 0;
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
if (!req) { if (!req) {
err = -ENOMEM; err = -ENOMEM;
...@@ -238,7 +241,7 @@ int otx2_config_pause_frm(struct otx2_nic *pfvf) ...@@ -238,7 +241,7 @@ int otx2_config_pause_frm(struct otx2_nic *pfvf)
err = otx2_sync_mbox_msg(&pfvf->mbox); err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock: unlock:
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return err; return err;
} }
...@@ -248,10 +251,10 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) ...@@ -248,10 +251,10 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
struct nix_rss_flowkey_cfg *req; struct nix_rss_flowkey_cfg *req;
int err; int err;
otx2_mbox_lock(&pfvf->mbox); mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox); req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
if (!req) { if (!req) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
req->mcam_index = -1; /* Default or reserved index */ req->mcam_index = -1; /* Default or reserved index */
...@@ -259,7 +262,7 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) ...@@ -259,7 +262,7 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
req->group = DEFAULT_RSS_CONTEXT_GROUP; req->group = DEFAULT_RSS_CONTEXT_GROUP;
err = otx2_sync_mbox_msg(&pfvf->mbox); err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return err; return err;
} }
...@@ -270,7 +273,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf) ...@@ -270,7 +273,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
struct nix_aq_enq_req *aq; struct nix_aq_enq_req *aq;
int idx, err; int idx, err;
otx2_mbox_lock(mbox); mutex_lock(&mbox->lock);
/* Get memory to put this msg */ /* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) { for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
...@@ -280,12 +283,12 @@ int otx2_set_rss_table(struct otx2_nic *pfvf) ...@@ -280,12 +283,12 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
*/ */
err = otx2_sync_mbox_msg(mbox); err = otx2_sync_mbox_msg(mbox);
if (err) { if (err) {
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
return err; return err;
} }
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) { if (!aq) {
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
return -ENOMEM; return -ENOMEM;
} }
} }
...@@ -298,7 +301,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf) ...@@ -298,7 +301,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
aq->op = NIX_AQ_INSTOP_INIT; aq->op = NIX_AQ_INSTOP_INIT;
} }
err = otx2_sync_mbox_msg(mbox); err = otx2_sync_mbox_msg(mbox);
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
return err; return err;
} }
...@@ -416,6 +419,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq) ...@@ -416,6 +419,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
schedule_work(&pfvf->reset_task); schedule_work(&pfvf->reset_task);
} }
EXPORT_SYMBOL(otx2_tx_timeout);
void otx2_get_mac_from_af(struct net_device *netdev) void otx2_get_mac_from_af(struct net_device *netdev)
{ {
...@@ -430,6 +434,7 @@ void otx2_get_mac_from_af(struct net_device *netdev) ...@@ -430,6 +434,7 @@ void otx2_get_mac_from_af(struct net_device *netdev)
if (!is_valid_ether_addr(netdev->dev_addr)) if (!is_valid_ether_addr(netdev->dev_addr))
eth_hw_addr_random(netdev); eth_hw_addr_random(netdev);
} }
EXPORT_SYMBOL(otx2_get_mac_from_af);
static int otx2_get_link(struct otx2_nic *pfvf) static int otx2_get_link(struct otx2_nic *pfvf)
{ {
...@@ -465,7 +470,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) ...@@ -465,7 +470,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */ /* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) { if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq); req->reg[0] = NIX_AF_SMQX_CFG(schq);
req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) | req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
OTX2_MIN_MTU; OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
...@@ -551,17 +556,17 @@ int otx2_txschq_stop(struct otx2_nic *pfvf) ...@@ -551,17 +556,17 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
struct nix_txsch_free_req *free_req; struct nix_txsch_free_req *free_req;
int lvl, schq, err; int lvl, schq, err;
otx2_mbox_lock(&pfvf->mbox); mutex_lock(&pfvf->mbox.lock);
/* Free the transmit schedulers */ /* Free the transmit schedulers */
free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
if (!free_req) { if (!free_req) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
free_req->flags = TXSCHQ_FREE_ALL; free_req->flags = TXSCHQ_FREE_ALL;
err = otx2_sync_mbox_msg(&pfvf->mbox); err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
/* Clear the txschq list */ /* Clear the txschq list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
...@@ -575,17 +580,19 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) ...@@ -575,17 +580,19 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{ {
int qidx, sqe_tail, sqe_head; int qidx, sqe_tail, sqe_head;
u64 incr, *ptr, val; u64 incr, *ptr, val;
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
incr = (u64)qidx << 32; incr = (u64)qidx << 32;
while (1) { while (timeout) {
val = otx2_atomic64_add(incr, ptr); val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F; sqe_head = (val >> 20) & 0x3F;
sqe_tail = (val >> 28) & 0x3F; sqe_tail = (val >> 28) & 0x3F;
if (sqe_head == sqe_tail) if (sqe_head == sqe_tail)
break; break;
usleep_range(1, 3); usleep_range(1, 3);
timeout--;
} }
} }
} }
...@@ -981,6 +988,7 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf) ...@@ -981,6 +988,7 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->fc_addr); qmem_free(pfvf->dev, pool->fc_addr);
} }
devm_kfree(pfvf->dev, pfvf->qset.pool); devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
} }
static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
...@@ -1248,10 +1256,10 @@ int otx2_detach_resources(struct mbox *mbox) ...@@ -1248,10 +1256,10 @@ int otx2_detach_resources(struct mbox *mbox)
{ {
struct rsrc_detach *detach; struct rsrc_detach *detach;
otx2_mbox_lock(mbox); mutex_lock(&mbox->lock);
detach = otx2_mbox_alloc_msg_detach_resources(mbox); detach = otx2_mbox_alloc_msg_detach_resources(mbox);
if (!detach) { if (!detach) {
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
return -ENOMEM; return -ENOMEM;
} }
...@@ -1260,9 +1268,10 @@ int otx2_detach_resources(struct mbox *mbox) ...@@ -1260,9 +1268,10 @@ int otx2_detach_resources(struct mbox *mbox)
/* Send detach request to AF */ /* Send detach request to AF */
otx2_mbox_msg_send(&mbox->mbox, 0); otx2_mbox_msg_send(&mbox->mbox, 0);
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
return 0; return 0;
} }
EXPORT_SYMBOL(otx2_detach_resources);
int otx2_attach_npa_nix(struct otx2_nic *pfvf) int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{ {
...@@ -1270,11 +1279,11 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf) ...@@ -1270,11 +1279,11 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
struct msg_req *msix; struct msg_req *msix;
int err; int err;
otx2_mbox_lock(&pfvf->mbox); mutex_lock(&pfvf->mbox.lock);
/* Get memory to put this msg */ /* Get memory to put this msg */
attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
if (!attach) { if (!attach) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
...@@ -1284,7 +1293,7 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf) ...@@ -1284,7 +1293,7 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
/* Send attach request to AF */ /* Send attach request to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox); err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) { if (err) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return err; return err;
} }
...@@ -1299,16 +1308,16 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf) ...@@ -1299,16 +1308,16 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
/* Get NPA and NIX MSIX vector offsets */ /* Get NPA and NIX MSIX vector offsets */
msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
if (!msix) { if (!msix) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
err = otx2_sync_mbox_msg(&pfvf->mbox); err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) { if (err) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return err; return err;
} }
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID || if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) { pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
...@@ -1319,12 +1328,13 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf) ...@@ -1319,12 +1328,13 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
return 0; return 0;
} }
EXPORT_SYMBOL(otx2_attach_npa_nix);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa) void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{ {
struct hwctx_disable_req *req; struct hwctx_disable_req *req;
otx2_mbox_lock(mbox); mutex_lock(&mbox->lock);
/* Request AQ to disable this context */ /* Request AQ to disable this context */
if (npa) if (npa)
req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox); req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
...@@ -1332,7 +1342,7 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa) ...@@ -1332,7 +1342,7 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox); req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);
if (!req) { if (!req) {
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
return; return;
} }
...@@ -1342,7 +1352,7 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa) ...@@ -1342,7 +1352,7 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
dev_err(mbox->pfvf->dev, "%s failed to disable context\n", dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
__func__); __func__);
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
} }
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
...@@ -1387,6 +1397,7 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf, ...@@ -1387,6 +1397,7 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
pf->hw.txschq_list[lvl][schq] = pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq]; rsp->schq_list[lvl][schq];
} }
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
struct npa_lf_alloc_rsp *rsp) struct npa_lf_alloc_rsp *rsp)
...@@ -1394,6 +1405,7 @@ void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, ...@@ -1394,6 +1405,7 @@ void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs; pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes; pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
} }
EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
struct nix_lf_alloc_rsp *rsp) struct nix_lf_alloc_rsp *rsp)
...@@ -1404,6 +1416,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, ...@@ -1404,6 +1416,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
} }
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
void mbox_handler_msix_offset(struct otx2_nic *pfvf, void mbox_handler_msix_offset(struct otx2_nic *pfvf,
struct msix_offset_rsp *rsp) struct msix_offset_rsp *rsp)
...@@ -1411,6 +1424,7 @@ void mbox_handler_msix_offset(struct otx2_nic *pfvf, ...@@ -1411,6 +1424,7 @@ void mbox_handler_msix_offset(struct otx2_nic *pfvf,
pfvf->hw.npa_msixoff = rsp->npa_msixoff; pfvf->hw.npa_msixoff = rsp->npa_msixoff;
pfvf->hw.nix_msixoff = rsp->nix_msixoff; pfvf->hw.nix_msixoff = rsp->nix_msixoff;
} }
EXPORT_SYMBOL(mbox_handler_msix_offset);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
struct nix_bp_cfg_rsp *rsp) struct nix_bp_cfg_rsp *rsp)
...@@ -1422,6 +1436,7 @@ void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, ...@@ -1422,6 +1436,7 @@ void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF; pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
} }
} }
EXPORT_SYMBOL(mbox_handler_nix_bp_enable);
void otx2_free_cints(struct otx2_nic *pfvf, int n) void otx2_free_cints(struct otx2_nic *pfvf, int n)
{ {
......
...@@ -20,6 +20,8 @@ ...@@ -20,6 +20,8 @@
/* PCI device IDs */ /* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063 #define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
...@@ -191,6 +193,17 @@ struct otx2_hw { ...@@ -191,6 +193,17 @@ struct otx2_hw {
u64 cgx_tx_stats[CGX_TX_STATS_COUNT]; u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
}; };
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
bool intf_down; /* interface was either configured or not */
};
struct flr_work {
struct work_struct work;
struct otx2_nic *pf;
};
struct refill_work { struct refill_work {
struct delayed_work pool_refill_work; struct delayed_work pool_refill_work;
struct otx2_nic *pf; struct otx2_nic *pf;
...@@ -215,14 +228,20 @@ struct otx2_nic { ...@@ -215,14 +228,20 @@ struct otx2_nic {
/* Mbox */ /* Mbox */
struct mbox mbox; struct mbox mbox;
struct mbox *mbox_pfvf;
struct workqueue_struct *mbox_wq; struct workqueue_struct *mbox_wq;
struct workqueue_struct *mbox_pfvf_wq;
u8 total_vfs;
u16 pcifunc; /* RVU PF_FUNC */ u16 pcifunc; /* RVU PF_FUNC */
u16 bpid[NIX_MAX_BPID_CHAN]; u16 bpid[NIX_MAX_BPID_CHAN];
struct otx2_vf_config *vf_configs;
struct cgx_link_user_info linfo; struct cgx_link_user_info linfo;
u64 reset_count; u64 reset_count;
struct work_struct reset_task; struct work_struct reset_task;
struct workqueue_struct *flr_wq;
struct flr_work *flr_wrk;
struct refill_work *refill_wrk; struct refill_work *refill_wrk;
/* Ethtool stuff */ /* Ethtool stuff */
...@@ -232,6 +251,11 @@ struct otx2_nic { ...@@ -232,6 +251,11 @@ struct otx2_nic {
int nix_blkaddr; int nix_blkaddr;
}; };
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}
static inline bool is_96xx_A0(struct pci_dev *pdev) static inline bool is_96xx_A0(struct pci_dev *pdev)
{ {
return (pdev->revision == 0x00) && return (pdev->revision == 0x00) &&
...@@ -351,21 +375,6 @@ static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid) ...@@ -351,21 +375,6 @@ static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
hw_mbase + mbox->rx_start, msg_size + msgs_offset); hw_mbase + mbox->rx_start, msg_size + msgs_offset);
} }
static inline void otx2_mbox_lock_init(struct mbox *mbox)
{
mutex_init(&mbox->lock);
}
static inline void otx2_mbox_lock(struct mbox *mbox)
{
mutex_lock(&mbox->lock);
}
static inline void otx2_mbox_unlock(struct mbox *mbox)
{
mutex_unlock(&mbox->lock);
}
/* With the absence of API for 128-bit IO memory access for arm64, /* With the absence of API for 128-bit IO memory access for arm64,
* implement required operations at place. * implement required operations at place.
*/ */
...@@ -614,6 +623,7 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf); ...@@ -614,6 +623,7 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx); int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx); int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev); void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);
int otx2_open(struct net_device *netdev); int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev); int otx2_stop(struct net_device *netdev);
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include "otx2_common.h" #include "otx2_common.h"
#define DRV_NAME "octeontx2-nicpf" #define DRV_NAME "octeontx2-nicpf"
#define DRV_VF_NAME "octeontx2-nicvf"
struct otx2_stat { struct otx2_stat {
char name[ETH_GSTRING_LEN]; char name[ETH_GSTRING_LEN];
...@@ -63,16 +64,6 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats); ...@@ -63,16 +64,6 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats); static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats); static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
static void otx2_dev_open(struct net_device *netdev)
{
otx2_open(netdev);
}
static void otx2_dev_stop(struct net_device *netdev)
{
otx2_stop(netdev);
}
static void otx2_get_drvinfo(struct net_device *netdev, static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info) struct ethtool_drvinfo *info)
{ {
...@@ -232,7 +223,7 @@ static int otx2_set_channels(struct net_device *dev, ...@@ -232,7 +223,7 @@ static int otx2_set_channels(struct net_device *dev,
return -EINVAL; return -EINVAL;
if (if_up) if (if_up)
otx2_dev_stop(dev); dev->netdev_ops->ndo_stop(dev);
err = otx2_set_real_num_queues(dev, channel->tx_count, err = otx2_set_real_num_queues(dev, channel->tx_count,
channel->rx_count); channel->rx_count);
...@@ -245,7 +236,7 @@ static int otx2_set_channels(struct net_device *dev, ...@@ -245,7 +236,7 @@ static int otx2_set_channels(struct net_device *dev,
fail: fail:
if (if_up) if (if_up)
otx2_dev_open(dev); dev->netdev_ops->ndo_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues); pfvf->hw.tx_queues, pfvf->hw.rx_queues);
...@@ -259,6 +250,9 @@ static void otx2_get_pauseparam(struct net_device *netdev, ...@@ -259,6 +250,9 @@ static void otx2_get_pauseparam(struct net_device *netdev,
struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_pause_frm_cfg *req, *rsp; struct cgx_pause_frm_cfg *req, *rsp;
if (is_otx2_lbkvf(pfvf->pdev))
return;
req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
if (!req) if (!req)
return; return;
...@@ -279,6 +273,9 @@ static int otx2_set_pauseparam(struct net_device *netdev, ...@@ -279,6 +273,9 @@ static int otx2_set_pauseparam(struct net_device *netdev,
if (pause->autoneg) if (pause->autoneg)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (is_otx2_lbkvf(pfvf->pdev))
return -EOPNOTSUPP;
if (pause->rx_pause) if (pause->rx_pause)
pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
else else
...@@ -336,14 +333,15 @@ static int otx2_set_ringparam(struct net_device *netdev, ...@@ -336,14 +333,15 @@ static int otx2_set_ringparam(struct net_device *netdev,
return 0; return 0;
if (if_up) if (if_up)
otx2_dev_stop(netdev); netdev->netdev_ops->ndo_stop(netdev);
/* Assigned to the nearest possible exponent. */ /* Assigned to the nearest possible exponent. */
qs->sqe_cnt = tx_count; qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count; qs->rqe_cnt = rx_count;
if (if_up) if (if_up)
otx2_dev_open(netdev); netdev->netdev_ops->ndo_open(netdev);
return 0; return 0;
} }
...@@ -659,6 +657,9 @@ static u32 otx2_get_link(struct net_device *netdev) ...@@ -659,6 +657,9 @@ static u32 otx2_get_link(struct net_device *netdev)
{ {
struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_nic *pfvf = netdev_priv(netdev);
/* LBK link is internal and always UP */
if (is_otx2_lbkvf(pfvf->pdev))
return 1;
return pfvf->linfo.link_up; return pfvf->linfo.link_up;
} }
...@@ -692,3 +693,102 @@ void otx2_set_ethtool_ops(struct net_device *netdev) ...@@ -692,3 +693,102 @@ void otx2_set_ethtool_ops(struct net_device *netdev)
{ {
netdev->ethtool_ops = &otx2_ethtool_ops; netdev->ethtool_ops = &otx2_ethtool_ops;
} }
/* VF's ethtool APIs */
static void otx2vf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct otx2_nic *vf = netdev_priv(netdev);
strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
struct otx2_nic *vf = netdev_priv(netdev);
int stats;
if (sset != ETH_SS_STATS)
return;
for (stats = 0; stats < otx2_n_dev_stats; stats++) {
memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (stats = 0; stats < otx2_n_drv_stats; stats++) {
memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
otx2_get_qset_strings(vf, &data, 0);
strcpy(data, "reset_count");
data += ETH_GSTRING_LEN;
}
static void otx2vf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct otx2_nic *vf = netdev_priv(netdev);
int stat;
otx2_get_dev_stats(vf);
for (stat = 0; stat < otx2_n_dev_stats; stat++)
*(data++) = ((u64 *)&vf->hw.dev_stats)
[otx2_dev_stats[stat].index];
for (stat = 0; stat < otx2_n_drv_stats; stat++)
*(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
[otx2_drv_stats[stat].index]);
otx2_get_qset_stats(vf, stats, &data);
*(data++) = vf->reset_count;
}
static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
{
struct otx2_nic *vf = netdev_priv(netdev);
int qstats_count;
if (sset != ETH_SS_STATS)
return -EINVAL;
qstats_count = otx2_n_queue_stats *
(vf->hw.rx_queues + vf->hw.tx_queues);
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,
.get_ethtool_stats = otx2vf_get_ethtool_stats,
.get_sset_count = otx2vf_get_sset_count,
.set_channels = otx2_set_channels,
.get_channels = otx2_get_channels,
.get_rxnfc = otx2_get_rxnfc,
.set_rxnfc = otx2_set_rxnfc,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
.set_coalesce = otx2_set_coalesce,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &otx2vf_ethtool_ops;
}
EXPORT_SYMBOL(otx2vf_set_ethtool_ops);
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#define DRV_NAME "octeontx2-nicpf" #define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver" #define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
#define DRV_VERSION "1.0"
/* Supported devices */ /* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = { static const struct pci_device_id otx2_pf_id_table[] = {
...@@ -32,10 +31,9 @@ static const struct pci_device_id otx2_pf_id_table[] = { ...@@ -32,10 +31,9 @@ static const struct pci_device_id otx2_pf_id_table[] = {
{ 0, } /* end of table */ { 0, } /* end of table */
}; };
MODULE_AUTHOR("Marvell International Ltd."); MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING); MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table); MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
enum { enum {
...@@ -61,6 +59,224 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -61,6 +59,224 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
return err; return err;
} }
/* Mask the VF FLR and ME interrupts and release their IRQ lines.
 * The second register bank / vector pair (VFs 64..127) is torn down
 * only when more than 64 VFs were enabled.
 */
static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
{
	int irq, vfs = pf->total_vfs;

	/* Disable VFs ME interrupts */
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(irq, pf);

	/* Disable VFs FLR interrupts */
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(irq, pf);

	if (vfs <= 64)
		return;

	/* Second bank covers VFs 64..127 */
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(irq, pf);

	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(irq, pf);
}
/* Tear down the FLR workqueue and free the per-VF work array.
 * No-op when the workqueue was never created.
 */
static void otx2_flr_wq_destroy(struct otx2_nic *pf)
{
	struct workqueue_struct *wq = pf->flr_wq;

	if (!wq)
		return;

	pf->flr_wq = NULL;
	destroy_workqueue(wq);
	devm_kfree(pf->dev, pf->flr_wrk);
}
/* Workqueue handler for a single VF's Function Level Reset: asks the AF
 * (over the PF=>AF mailbox) to clean up the VF's resources, then clears
 * the VF's transaction-pending bit and re-arms its FLR interrupt, which
 * was masked by otx2_pf_flr_intr_handler() when the work was queued.
 */
static void otx2_flr_handler(struct work_struct *work)
{
	struct flr_work *flrwork = container_of(work, struct flr_work, work);
	struct otx2_nic *pf = flrwork->pf;
	struct mbox *mbox = &pf->mbox;
	struct msg_req *req;
	int vf, reg = 0;

	/* VF index is this work item's offset in the flr_wrk array */
	vf = flrwork - pf->flr_wrk;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_vf_flr(mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return;
	}
	/* Address the message to VF 'vf' (FUNC field is vf index + 1) */
	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	if (!otx2_sync_mbox_msg(&pf->mbox)) {
		/* VFs 64..127 live in bank 1 of the registers */
		if (vf >= 64) {
			reg = 1;
			vf = vf - 64;
		}
		/* clear transaction pending bit */
		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		/* re-enable this VF's FLR interrupt */
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
	}

	mutex_unlock(&mbox->lock);
}
/* Hard IRQ handler for VF FLR events: for each VF with a pending FLR,
 * queue its flr_work, ack the interrupt, and mask it until
 * otx2_flr_handler() finishes the cleanup and re-enables it.
 */
static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int reg, dev, vf, start_vf, num_reg = 1;
	u64 intr;

	/* Two 64-bit interrupt banks when more than 64 VFs exist */
	if (pf->total_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		/* Bank 'reg' maps VFs [64*reg, 64*reg + 63] */
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
			/* Clear interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
				     BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}
/* Hard IRQ handler for VF Master-Enable (ME) events: for every VF that
 * raised ME, clear its transaction-pending bit and ack the interrupt.
 */
static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int nregs = (pf->total_vfs > 64) ? 2 : 1;
	int regidx, bit;
	u64 pending;

	for (regidx = 0; regidx < nregs; regidx++) {
		pending = otx2_read64(pf, RVU_PF_VFME_INTX(regidx));
		if (!pending)
			continue;
		for (bit = 0; bit < 64; bit++) {
			if (!(pending & BIT_ULL(bit)))
				continue;
			/* Clear the VF's transaction-pending bit first... */
			otx2_write64(pf, RVU_PF_VFTRPENDX(regidx),
				     BIT_ULL(bit));
			/* ...then ack the ME interrupt itself */
			otx2_write64(pf, RVU_PF_VFME_INTX(regidx),
				     BIT_ULL(bit));
		}
	}

	return IRQ_HANDLED;
}
/* Request the VF FLR and ME IRQ vectors and enable those interrupts for
 * all 'numvfs' VFs.  The second vector pair (ME1/FLR1) and register
 * bank serve VFs 64..127 and are used only when numvfs > 64.
 * Returns 0 on success or the request_irq() error for an FLR vector.
 * NOTE(review): a failing ME vector only logs and continues — looks
 * intentional (ME events appear informational); confirm against HW docs.
 */
static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	char *irq_name;
	int ret;

	/* Register ME interrupt handler*/
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
			  otx2_pf_me_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for ME0\n");
	}

	/* Register FLR interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for FLR0\n");
		return ret;
	}

	if (numvfs > 64) {
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
				  otx2_pf_me_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for ME1\n");
		}
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for FLR1\n");
			return ret;
		}
	}

	/* Enable ME interrupt for all VFs*/
	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	/* Enable FLR interrupt for all VFs*/
	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	if (numvfs > 64) {
		numvfs -= 64;

		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));

		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
	}
	return 0;
}
/* Allocate the FLR workqueue plus one flr_work per VF and wire each
 * work item to otx2_flr_handler().  Returns 0 or -ENOMEM.
 */
static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
{
	int i;

	pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
				     WQ_UNBOUND | WQ_HIGHPRI, 1);
	if (!pf->flr_wq)
		return -ENOMEM;

	/* devm-managed: freed on detach or via otx2_flr_wq_destroy() */
	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
				   sizeof(struct flr_work), GFP_KERNEL);
	if (!pf->flr_wrk) {
		destroy_workqueue(pf->flr_wq);
		return -ENOMEM;
	}

	for (i = 0; i < num_vfs; i++) {
		pf->flr_wrk[i].pf = pf;
		INIT_WORK(&pf->flr_wrk[i].work, otx2_flr_handler);
	}

	return 0;
}
static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq, static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
int first, int mdevs, u64 intr, int type) int first, int mdevs, u64 intr, int type)
{ {
...@@ -115,9 +331,391 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq, ...@@ -115,9 +331,391 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
} }
} }
/* Finish forwarding mailbox responses to a VF: order the (already
 * copied) message writes before ringing the VF's mailbox doorbell,
 * then restore the bounce-buffer base that was temporarily swapped in
 * by otx2_forward_vf_mbox_msgs().
 */
static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
				  int devid)
{
	struct otx2_mbox_dev *src_mdev = mdev;
	int offset;

	/* Msgs are already copied, trigger VF's mbox irq */
	smp_wmb();

	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
	writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);

	/* Restore VF's mbox bounce buffer region address */
	src_mdev->mbase = bbuf_base;
}
/* Forward mailbox messages between a VF and the AF on the VF's behalf.
 *
 * @dir selects the forwarding path:
 *  - MBOX_DIR_PFAF:    VF => AF requests; the VF's mailbox memory is
 *    temporarily installed as the PF's bounce buffer so that neither
 *    the requests nor the AF's responses need an explicit copy.
 *  - MBOX_DIR_PFVF_UP: AF => VF up-notifications, waited on synchronously.
 *  - MBOX_DIR_VFPF_UP: the VF's acks for up-notifications, sent to AF.
 *
 * Returns 0 on success, -EINVAL if the batch does not fit in the
 * destination mailbox region, or the mailbox sync error code.
 */
static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
				     struct otx2_mbox *src_mbox,
				     int dir, int vf, int num_msgs)
{
	struct otx2_mbox_dev *src_mdev, *dst_mdev;
	struct mbox_hdr *mbox_hdr;
	struct mbox_hdr *req_hdr;
	struct mbox *dst_mbox;
	int dst_size, err;

	if (dir == MBOX_DIR_PFAF) {
		/* Set VF's mailbox memory as PF's bounce buffer memory, so
		 * that explicit copying of VF's msgs to PF=>AF mbox region
		 * and AF=>PF responses to VF's mbox region can be avoided.
		 */
		src_mdev = &src_mbox->dev[vf];
		mbox_hdr = src_mbox->hwbase +
				src_mbox->rx_start + (vf * MBOX_SIZE);

		dst_mbox = &pf->mbox;
		dst_size = dst_mbox->mbox.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area */
		if (mbox_hdr->msg_size > dst_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox.dev[0];

		mutex_lock(&pf->mbox.lock);
		/* Point the PF=>AF channel straight at the VF's messages */
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = num_msgs;
		err = otx2_sync_mbox_msg(dst_mbox);
		if (err) {
			dev_warn(pf->dev,
				 "AF not responding to VF%d messages\n", vf);
			/* restore PF mbase and exit */
			dst_mdev->mbase = pf->mbox.bbuf_base;
			mutex_unlock(&pf->mbox.lock);
			return err;
		}
		/* At this point, all the VF messages sent to AF are acked
		 * with proper responses and responses are copied to VF
		 * mailbox hence raise interrupt to VF.
		 */
		req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
					      dst_mbox->mbox.rx_start);
		req_hdr->num_msgs = num_msgs;

		otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
				      pf->mbox.bbuf_base, vf);
		mutex_unlock(&pf->mbox.lock);
	} else if (dir == MBOX_DIR_PFVF_UP) {
		src_mdev = &src_mbox->dev[0];
		mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
		req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;

		dst_mbox = &pf->mbox_pfvf[0];
		dst_size = dst_mbox->mbox_up.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area */
		if (mbox_hdr->msg_size > dst_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox_up.dev[vf];
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = mbox_hdr->num_msgs;
		err = otx2_sync_mbox_up_msg(dst_mbox, vf);
		if (err) {
			dev_warn(pf->dev,
				 "VF%d is not responding to mailbox\n", vf);
			return err;
		}
	} else if (dir == MBOX_DIR_VFPF_UP) {
		req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;
		/* Relay the VF's ack toward the AF up-mailbox */
		otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
				      &pf->mbox.mbox_up,
				      pf->mbox_pfvf[vf].bbuf_base,
				      0);
	}

	return 0;
}
/* Workqueue handler for VF=>PF mailbox requests: stamp each message
 * with the originating VF's function number, then forward the whole
 * batch to the AF.  A malformed or unforwardable batch is answered
 * with an "invalid message" reply instead.
 */
static void otx2_pfvf_mbox_handler(struct work_struct *work)
{
	struct mbox_msghdr *msg = NULL;
	int offset, vf_idx, id, err;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *pf;

	vf_mbox = container_of(work, struct mbox, mbox_wrk);
	pf = vf_mbox->pfvf;
	/* VF index is this mbox's offset in the per-VF array */
	vf_idx = vf_mbox - pf->mbox_pfvf;

	mbox = &pf->mbox_pfvf[0].mbox;
	mdev = &mbox->dev[vf_idx];
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
					     offset);

		if (msg->sig != OTX2_MBOX_REQ_SIG)
			goto inval_msg;

		/* Set VF's number in each of the msg */
		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
		offset = msg->next_msgoff;
	}
	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
					vf_mbox->num_msgs);
	if (err)
		goto inval_msg;
	return;

inval_msg:
	/* NOTE(review): msg would be NULL here if num_msgs were 0 —
	 * appears unreachable since this work is only queued when msgs
	 * are pending; confirm at the queueing site (otx2_queue_work).
	 */
	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
	otx2_mbox_msg_send(mbox, vf_idx);
}
/* Workqueue handler for a VF's responses to PF up-notifications:
 * validate each response (known ID, response signature), log any
 * error codes, count acks, then reset the up-mailbox region.
 */
static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_nic *pf = vf_mbox->pfvf;
	struct otx2_mbox_dev *mdev;
	int offset, id, vf_idx = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;

	/* VF index is this mbox's offset in the per-VF array */
	vf_idx = vf_mbox - pf->mbox_pfvf;
	mbox = &pf->mbox_pfvf[0].mbox_up;
	mdev = &mbox->dev[vf_idx];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(pf->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(pf->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(pf->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}

end:
		/* Advance to the next message even after a bad one */
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}

	otx2_mbox_reset(mbox, vf_idx);
}
/* Hard IRQ handler for VF=>PF mailbox interrupts: read and ack each
 * interrupt bank, then queue per-VF mailbox work for every VF whose
 * bit is set (bank 1 first when more than 64 VFs are enabled).
 */
static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
	int vfs = pf->total_vfs;
	struct mbox *mbox;
	u64 intr;

	mbox = pf->mbox_pfvf;
	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
		/* Bank 1 starts at VF 64 */
		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
				TYPE_PFVF);
		vfs -= 64;
	}

	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);

	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);

	return IRQ_HANDLED;
}
/* Set up the PF<=>VF mailbox: per-VF state array, a dedicated
 * workqueue, and the shared MMIO region (its base is read from
 * RVU_PF_VF_BAR4_ADDR) for both the request and "up" directions.
 * Returns 0 on success or a negative errno.
 */
static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
{
	void __iomem *hwbase;
	struct mbox *mbox;
	int err, vf;
	u64 base;

	if (!numvfs)
		return -EINVAL;

	/* devm-managed: released automatically on driver detach */
	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
				     sizeof(struct mbox), GFP_KERNEL);
	if (!pf->mbox_pfvf)
		return -ENOMEM;

	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
					   WQ_UNBOUND | WQ_HIGHPRI |
					   WQ_MEM_RECLAIM, 1);
	if (!pf->mbox_pfvf_wq)
		return -ENOMEM;

	/* Map the whole per-VF mailbox region in one go */
	base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR));
	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
	if (!hwbase) {
		err = -ENOMEM;
		goto free_wq;
	}

	mbox = &pf->mbox_pfvf[0];
	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF, numvfs);
	if (err)
		goto free_iomem;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF_UP, numvfs);
	if (err)
		goto free_iomem;

	for (vf = 0; vf < numvfs; vf++) {
		mbox->pfvf = pf;
		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
		mbox++;
	}

	return 0;

free_iomem:
	/* hwbase is always non-NULL on this path; check kept as written */
	if (hwbase)
		iounmap(hwbase);
free_wq:
	destroy_workqueue(pf->mbox_pfvf_wq);
	return err;
}
/* Undo otx2_pfvf_mbox_init(): destroy the mailbox workqueue, unmap the
 * VF mailbox MMIO region and release mailbox state.  Safe to call when
 * init never ran or partially failed.
 */
static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
{
	struct mbox *mbox;

	/* Test the array pointer itself; the previous form computed
	 * &pf->mbox_pfvf[0] first, which relies on NULL-pointer
	 * arithmetic before the check.
	 */
	if (!pf->mbox_pfvf)
		return;

	mbox = &pf->mbox_pfvf[0];

	if (pf->mbox_pfvf_wq) {
		destroy_workqueue(pf->mbox_pfvf_wq);
		pf->mbox_pfvf_wq = NULL;
	}

	if (mbox->mbox.hwbase)
		iounmap(mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
}
/* Ack any stale PF<=>VF mailbox interrupts, then enable them for the
 * first 'numvfs' VFs (second bank handles VFs 64..127).
 */
static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	/* Clear both interrupt banks before enabling anything */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);

	/* Enable the interrupt for the first 64 VFs */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	/* Second bank covers VFs beyond 64 */
	if (numvfs > 64)
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs - 64));
}
/* Mask and ack all PF<=>VF mailbox interrupts and free their IRQ
 * line(s); the second vector exists only when more than 64 VFs are on.
 */
static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	int vector;

	/* Disable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);

	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, pf);

	if (numvfs > 64) {
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, pf);
	}
}
/* Request the PF<=>VF mailbox IRQ vector(s) and enable the interrupts.
 * MBOX1 serves VFs 64..127 and is requested only when numvfs > 64.
 * Returns 0 on success or the request_irq() error.
 */
static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	char *irq_name;
	int err;

	/* Register MBOX0 interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
	/* pcifunc is 0 until the AF's READY response arrives */
	if (pf->pcifunc)
		snprintf(irq_name, NAME_SIZE,
			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
	else
		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
		return err;
	}

	if (numvfs > 64) {
		/* Register MBOX1 interrupt handler */
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
		if (pf->pcifunc)
			snprintf(irq_name, NAME_SIZE,
				 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
		else
			snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
		err = request_irq(pci_irq_vector(pf->pdev,
						 RVU_PF_INT_VEC_VFPF_MBOX1),
				  otx2_pfvf_mbox_intr_handler,
				  0, irq_name, pf);
		if (err) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
			return err;
		}
	}

	otx2_enable_pfvf_mbox_intr(pf, numvfs);

	return 0;
}
static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf, static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
struct mbox_msghdr *msg) struct mbox_msghdr *msg)
{ {
int devid;
if (msg->id >= MBOX_MSG_MAX) { if (msg->id >= MBOX_MSG_MAX) {
dev_err(pf->dev, dev_err(pf->dev,
"Mbox msg with unknown ID 0x%x\n", msg->id); "Mbox msg with unknown ID 0x%x\n", msg->id);
...@@ -131,6 +729,26 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf, ...@@ -131,6 +729,26 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
return; return;
} }
/* message response heading VF */
devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
if (devid) {
struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
struct delayed_work *dwork;
switch (msg->id) {
case MBOX_MSG_NIX_LF_START_RX:
config->intf_down = false;
dwork = &config->link_event_work;
schedule_delayed_work(dwork, msecs_to_jiffies(100));
break;
case MBOX_MSG_NIX_LF_STOP_RX:
config->intf_down = true;
break;
}
return;
}
switch (msg->id) { switch (msg->id) {
case MBOX_MSG_READY: case MBOX_MSG_READY:
pf->pcifunc = msg->pcifunc; pf->pcifunc = msg->pcifunc;
...@@ -212,9 +830,22 @@ int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf, ...@@ -212,9 +830,22 @@ int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg, struct cgx_link_info_msg *msg,
struct msg_rsp *rsp) struct msg_rsp *rsp)
{ {
int i;
/* Copy the link info sent by AF */ /* Copy the link info sent by AF */
pf->linfo = msg->link_info; pf->linfo = msg->link_info;
/* notify VFs about link event */
for (i = 0; i < pci_num_vf(pf->pdev); i++) {
struct otx2_vf_config *config = &pf->vf_configs[i];
struct delayed_work *dwork = &config->link_event_work;
if (config->intf_down)
continue;
schedule_delayed_work(dwork, msecs_to_jiffies(100));
}
/* interface has not been fully configured yet */ /* interface has not been fully configured yet */
if (pf->flags & OTX2_FLAG_INTF_DOWN) if (pf->flags & OTX2_FLAG_INTF_DOWN)
return 0; return 0;
...@@ -286,6 +917,12 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) ...@@ -286,6 +917,12 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_process_mbox_msg_up(pf, msg); otx2_process_mbox_msg_up(pf, msg);
offset = mbox->rx_start + msg->next_msgoff; offset = mbox->rx_start + msg->next_msgoff;
} }
if (devid) {
otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
MBOX_DIR_PFVF_UP, devid - 1,
af_mbox->up_num_msgs);
return;
}
otx2_mbox_msg_send(mbox, 0); otx2_mbox_msg_send(mbox, 0);
} }
...@@ -362,7 +999,6 @@ static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) ...@@ -362,7 +999,6 @@ static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
struct mbox *mbox = &pf->mbox; struct mbox *mbox = &pf->mbox;
if (pf->mbox_wq) { if (pf->mbox_wq) {
flush_workqueue(pf->mbox_wq);
destroy_workqueue(pf->mbox_wq); destroy_workqueue(pf->mbox_wq);
pf->mbox_wq = NULL; pf->mbox_wq = NULL;
} }
...@@ -415,7 +1051,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf) ...@@ -415,7 +1051,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler); INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler); INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
otx2_mbox_lock_init(&pf->mbox); mutex_init(&mbox->lock);
return 0; return 0;
exit: exit:
...@@ -428,19 +1064,19 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable) ...@@ -428,19 +1064,19 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
struct msg_req *msg; struct msg_req *msg;
int err; int err;
otx2_mbox_lock(&pf->mbox); mutex_lock(&pf->mbox.lock);
if (enable) if (enable)
msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox); msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
else else
msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox); msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
if (!msg) { if (!msg) {
otx2_mbox_unlock(&pf->mbox); mutex_unlock(&pf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
err = otx2_sync_mbox_msg(&pf->mbox); err = otx2_sync_mbox_msg(&pf->mbox);
otx2_mbox_unlock(&pf->mbox); mutex_unlock(&pf->mbox.lock);
return err; return err;
} }
...@@ -449,19 +1085,19 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) ...@@ -449,19 +1085,19 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg; struct msg_req *msg;
int err; int err;
otx2_mbox_lock(&pf->mbox); mutex_lock(&pf->mbox.lock);
if (enable) if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
else else
msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox); msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
if (!msg) { if (!msg) {
otx2_mbox_unlock(&pf->mbox); mutex_unlock(&pf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
err = otx2_sync_mbox_msg(&pf->mbox); err = otx2_sync_mbox_msg(&pf->mbox);
otx2_mbox_unlock(&pf->mbox); mutex_unlock(&pf->mbox.lock);
return err; return err;
} }
...@@ -483,6 +1119,7 @@ int otx2_set_real_num_queues(struct net_device *netdev, ...@@ -483,6 +1119,7 @@ int otx2_set_real_num_queues(struct net_device *netdev,
"Failed to set no of Rx queues: %d\n", rx_queues); "Failed to set no of Rx queues: %d\n", rx_queues);
return err; return err;
} }
EXPORT_SYMBOL(otx2_set_real_num_queues);
static irqreturn_t otx2_q_intr_handler(int irq, void *data) static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{ {
...@@ -646,7 +1283,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) ...@@ -646,7 +1283,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
/* Get the size of receive buffers to allocate */ /* Get the size of receive buffers to allocate */
pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN); pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
otx2_mbox_lock(mbox); mutex_lock(&mbox->lock);
/* NPA init */ /* NPA init */
err = otx2_config_npa(pf); err = otx2_config_npa(pf);
if (err) if (err)
...@@ -663,35 +1300,35 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) ...@@ -663,35 +1300,35 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */ /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
err = otx2_rq_aura_pool_init(pf); err = otx2_rq_aura_pool_init(pf);
if (err) { if (err) {
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
goto err_free_nix_lf; goto err_free_nix_lf;
} }
/* Init Auras and pools used by NIX SQ, for queueing SQEs */ /* Init Auras and pools used by NIX SQ, for queueing SQEs */
err = otx2_sq_aura_pool_init(pf); err = otx2_sq_aura_pool_init(pf);
if (err) { if (err) {
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
goto err_free_rq_ptrs; goto err_free_rq_ptrs;
} }
err = otx2_txsch_alloc(pf); err = otx2_txsch_alloc(pf);
if (err) { if (err) {
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
goto err_free_sq_ptrs; goto err_free_sq_ptrs;
} }
err = otx2_config_nix_queues(pf); err = otx2_config_nix_queues(pf);
if (err) { if (err) {
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
goto err_free_txsch; goto err_free_txsch;
} }
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
err = otx2_txschq_config(pf, lvl); err = otx2_txschq_config(pf, lvl);
if (err) { if (err) {
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
goto err_free_nix_queues; goto err_free_nix_queues;
} }
} }
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
return err; return err;
err_free_nix_queues: err_free_nix_queues:
...@@ -709,7 +1346,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) ...@@ -709,7 +1346,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
otx2_aura_pool_free(pf); otx2_aura_pool_free(pf);
err_free_nix_lf: err_free_nix_lf:
otx2_mbox_lock(mbox); mutex_lock(&mbox->lock);
req = otx2_mbox_alloc_msg_nix_lf_free(mbox); req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (req) { if (req) {
if (otx2_sync_mbox_msg(mbox)) if (otx2_sync_mbox_msg(mbox))
...@@ -723,7 +1360,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) ...@@ -723,7 +1360,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
dev_err(pf->dev, "%s failed to free npalf\n", __func__); dev_err(pf->dev, "%s failed to free npalf\n", __func__);
} }
exit: exit:
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
return err; return err;
} }
...@@ -743,11 +1380,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) ...@@ -743,11 +1380,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err) if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n"); dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
otx2_mbox_lock(mbox); mutex_lock(&mbox->lock);
/* Disable backpressure */ /* Disable backpressure */
if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_nix_config_bp(pf, false); otx2_nix_config_bp(pf, false);
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
/* Disable RQs */ /* Disable RQs */
otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
...@@ -768,28 +1405,28 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) ...@@ -768,28 +1405,28 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf); otx2_free_cq_res(pf);
otx2_mbox_lock(mbox); mutex_lock(&mbox->lock);
/* Reset NIX LF */ /* Reset NIX LF */
req = otx2_mbox_alloc_msg_nix_lf_free(mbox); req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (req) { if (req) {
if (otx2_sync_mbox_msg(mbox)) if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__); dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
} }
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
/* Disable NPA Pool and Aura hw context */ /* Disable NPA Pool and Aura hw context */
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
otx2_aura_pool_free(pf); otx2_aura_pool_free(pf);
otx2_mbox_lock(mbox); mutex_lock(&mbox->lock);
/* Reset NPA LF */ /* Reset NPA LF */
req = otx2_mbox_alloc_msg_npa_lf_free(mbox); req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
if (req) { if (req) {
if (otx2_sync_mbox_msg(mbox)) if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free npalf\n", __func__); dev_err(pf->dev, "%s failed to free npalf\n", __func__);
} }
otx2_mbox_unlock(mbox); mutex_unlock(&mbox->lock);
} }
int otx2_open(struct net_device *netdev) int otx2_open(struct net_device *netdev)
...@@ -918,6 +1555,9 @@ int otx2_open(struct net_device *netdev) ...@@ -918,6 +1555,9 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK)) if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf); otx2_handle_link_event(pf);
/* Restore pause frame settings */
otx2_config_pause_frm(pf);
err = otx2_rxtx_enable(pf, true); err = otx2_rxtx_enable(pf, true);
if (err) if (err)
goto err_free_cints; goto err_free_cints;
...@@ -941,6 +1581,7 @@ int otx2_open(struct net_device *netdev) ...@@ -941,6 +1581,7 @@ int otx2_open(struct net_device *netdev)
kfree(qset->napi); kfree(qset->napi);
return err; return err;
} }
EXPORT_SYMBOL(otx2_open);
int otx2_stop(struct net_device *netdev) int otx2_stop(struct net_device *netdev)
{ {
...@@ -1001,6 +1642,7 @@ int otx2_stop(struct net_device *netdev) ...@@ -1001,6 +1642,7 @@ int otx2_stop(struct net_device *netdev)
sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt)); sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
return 0; return 0;
} }
EXPORT_SYMBOL(otx2_stop);
static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{ {
...@@ -1042,10 +1684,10 @@ static void otx2_set_rx_mode(struct net_device *netdev) ...@@ -1042,10 +1684,10 @@ static void otx2_set_rx_mode(struct net_device *netdev)
if (!(netdev->flags & IFF_UP)) if (!(netdev->flags & IFF_UP))
return; return;
otx2_mbox_lock(&pf->mbox); mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
if (!req) { if (!req) {
otx2_mbox_unlock(&pf->mbox); mutex_unlock(&pf->mbox.lock);
return; return;
} }
...@@ -1058,7 +1700,7 @@ static void otx2_set_rx_mode(struct net_device *netdev) ...@@ -1058,7 +1700,7 @@ static void otx2_set_rx_mode(struct net_device *netdev)
req->mode |= NIX_RX_MODE_ALLMULTI; req->mode |= NIX_RX_MODE_ALLMULTI;
otx2_sync_mbox_msg(&pf->mbox); otx2_sync_mbox_msg(&pf->mbox);
otx2_mbox_unlock(&pf->mbox); mutex_unlock(&pf->mbox.lock);
} }
static int otx2_set_features(struct net_device *netdev, static int otx2_set_features(struct net_device *netdev,
...@@ -1129,7 +1771,6 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf) ...@@ -1129,7 +1771,6 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
otx2_disable_mbox_intr(pf); otx2_disable_mbox_intr(pf);
pci_free_irq_vectors(hw->pdev); pci_free_irq_vectors(hw->pdev);
pci_free_irq_vectors(hw->pdev);
err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (err < 0) { if (err < 0) {
dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n", dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
...@@ -1184,6 +1825,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -1184,6 +1825,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pf->netdev = netdev; pf->netdev = netdev;
pf->pdev = pdev; pf->pdev = pdev;
pf->dev = dev; pf->dev = dev;
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
pf->flags |= OTX2_FLAG_INTF_DOWN; pf->flags |= OTX2_FLAG_INTF_DOWN;
hw = &pf->hw; hw = &pf->hw;
...@@ -1295,6 +1937,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -1295,6 +1937,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */ /* Enable link notifications */
otx2_cgx_config_linkevents(pf, true); otx2_cgx_config_linkevents(pf, true);
/* Enable pause frames by default */
pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
return 0; return 0;
err_detach_rsrc: err_detach_rsrc:
...@@ -1313,6 +1959,121 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -1313,6 +1959,121 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err; return err;
} }
/* Delayed work: push the PF's cached CGX link state to one VF as a
 * MBOX_MSG_CGX_LINK_EVENT up-notification and wait for the VF's ack.
 */
static void otx2_vf_link_event_task(struct work_struct *work)
{
	struct otx2_vf_config *cfg = container_of(work, struct otx2_vf_config,
						  link_event_work.work);
	struct otx2_nic *pf = cfg->pf;
	struct cgx_link_info_msg *event;
	struct mbox_msghdr *hdr;
	int vf;

	/* This VF's index is the work item's offset in vf_configs[] */
	vf = cfg - pf->vf_configs;

	hdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf,
				      sizeof(*event), sizeof(struct msg_rsp));
	if (!hdr) {
		dev_err(pf->dev, "Failed to create VF%d link event\n", vf);
		return;
	}

	event = (struct cgx_link_info_msg *)hdr;
	event->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
	event->hdr.sig = OTX2_MBOX_REQ_SIG;
	memcpy(&event->link_info, &pf->linfo, sizeof(event->link_info));

	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf);
}
/* Enable SR-IOV: bring up PF<=>VF mailboxes and their IRQs, per-VF
 * config state (link-event work), FLR handling, then enable the VFs.
 * Returns numvfs on success or a negative errno; on failure the goto
 * chain unwinds every completed step in reverse order.
 */
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int ret, i;

	/* Init PF <=> VF mailbox stuff */
	ret = otx2_pfvf_mbox_init(pf, numvfs);
	if (ret)
		return ret;

	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
	if (ret)
		goto free_mbox;

	pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
				 GFP_KERNEL);
	if (!pf->vf_configs) {
		ret = -ENOMEM;
		goto free_intr;
	}

	/* VFs start with their interface down until NIX_LF_START_RX */
	for (i = 0; i < numvfs; i++) {
		pf->vf_configs[i].pf = pf;
		pf->vf_configs[i].intf_down = true;
		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
				  otx2_vf_link_event_task);
	}

	ret = otx2_pf_flr_init(pf, numvfs);
	if (ret)
		goto free_configs;

	ret = otx2_register_flr_me_intr(pf, numvfs);
	if (ret)
		goto free_flr;

	ret = pci_enable_sriov(pdev, numvfs);
	if (ret)
		goto free_flr_intr;

	return numvfs;
free_flr_intr:
	otx2_disable_flr_me_intr(pf);
free_flr:
	otx2_flr_wq_destroy(pf);
free_configs:
	kfree(pf->vf_configs);
free_intr:
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
	otx2_pfvf_mbox_destroy(pf);

	return ret;
}
/* SR-IOV disable path: tear down everything otx2_sriov_enable() set up.
 * Returns 0 (also when SR-IOV was not enabled).
 */
static int otx2_sriov_disable(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int numvfs = pci_num_vf(pdev);
	int i;

	if (!numvfs)
		return 0;

	pci_disable_sriov(pdev);

	/* Iterate on the snapshot taken above: pci_num_vf() reports 0 once
	 * SR-IOV is disabled, so using it as the loop bound here would skip
	 * cancelling every VF's pending link-event work.
	 */
	for (i = 0; i < numvfs; i++)
		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
	kfree(pf->vf_configs);

	otx2_disable_flr_me_intr(pf);
	otx2_flr_wq_destroy(pf);
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
	otx2_pfvf_mbox_destroy(pf);

	return 0;
}
/* pci_driver ->sriov_configure callback: a request for zero VFs means
 * disable SR-IOV, any other count enables that many VFs.
 */
static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	if (numvfs)
		return otx2_sriov_enable(pdev, numvfs);

	return otx2_sriov_disable(pdev);
}
static void otx2_remove(struct pci_dev *pdev) static void otx2_remove(struct pci_dev *pdev)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
...@@ -1327,6 +2088,8 @@ static void otx2_remove(struct pci_dev *pdev) ...@@ -1327,6 +2088,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_cgx_config_linkevents(pf, false); otx2_cgx_config_linkevents(pf, false);
unregister_netdev(netdev); unregister_netdev(netdev);
otx2_sriov_disable(pf->pdev);
otx2_detach_resources(&pf->mbox); otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf); otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf); otx2_pfaf_mbox_destroy(pf);
...@@ -1343,6 +2106,7 @@ static struct pci_driver otx2_pf_driver = { ...@@ -1343,6 +2106,7 @@ static struct pci_driver otx2_pf_driver = {
.probe = otx2_probe, .probe = otx2_probe,
.shutdown = otx2_remove, .shutdown = otx2_remove,
.remove = otx2_remove, .remove = otx2_remove,
.sriov_configure = otx2_sriov_configure
}; };
static int __init otx2_rvupf_init_module(void) static int __init otx2_rvupf_init_module(void)
......
...@@ -45,6 +45,19 @@ ...@@ -45,6 +45,19 @@
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) #define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) #define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
#define RVU_VF_VFPF_MBOX1 (0x00008)
#define RVU_VF_VFPF_MBOXX(a) (0x00 | (a) << 3)
#define RVU_VF_INT (0x20)
#define RVU_VF_INT_W1S (0x28)
#define RVU_VF_INT_ENA_W1S (0x30)
#define RVU_VF_INT_ENA_W1C (0x38)
#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
#define RVU_FUNC_BLKADDR_SHIFT 20 #define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL #define RVU_FUNC_BLKADDR_MASK 0x1FULL
......
...@@ -138,6 +138,25 @@ static void otx2_set_rxhash(struct otx2_nic *pfvf, ...@@ -138,6 +138,25 @@ static void otx2_set_rxhash(struct otx2_nic *pfvf,
skb_set_hash(skb, hash, hash_type); skb_set_hash(skb, hash, hash_type);
} }
/* Return all receive-buffer pointers referenced by a multi-segment RX
 * CQE back to the aura (buffer pool), so the hardware can reuse them.
 */
static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;
	u64 *seg_addr;
	int seg;

	/* desc_sizem1 encodes descriptor size in 16-byte units, minus one */
	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
			/* Low 3 address bits are flags, mask them off */
			otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
		start += sizeof(*sg);
	}
}
static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe, int qidx) struct nix_cqe_rx_s *cqe, int qidx)
{ {
...@@ -189,16 +208,17 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, ...@@ -189,16 +208,17 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
/* For now ignore all the NPC parser errors and /* For now ignore all the NPC parser errors and
* pass the packets to stack. * pass the packets to stack.
*/ */
return false; if (cqe->sg.segs == 1)
return false;
} }
/* If RXALL is enabled pass on packets to stack. */ /* If RXALL is enabled pass on packets to stack. */
if (cqe->sg.segs && (pfvf->netdev->features & NETIF_F_RXALL)) if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
return false; return false;
/* Free buffer back to pool */ /* Free buffer back to pool */
if (cqe->sg.segs) if (cqe->sg.segs)
otx2_aura_freeptr(pfvf, qidx, cqe->sg.seg_addr & ~0x07ULL); otx2_free_rcv_seg(pfvf, cqe, qidx);
return true; return true;
} }
...@@ -210,7 +230,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, ...@@ -210,7 +230,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse; struct nix_rx_parse_s *parse = &cqe->parse;
struct sk_buff *skb = NULL; struct sk_buff *skb = NULL;
if (unlikely(parse->errlev || parse->errcode)) { if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
return; return;
} }
...@@ -778,6 +798,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, ...@@ -778,6 +798,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return true; return true;
} }
EXPORT_SYMBOL(otx2_sq_append_skb);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{ {
...@@ -788,11 +809,15 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) ...@@ -788,11 +809,15 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) { while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
if (!cqe->sg.subdc) if (!cqe->sg.subdc)
continue; continue;
processed_cqe++;
if (cqe->sg.segs > 1) {
otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
continue;
}
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE); otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
put_page(virt_to_page(phys_to_virt(pa))); put_page(virt_to_page(phys_to_virt(pa)));
processed_cqe++;
} }
/* Free CQEs to HW */ /* Free CQEs to HW */
...@@ -831,18 +856,18 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) ...@@ -831,18 +856,18 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
struct msg_req *msg; struct msg_req *msg;
int err; int err;
otx2_mbox_lock(&pfvf->mbox); mutex_lock(&pfvf->mbox.lock);
if (enable) if (enable)
msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox); msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
else else
msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox); msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
if (!msg) { if (!msg) {
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM; return -ENOMEM;
} }
err = otx2_sync_mbox_msg(&pfvf->mbox); err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock);
return err; return err;
} }
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "otx2_common.h"
#include "otx2_reg.h"
#define DRV_NAME "octeontx2-nicvf"
#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver"
/* PCI device IDs this VF driver binds to; table is zero-terminated */
static const struct pci_device_id otx2_vf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
	{ }
};
MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);
/* RVU VF Interrupt Vector Enumeration */
enum {
	RVU_VF_INT_VEC_MBOX = 0x0, /* VF <=> PF mailbox interrupt */
};
/* Dispatch one response message received from the PF/AF: validate the
 * message ID, signature and return code, then hand the payload to the
 * matching mbox_handler_*() helper from the common code.
 */
static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
					 struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(vf->dev,
			"Mbox msg with unknown ID %d\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(vf->dev,
			"Mbox msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}

	if (msg->rc == MBOX_MSG_INVALID) {
		dev_err(vf->dev,
			"PF/AF says the sent msg(s) %d were invalid\n",
			msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		/* READY response carries this VF's RVU pcifunc identity */
		vf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_TXSCH_ALLOC:
		mbox_handler_nix_txsch_alloc(vf,
					     (struct nix_txsch_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
		break;
	default:
		/* Unhandled IDs are only reported if they carry an error */
		if (msg->rc)
			dev_err(vf->dev,
				"Mbox msg response has err %d, ID %d\n",
				msg->rc, msg->id);
	}
}
/* Workqueue handler for PF->VF responses: walk every message the mbox
 * interrupt queued, process each, then reset the mailbox region.
 */
static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	int offset, id;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	/* num_msgs was snapshotted by the IRQ handler before queuing us */
	if (af_mbox->num_msgs == 0)
		return;
	/* Messages are packed back to back after the aligned header;
	 * next_msgoff links each message to the following one.
	 */
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}

	otx2_mbox_reset(mbox, 0);
}
/* Handle one unsolicited (PF-initiated) "up" message.  Only link-event
 * notifications are expected; anything else gets an invalid-msg reply.
 * Returns 0 or the handler's result on success, negative errno otherwise.
 */
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
				      struct mbox_msghdr *req)
{
	struct msg_rsp *rsp;
	int err;

	/* Check if valid, if not reply with a invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
	case MBOX_MSG_CGX_LINK_EVENT:
		/* Build the ack first, then run the common link handler */
		rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
						&vf->mbox.mbox_up, 0,
						sizeof(struct msg_rsp));
		if (!rsp)
			return -ENOMEM;

		rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
		rsp->hdr.pcifunc = 0;
		rsp->hdr.rc = 0;
		err = otx2_mbox_up_handler_cgx_link_event(
				vf, (struct cgx_link_info_msg *)req, rsp);
		return err;
	default:
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	/* Unreachable: every switch arm above returns */
	return 0;
}
/* Workqueue handler for PF->VF notifications ("up" channel): process all
 * queued messages and send the accumulated replies back to the PF.
 */
static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *vf;
	int offset, id;

	vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	vf = vf_mbox->pfvf;
	mbox = &vf_mbox->mbox_up;
	mdev = &mbox->dev[0];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	/* up_num_msgs was snapshotted by the IRQ handler before queuing us */
	if (vf_mbox->up_num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_mbox_msg_up(vf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}

	/* Push the replies built above back to the PF */
	otx2_mbox_msg_send(mbox, 0);
}
/* Hard-IRQ handler for the VF mailbox vector: acknowledge the interrupt,
 * snapshot how many messages arrived on each channel (responses and
 * notifications) and defer the actual processing to the mbox workqueue.
 */
static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
	struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;

	/* Clear the IRQ */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));

	/* Read latest mbox data */
	smp_rmb();

	/* Check for PF => VF response messages */
	mbox = &vf->mbox.mbox;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		/* Snapshot the count and clear the shared header so a new
		 * batch from the PF is not double-counted.
		 */
		vf->mbox.num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
	}
	/* Check for PF => VF notification messages */
	mbox = &vf->mbox.mbox_up;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.up_num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
	}

	return IRQ_HANDLED;
}
/* Mask the VF => PF mailbox interrupt and release its IRQ handler */
static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
{
	int irq = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);

	/* Disable the interrupt source first, then free the handler */
	otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(irq, vf);
}
/* Register and enable the VF mailbox interrupt.  When @probe_pf is true
 * (first call during probe) additionally verify that the PF/AF responds
 * to a READY message; on no response, defer the probe.
 * Returns 0 on success or a negative errno.
 */
static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
{
	struct otx2_hw *hw = &vf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
	err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
			  otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
	if (err) {
		dev_err(vf->dev,
			"RVUPF: IRQ registration failed for VFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from PF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
	otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_pf)
		return 0;

	/* Check mailbox communication with PF */
	req = otx2_mbox_alloc_msg_ready(&vf->mbox);
	if (!req) {
		otx2vf_disable_mbox_intr(vf);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&vf->mbox);
	if (err) {
		dev_warn(vf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2vf_disable_mbox_intr(vf);
		return -EPROBE_DEFER;
	}

	return 0;
}
/* Tear down the VF<=>PF mailbox: stop the workqueue, unmap the shared
 * mailbox region and destroy both mbox directions.
 */
static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;

	if (vf->mbox_wq) {
		/* destroy_workqueue() already drains all pending work, so a
		 * separate flush_workqueue() beforehand is redundant.
		 */
		destroy_workqueue(vf->mbox_wq);
		vf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase)
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}
/* Set up the VF<=>PF mailbox: a dedicated workqueue, a write-combining
 * mapping of the shared mailbox memory, and both mbox directions.
 * Returns 0 on success or a negative errno, releasing everything
 * acquired so far on failure.
 */
static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = vf;
	vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
				      WQ_UNBOUND | WQ_HIGHPRI |
				      WQ_MEM_RECLAIM, 1);
	if (!vf->mbox_wq)
		return -ENOMEM;

	/* Mailbox is a reserved memory (in RAM) region shared between
	 * admin function (i.e PF0) and this VF, shouldn't be mapped as
	 * device memory to allow unaligned accesses.
	 */
	hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
			    pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
	if (!hwbase) {
		dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF, 1);
	if (err)
		goto unmap;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF_UP, 1);
	if (err)
		goto unmap;

	err = otx2_mbox_bbuf_init(mbox, vf->pdev);
	if (err)
		goto unmap;

	INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	return 0;

unmap:
	/* ioremap_wc() is not device-managed; without this the mapping
	 * would leak on every post-map failure path.
	 */
	iounmap(hwbase);
exit:
	destroy_workqueue(vf->mbox_wq);
	return err;
}
/* ndo_open: bring the interface up through the common otx2_open() path.
 * LBK (loopback) VFs never receive CGX link notifications, so report
 * link-up for them right here.
 */
static int otx2vf_open(struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int err;

	err = otx2_open(netdev);
	if (err)
		return err;

	if (is_otx2_lbkvf(vf->pdev)) {
		pr_info("%s NIC Link is UP\n", netdev->name);
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	}

	return 0;
}
/* ndo_stop: teardown is fully shared with the common otx2 code */
static int otx2vf_stop(struct net_device *netdev)
{
	return otx2_stop(netdev);
}
/* ndo_start_xmit: append the skb to the send queue selected by the
 * stack's queue mapping.  If the SQ is out of room, stop the queue and
 * report busy so the stack requeues the skb.
 */
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	sq = &vf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, incase SQBs got freed up */
		smp_mb();
		/* Free SQE slots = (total SQBs - in-flight SQBs) * SQEs/SQB;
		 * wake the queue if enough slots opened up meanwhile.
		 */
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
/* ndo_change_mtu: the interface must be restarted for a new MTU to take
 * effect, so stop it first when it is currently running.
 */
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool was_running = netif_running(netdev);
	int err = 0;

	if (was_running)
		otx2vf_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (was_running)
		err = otx2vf_open(netdev);

	return err;
}
/* Work scheduled from ndo_tx_timeout: restart the interface under RTNL
 * to recover the hardware queues.
 */
static void otx2vf_reset_task(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);

	/* RTNL serializes against concurrent open/stop from user space */
	rtnl_lock();

	if (netif_running(vf->netdev)) {
		otx2vf_stop(vf->netdev);
		vf->reset_count++;
		otx2vf_open(vf->netdev);
	}

	rtnl_unlock();
}
/* net_device_ops for the VF: local wrappers where VF behavior differs,
 * otherwise the handlers shared with the PF driver.
 */
static const struct net_device_ops otx2vf_netdev_ops = {
	.ndo_open = otx2vf_open,
	.ndo_stop = otx2vf_stop,
	.ndo_start_xmit = otx2vf_xmit,
	.ndo_set_mac_address = otx2_set_mac_address,
	.ndo_change_mtu = otx2vf_change_mtu,
	.ndo_get_stats64 = otx2_get_stats64,
	.ndo_tx_timeout = otx2_tx_timeout,
};
/* After the NIX LF is attached the required MSI-X vector count is known:
 * free the initially allocated vectors, allocate the exact number, and
 * re-register the (just disabled) mailbox interrupt.
 * Returns 0 on success or a negative errno.
 */
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
	struct otx2_hw *hw = &vf->hw;
	int num_vec, err;

	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	/* Mailbox IRQ must be released before its vector is freed */
	otx2vf_disable_mbox_intr(vf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	/* probe_pf=false: PF readiness was already verified during probe */
	return otx2vf_register_mbox_intr(vf, false);
}
/* PCI probe for an RVU VF: enable the device, allocate the netdev and
 * MSI-X vectors, set up the VF<=>PF mailbox, attach NPA/NIX LFs via the
 * AF and register the network device.
 * Returns 0 on success or a negative errno, unwinding on failure.
 */
static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int num_vec = pci_msix_vec_count(pdev);
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct otx2_nic *vf;
	struct otx2_hw *hw;
	int err, qcount;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	/* One RX and one TX queue per online CPU */
	qcount = num_online_cpus();
	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	vf = netdev_priv(netdev);
	vf->netdev = netdev;
	vf->pdev = pdev;
	vf->dev = dev;
	vf->iommu_domain = iommu_get_domain_for_dev(dev);

	vf->flags |= OTX2_FLAG_INTF_DOWN;
	hw = &vf->hw;
	hw->pdev = vf->pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;

	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		/* err would otherwise still be 0 here (from the successful
		 * dma_set_mask_and_coherent() call), making probe return
		 * success on a freed netdev.
		 */
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, num_vec);
		goto err_free_netdev;
	}

	vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!vf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	/* Init VF <=> PF mailbox stuff */
	err = otx2vf_vfaf_mbox_init(vf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2vf_register_mbox_intr(vf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this VF */
	err = otx2_attach_npa_nix(vf);
	if (err)
		goto err_disable_mbox_intr;

	/* NOTE(review): a failure here unwinds without detaching the
	 * NPA/NIX LFs attached just above - confirm whether a detach is
	 * needed on this path.
	 */
	err = otx2vf_realloc_msix_vectors(vf);
	if (err)
		goto err_mbox_destroy;

	err = otx2_set_real_num_queues(netdev, qcount, qcount);
	if (err)
		goto err_detach_rsrc;

	otx2_setup_dev_hw_settings(vf);

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
	netdev->features = netdev->hw_features;

	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2vf_netdev_ops;

	/* MTU range: 68 - 9190 */
	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = OTX2_MAX_MTU;

	INIT_WORK(&vf->reset_task, otx2vf_reset_task);

	/* To distinguish, for LBK VFs set netdev name explicitly */
	if (is_otx2_lbkvf(vf->pdev)) {
		int n;

		n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
		/* Need to subtract 1 to get proper VF number */
		n -= 1;
		snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
	}

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_detach_rsrc;
	}

	otx2vf_set_ethtool_ops(netdev);

	/* Enable pause frames by default */
	vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;

	return 0;

err_detach_rsrc:
	otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
	otx2vf_disable_mbox_intr(vf);
err_mbox_destroy:
	otx2vf_vfaf_mbox_destroy(vf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}
/* PCI remove/shutdown: unregister the netdev and release everything
 * acquired in probe, in reverse order.
 */
static void otx2vf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *vf;

	if (!netdev)
		return;

	vf = netdev_priv(netdev);

	/* Stop the reset worker before tearing anything down, and
	 * unregister the netdev registered in probe: freeing a still
	 * registered net_device is invalid.
	 */
	cancel_work_sync(&vf->reset_task);
	unregister_netdev(netdev);

	otx2vf_disable_mbox_intr(vf);

	otx2_detach_resources(&vf->mbox);
	otx2vf_vfaf_mbox_destroy(vf);
	pci_free_irq_vectors(vf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}
/* VF PCI driver; shutdown reuses the remove handler */
static struct pci_driver otx2vf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_vf_id_table,
	.probe = otx2vf_probe,
	.remove = otx2vf_remove,
	.shutdown = otx2vf_remove,
};
/* Module entry point: announce the driver and register it with the
 * PCI core.
 */
static int __init otx2vf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2vf_driver);
}
/* Module exit point: unregister from the PCI core (triggers remove for
 * every bound device).
 */
static void __exit otx2vf_cleanup_module(void)
{
	pci_unregister_driver(&otx2vf_driver);
}
module_init(otx2vf_init_module);
module_exit(otx2vf_cleanup_module);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment