Commit 92845948 authored by Paolo Abeni

Merge branch 'net-lan966x-use-the-newly-introduced-fdma-library'

Daniel Machon says:

====================
net: lan966x: use the newly introduced FDMA library

This patch series is the second of a 2-part series [1], that adds a new
common FDMA library for Microchip switch chips Sparx5 and lan966x. These
chips share the same FDMA engine, and as such will benefit from a common
library with a common implementation.  This also has the benefit of
removing a lot of open-coded bookkeeping and duplicate code for the two
drivers.

In this second series, the FDMA library will be taken into use by the
lan966x switch driver.

 ###################
 # Example of use: #
 ###################

- Initialize the rx and tx fdma structs with values for: number of
  DCB's, number of DB's, channel ID, DB size (data buffer size), and
  total size of the requested memory. Also provide two callbacks:
  nextptr_cb() and dataptr_cb() for getting the nextptr and dataptr.

- Allocate memory using fdma_alloc_phys() or fdma_alloc_coherent().

- Initialize the DCB's with fdma_dcb_init().

- Add new DCB's with fdma_dcb_add().

- Free memory with fdma_free_phys() or fdma_free_coherent().

 #####################
 # Patch  breakdown: #
 #####################

Patch #1:  select FDMA library for lan966x.

Patch #2:  includes the fdma_api.h header and removes old symbols.

Patch #3:  replaces old rx and tx variables with equivalent ones from the
           fdma struct. Only the variables that can be changed without
           breaking traffic are changed in this patch.

Patch #4:  uses the library for allocation of rx buffers. This requires
           quite a bit of refactoring in this single patch.

Patch #5:  uses the library for adding DCB's in the rx path.

Patch #6:  uses the library for freeing rx buffers.

Patch #7:  uses the library for allocation of tx buffers. This requires
           quite a bit of refactoring in this single patch.

Patch #8:  uses the library for adding DCB's in the tx path.

Patch #9:  uses the library helpers in the tx path.

Patch #10: ditch last_in_use variable and use library instead.

Patch #11: uses library helpers throughout.

Patch #12: refactor lan966x_fdma_reload() function.

[1] https://lore.kernel.org/netdev/20240902-fdma-sparx5-v1-0-1e7d5e5a9f34@microchip.com/

Signed-off-by: Daniel Machon <daniel.machon@microchip.com>
====================

Link: https://patch.msgid.link/20240905-fdma-lan966x-v1-0-e083f8620165@microchip.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents a9b1fab3 89ba464f
...@@ -8,6 +8,7 @@ config LAN966X_SWITCH ...@@ -8,6 +8,7 @@ config LAN966X_SWITCH
select PHYLINK select PHYLINK
select PAGE_POOL select PAGE_POOL
select VCAP select VCAP
select FDMA
help help
This driver supports the Lan966x network switch device. This driver supports the Lan966x network switch device.
......
...@@ -20,3 +20,4 @@ lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o ...@@ -20,3 +20,4 @@ lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
# Provide include files # Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
...@@ -6,31 +6,55 @@ ...@@ -6,31 +6,55 @@
#include "lan966x_main.h" #include "lan966x_main.h"
static int lan966x_fdma_channel_active(struct lan966x *lan966x) static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
{ u64 *dataptr)
return lan_rd(lan966x, FDMA_CH_ACTIVE);
}
static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
struct lan966x_db *db)
{ {
struct lan966x *lan966x = (struct lan966x *)fdma->priv;
struct lan966x_rx *rx = &lan966x->rx;
struct page *page; struct page *page;
page = page_pool_dev_alloc_pages(rx->page_pool); page = page_pool_dev_alloc_pages(rx->page_pool);
if (unlikely(!page)) if (unlikely(!page))
return NULL; return -ENOMEM;
rx->page[dcb][db] = page;
*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
return 0;
}
db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM; static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
u64 *dataptr)
{
struct lan966x *lan966x = (struct lan966x *)fdma->priv;
*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;
return page; return 0;
}
static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
u64 *dataptr)
{
struct lan966x *lan966x = (struct lan966x *)fdma->priv;
*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;
return 0;
}
static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
return lan_rd(lan966x, FDMA_CH_ACTIVE);
} }
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{ {
struct fdma *fdma = &rx->fdma;
int i, j; int i, j;
for (i = 0; i < FDMA_DCB_MAX; ++i) { for (i = 0; i < fdma->n_dcbs; ++i) {
for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) for (j = 0; j < fdma->n_dbs; ++j)
page_pool_put_full_page(rx->page_pool, page_pool_put_full_page(rx->page_pool,
rx->page[i][j], false); rx->page[i][j], false);
} }
...@@ -38,41 +62,23 @@ static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) ...@@ -38,41 +62,23 @@ static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx) static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{ {
struct fdma *fdma = &rx->fdma;
struct page *page; struct page *page;
page = rx->page[rx->dcb_index][rx->db_index]; page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page)) if (unlikely(!page))
return; return;
page_pool_recycle_direct(rx->page_pool, page); page_pool_recycle_direct(rx->page_pool, page);
} }
static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
struct lan966x_rx_dcb *dcb,
u64 nextptr)
{
struct lan966x_db *db;
int i;
for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
db = &dcb->db[i];
db->status = FDMA_DCB_STATUS_INTR;
}
dcb->nextptr = FDMA_DCB_INVALID_DATA;
dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
rx->last_entry->nextptr = nextptr;
rx->last_entry = dcb;
}
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx) static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{ {
struct lan966x *lan966x = rx->lan966x; struct lan966x *lan966x = rx->lan966x;
struct page_pool_params pp_params = { struct page_pool_params pp_params = {
.order = rx->page_order, .order = rx->page_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = FDMA_DCB_MAX, .pool_size = rx->fdma.n_dcbs,
.nid = NUMA_NO_NODE, .nid = NUMA_NO_NODE,
.dev = lan966x->dev, .dev = lan966x->dev,
.dma_dir = DMA_FROM_DEVICE, .dma_dir = DMA_FROM_DEVICE,
...@@ -104,84 +110,41 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx) ...@@ -104,84 +110,41 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx) static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{ {
struct lan966x *lan966x = rx->lan966x; struct lan966x *lan966x = rx->lan966x;
struct lan966x_rx_dcb *dcb; struct fdma *fdma = &rx->fdma;
struct lan966x_db *db; int err;
struct page *page;
int i, j;
int size;
if (lan966x_fdma_rx_alloc_page_pool(rx)) if (lan966x_fdma_rx_alloc_page_pool(rx))
return PTR_ERR(rx->page_pool); return PTR_ERR(rx->page_pool);
/* calculate how many pages are needed to allocate the dcbs */ err = fdma_alloc_coherent(lan966x->dev, fdma);
size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; if (err)
size = ALIGN(size, PAGE_SIZE); return err;
rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
if (!rx->dcbs)
return -ENOMEM;
rx->last_entry = rx->dcbs;
rx->db_index = 0;
rx->dcb_index = 0;
/* Now for each dcb allocate the dbs */
for (i = 0; i < FDMA_DCB_MAX; ++i) {
dcb = &rx->dcbs[i];
dcb->info = 0;
/* For each db allocate a page and map it to the DB dataptr. */
for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
db = &dcb->db[j];
page = lan966x_fdma_rx_alloc_page(rx, db);
if (!page)
return -ENOMEM;
db->status = 0;
rx->page[i][j] = page;
}
lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i); fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
} FDMA_DCB_STATUS_INTR);
return 0; return 0;
} }
static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
rx->dcb_index++;
rx->dcb_index &= FDMA_DCB_MAX - 1;
}
static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
u32 size;
/* Now it is possible to do the cleanup of dcb */
size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
size = ALIGN(size, PAGE_SIZE);
dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}
static void lan966x_fdma_rx_start(struct lan966x_rx *rx) static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{ {
struct lan966x *lan966x = rx->lan966x; struct lan966x *lan966x = rx->lan966x;
struct fdma *fdma = &rx->fdma;
u32 mask; u32 mask;
/* When activating a channel, first is required to write the first DCB /* When activating a channel, first is required to write the first DCB
* address and then to activate it * address and then to activate it
*/ */
lan_wr(lower_32_bits((u64)rx->dma), lan966x, lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
FDMA_DCB_LLP(rx->channel_id)); FDMA_DCB_LLP(fdma->channel_id));
lan_wr(upper_32_bits((u64)rx->dma), lan966x, lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
FDMA_DCB_LLP1(rx->channel_id)); FDMA_DCB_LLP1(fdma->channel_id));
lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) | FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1), FDMA_CH_CFG_CH_MEM_SET(1),
lan966x, FDMA_CH_CFG(rx->channel_id)); lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */ /* Start fdma */
lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
...@@ -191,13 +154,13 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx) ...@@ -191,13 +154,13 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
/* Enable interrupts */ /* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
mask |= BIT(rx->channel_id); mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA, FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA); lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */ /* Activate the channel */
lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)), lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE, FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE); lan966x, FDMA_CH_ACTIVATE);
} }
...@@ -205,18 +168,19 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx) ...@@ -205,18 +168,19 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
static void lan966x_fdma_rx_disable(struct lan966x_rx *rx) static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{ {
struct lan966x *lan966x = rx->lan966x; struct lan966x *lan966x = rx->lan966x;
struct fdma *fdma = &rx->fdma;
u32 val; u32 val;
/* Disable the channel */ /* Disable the channel */
lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)), lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE, FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE); lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
val, !(val & BIT(rx->channel_id)), val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US); READL_SLEEP_US, READL_TIMEOUT_US);
lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)), lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD, FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD); lan966x, FDMA_CH_DB_DISCARD);
} }
...@@ -225,50 +189,27 @@ static void lan966x_fdma_rx_reload(struct lan966x_rx *rx) ...@@ -225,50 +189,27 @@ static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{ {
struct lan966x *lan966x = rx->lan966x; struct lan966x *lan966x = rx->lan966x;
lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)), lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD, FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD); lan966x, FDMA_CH_RELOAD);
} }
static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
struct lan966x_tx_dcb *dcb)
{
dcb->nextptr = FDMA_DCB_INVALID_DATA;
dcb->info = 0;
}
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx) static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{ {
struct lan966x *lan966x = tx->lan966x; struct lan966x *lan966x = tx->lan966x;
struct lan966x_tx_dcb *dcb; struct fdma *fdma = &tx->fdma;
struct lan966x_db *db; int err;
int size;
int i, j;
tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf), tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
GFP_KERNEL); GFP_KERNEL);
if (!tx->dcbs_buf) if (!tx->dcbs_buf)
return -ENOMEM; return -ENOMEM;
/* calculate how many pages are needed to allocate the dcbs */ err = fdma_alloc_coherent(lan966x->dev, fdma);
size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; if (err)
size = ALIGN(size, PAGE_SIZE);
tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
if (!tx->dcbs)
goto out; goto out;
/* Now for each dcb allocate the db */ fdma_dcbs_init(fdma, 0, 0);
for (i = 0; i < FDMA_DCB_MAX; ++i) {
dcb = &tx->dcbs[i];
for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
db = &dcb->db[j];
db->dataptr = 0;
db->status = 0;
}
lan966x_fdma_tx_add_dcb(tx, dcb);
}
return 0; return 0;
...@@ -280,33 +221,30 @@ static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx) ...@@ -280,33 +221,30 @@ static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
static void lan966x_fdma_tx_free(struct lan966x_tx *tx) static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{ {
struct lan966x *lan966x = tx->lan966x; struct lan966x *lan966x = tx->lan966x;
int size;
kfree(tx->dcbs_buf); kfree(tx->dcbs_buf);
fdma_free_coherent(lan966x->dev, &tx->fdma);
size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
size = ALIGN(size, PAGE_SIZE);
dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
} }
static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{ {
struct lan966x *lan966x = tx->lan966x; struct lan966x *lan966x = tx->lan966x;
struct fdma *fdma = &tx->fdma;
u32 mask; u32 mask;
/* When activating a channel, first is required to write the first DCB /* When activating a channel, first is required to write the first DCB
* address and then to activate it * address and then to activate it
*/ */
lan_wr(lower_32_bits((u64)tx->dma), lan966x, lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
FDMA_DCB_LLP(tx->channel_id)); FDMA_DCB_LLP(fdma->channel_id));
lan_wr(upper_32_bits((u64)tx->dma), lan966x, lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
FDMA_DCB_LLP1(tx->channel_id)); FDMA_DCB_LLP1(fdma->channel_id));
lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) | FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1), FDMA_CH_CFG_CH_MEM_SET(1),
lan966x, FDMA_CH_CFG(tx->channel_id)); lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */ /* Start fdma */
lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
...@@ -316,13 +254,13 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) ...@@ -316,13 +254,13 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
/* Enable interrupts */ /* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
mask |= BIT(tx->channel_id); mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA, FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA); lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */ /* Activate the channel */
lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)), lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE, FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE); lan966x, FDMA_CH_ACTIVATE);
} }
...@@ -330,23 +268,23 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) ...@@ -330,23 +268,23 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
static void lan966x_fdma_tx_disable(struct lan966x_tx *tx) static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{ {
struct lan966x *lan966x = tx->lan966x; struct lan966x *lan966x = tx->lan966x;
struct fdma *fdma = &tx->fdma;
u32 val; u32 val;
/* Disable the channel */ /* Disable the channel */
lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)), lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE, FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE); lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
val, !(val & BIT(tx->channel_id)), val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US); READL_SLEEP_US, READL_TIMEOUT_US);
lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)), lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD, FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD); lan966x, FDMA_CH_DB_DISCARD);
tx->activated = false; tx->activated = false;
tx->last_in_use = -1;
} }
static void lan966x_fdma_tx_reload(struct lan966x_tx *tx) static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
...@@ -354,7 +292,7 @@ static void lan966x_fdma_tx_reload(struct lan966x_tx *tx) ...@@ -354,7 +292,7 @@ static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
struct lan966x *lan966x = tx->lan966x; struct lan966x *lan966x = tx->lan966x;
/* Write the registers to reload the channel */ /* Write the registers to reload the channel */
lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)), lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD, FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD); lan966x, FDMA_CH_RELOAD);
} }
...@@ -393,23 +331,24 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) ...@@ -393,23 +331,24 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
struct lan966x_tx *tx = &lan966x->tx; struct lan966x_tx *tx = &lan966x->tx;
struct lan966x_rx *rx = &lan966x->rx; struct lan966x_rx *rx = &lan966x->rx;
struct lan966x_tx_dcb_buf *dcb_buf; struct lan966x_tx_dcb_buf *dcb_buf;
struct fdma *fdma = &tx->fdma;
struct xdp_frame_bulk bq; struct xdp_frame_bulk bq;
struct lan966x_db *db;
unsigned long flags; unsigned long flags;
bool clear = false; bool clear = false;
struct fdma_db *db;
int i; int i;
xdp_frame_bulk_init(&bq); xdp_frame_bulk_init(&bq);
spin_lock_irqsave(&lan966x->tx_lock, flags); spin_lock_irqsave(&lan966x->tx_lock, flags);
for (i = 0; i < FDMA_DCB_MAX; ++i) { for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i]; dcb_buf = &tx->dcbs_buf[i];
if (!dcb_buf->used) if (!dcb_buf->used)
continue; continue;
db = &tx->dcbs[i].db[0]; db = fdma_db_get(fdma, i, 0);
if (!(db->status & FDMA_DCB_STATUS_DONE)) if (!fdma_db_is_done(db))
continue; continue;
dcb_buf->dev->stats.tx_packets++; dcb_buf->dev->stats.tx_packets++;
...@@ -449,27 +388,16 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) ...@@ -449,27 +388,16 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
spin_unlock_irqrestore(&lan966x->tx_lock, flags); spin_unlock_irqrestore(&lan966x->tx_lock, flags);
} }
static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
struct lan966x_db *db;
/* Check if there is any data */
db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
return false;
return true;
}
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port) static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{ {
struct lan966x *lan966x = rx->lan966x; struct lan966x *lan966x = rx->lan966x;
struct fdma *fdma = &rx->fdma;
struct lan966x_port *port; struct lan966x_port *port;
struct lan966x_db *db; struct fdma_db *db;
struct page *page; struct page *page;
db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; db = fdma_db_next_get(fdma);
page = rx->page[rx->dcb_index][rx->db_index]; page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page)) if (unlikely(!page))
return FDMA_ERROR; return FDMA_ERROR;
...@@ -494,16 +422,17 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx, ...@@ -494,16 +422,17 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
u64 src_port) u64 src_port)
{ {
struct lan966x *lan966x = rx->lan966x; struct lan966x *lan966x = rx->lan966x;
struct lan966x_db *db; struct fdma *fdma = &rx->fdma;
struct sk_buff *skb; struct sk_buff *skb;
struct fdma_db *db;
struct page *page; struct page *page;
u64 timestamp; u64 timestamp;
/* Get the received frame and unmap it */ /* Get the received frame and unmap it */
db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; db = fdma_db_next_get(fdma);
page = rx->page[rx->dcb_index][rx->db_index]; page = rx->page[fdma->dcb_index][fdma->db_index];
skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order); skb = build_skb(page_address(page), fdma->db_size);
if (unlikely(!skb)) if (unlikely(!skb))
goto free_page; goto free_page;
...@@ -546,21 +475,19 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) ...@@ -546,21 +475,19 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{ {
struct lan966x *lan966x = container_of(napi, struct lan966x, napi); struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
struct lan966x_rx *rx = &lan966x->rx; struct lan966x_rx *rx = &lan966x->rx;
int dcb_reload = rx->dcb_index; int old_dcb, dcb_reload, counter = 0;
struct lan966x_rx_dcb *old_dcb; struct fdma *fdma = &rx->fdma;
struct lan966x_db *db;
bool redirect = false; bool redirect = false;
struct sk_buff *skb; struct sk_buff *skb;
struct page *page;
int counter = 0;
u64 src_port; u64 src_port;
u64 nextptr;
dcb_reload = fdma->dcb_index;
lan966x_fdma_tx_clear_buf(lan966x, weight); lan966x_fdma_tx_clear_buf(lan966x, weight);
/* Get all received skb */ /* Get all received skb */
while (counter < weight) { while (counter < weight) {
if (!lan966x_fdma_rx_more_frames(rx)) if (!fdma_has_frames(fdma))
break; break;
counter++; counter++;
...@@ -570,22 +497,22 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) ...@@ -570,22 +497,22 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
break; break;
case FDMA_ERROR: case FDMA_ERROR:
lan966x_fdma_rx_free_page(rx); lan966x_fdma_rx_free_page(rx);
lan966x_fdma_rx_advance_dcb(rx); fdma_dcb_advance(fdma);
goto allocate_new; goto allocate_new;
case FDMA_REDIRECT: case FDMA_REDIRECT:
redirect = true; redirect = true;
fallthrough; fallthrough;
case FDMA_TX: case FDMA_TX:
lan966x_fdma_rx_advance_dcb(rx); fdma_dcb_advance(fdma);
continue; continue;
case FDMA_DROP: case FDMA_DROP:
lan966x_fdma_rx_free_page(rx); lan966x_fdma_rx_free_page(rx);
lan966x_fdma_rx_advance_dcb(rx); fdma_dcb_advance(fdma);
continue; continue;
} }
skb = lan966x_fdma_rx_get_frame(rx, src_port); skb = lan966x_fdma_rx_get_frame(rx, src_port);
lan966x_fdma_rx_advance_dcb(rx); fdma_dcb_advance(fdma);
if (!skb) if (!skb)
goto allocate_new; goto allocate_new;
...@@ -594,20 +521,14 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) ...@@ -594,20 +521,14 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
allocate_new: allocate_new:
/* Allocate new pages and map them */ /* Allocate new pages and map them */
while (dcb_reload != rx->dcb_index) { while (dcb_reload != fdma->dcb_index) {
db = &rx->dcbs[dcb_reload].db[rx->db_index]; old_dcb = dcb_reload;
page = lan966x_fdma_rx_alloc_page(rx, db);
if (unlikely(!page))
break;
rx->page[dcb_reload][rx->db_index] = page;
old_dcb = &rx->dcbs[dcb_reload];
dcb_reload++; dcb_reload++;
dcb_reload &= FDMA_DCB_MAX - 1; dcb_reload &= fdma->n_dcbs - 1;
fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
FDMA_DCB_STATUS_INTR);
nextptr = rx->dma + ((unsigned long)old_dcb -
(unsigned long)rx->dcbs);
lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
lan966x_fdma_rx_reload(rx); lan966x_fdma_rx_reload(rx);
} }
...@@ -650,56 +571,30 @@ irqreturn_t lan966x_fdma_irq_handler(int irq, void *args) ...@@ -650,56 +571,30 @@ irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx) static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{ {
struct lan966x_tx_dcb_buf *dcb_buf; struct lan966x_tx_dcb_buf *dcb_buf;
struct fdma *fdma = &tx->fdma;
int i; int i;
for (i = 0; i < FDMA_DCB_MAX; ++i) { for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i]; dcb_buf = &tx->dcbs_buf[i];
if (!dcb_buf->used && i != tx->last_in_use) if (!dcb_buf->used &&
!fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
return i; return i;
} }
return -1; return -1;
} }
static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx, static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
int next_to_use, int len,
dma_addr_t dma_addr)
{
struct lan966x_tx_dcb *next_dcb;
struct lan966x_db *next_db;
next_dcb = &tx->dcbs[next_to_use];
next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
next_db = &next_dcb->db[0];
next_db->dataptr = dma_addr;
next_db->status = FDMA_DCB_STATUS_SOF |
FDMA_DCB_STATUS_EOF |
FDMA_DCB_STATUS_INTR |
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(len);
}
static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{ {
struct lan966x *lan966x = tx->lan966x; struct lan966x *lan966x = tx->lan966x;
struct lan966x_tx_dcb *dcb;
if (likely(lan966x->tx.activated)) { if (likely(lan966x->tx.activated)) {
/* Connect current dcb to the next db */
dcb = &tx->dcbs[tx->last_in_use];
dcb->nextptr = tx->dma + (next_to_use *
sizeof(struct lan966x_tx_dcb));
lan966x_fdma_tx_reload(tx); lan966x_fdma_tx_reload(tx);
} else { } else {
/* Because it is first time, then just activate */ /* Because it is first time, then just activate */
lan966x->tx.activated = true; lan966x->tx.activated = true;
lan966x_fdma_tx_activate(tx); lan966x_fdma_tx_activate(tx);
} }
/* Move to next dcb because this last in use */
tx->last_in_use = next_to_use;
} }
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
...@@ -752,11 +647,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) ...@@ -752,11 +647,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.xdpf = xdpf; next_dcb_buf->data.xdpf = xdpf;
next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES; next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
/* Setup next dcb */
lan966x_fdma_tx_setup_dcb(tx, next_to_use,
xdpf->len + IFH_LEN_BYTES,
dma_addr);
} else { } else {
page = ptr; page = ptr;
...@@ -773,11 +663,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) ...@@ -773,11 +663,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.page = page; next_dcb_buf->data.page = page;
next_dcb_buf->len = len + IFH_LEN_BYTES; next_dcb_buf->len = len + IFH_LEN_BYTES;
/* Setup next dcb */
lan966x_fdma_tx_setup_dcb(tx, next_to_use,
len + IFH_LEN_BYTES,
dma_addr + XDP_PACKET_HEADROOM);
} }
/* Fill up the buffer */ /* Fill up the buffer */
...@@ -788,8 +673,19 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) ...@@ -788,8 +673,19 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->ptp = false; next_dcb_buf->ptp = false;
next_dcb_buf->dev = port->dev; next_dcb_buf->dev = port->dev;
__fdma_dcb_add(&tx->fdma,
next_to_use,
0,
FDMA_DCB_STATUS_INTR |
FDMA_DCB_STATUS_SOF |
FDMA_DCB_STATUS_EOF |
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
&fdma_nextptr_cb,
&lan966x_fdma_xdp_tx_dataptr_cb);
/* Start the transmission */ /* Start the transmission */
lan966x_fdma_tx_start(tx, next_to_use); lan966x_fdma_tx_start(tx);
out: out:
spin_unlock(&lan966x->tx_lock); spin_unlock(&lan966x->tx_lock);
...@@ -847,9 +743,6 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) ...@@ -847,9 +743,6 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
goto release; goto release;
} }
/* Setup next dcb */
lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
/* Fill up the buffer */ /* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use]; next_dcb_buf = &tx->dcbs_buf[next_to_use];
next_dcb_buf->use_skb = true; next_dcb_buf->use_skb = true;
...@@ -861,12 +754,21 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) ...@@ -861,12 +754,21 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
next_dcb_buf->ptp = false; next_dcb_buf->ptp = false;
next_dcb_buf->dev = dev; next_dcb_buf->dev = dev;
fdma_dcb_add(&tx->fdma,
next_to_use,
0,
FDMA_DCB_STATUS_INTR |
FDMA_DCB_STATUS_SOF |
FDMA_DCB_STATUS_EOF |
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(skb->len));
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
next_dcb_buf->ptp = true; next_dcb_buf->ptp = true;
/* Start the transmission */ /* Start the transmission */
lan966x_fdma_tx_start(tx, next_to_use); lan966x_fdma_tx_start(tx);
return NETDEV_TX_OK; return NETDEV_TX_OK;
...@@ -908,14 +810,11 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x) ...@@ -908,14 +810,11 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{ {
struct page_pool *page_pool; struct page_pool *page_pool;
dma_addr_t rx_dma; struct fdma fdma_rx_old;
void *rx_dcbs;
u32 size;
int err; int err;
/* Store these for later to free them */ /* Store these for later to free them */
rx_dma = lan966x->rx.dma; memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
rx_dcbs = lan966x->rx.dcbs;
page_pool = lan966x->rx.page_pool; page_pool = lan966x->rx.page_pool;
napi_synchronize(&lan966x->napi); napi_synchronize(&lan966x->napi);
...@@ -931,9 +830,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) ...@@ -931,9 +830,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
goto restore; goto restore;
lan966x_fdma_rx_start(&lan966x->rx); lan966x_fdma_rx_start(&lan966x->rx);
size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; fdma_free_coherent(lan966x->dev, &fdma_rx_old);
size = ALIGN(size, PAGE_SIZE);
dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
page_pool_destroy(page_pool); page_pool_destroy(page_pool);
...@@ -943,8 +840,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) ...@@ -943,8 +840,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
return err; return err;
restore: restore:
lan966x->rx.page_pool = page_pool; lan966x->rx.page_pool = page_pool;
lan966x->rx.dma = rx_dma; memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
lan966x->rx.dcbs = rx_dcbs;
lan966x_fdma_rx_start(&lan966x->rx); lan966x_fdma_rx_start(&lan966x->rx);
return err; return err;
...@@ -1034,11 +930,24 @@ int lan966x_fdma_init(struct lan966x *lan966x) ...@@ -1034,11 +930,24 @@ int lan966x_fdma_init(struct lan966x *lan966x)
return 0; return 0;
lan966x->rx.lan966x = lan966x; lan966x->rx.lan966x = lan966x;
lan966x->rx.channel_id = FDMA_XTR_CHANNEL; lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
lan966x->rx.fdma.priv = lan966x;
lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x); lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
lan966x->tx.lan966x = lan966x; lan966x->tx.lan966x = lan966x;
lan966x->tx.channel_id = FDMA_INJ_CHANNEL; lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
lan966x->tx.last_in_use = -1; lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
lan966x->tx.fdma.priv = lan966x;
lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
err = lan966x_fdma_rx_alloc(&lan966x->rx); err = lan966x_fdma_rx_alloc(&lan966x->rx);
if (err) if (err)
...@@ -1046,7 +955,7 @@ int lan966x_fdma_init(struct lan966x *lan966x) ...@@ -1046,7 +955,7 @@ int lan966x_fdma_init(struct lan966x *lan966x)
err = lan966x_fdma_tx_alloc(&lan966x->tx); err = lan966x_fdma_tx_alloc(&lan966x->tx);
if (err) { if (err) {
lan966x_fdma_rx_free(&lan966x->rx); fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
return err; return err;
} }
...@@ -1067,7 +976,7 @@ void lan966x_fdma_deinit(struct lan966x *lan966x) ...@@ -1067,7 +976,7 @@ void lan966x_fdma_deinit(struct lan966x *lan966x)
napi_disable(&lan966x->napi); napi_disable(&lan966x->napi);
lan966x_fdma_rx_free_pages(&lan966x->rx); lan966x_fdma_rx_free_pages(&lan966x->rx);
lan966x_fdma_rx_free(&lan966x->rx); fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
page_pool_destroy(lan966x->rx.page_pool); page_pool_destroy(lan966x->rx.page_pool);
lan966x_fdma_tx_free(&lan966x->tx); lan966x_fdma_tx_free(&lan966x->tx);
} }
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <net/switchdev.h> #include <net/switchdev.h>
#include <net/xdp.h> #include <net/xdp.h>
#include <fdma_api.h>
#include <vcap_api.h> #include <vcap_api.h>
#include <vcap_api_client.h> #include <vcap_api_client.h>
...@@ -76,15 +77,6 @@ ...@@ -76,15 +77,6 @@
#define FDMA_RX_DCB_MAX_DBS 1 #define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1 #define FDMA_TX_DCB_MAX_DBS 1
#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
#define FDMA_DCB_STATUS_SOF BIT(16)
#define FDMA_DCB_STATUS_EOF BIT(17)
#define FDMA_DCB_STATUS_INTR BIT(18)
#define FDMA_DCB_STATUS_DONE BIT(19)
#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
#define FDMA_DCB_INVALID_DATA 0x1
#define FDMA_XTR_CHANNEL 6 #define FDMA_XTR_CHANNEL 6
#define FDMA_INJ_CHANNEL 0 #define FDMA_INJ_CHANNEL 0
...@@ -199,49 +191,14 @@ enum vcap_is1_port_sel_rt { ...@@ -199,49 +191,14 @@ enum vcap_is1_port_sel_rt {
struct lan966x_port; struct lan966x_port;
struct lan966x_db {
u64 dataptr;
u64 status;
};
struct lan966x_rx_dcb {
u64 nextptr;
u64 info;
struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
};
struct lan966x_tx_dcb {
u64 nextptr;
u64 info;
struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
};
struct lan966x_rx { struct lan966x_rx {
struct lan966x *lan966x; struct lan966x *lan966x;
/* Pointer to the array of hardware dcbs. */ struct fdma fdma;
struct lan966x_rx_dcb *dcbs;
/* Pointer to the last address in the dcbs. */
struct lan966x_rx_dcb *last_entry;
/* For each DB, there is a page */ /* For each DB, there is a page */
struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
/* Represents the db_index, it can have a value between 0 and
* FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS
* it means that the DCB can be reused.
*/
int db_index;
/* Represents the index in the dcbs. It has a value between 0 and
* FDMA_DCB_MAX
*/
int dcb_index;
/* Represents the dma address to the dcbs array */
dma_addr_t dma;
/* Represents the page order that is used to allocate the pages for the /* Represents the page order that is used to allocate the pages for the
* RX buffers. This value is calculated based on max MTU of the devices. * RX buffers. This value is calculated based on max MTU of the devices.
*/ */
...@@ -252,8 +209,6 @@ struct lan966x_rx { ...@@ -252,8 +209,6 @@ struct lan966x_rx {
*/ */
u32 max_mtu; u32 max_mtu;
u8 channel_id;
struct page_pool *page_pool; struct page_pool *page_pool;
}; };
...@@ -275,18 +230,11 @@ struct lan966x_tx_dcb_buf { ...@@ -275,18 +230,11 @@ struct lan966x_tx_dcb_buf {
struct lan966x_tx { struct lan966x_tx {
struct lan966x *lan966x; struct lan966x *lan966x;
/* Pointer to the dcb list */ struct fdma fdma;
struct lan966x_tx_dcb *dcbs;
u16 last_in_use;
/* Represents the DMA address to the first entry of the dcb entries. */
dma_addr_t dma;
/* Array of dcbs that are given to the HW */ /* Array of dcbs that are given to the HW */
struct lan966x_tx_dcb_buf *dcbs_buf; struct lan966x_tx_dcb_buf *dcbs_buf;
u8 channel_id;
bool activated; bool activated;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment