Commit d0b9805e authored by Aviad Krawczyk's avatar Aviad Krawczyk Committed by David S. Miller

net-next/hinic: Initialize cmdq

Create the work queues for the cmdq and update the NIC with the work
queue contexts. The cmdq commands are later used to update the NIC with
the qp contexts.
Signed-off-by: default avatarAviad Krawczyk <aviad.krawczyk@huawei.com>
Signed-off-by: default avatarZhao Chen <zhaochen6@huawei.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 53e7d6fe
......@@ -13,11 +13,49 @@
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/sizes.h>
#include <linux/atomic.h>
#include <linux/log2.h>
#include <asm/byteorder.h>
#include "hinic_hw_if.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#include "hinic_hw_io.h"
#include "hinic_hw_dev.h"
/* doorbell offset for the cmdq inside its db area */
#define CMDQ_DB_OFF             SZ_2K

/* Work Queue Element Basic Block size and wq geometry for the cmdqs */
#define CMDQ_WQEBB_SIZE         64
#define CMDQ_DEPTH              SZ_4K
#define CMDQ_WQ_PAGE_SIZE       SZ_4K

/* wqe sizes for the two cmdq wqe flavours (see enum cmdq_wqe_type) */
#define WQE_LCMD_SIZE           64
#define WQE_SCMD_SIZE           64

/* page frame number of addr - page_size must be a power of 2 (ilog2) */
#define CMDQ_PFN(addr, page_size)       ((addr) >> (ilog2(page_size)))

/* recover the containing hinic_cmdqs from one element of its cmdq array,
 * using the element's own cmdq_type as the array index
 */
#define cmdq_to_cmdqs(cmdq)     container_of((cmdq) - (cmdq)->cmdq_type, \
					     struct hinic_cmdqs, cmdq[0])

#define cmdqs_to_func_to_io(cmdqs)      container_of(cmdqs, \
						     struct hinic_func_to_io, \
						     cmdqs)

enum cmdq_wqe_type {
	WQE_LCMD_TYPE = 0,	/* lcmd wqe (WQE_LCMD_SIZE) */
	WQE_SCMD_TYPE = 1,	/* scmd wqe (WQE_SCMD_SIZE) */
};
/**
* hinic_alloc_cmdq_buf - alloc buffer for sending command
......@@ -29,8 +67,17 @@
int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
			 struct hinic_cmdq_buf *cmdq_buf)
{
	struct hinic_hwif *hwif = cmdqs->hwif;
	struct pci_dev *pdev = hwif->pdev;

	/* buffers come from the fixed-size DMA pool created in
	 * hinic_init_cmdqs() (HINIC_CMDQ_BUF_SIZE each)
	 */
	cmdq_buf->buf = pci_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
				       &cmdq_buf->dma_addr);
	if (!cmdq_buf->buf) {
		dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
		return -ENOMEM;
	}

	return 0;
}
/**
......@@ -41,7 +88,7 @@ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
			 struct hinic_cmdq_buf *cmdq_buf)
{
	/* return the buffer to the pool it was allocated from in
	 * hinic_alloc_cmdq_buf()
	 */
	pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
}
/**
......@@ -62,6 +109,169 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
return -EINVAL;
}
/**
 * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
 * @cmdq_ctxt: cmdq ctxt to initialize
 * @cmdq: the cmdq
 * @cmdq_pages: the memory of the queue
 **/
static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
				 struct hinic_cmdq *cmdq,
				 struct hinic_cmdq_pages *cmdq_pages)
{
	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
	struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
	struct hinic_wq *wq = cmdq->wq;
	u64 page_paddr, block_paddr, pfn;

	/* The HW keeps the wq page addresses in Big Endian format */
	page_paddr = be64_to_cpu(*wq->block_vaddr);

	pfn = CMDQ_PFN(page_paddr, wq->wq_page_size);

	ctxt_info->curr_wqe_page_pfn =
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);

	/* block PFN - Read Modify Write */
	block_paddr = cmdq_pages->page_paddr;

	pfn = CMDQ_PFN(block_paddr, wq->wq_page_size);

	ctxt_info->wq_block_pfn =
		HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
		HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);

	cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
	cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
}
/**
 * init_cmdq - initialize cmdq
 * @cmdq: the cmdq
 * @wq: the wq attached to the cmdq
 * @q_type: the cmdq type of the cmdq
 * @db_area: doorbell area for the cmdq
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
		     enum hinic_cmdq_type q_type, void __iomem *db_area)
{
	cmdq->wq = wq;
	cmdq->cmdq_type = q_type;
	cmdq->wrapped = 1;	/* queue starts in the "wrapped" phase */

	spin_lock_init(&cmdq->cmdq_lock);

	/* per-wqe completion tracking, one slot per wq entry */
	cmdq->done = vzalloc(wq->q_depth * sizeof(*cmdq->done));
	if (!cmdq->done)
		return -ENOMEM;

	/* per-wqe error codes, one slot per wq entry */
	cmdq->errcode = vzalloc(wq->q_depth * sizeof(*cmdq->errcode));
	if (!cmdq->errcode) {
		vfree(cmdq->done);
		return -ENOMEM;
	}

	cmdq->db_base = db_area + CMDQ_DB_OFF;
	return 0;
}
/**
 * free_cmdq - Free cmdq
 * @cmdq: the cmdq to free
 **/
static void free_cmdq(struct hinic_cmdq *cmdq)
{
	/* release the tracking arrays allocated in init_cmdq() */
	vfree(cmdq->errcode);
	vfree(cmdq->done);
}
/**
 * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
 * @hwdev: the NIC HW device
 * @cmdqs: cmdqs to write the ctxts for
 * @db_area: db_area for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
			   struct hinic_cmdqs *cmdqs, void __iomem **db_area)
{
	struct hinic_hwif *hwif = hwdev->hwif;
	enum hinic_cmdq_type type, cmdq_type;
	struct hinic_cmdq_ctxt *cmdq_ctxts;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_pfhwdev *pfhwdev;
	size_t cmdq_ctxts_size;
	int err;

	/* only a PF/PPF may message the management module below */
	if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
		dev_err(&pdev->dev, "Unsupported PCI function type\n");
		return -EINVAL;
	}

	/* temporary array holding one ctxt image per cmdq type */
	cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
	cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
	if (!cmdq_ctxts)
		return -ENOMEM;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	/* phase 1: initialize every cmdq and build its ctxt image */
	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		err = init_cmdq(&cmdqs->cmdq[cmdq_type],
				&cmdqs->saved_wqs[cmdq_type], cmdq_type,
				db_area[cmdq_type]);
		if (err) {
			dev_err(&pdev->dev, "Failed to initialize cmdq\n");
			goto err_init_cmdq;
		}

		cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
				     &cmdqs->cmdq[cmdq_type],
				     &cmdqs->cmdq_pages);
	}

	/* Write the CMDQ ctxts */
	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
					HINIC_COMM_CMD_CMDQ_CTXT_SET,
					&cmdq_ctxts[cmdq_type],
					sizeof(cmdq_ctxts[cmdq_type]),
					NULL, NULL, HINIC_MGMT_MSG_SYNC);
		if (err) {
			dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
				cmdq_type);
			goto err_write_cmdq_ctxt;
		}
	}

	devm_kfree(&pdev->dev, cmdq_ctxts);
	return 0;

err_write_cmdq_ctxt:
	/* all cmdqs were initialized in phase 1 - free every one of them */
	cmdq_type = HINIC_MAX_CMDQ_TYPES;

err_init_cmdq:
	/* free only the cmdqs initialized before the failure */
	for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
		free_cmdq(&cmdqs->cmdq[type]);

	devm_kfree(&pdev->dev, cmdq_ctxts);
	return err;
}
/**
* hinic_init_cmdqs - init all cmdqs
* @cmdqs: cmdqs to init
......@@ -73,8 +283,55 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
		     void __iomem **db_area)
{
	struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_hwdev *hwdev;
	size_t saved_wqs_size;
	u16 max_wqe_size;
	int err;

	cmdqs->hwif = hwif;

	/* fixed-size DMA pool used by hinic_alloc_cmdq_buf() */
	cmdqs->cmdq_buf_pool = pci_pool_create("hinic_cmdq", pdev,
					       HINIC_CMDQ_BUF_SIZE,
					       HINIC_CMDQ_BUF_SIZE, 0);
	if (!cmdqs->cmdq_buf_pool)
		return -ENOMEM;

	/* one wq per cmdq type */
	saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
	cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
	if (!cmdqs->saved_wqs) {
		err = -ENOMEM;
		goto err_saved_wqs;
	}

	max_wqe_size = WQE_LCMD_SIZE;
	err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
				   HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
				   CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
		goto err_cmdq_wqs;
	}

	/* initialize the cmdqs and push their contexts to the HW */
	hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
	err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
	if (err) {
		dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
		goto err_cmdq_ctxt;
	}

	return 0;

err_cmdq_ctxt:
	hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
			    HINIC_MAX_CMDQ_TYPES);

err_cmdq_wqs:
	devm_kfree(&pdev->dev, cmdqs->saved_wqs);

err_saved_wqs:
	pci_pool_destroy(cmdqs->cmdq_buf_pool);
	return err;
}
/**
......@@ -83,5 +340,18 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
**/
/**
 * hinic_free_cmdqs - free all the cmdqs
 * @cmdqs: the cmdqs to free
 **/
void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
{
	struct hinic_hwif *hwif = cmdqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	enum hinic_cmdq_type cmdq_type;

	/* teardown in reverse order of hinic_init_cmdqs() */
	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
		free_cmdq(&cmdqs->cmdq[cmdq_type]);

	hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
			    HINIC_MAX_CMDQ_TYPES);

	devm_kfree(&pdev->dev, cmdqs->saved_wqs);

	pci_pool_destroy(cmdqs->cmdq_buf_pool);
}
......@@ -24,6 +24,40 @@
#include "hinic_hw_if.h"
#include "hinic_hw_wq.h"
#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56
#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61
#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62
#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63
#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F
#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1
#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1
#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1
#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \
(((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
<< HINIC_CMDQ_CTXT_##member##_SHIFT)
#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \
((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
<< HINIC_CMDQ_CTXT_##member##_SHIFT)))
#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
#define HINIC_CMDQ_CTXT_CI_SHIFT 52
#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF
#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
(((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
<< HINIC_CMDQ_CTXT_##member##_SHIFT)
#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \
((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
<< HINIC_CMDQ_CTXT_##member##_SHIFT)))
#define HINIC_CMDQ_BUF_SIZE 2048
enum hinic_cmdq_type {
......@@ -38,6 +72,25 @@ struct hinic_cmdq_buf {
size_t size;
};
struct hinic_cmdq_ctxt_info {
	/* packed with the HINIC_CMDQ_CTXT_PAGE_INFO_SET /
	 * HINIC_CMDQ_CTXT_BLOCK_INFO_SET macros (see cmdq_init_queue_ctxt)
	 */
	u64 curr_wqe_page_pfn;
	u64 wq_block_pfn;
};

struct hinic_cmdq_ctxt {
	u8 status;
	u8 version;
	u8 rsvd0[6];	/* NOTE(review): presumably reserved/padding per the
			 * HW message layout - confirm against the HW spec
			 */

	u16 func_idx;	/* HW function index that owns this cmdq */
	u8 cmdq_type;	/* enum hinic_cmdq_type */
	u8 rsvd1[1];

	u8 rsvd2[4];
	struct hinic_cmdq_ctxt_info ctxt_info;
};
struct hinic_cmdq {
struct hinic_wq *wq;
......
......@@ -106,6 +106,8 @@
#define HINIC_EQ_PAGE_SIZE SZ_4K
#define HINIC_CEQ_ID_CMDQ 0
enum hinic_eq_type {
HINIC_AEQ,
};
......
......@@ -68,6 +68,11 @@ enum hinic_cfg_cmd {
HINIC_CFG_NIC_CAP = 0,
};
enum hinic_comm_cmd {
HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10,
HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11,
};
enum hinic_mgmt_cb_state {
HINIC_MGMT_CB_ENABLED = BIT(0),
HINIC_MGMT_CB_RUNNING = BIT(1),
......
......@@ -27,6 +27,7 @@
#include "hinic_hw_if.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#define WQS_BLOCKS_PER_PAGE 4
......@@ -42,6 +43,11 @@
#define WQ_PAGE_ADDR_SIZE sizeof(u64)
#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
#define CMDQ_BLOCK_SIZE 512
#define CMDQ_PAGE_SIZE 4096
#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
#define WQ_BASE_VADDR(wqs, wq) \
((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
+ (wq)->block_idx * WQ_BLOCK_SIZE)
......@@ -54,6 +60,18 @@
((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
+ (wq)->block_idx * WQ_BLOCK_SIZE)
#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
((void *)((cmdq_pages)->page_vaddr) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
((cmdq_pages)->page_paddr \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
((void *)((cmdq_pages)->shadow_page_vaddr) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
/**
* queue_alloc_page - allocate page for Queue
* @hwif: HW interface for allocating DMA
......@@ -122,6 +140,37 @@ static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
vfree(wqs->shadow_page_vaddr[page_idx]);
}
/**
 * cmdq_allocate_page - allocate page for cmdq
 * @cmdq_pages: the pages of the cmdq queue struct to hold the page
 *
 * Return 0 - Success, negative - Failure
 **/
static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
{
	/* one CMDQ_PAGE_SIZE DMA page holds the blocks of all cmdq wqs */
	return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
				&cmdq_pages->page_paddr,
				&cmdq_pages->shadow_page_vaddr,
				CMDQ_PAGE_SIZE);
}
/**
 * cmdq_free_page - free page from cmdq
 * @cmdq_pages: the pages of the cmdq queue struct that hold the page
 **/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
	struct hinic_hwif *hwif = cmdq_pages->hwif;
	struct pci_dev *pdev = hwif->pdev;

	/* release both the DMA page and the shadow buffer that were
	 * allocated together in cmdq_allocate_page()
	 */
	dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
			  cmdq_pages->page_vaddr,
			  (dma_addr_t)cmdq_pages->page_paddr);
	vfree(cmdq_pages->shadow_page_vaddr);
}
static int alloc_page_arrays(struct hinic_wqs *wqs)
{
struct hinic_hwif *hwif = wqs->hwif;
......@@ -514,3 +563,110 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
wqs_return_block(wqs, wq->page_idx, wq->block_idx);
}
/**
 * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
 * @cmdq_pages: will hold the pages of the cmdq
 * @wq: returned wqs
 * @hwif: HW interface
 * @cmdq_blocks: number of cmdq blocks/wq to allocate
 * @wqebb_size: Work Queue Block Byte Size
 * @wq_page_size: the page size in the Work Queue
 * @q_depth: number of wqebbs in WQ
 * @max_wqe_size: maximum WQE size that will be used in the WQ
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
			 struct hinic_wq *wq, struct hinic_hwif *hwif,
			 int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
			 u16 q_depth, u16 max_wqe_size)
{
	struct pci_dev *pdev = hwif->pdev;
	u16 num_wqebbs_per_page;
	int i, j, err;

	if (wqebb_size == 0) {
		dev_err(&pdev->dev, "wqebb_size must be > 0\n");
		return -EINVAL;
	}

	if (wq_page_size == 0) {
		dev_err(&pdev->dev, "wq_page_size must be > 0\n");
		return -EINVAL;
	}

	/* the index mask computed below relies on a power-of-2 depth */
	if (q_depth & (q_depth - 1)) {
		dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
		return -EINVAL;
	}

	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;

	if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
		dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
		return -EINVAL;
	}

	cmdq_pages->hwif = hwif;

	err = cmdq_allocate_page(cmdq_pages);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
		return err;
	}

	for (i = 0; i < cmdq_blocks; i++) {
		wq[i].hwif = hwif;
		wq[i].page_idx = 0;	/* all cmdq wqs share the one page */
		wq[i].block_idx = i;

		wq[i].wqebb_size = wqebb_size;
		wq[i].wq_page_size = wq_page_size;
		wq[i].q_depth = q_depth;
		wq[i].max_wqe_size = max_wqe_size;
		wq[i].num_wqebbs_per_page = num_wqebbs_per_page;

		wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
		wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
		wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);

		err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
				     CMDQ_WQ_MAX_PAGES);
		if (err) {
			dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
			goto err_cmdq_block;
		}

		atomic_set(&wq[i].cons_idx, 0);
		atomic_set(&wq[i].prod_idx, 0);
		atomic_set(&wq[i].delta, q_depth);
		wq[i].mask = q_depth - 1;
	}

	return 0;

err_cmdq_block:
	/* unwind only the wqs that were fully set up before the failure */
	for (j = 0; j < i; j++)
		free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);

	cmdq_free_page(cmdq_pages);
	return err;
}
/**
* hinic_wqs_cmdq_free - Free wqs from cmdqs
* @cmdq_pages: hold the pages of the cmdq
* @wq: wqs to free
* @cmdq_blocks: number of wqs to free
**/
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, int cmdq_blocks)
{
int i;
for (i = 0; i < cmdq_blocks; i++)
free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);
cmdq_free_page(cmdq_pages);
}
......@@ -81,6 +81,14 @@ struct hinic_cmdq_pages {
struct hinic_hwif *hwif;
};
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, struct hinic_hwif *hwif,
int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
u16 q_depth, u16 max_wqe_size);
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, int cmdq_blocks);
int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
struct hinic_hwif *hwif);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment