Commit 44a7b3b6 authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Update for net-next.

Three main changes in this series, besides the usual firmware spec
update:

1. Add support for a new firmware communication channel directly to the
firmware processor that handles flow offloads.  This speeds up
flow offload operations (see the dispatch sketch after this list).

2. Use 64-bit internal flow handles to increase the number of flows
that can be offloaded.

3. Add level-2 context memory paging so that we can configure more
context memory for RDMA on the 57500 chips.  Allocate more context
memory if RDMA is enabled on the 57500 chips.
====================
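As a rough illustration of change #1, the sketch below shows how a request
gets steered to one of the two firmware mailboxes.  It is a hedged
reconstruction rather than code from the patch: the helper name
bnxt_pick_hwrm_channel() is hypothetical, while bnxt_kong_hwrm_message()
and the BNXT_GRCPF_REG_* offsets are taken from the diff itself.

/* Hypothetical helper (illustration only).  The offsets match the
 * BNXT_GRCPF_REG_* values this series adds to bnxt.h.
 */
static void bnxt_pick_hwrm_channel(struct bnxt *bp, struct input *req,
				   u32 *bar_offset, u32 *db_offset)
{
	if (bnxt_kong_hwrm_message(bp, req)) {
		/* CFA flow commands go straight to the Kong processor */
		*bar_offset = BNXT_GRCPF_REG_KONG_COMM;		 /* 0xA00 */
		*db_offset  = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;	 /* 0xB00 */
	} else {
		/* everything else stays on the ChiMP mailbox */
		*bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;	 /* 0x0 */
		*db_offset  = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; /* 0x100 */
	}
}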
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ac68a3d3 0c2ff8d7
@@ -1812,7 +1812,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
case CMPL_BASE_TYPE_HWRM_DONE:
seq_id = le16_to_cpu(h_cmpl->sequence_id);
if (seq_id == bp->hwrm_intr_seq_id)
bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
else
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
break;
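/* Completion is signalled by storing the bitwise complement of the awaited
 * sequence ID (the old HWRM_SEQ_ID_INVALID sentinel is gone); the waiter
 * in bnxt_hwrm_do_send_msg() checks for (u16)~seq_id accordingly.
 */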
@@ -2375,7 +2375,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
rmem->pg_arr[i] = NULL;
}
if (rmem->pg_tbl) {
dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
size_t pg_tbl_size = rmem->nr_pages * 8;
if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
pg_tbl_size = rmem->page_size;
dma_free_coherent(&pdev->dev, pg_tbl_size,
rmem->pg_tbl, rmem->pg_tbl_map);
rmem->pg_tbl = NULL;
}
@@ -2393,9 +2397,12 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
valid_bit = PTU_PTE_VALID;
if (rmem->nr_pages > 1) {
rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
rmem->nr_pages * 8,
if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
size_t pg_tbl_size = rmem->nr_pages * 8;
if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
pg_tbl_size = rmem->page_size;
rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
&rmem->pg_tbl_map,
GFP_KERNEL);
if (!rmem->pg_tbl)
@@ -2412,7 +2419,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
if (rmem->nr_pages > 1) {
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
extra_bits |= PTU_PTE_NEXT_TO_LAST;
@@ -3279,6 +3286,27 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
}
if (bp->hwrm_cmd_kong_resp_addr) {
dma_free_coherent(&pdev->dev, PAGE_SIZE,
bp->hwrm_cmd_kong_resp_addr,
bp->hwrm_cmd_kong_resp_dma_addr);
bp->hwrm_cmd_kong_resp_addr = NULL;
}
}
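/* The Kong mailbox gets its own one-page response buffer.  If this
 * allocation fails, bnxt_init_one() simply clears BNXT_FW_CAP_KONG_MB_CHNL
 * and every command falls back to the ChiMP channel.
 */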
static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
bp->hwrm_cmd_kong_resp_addr =
dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&bp->hwrm_cmd_kong_resp_dma_addr,
GFP_KERNEL);
if (!bp->hwrm_cmd_kong_resp_addr)
return -ENOMEM;
return 0;
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3740,7 +3768,10 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
req->req_type = cpu_to_le16(req_type);
req->cmpl_ring = cpu_to_le16(cmpl_ring);
req->target_id = cpu_to_le16(target_id);
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
if (bnxt_kong_hwrm_message(bp, req))
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
else
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
@@ -3755,11 +3786,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
struct hwrm_short_input short_input = {0};
req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
if (msg_len > bp->hwrm_max_ext_req_len ||
@@ -3767,6 +3797,23 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
return -EINVAL;
}
if (bnxt_hwrm_kong_chnl(bp, req)) {
dst = BNXT_HWRM_CHNL_KONG;
bar_offset = BNXT_GRCPF_REG_KONG_COMM;
doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
resp = bp->hwrm_cmd_kong_resp_addr;
resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
}
memset(resp, 0, PAGE_SIZE);
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
/* currently supports only one outstanding message */
if (intr_process)
bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
msg_len > BNXT_HWRM_MAX_REQ_LEN) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
@@ -3800,17 +3847,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
/* Write request msg to hwrm channel */
__iowrite32_copy(bp->bar0, data, msg_len / 4);
__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
for (i = msg_len; i < max_req_len; i += 4)
writel(0, bp->bar0 + i);
/* currently supports only one outstanding message */
if (intr_process)
bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
writel(0, bp->bar0 + bar_offset + i);
/* Ring channel doorbell */
writel(1, bp->bar0 + 0x100);
writel(1, bp->bar0 + doorbell_offset);
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
@@ -3825,10 +3868,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
if (intr_process) {
u16 seq_id = bp->hwrm_intr_seq_id;
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
i++ < tmo_count) {
/* on first few passes, just barely sleep */
if (i < HWRM_SHORT_TIMEOUT_COUNTER)
@@ -3839,14 +3885,14 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
HWRM_MAX_TIMEOUT);
}
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
le16_to_cpu(req->req_type));
return -1;
}
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
valid = bp->hwrm_cmd_resp_addr + len - 1;
valid = resp_addr + len - 1;
} else {
int j;
@@ -3874,7 +3920,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
/* Last byte of resp contains valid bit */
valid = bp->hwrm_cmd_resp_addr + len - 1;
valid = resp_addr + len - 1;
for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
/* make sure we read from updated DMA memory */
dma_rmb();
@@ -4009,6 +4055,10 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
req.flags |= cpu_to_le32(
FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
@@ -4137,12 +4187,11 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
int rc = 0;
struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
struct hwrm_cfa_ntuple_filter_alloc_output *resp =
bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
@@ -4188,8 +4237,10 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
if (!rc) {
resp = bnxt_get_hwrm_resp_addr(bp, &req);
fltr->filter_id = resp->ntuple_filter_id;
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -6000,8 +6051,11 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
pg_size = 2 << 4;
*pg_attr = pg_size;
if (rmem->nr_pages > 1) {
*pg_attr |= 1;
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= 2;
else
*pg_attr |= 1;
*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
} else {
*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
@@ -6078,6 +6132,22 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
&req.stat_pg_size_stat_lvl,
&req.stat_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
ctx_pg = &ctx->mrav_mem;
req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.mrav_pg_size_mrav_lvl,
&req.mrav_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
ctx_pg = &ctx->tim_mem;
req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.tim_pg_size_tim_lvl,
&req.tim_page_dir);
}
for (i = 0, num_entries = &req.tqm_sp_num_entries,
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
@@ -6098,25 +6168,104 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
struct bnxt_ctx_pg_info *ctx_pg)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
if (!mem_size)
return 0;
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > MAX_CTX_PAGES) {
rmem->nr_pages = 0;
return -EINVAL;
}
rmem->page_size = BNXT_PAGE_SIZE;
rmem->pg_arr = ctx_pg->ctx_pg_arr;
rmem->dma_arr = ctx_pg->ctx_dma_arr;
rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
if (rmem->depth >= 1)
rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
return bnxt_alloc_ring(bp, rmem);
}
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
u8 depth)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
if (!mem_size)
return 0;
ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
ctx_pg->nr_pages = 0;
return -EINVAL;
}
if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
int nr_tbls, i;
rmem->depth = 2;
ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
GFP_KERNEL);
if (!ctx_pg->ctx_pg_tbl)
return -ENOMEM;
nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
rmem->nr_pages = nr_tbls;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
if (rc)
return rc;
for (i = 0; i < nr_tbls; i++) {
struct bnxt_ctx_pg_info *pg_tbl;
pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
if (!pg_tbl)
return -ENOMEM;
ctx_pg->ctx_pg_tbl[i] = pg_tbl;
rmem = &pg_tbl->ring_mem;
rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
if (i == (nr_tbls - 1))
rmem->nr_pages = ctx_pg->nr_pages %
MAX_CTX_PAGES;
rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
if (rc)
break;
}
} else {
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
}
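/* Resulting layout at depth 2: ctx_pg->ring_mem holds the level-1 table
 * pages, and each of those pages doubles as the pg_tbl of a child
 * bnxt_ctx_pg_info whose own pages carry the actual context entries.
 */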
static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
ctx_pg->ctx_pg_tbl) {
int i, nr_tbls = rmem->nr_pages;
for (i = 0; i < nr_tbls; i++) {
struct bnxt_ctx_pg_info *pg_tbl;
struct bnxt_ring_mem_info *rmem2;
pg_tbl = ctx_pg->ctx_pg_tbl[i];
if (!pg_tbl)
continue;
rmem2 = &pg_tbl->ring_mem;
bnxt_free_ring(bp, rmem2);
ctx_pg->ctx_pg_arr[i] = NULL;
kfree(pg_tbl);
ctx_pg->ctx_pg_tbl[i] = NULL;
}
kfree(ctx_pg->ctx_pg_tbl);
ctx_pg->ctx_pg_tbl = NULL;
}
bnxt_free_ring(bp, rmem);
ctx_pg->nr_pages = 0;
}
static void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
@@ -6127,16 +6276,18 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
if (ctx->tqm_mem[0]) {
for (i = 0; i < bp->max_q + 1; i++)
bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem);
bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
kfree(ctx->tqm_mem[0]);
ctx->tqm_mem[0] = NULL;
}
bnxt_free_ring(bp, &ctx->stat_mem.ring_mem);
bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem);
bnxt_free_ring(bp, &ctx->cq_mem.ring_mem);
bnxt_free_ring(bp, &ctx->srq_mem.ring_mem);
bnxt_free_ring(bp, &ctx->qp_mem.ring_mem);
bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
}
@@ -6145,6 +6296,9 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries;
u32 extra_srqs = 0;
u32 extra_qps = 0;
u8 pg_lvl = 1;
int i, rc;
rc = bnxt_hwrm_func_backing_store_qcaps(bp);
@@ -6157,24 +6311,31 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
if (bp->flags & BNXT_FLAG_ROCE_CAP) {
pg_lvl = 2;
extra_qps = 65536;
extra_srqs = 8192;
}
ctx_pg = &ctx->qp_mem;
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
extra_qps;
mem_size = ctx->qp_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
if (rc)
return rc;
ctx_pg = &ctx->srq_mem;
ctx_pg->entries = ctx->srq_max_l2_entries;
ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
mem_size = ctx->srq_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
if (rc)
return rc;
ctx_pg = &ctx->cq_mem;
ctx_pg->entries = ctx->cq_max_l2_entries;
ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
mem_size = ctx->cq_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
if (rc)
return rc;
@@ -6182,26 +6343,47 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
mem_size = ctx->vnic_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
if (rc)
return rc;
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
mem_size = ctx->stat_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
if (rc)
return rc;
entries = ctx->qp_max_l2_entries;
ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
goto skip_rdma;
ctx_pg = &ctx->mrav_mem;
ctx_pg->entries = extra_qps * 4;
mem_size = ctx->mrav_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
if (rc)
return rc;
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
mem_size = ctx->tim_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
if (rc)
return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
entries = ctx->qp_max_l2_entries + extra_qps;
entries = roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
ctx->tqm_max_entries_per_ring);
for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
for (i = 0; i < bp->max_q + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = entries;
mem_size = ctx->tqm_entry_size * entries;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
if (rc)
return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
@@ -6481,6 +6663,13 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
(dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
if (dev_caps_cfg &
VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -9227,7 +9416,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
* 1 coal_buf x bufs_per_record = 1 completion record.
*/
coal = &bp->rx_coal;
coal->coal_ticks = 14;
coal->coal_ticks = 10;
coal->coal_bufs = 30;
coal->coal_ticks_irq = 1;
coal->coal_bufs_irq = 2;
@@ -10219,6 +10408,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
rc = bnxt_alloc_kong_hwrm_resources(bp);
if (rc)
bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
}
if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
......
@@ -567,7 +567,6 @@ struct nqe_cn {
#define HWRM_RESP_LEN_MASK 0xffff0000
#define HWRM_RESP_LEN_SFT 16
#define HWRM_RESP_VALID_MASK 0xff000000
#define HWRM_SEQ_ID_INVALID -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
@@ -585,6 +584,9 @@ struct nqe_cn {
#define HWRM_VALID_BIT_DELAY_USEC 20
#define BNXT_HWRM_CHNL_CHIMP 0
#define BNXT_HWRM_CHNL_KONG 1
#define BNXT_RX_EVENT 1
#define BNXT_AGG_EVENT 2
#define BNXT_TX_EVENT 4
@@ -615,9 +617,12 @@ struct bnxt_sw_rx_agg_bd {
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
u32 flags;
u16 flags;
#define BNXT_RMEM_VALID_PTE_FLAG 1
#define BNXT_RMEM_RING_PTE_FLAG 2
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -1113,9 +1118,14 @@ struct bnxt_test_info {
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
struct bnxt_tc_flow_stats {
u64 packets;
@@ -1183,12 +1193,15 @@ struct bnxt_vf_rep {
#define PTU_PTE_NEXT_TO_LAST 0x4UL
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
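/* With the usual 4 KB BNXT_PAGE_SIZE a table holds 512 64-bit PTEs, so one
 * indirection level covers 512 pages (2 MB) and the new second level
 * covers 512 * 512 pages (1 GB) per context type.
 */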
struct bnxt_ctx_pg_info {
u32 entries;
u32 nr_pages;
void *ctx_pg_arr[MAX_CTX_PAGES];
dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
struct bnxt_ring_mem_info ring_mem;
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
struct bnxt_ctx_mem_info {
@@ -1224,6 +1237,8 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info cq_mem;
struct bnxt_ctx_pg_info vnic_mem;
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
struct bnxt_ctx_pg_info *tqm_mem[9];
};
@@ -1457,20 +1472,25 @@ struct bnxt {
u32 msg_enable;
u32 fw_cap;
#define BNXT_FW_CAP_SHORT_CMD 0x00000001
#define BNXT_FW_CAP_LLDP_AGENT 0x00000002
#define BNXT_FW_CAP_DCBX_AGENT 0x00000004
#define BNXT_FW_CAP_NEW_RM 0x00000008
#define BNXT_FW_CAP_IF_CHANGE 0x00000010
#define BNXT_FW_CAP_SHORT_CMD 0x00000001
#define BNXT_FW_CAP_LLDP_AGENT 0x00000002
#define BNXT_FW_CAP_DCBX_AGENT 0x00000004
#define BNXT_FW_CAP_NEW_RM 0x00000008
#define BNXT_FW_CAP_IF_CHANGE 0x00000010
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
u16 hwrm_cmd_kong_seq;
u16 hwrm_intr_seq_id;
void *hwrm_short_cmd_req_addr;
dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
void *hwrm_cmd_kong_resp_addr;
dma_addr_t hwrm_cmd_kong_resp_dma_addr;
struct rtnl_link_stats64 net_stats_prev;
struct rx_port_stats *hw_rx_port_stats;
@@ -1672,6 +1692,66 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
}
}
static inline bool bnxt_cfa_hwrm_message(u16 req_type)
{
switch (req_type) {
case HWRM_CFA_ENCAP_RECORD_ALLOC:
case HWRM_CFA_ENCAP_RECORD_FREE:
case HWRM_CFA_DECAP_FILTER_ALLOC:
case HWRM_CFA_DECAP_FILTER_FREE:
case HWRM_CFA_NTUPLE_FILTER_ALLOC:
case HWRM_CFA_NTUPLE_FILTER_FREE:
case HWRM_CFA_NTUPLE_FILTER_CFG:
case HWRM_CFA_EM_FLOW_ALLOC:
case HWRM_CFA_EM_FLOW_FREE:
case HWRM_CFA_EM_FLOW_CFG:
case HWRM_CFA_FLOW_ALLOC:
case HWRM_CFA_FLOW_FREE:
case HWRM_CFA_FLOW_INFO:
case HWRM_CFA_FLOW_FLUSH:
case HWRM_CFA_FLOW_STATS:
case HWRM_CFA_METER_PROFILE_ALLOC:
case HWRM_CFA_METER_PROFILE_FREE:
case HWRM_CFA_METER_PROFILE_CFG:
case HWRM_CFA_METER_INSTANCE_ALLOC:
case HWRM_CFA_METER_INSTANCE_FREE:
return true;
default:
return false;
}
}
static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
{
return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)));
}
static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req)
{
return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr));
}
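/* bnxt_hwrm_cmd_hdr_init() programs resp_addr per channel, so the response
 * address doubles as the channel tag that bnxt_hwrm_kong_chnl() and the
 * helper below key off.
 */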
static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
{
if (bnxt_hwrm_kong_chnl(bp, (struct input *)req))
return bp->hwrm_cmd_kong_resp_addr;
else
return bp->hwrm_cmd_resp_addr;
}
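/* ChiMP and Kong keep independent sequence-ID counters, so completions on
 * one mailbox cannot be mistaken for the other's.
 */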
static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst)
{
u16 seq_id;
if (dst == BNXT_HWRM_CHNL_CHIMP)
seq_id = bp->hwrm_cmd_seq++;
else
seq_id = bp->hwrm_cmd_kong_seq++;
return seq_id;
}
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
......
@@ -194,6 +194,8 @@ struct cmd_nums {
#define HWRM_STAT_CTX_QUERY 0xb2UL
#define HWRM_STAT_CTX_CLR_STATS 0xb3UL
#define HWRM_PORT_QSTATS_EXT 0xb4UL
#define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
#define HWRM_PORT_PHY_MDIO_READ 0xb6UL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -213,6 +215,7 @@ struct cmd_nums {
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
#define HWRM_WOL_REASON_QCFG 0xf3UL
#define HWRM_CFA_METER_QCAPS 0xf4UL
#define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
#define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
#define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
@@ -239,6 +242,24 @@ struct cmd_nums {
#define HWRM_FW_IPC_MSG 0x110UL
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
#define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
#define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
#define HWRM_CFA_FLOW_AGING_CFG 0x114UL
#define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
#define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
#define HWRM_CFA_CTX_MEM_RGTR 0x117UL
#define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
#define HWRM_CFA_CTX_MEM_QCTX 0x119UL
#define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
#define HWRM_CFA_COUNTER_QCAPS 0x11bUL
#define HWRM_CFA_COUNTER_CFG 0x11cUL
#define HWRM_CFA_COUNTER_QCFG 0x11dUL
#define HWRM_CFA_COUNTER_QSTATS 0x11eUL
#define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
#define HWRM_CFA_EEM_QCAPS 0x120UL
#define HWRM_CFA_EEM_CFG 0x121UL
#define HWRM_CFA_EEM_QCFG 0x122UL
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
@@ -335,6 +356,8 @@ struct ret_codes {
#define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
#define HWRM_ERR_CODE_NO_BUFFER 0x8UL
#define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
#define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
#define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
@@ -363,8 +386,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
#define HWRM_VERSION_RSVD 3
#define HWRM_VERSION_STR "1.10.0.3"
#define HWRM_VERSION_RSVD 33
#define HWRM_VERSION_STR "1.10.0.33"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -411,6 +434,10 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
#define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
#define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
#define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
#define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -465,14 +492,27 @@ struct hwrm_ver_get_output {
/* eject_cmpl (size:128b/16B) */
struct eject_cmpl {
__le16 type;
#define EJECT_CMPL_TYPE_MASK 0x3fUL
#define EJECT_CMPL_TYPE_SFT 0
#define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
#define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
#define EJECT_CMPL_TYPE_MASK 0x3fUL
#define EJECT_CMPL_TYPE_SFT 0
#define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
#define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
#define EJECT_CMPL_FLAGS_MASK 0xffc0UL
#define EJECT_CMPL_FLAGS_SFT 6
#define EJECT_CMPL_FLAGS_ERROR 0x40UL
__le16 len;
__le32 opaque;
__le32 v;
#define EJECT_CMPL_V 0x1UL
__le16 v;
#define EJECT_CMPL_V 0x1UL
#define EJECT_CMPL_ERRORS_MASK 0xfffeUL
#define EJECT_CMPL_ERRORS_SFT 1
#define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
#define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
#define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
#define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
#define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
#define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
#define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
__le16 reserved16;
__le32 unused_2;
};
@@ -552,6 +592,10 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -647,6 +691,39 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
struct hwrm_async_event_cmpl_reset_notify {
__le16 type;
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
__le32 event_data2;
u8 opaque_v;
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
};
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
@@ -672,6 +749,74 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
};
/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
struct hwrm_async_event_cmpl_hw_flow_aged {
__le16 type;
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
__le32 event_data2;
u8 opaque_v;
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
};
/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
struct hwrm_async_event_cmpl_eem_cache_flush_req {
__le16 type;
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
__le32 event_data2;
u8 opaque_v;
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
};
/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
struct hwrm_async_event_cmpl_eem_cache_flush_done {
__le16 type;
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
__le32 event_data2;
u8 opaque_v;
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
};
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -867,6 +1012,8 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
#define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
#define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
#define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -902,7 +1049,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
/* hwrm_func_qcfg_output (size:640b/80B) */
/* hwrm_func_qcfg_output (size:704b/88B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -919,6 +1066,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1000,7 +1148,11 @@ struct hwrm_func_qcfg_output {
__le16 alloc_sp_tx_rings;
__le16 alloc_stat_ctx;
__le16 alloc_msix;
u8 unused_2[5];
__le16 registered_vfs;
u8 unused_1[3];
u8 always_1;
__le32 reset_addr_poll;
u8 unused_2[3];
u8 valid;
};
@@ -1031,6 +1183,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
#define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1235,6 +1388,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
#define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
#define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1888,7 +2042,8 @@ struct hwrm_func_drv_if_change_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
u8 unused_0[3];
u8 valid;
};
@@ -2864,6 +3019,60 @@ struct hwrm_port_phy_i2c_read_output {
u8 valid;
};
/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
struct hwrm_port_phy_mdio_write_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 unused_0[2];
__le16 port_id;
u8 phy_addr;
u8 dev_addr;
__le16 reg_addr;
__le16 reg_data;
u8 cl45_mdio;
u8 unused_1[7];
};
/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
struct hwrm_port_phy_mdio_write_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 unused_0[7];
u8 valid;
};
/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
struct hwrm_port_phy_mdio_read_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 unused_0[2];
__le16 port_id;
u8 phy_addr;
u8 dev_addr;
__le16 reg_addr;
u8 cl45_mdio;
u8 unused_1;
};
/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
struct hwrm_port_phy_mdio_read_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 reg_data;
u8 unused_0[5];
u8 valid;
};
/* hwrm_port_led_cfg_input (size:512b/64B) */
struct hwrm_port_led_cfg_input {
__le16 req_type;
@@ -4869,6 +5078,10 @@ struct hwrm_ring_grp_free_output {
u8 unused_0[7];
u8 valid;
};
#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
struct hwrm_cfa_l2_filter_alloc_input {
@@ -4937,20 +5150,21 @@ struct hwrm_cfa_l2_filter_alloc_input {
u8 unused_3;
__le32 src_id;
u8 tunnel_type;
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_4;
__le16 dst_id;
__le16 mirror_vnic_id;
@@ -5108,20 +5322,21 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
u8 l3_addr_type;
u8 t_l3_addr_type;
u8 tunnel_type;
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 tunnel_flags;
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
@@ -5326,20 +5541,21 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 pri_hint;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
@@ -5459,20 +5675,21 @@ struct hwrm_cfa_decap_filter_alloc_input {
#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
__be32 tunnel_id;
u8 tunnel_type;
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_0;
__le16 unused_1;
u8 src_macaddr[6];
@@ -5559,20 +5776,23 @@ struct hwrm_cfa_flow_alloc_input {
#define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
@@ -5597,20 +5817,21 @@ struct hwrm_cfa_flow_alloc_input {
__be16 l2_rewrite_smac[3];
u8 ip_proto;
u8 tunnel_type;
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
};
/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
@@ -5623,7 +5844,8 @@ struct hwrm_cfa_flow_alloc_output {
u8 unused_0[2];
__le32 flow_id;
__le64 ext_flow_handle;
u8 unused_1[7];
__le32 flow_counter_id;
u8 unused_1[3];
u8 valid;
};
@@ -5651,6 +5873,46 @@ struct hwrm_cfa_flow_free_output {
u8 valid;
};
/* hwrm_cfa_flow_info_input (size:256b/32B) */
struct hwrm_cfa_flow_info_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 flow_handle;
#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0
#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
#define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
u8 unused_0[6];
__le64 ext_flow_handle;
};
/* hwrm_cfa_flow_info_output (size:448b/56B) */
struct hwrm_cfa_flow_info_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
u8 profile;
__le16 src_fid;
__le16 dst_fid;
__le16 l2_ctxt_id;
__le64 em_info;
__le64 tcam_info;
__le64 vfp_tcam_info;
__le16 ar_id;
__le16 flow_handle;
__le32 tunnel_handle;
__le16 flow_timer;
u8 unused_0[5];
u8 valid;
};
/* hwrm_cfa_flow_stats_input (size:640b/80B) */
struct hwrm_cfa_flow_stats_input {
__le16 req_type;
@@ -5757,6 +6019,128 @@ struct hwrm_cfa_vfr_free_output {
u8 valid;
};
/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
struct hwrm_cfa_eem_qcaps_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 flags;
#define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 unused_0;
};
/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */
struct hwrm_cfa_eem_qcaps_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le32 flags;
#define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
__le32 unused_0;
__le32 supported;
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
__le32 max_entries_supported;
__le16 key_entry_size;
__le16 record_entry_size;
__le16 efc_entry_size;
u8 unused_1;
u8 valid;
};
/* hwrm_cfa_eem_cfg_input (size:320b/40B) */
struct hwrm_cfa_eem_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 flags;
#define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 unused_0;
__le32 num_entries;
__le32 unused_1;
__le16 key0_ctx_id;
__le16 key1_ctx_id;
__le16 record_ctx_id;
__le16 efc_ctx_id;
};
/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
struct hwrm_cfa_eem_cfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 unused_0[7];
u8 valid;
};
/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
struct hwrm_cfa_eem_qcfg_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 flags;
#define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
__le32 unused_0;
};
/* hwrm_cfa_eem_qcfg_output (size:128b/16B) */
struct hwrm_cfa_eem_qcfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le32 flags;
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 num_entries;
};
/* hwrm_cfa_eem_op_input (size:192b/24B) */
struct hwrm_cfa_eem_op_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 flags;
#define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
__le16 unused_0;
__le16 op;
#define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
#define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
#define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
#define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
#define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
};
/* hwrm_cfa_eem_op_output (size:128b/16B) */
struct hwrm_cfa_eem_op_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 unused_0[7];
u8 valid;
};
/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
@@ -5765,12 +6149,13 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
u8 unused_0[7];
};
@@ -5794,12 +6179,13 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5824,12 +6210,13 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -6040,7 +6427,9 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
#define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
u8 host_idx;
u8 unused_0[5];
u8 flags;
#define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
u8 unused_0[4];
};
/* hwrm_fw_reset_output (size:128b/16B) */
@@ -6137,6 +6526,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
#define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
......
......@@ -337,18 +337,21 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node)
{
struct hwrm_cfa_flow_free_input req = { 0 };
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
req.flow_handle = flow_handle;
if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
req.ext_flow_handle = flow_node->ext_flow_handle;
else
req.flow_handle = flow_node->flow_handle;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
__func__, flow_handle, rc);
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
if (rc)
rc = -EIO;
......@@ -418,13 +421,14 @@ static bool bits_set(void *key, int len)
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
__le16 ref_flow_handle,
__le32 tunnel_handle, __le16 *flow_handle)
__le32 tunnel_handle,
struct bnxt_tc_flow_node *flow_node)
{
struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_tc_actions *actions = &flow->actions;
struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
struct hwrm_cfa_flow_alloc_input req = { 0 };
struct hwrm_cfa_flow_alloc_output *resp;
u16 flow_flags = 0, action_flags = 0;
int rc;
......@@ -527,8 +531,23 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
*flow_handle = resp->flow_handle;
if (!rc) {
resp = bnxt_get_hwrm_resp_addr(bp, &req);
/* CFA_FLOW_ALLOC response interpretation:
 *                    fw with           fw with
 *                    16-bit            64-bit
 *                    flow handle       flow handle
 *                    ===========       ===========
 * flow_handle        flow handle       flow context id
 * ext_flow_handle    INVALID           flow handle
 * flow_id            INVALID           flow counter id
 */
flow_node->flow_handle = resp->flow_handle;
if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
flow_node->ext_flow_handle = resp->ext_flow_handle;
flow_node->flow_id = resp->flow_id;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
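
Per the interpretation table above, which response field actually identifies the flow depends on the firmware's capability bit. An illustrative helper, not part of this patch, that returns the canonical key for a stored flow node:

static u64 bnxt_tc_flow_key(struct bnxt *bp, struct bnxt_tc_flow_node *node)
{
	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
		return le64_to_cpu(node->ext_flow_handle);	/* 64-bit fw handle */
	return le16_to_cpu(node->flow_handle);	/* legacy 16-bit handle */
}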
......@@ -544,9 +563,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
__le32 ref_decap_handle,
__le32 *decap_filter_handle)
{
struct hwrm_cfa_decap_filter_alloc_output *resp =
bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
struct hwrm_cfa_decap_filter_alloc_output *resp;
struct ip_tunnel_key *tun_key = &flow->tun_key;
u32 enables = 0;
int rc;
......@@ -599,10 +617,12 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
if (!rc) {
resp = bnxt_get_hwrm_resp_addr(bp, &req);
*decap_filter_handle = resp->decap_filter_id;
else
} else {
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
......@@ -633,9 +653,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
struct bnxt_tc_l2_key *l2_info,
__le32 *encap_record_handle)
{
struct hwrm_cfa_encap_record_alloc_output *resp =
bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_encap_record_alloc_input req = { 0 };
struct hwrm_cfa_encap_record_alloc_output *resp;
struct hwrm_cfa_encap_data_vxlan *encap =
(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
......@@ -667,10 +686,12 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
if (!rc) {
resp = bnxt_get_hwrm_resp_addr(bp, &req);
*encap_record_handle = resp->encap_record_id;
else
} else {
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
......@@ -1224,7 +1245,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
int rc;
/* send HWRM cmd to free the flow-id */
bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
bnxt_hwrm_cfa_flow_free(bp, flow_node);
mutex_lock(&tc_info->lock);
......@@ -1246,6 +1267,12 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
return 0;
}
static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
u16 src_fid)
{
flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
}
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
u16 src_fid)
{
......@@ -1293,6 +1320,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
bnxt_tc_set_src_fid(bp, flow, src_fid);
if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
bnxt_tc_set_flow_dir(bp, flow, src_fid);
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -ENOSPC;
goto free_node;
......@@ -1320,7 +1350,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
/* send HWRM cmd to alloc the flow */
rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
tunnel_handle, &new_node->flow_handle);
tunnel_handle, new_node);
if (rc)
goto put_tunnel;
......@@ -1336,7 +1366,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
return 0;
hwrm_flow_free:
bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
bnxt_hwrm_cfa_flow_free(bp, new_node);
put_tunnel:
bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
......@@ -1397,13 +1427,40 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
return 0;
}
static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node,
__le16 *flow_handle, __le32 *flow_id)
{
u16 handle;
if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
*flow_id = flow_node->flow_id;
/* If flow_id is used to fetch flow stats then:
* 1. lower 12 bits of flow_handle must be set to all 1s.
* 2. 15th bit of flow_handle must specify the flow
* direction (TX/RX).
*/
if (flow_node->flow.dir == BNXT_DIR_RX)
handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
else
handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
*flow_handle = cpu_to_le16(handle);
} else {
*flow_handle = flow_node->flow_handle;
}
}
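
Assuming CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK covers bits 0-11 and the DIR_RX bit is bit 15, as the comment above implies, the values the helper writes work out as follows:

/* Worked example for bnxt_fill_cfa_stats_req() with 64-bit-handle fw
 * (bit values assumed from the comment above):
 *
 *   RX flow:  flow_handle = 0x8fff  (DIR_RX | MAX_MASK), flow_id used
 *   TX flow:  flow_handle = 0x0fff  (MAX_MASK only),     flow_id used
 *
 * With older 16-bit-handle firmware, flow_handle carries the real
 * handle and flow_id is left at zero.
 */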
static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
struct bnxt_tc_stats_batch stats_batch[])
{
struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_flow_stats_input req = { 0 };
struct hwrm_cfa_flow_stats_output *resp;
__le16 *req_flow_handles = &req.flow_handle_0;
__le32 *req_flow_ids = &req.flow_id_0;
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
......@@ -1411,14 +1468,19 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
for (i = 0; i < num_flows; i++) {
struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
req_flow_handles[i] = flow_node->flow_handle;
bnxt_fill_cfa_stats_req(bp, flow_node,
&req_flow_handles[i], &req_flow_ids[i]);
}
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
__le64 *resp_packets = &resp->packet_0;
__le64 *resp_bytes = &resp->byte_0;
__le64 *resp_packets;
__le64 *resp_bytes;
resp = bnxt_get_hwrm_resp_addr(bp, &req);
resp_packets = &resp->packet_0;
resp_bytes = &resp->byte_0;
for (i = 0; i < num_flows; i++) {
stats_batch[i].hw_stats.packets =
......
......@@ -98,6 +98,9 @@ struct bnxt_tc_flow {
/* flow applicable to pkts ingressing on this fid */
u16 src_fid;
u8 dir;
#define BNXT_DIR_RX 1
#define BNXT_DIR_TX 0
struct bnxt_tc_l2_key l2_key;
struct bnxt_tc_l2_key l2_mask;
struct bnxt_tc_l3_key l3_key;
......@@ -170,7 +173,9 @@ struct bnxt_tc_flow_node {
struct bnxt_tc_flow flow;
__le64 ext_flow_handle;
__le16 flow_handle;
__le32 flow_id;
/* L2 node in l2 hashtable that shares flow's l2 key */
struct bnxt_tc_l2_node *l2_node;
......