Commit 68f684e2 authored by Edwin Peer, committed by David S. Miller

bnxt_en: support multiple HWRM commands in flight

Add infrastructure to maintain a pending list of HWRM commands awaiting
completion and reduce the scope of the hwrm_cmd_lock mutex so that it
protects only the request mailbox. The mailbox is free to use for one
or more concurrent commands after receiving deferred response events.

For uniformity and completeness, use the same pending list for
collecting completions for commands that respond via a completion ring.
These commands are only used for freeing rings and for IRQ tests, and
we only support one such command in flight.

Note deferred responses are also only supported on the main channel.
The secondary channel (KONG) does not support deferred responses.
Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b34695a8
...@@ -277,6 +277,7 @@ static const u16 bnxt_async_events_arr[] = { ...@@ -277,6 +277,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
...@@ -2269,6 +2270,12 @@ static int bnxt_async_event_process(struct bnxt *bp, ...@@ -2269,6 +2270,12 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_event_error_report(bp, data1, data2); bnxt_event_error_report(bp, data1, data2);
goto async_event_process_exit; goto async_event_process_exit;
} }
case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
goto async_event_process_exit;
}
default: default:
goto async_event_process_exit; goto async_event_process_exit;
} }
...@@ -2288,10 +2295,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) ...@@ -2288,10 +2295,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
switch (cmpl_type) { switch (cmpl_type) {
case CMPL_BASE_TYPE_HWRM_DONE: case CMPL_BASE_TYPE_HWRM_DONE:
seq_id = le16_to_cpu(h_cmpl->sequence_id); seq_id = le16_to_cpu(h_cmpl->sequence_id);
if (seq_id == bp->hwrm_intr_seq_id) hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
else
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
break; break;
case CMPL_BASE_TYPE_HWRM_FWD_REQ: case CMPL_BASE_TYPE_HWRM_FWD_REQ:
...@@ -3956,8 +3960,15 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) ...@@ -3956,8 +3960,15 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
static void bnxt_free_hwrm_resources(struct bnxt *bp) static void bnxt_free_hwrm_resources(struct bnxt *bp)
{ {
struct bnxt_hwrm_wait_token *token;
dma_pool_destroy(bp->hwrm_dma_pool); dma_pool_destroy(bp->hwrm_dma_pool);
bp->hwrm_dma_pool = NULL; bp->hwrm_dma_pool = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
rcu_read_unlock();
} }
static int bnxt_alloc_hwrm_resources(struct bnxt *bp) static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
...@@ -3968,6 +3979,8 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp) ...@@ -3968,6 +3979,8 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
if (!bp->hwrm_dma_pool) if (!bp->hwrm_dma_pool)
return -ENOMEM; return -ENOMEM;
INIT_HLIST_HEAD(&bp->hwrm_pending_list);
return 0; return 0;
} }
......
...@@ -1880,8 +1880,8 @@ struct bnxt { ...@@ -1880,8 +1880,8 @@ struct bnxt {
u32 hwrm_spec_code; u32 hwrm_spec_code;
u16 hwrm_cmd_seq; u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq; u16 hwrm_cmd_kong_seq;
u16 hwrm_intr_seq_id;
struct dma_pool *hwrm_dma_pool; struct dma_pool *hwrm_dma_pool;
struct hlist_head hwrm_pending_list;
struct rtnl_link_stats64 net_stats_prev; struct rtnl_link_stats64 net_stats_prev;
struct bnxt_stats_mem port_stats; struct bnxt_stats_mem port_stats;
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
...@@ -363,19 +364,72 @@ static int __hwrm_to_stderr(u32 hwrm_err) ...@@ -363,19 +364,72 @@ static int __hwrm_to_stderr(u32 hwrm_err)
} }
} }
/* Allocate a wait token for an outgoing HWRM command and acquire the
 * request mailbox.
 *
 * On success the token is returned with bp->hwrm_cmd_lock HELD; the
 * caller must pair this with __hwrm_release_token(), which drops the
 * lock.  Returns NULL (lock NOT taken) if the allocation fails.
 *
 * Only main-channel (CHIMP) tokens are linked onto the RCU-protected
 * pending list: per the commit description, deferred responses are only
 * supported on the main channel, so KONG tokens never need to be found
 * by the completion/async handlers.
 */
static struct bnxt_hwrm_wait_token *
__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst)
{
	struct bnxt_hwrm_wait_token *token;

	token = kzalloc(sizeof(*token), GFP_KERNEL);
	if (!token)
		return NULL;

	mutex_lock(&bp->hwrm_cmd_lock);

	token->dst = dst;
	token->state = BNXT_HWRM_PENDING;
	if (dst == BNXT_HWRM_CHNL_CHIMP) {
		/* per-channel sequence counters; both protected by
		 * hwrm_cmd_lock, which is already held here
		 */
		token->seq_id = bp->hwrm_cmd_seq++;
		hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
	} else {
		token->seq_id = bp->hwrm_cmd_kong_seq++;
	}

	return token;
}
/* Release a token obtained from __hwrm_acquire_token() and drop
 * bp->hwrm_cmd_lock.
 *
 * CHIMP tokens are on the RCU pending list, so they are unlinked with
 * hlist_del_rcu() and freed via kfree_rcu() to allow concurrent
 * lockless readers (hwrm_update_token()) to finish traversal safely.
 * KONG tokens were never listed and can be freed immediately.
 */
static void
__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token)
{
	if (token->dst == BNXT_HWRM_CHNL_CHIMP) {
		hlist_del_rcu(&token->node);
		kfree_rcu(token, rcu);
	} else {
		kfree(token);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}
/* Advance the wait state of the pending HWRM command whose sequence id
 * matches @seq_id.  Called from completion and async-event handlers;
 * walks the pending list under RCU only, with no mutex, and publishes
 * the new state via WRITE_ONCE() for the polling waiter's READ_ONCE().
 * Logs an error if no pending command carries the given sequence id.
 */
void
hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
{
	struct bnxt_hwrm_wait_token *token;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) {
		if (token->seq_id != seq_id)
			continue;
		WRITE_ONCE(token->state, state);
		found = true;
		break;
	}
	rcu_read_unlock();

	if (!found)
		netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}
static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{ {
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP;
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
struct bnxt_hwrm_wait_token *token = NULL;
struct hwrm_short_input short_input = {0}; struct hwrm_short_input short_input = {0};
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
unsigned int i, timeout, tmo_count; unsigned int i, timeout, tmo_count;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
int intr_process, rc = -EBUSY;
u32 *data = (u32 *)ctx->req; u32 *data = (u32 *)ctx->req;
u32 msg_len = ctx->req_len; u32 msg_len = ctx->req_len;
u16 cp_ring_id, len = 0; int rc = -EBUSY;
u32 req_type; u32 req_type;
u16 len = 0;
u8 *valid; u8 *valid;
if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY) if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
...@@ -403,13 +457,12 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) ...@@ -403,13 +457,12 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
} }
} }
cp_ring_id = le16_to_cpu(ctx->req->cmpl_ring); token = __hwrm_acquire_token(bp, dst);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; if (!token) {
rc = -ENOMEM;
ctx->req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); goto exit;
/* currently supports only one outstanding message */ }
if (intr_process) ctx->req->seq_id = cpu_to_le16(token->seq_id);
bp->hwrm_intr_seq_id = le16_to_cpu(ctx->req->seq_id);
if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
msg_len > BNXT_HWRM_MAX_REQ_LEN) { msg_len > BNXT_HWRM_MAX_REQ_LEN) {
...@@ -456,11 +509,9 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) ...@@ -456,11 +509,9 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
if (intr_process) { if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
u16 seq_id = bp->hwrm_intr_seq_id;
/* Wait until hwrm response cmpl interrupt is processed */ /* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != (u16)~seq_id && while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE &&
i++ < tmo_count) { i++ < tmo_count) {
/* Abort the wait for completion if the FW health /* Abort the wait for completion if the FW health
* check has failed. * check has failed.
...@@ -479,7 +530,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) ...@@ -479,7 +530,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
} }
} }
if (bp->hwrm_intr_seq_id != (u16)~seq_id) { if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
le16_to_cpu(ctx->req->req_type)); le16_to_cpu(ctx->req->req_type));
...@@ -498,6 +549,13 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) ...@@ -498,6 +549,13 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
*/ */
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
goto exit; goto exit;
if (token &&
READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
__hwrm_release_token(bp, token);
token = NULL;
}
len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
if (len) { if (len) {
__le16 resp_seq = READ_ONCE(ctx->resp->seq_id); __le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
...@@ -569,6 +627,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) ...@@ -569,6 +627,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
} }
rc = __hwrm_to_stderr(rc); rc = __hwrm_to_stderr(rc);
exit: exit:
if (token)
__hwrm_release_token(bp, token);
if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY; ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
else else
...@@ -609,15 +669,11 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) ...@@ -609,15 +669,11 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
int hwrm_req_send(struct bnxt *bp, void *req) int hwrm_req_send(struct bnxt *bp, void *req)
{ {
struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
int rc;
if (!ctx) if (!ctx)
return -EINVAL; return -EINVAL;
mutex_lock(&bp->hwrm_cmd_lock); return __hwrm_send(bp, ctx);
rc = __hwrm_send(bp, ctx);
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
} }
/** /**
......
...@@ -37,6 +37,25 @@ struct bnxt_hwrm_ctx { ...@@ -37,6 +37,25 @@ struct bnxt_hwrm_ctx {
gfp_t gfp; gfp_t gfp;
}; };
/* Lifecycle of a pending HWRM command.  Enumerator order is
 * significant: the waiter polls with
 * "READ_ONCE(token->state) < BNXT_HWRM_COMPLETE", so states that still
 * require waiting must sort before BNXT_HWRM_COMPLETE.
 */
enum bnxt_hwrm_wait_state {
	BNXT_HWRM_PENDING,	/* request sent, no response yet */
	BNXT_HWRM_DEFERRED,	/* FW sent a deferred-response event; mailbox may be reused */
	BNXT_HWRM_COMPLETE,	/* completion received for this seq_id */
	BNXT_HWRM_CANCELLED,	/* wait aborted (HWRM resources being freed) */
};

/* Destination firmware channel for a request */
enum bnxt_hwrm_chnl { BNXT_HWRM_CHNL_CHIMP, BNXT_HWRM_CHNL_KONG };

/* Per-command wait token.  CHIMP-channel tokens are linked on
 * bp->hwrm_pending_list so completion/async handlers can match a
 * response to its waiter by seq_id; KONG tokens are never listed.
 */
struct bnxt_hwrm_wait_token {
	struct rcu_head rcu;		/* for kfree_rcu() deferred free */
	struct hlist_node node;		/* entry in bp->hwrm_pending_list */
	enum bnxt_hwrm_wait_state state;	/* written with WRITE_ONCE(), read with READ_ONCE() */
	enum bnxt_hwrm_chnl dst;	/* which channel the request went to */
	u16 seq_id;			/* matches the request's seq_id field */
};

void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) #define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) #define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define HWRM_CMD_MAX_TIMEOUT 40000 #define HWRM_CMD_MAX_TIMEOUT 40000
...@@ -78,9 +97,6 @@ static inline unsigned int hwrm_total_timeout(unsigned int n) ...@@ -78,9 +97,6 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
#define HWRM_VALID_BIT_DELAY_USEC 150 #define HWRM_VALID_BIT_DELAY_USEC 150
#define BNXT_HWRM_CHNL_CHIMP 0
#define BNXT_HWRM_CHNL_KONG 1
static inline bool bnxt_cfa_hwrm_message(u16 req_type) static inline bool bnxt_cfa_hwrm_message(u16 req_type)
{ {
switch (req_type) { switch (req_type) {
...@@ -114,17 +130,6 @@ static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) ...@@ -114,17 +130,6 @@ static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
le16_to_cpu(req->target_id) == HWRM_TARGET_ID_KONG)); le16_to_cpu(req->target_id) == HWRM_TARGET_ID_KONG));
} }
static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst)
{
u16 seq_id;
if (dst == BNXT_HWRM_CHNL_CHIMP)
seq_id = bp->hwrm_cmd_seq++;
else
seq_id = bp->hwrm_cmd_kong_seq++;
return seq_id;
}
int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len); int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len);
#define hwrm_req_init(bp, req, req_type) \ #define hwrm_req_init(bp, req, req_type) \
__hwrm_req_init((bp), (void **)&(req), (req_type), sizeof(*(req))) __hwrm_req_init((bp), (void **)&(req), (req_type), sizeof(*(req)))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment