Commit 76a9a364 authored by Tomer Tayar, committed by David S. Miller

qed: fix handling of concurrent ramrods.

Concurrent non-blocking slowpath ramrods can be completed
out-of-order on the completion chain. Recycling completed elements
while previously sent elements are still pending completion can
lead to overwriting active elements on the chain. In addition, the
flow that sends pending slowpath ramrods currently fails to update
the chain element's physical pointer.

This patch:
* Ensures that ramrods are sent to the FW with
  consecutive echo values.
* Handles out-of-order completions by freeing only the first
  successive completed entries.
* Updates the chain element physical pointer when copying
  a pending element into a free element for sending.
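
To illustrate the idea behind the last two bullets, here is a minimal user-space sketch of the scheme: completions are marked in a bitmap, and the ring consumer advances only over the first contiguous run of completed slots. The ring size, names and the test driver are invented for the example; only the bitmap-and-contiguous-advance logic mirrors the patch.

```c
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

static bool comp_bitmap[RING_SIZE];	/* stands in for p_comp_bitmap */
static unsigned int comp_idx;		/* stands in for comp_bitmap_idx */
static unsigned int consumed;		/* slots returned to the ring */

static void handle_completion(unsigned int echo)
{
	comp_bitmap[echo % RING_SIZE] = true;

	/* Free only the first successive completed entries. */
	while (comp_bitmap[comp_idx % RING_SIZE]) {
		comp_bitmap[comp_idx % RING_SIZE] = false;
		comp_idx++;
		consumed++;	/* qed_chain_return_produced() in the driver */
	}
}

int main(void)
{
	/* Echo 0 completes last; nothing is recycled until it arrives. */
	unsigned int order[] = { 1, 2, 0, 3 };

	for (unsigned int i = 0; i < 4; i++) {
		handle_completion(order[i]);
		printf("completed echo %u -> %u slot(s) recycled\n",
		       order[i], consumed);
	}
	return 0;
}
```

Running it shows no slot being recycled until echo 0 arrives, after which slots 0 through 2 are freed together and echo 3 is then freed immediately.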
Signed-off-by: Tomer Tayar <Tomer.Tayar@qlogic.com>
Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4639d60d
@@ -124,8 +124,12 @@ struct qed_spq {
 	dma_addr_t		p_phys;
 	struct qed_spq_entry	*p_virt;
 
-	/* Used as index for completions (returns on EQ by FW) */
-	u16			echo_idx;
+#define SPQ_RING_SIZE \
+	(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+
+	/* Bitmap for handling out-of-order completions */
+	DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
+	u8			comp_bitmap_idx;
 
 	/* Statistics */
 	u32			unlimited_pending_count;
......
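
As a side note on the new SPQ_RING_SIZE macro above: it sizes the completion bitmap to exactly one ring page worth of slowpath elements. A stand-alone check of the arithmetic is sketched below; the 16-byte element size is an assumption for the sketch, not something stated by the patch.

```c
#include <stdio.h>

#define CORE_SPQE_PAGE_SIZE_BYTES 4096

/* Hypothetical stand-in for the FW's slow_path_element; the real layout
 * comes from the HSI headers. 16 bytes is only an assumed size here.
 */
struct slow_path_element_stub {
	unsigned char raw[16];
};

#define RING_SIZE_EXAMPLE \
	(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element_stub))

int main(void)
{
	/* One 4KB page of 16-byte elements -> 256 ring entries, so a
	 * DECLARE_BITMAP() of RING_SIZE_EXAMPLE bits covers the whole ring.
	 */
	printf("ring entries per page: %zu\n", RING_SIZE_EXAMPLE);
	return 0;
}
```

The point is only that the bitmap is sized to one ring's worth of entries, so every in-flight echo maps to a unique bit.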
@@ -112,8 +112,6 @@ static int
 qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 		   struct qed_spq_entry *p_ent)
 {
-	p_ent->elem.hdr.echo	= 0;
-	p_hwfn->p_spq->echo_idx++;
 	p_ent->flags		= 0;
 
 	switch (p_ent->comp_mode) {
@@ -196,9 +194,11 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 			   struct qed_spq_entry *p_ent)
 {
 	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+	u16 echo = qed_chain_get_prod_idx(p_chain);
 	struct slow_path_element *elem;
 	struct core_db_data db;
 
+	p_ent->elem.hdr.echo = cpu_to_le16(echo);
 	elem = qed_chain_produce(p_chain);
 	if (!elem) {
 		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
@@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
 	p_spq->comp_count		= 0;
 	p_spq->comp_sent_count		= 0;
 	p_spq->unlimited_pending_count	= 0;
-	p_spq->echo_idx			= 0;
+
+	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
+	p_spq->comp_bitmap_idx = 0;
 
 	/* SPQ cid, cannot fail */
 	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
@@ -582,27 +584,33 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 	struct qed_spq *p_spq = p_hwfn->p_spq;
 
 	if (p_ent->queue == &p_spq->unlimited_pending) {
-		struct qed_spq_entry *p_en2;
 
 		if (list_empty(&p_spq->free_pool)) {
 			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
 			p_spq->unlimited_pending_count++;
 
 			return 0;
-		}
-
-		p_en2 = list_first_entry(&p_spq->free_pool,
-					 struct qed_spq_entry,
-					 list);
-		list_del(&p_en2->list);
+		} else {
+			struct qed_spq_entry *p_en2;
 
-		/* Strcut assignment */
-		*p_en2 = *p_ent;
+			p_en2 = list_first_entry(&p_spq->free_pool,
+						 struct qed_spq_entry,
+						 list);
+			list_del(&p_en2->list);
 
-		kfree(p_ent);
+			/* Copy the ring element physical pointer to the new
+			 * entry, since we are about to override the entire ring
+			 * entry and don't want to lose the pointer.
+			 */
+			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
 
-		p_ent = p_en2;
+			*p_en2 = *p_ent;
+
+			kfree(p_ent);
+
+			p_ent = p_en2;
+		}
 	}
 
 	/* entry is to be placed in 'pending' queue */
 	switch (priority) {
@@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
 				 list) {
 		if (p_ent->elem.hdr.echo == echo) {
+			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+
 			list_del(&p_ent->list);
 
-			qed_chain_return_produced(&p_spq->chain);
+			/* Avoid overriding of SPQ entries when getting
+			 * out-of-order completions, by marking the completions
+			 * in a bitmap and increasing the chain consumer only
+			 * for the first successive completed entries.
+			 */
+			bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+
+			while (test_bit(p_spq->comp_bitmap_idx,
+					p_spq->p_comp_bitmap)) {
+				bitmap_clear(p_spq->p_comp_bitmap,
+					     p_spq->comp_bitmap_idx,
+					     SPQ_RING_SIZE);
+				p_spq->comp_bitmap_idx++;
+				qed_chain_return_produced(&p_spq->chain);
+			}
+
 			p_spq->comp_count++;
 			found = p_ent;
 			break;
 		}
+
+		/* This is relatively uncommon - depends on scenarios
+		 * which have mutliple per-PF sent ramrods.
+		 */
+		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
+			   le16_to_cpu(echo),
+			   le16_to_cpu(p_ent->elem.hdr.echo));
 	}
 
 	/* Release lock before callback, as callback may post
......
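
The data_ptr copy in qed_spq_add_entry() above deserves a small illustration: recycling a free entry is done with a whole-struct assignment, which would otherwise clobber the ring-element physical pointer owned by the recycled entry. The types and names below are invented stand-ins, not the driver's structures.

```c
#include <stdio.h>

/* Simplified, invented stand-ins for the SPQ entry and its ring element. */
struct elem_stub {
	void *data_ptr;		/* physical buffer pointer owned by the slot */
	int   cmd;
};

struct entry_stub {
	struct elem_stub elem;
	int priority;
};

int main(void)
{
	int ring_buffer;	/* pretend DMA buffer backing the free entry */
	struct entry_stub free_ent = { .elem = { .data_ptr = &ring_buffer } };
	struct entry_stub new_ent  = { .elem = { .data_ptr = NULL, .cmd = 42 } };

	/* Mirror of "p_ent->elem.data_ptr = p_en2->elem.data_ptr;" before the
	 * struct assignment, so the pointer survives "*p_en2 = *p_ent;".
	 */
	new_ent.elem.data_ptr = free_ent.elem.data_ptr;
	free_ent = new_ent;

	printf("cmd=%d, pointer preserved: %s\n", free_ent.elem.cmd,
	       free_ent.elem.data_ptr == &ring_buffer ? "yes" : "no");
	return 0;
}
```

Without the pointer copy, the struct assignment would overwrite data_ptr with NULL and the ring slot's buffer address would be lost.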
@@ -9,6 +9,8 @@
 #ifndef __COMMON_HSI__
 #define __COMMON_HSI__
 
+#define CORE_SPQE_PAGE_SIZE_BYTES	4096
+
 #define FW_MAJOR_VERSION	8
 #define FW_MINOR_VERSION	4
 #define FW_REVISION_VERSION	2
......
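
Finally, the echo value itself: after this patch it is simply the chain's producer index at post time, so echoes are consecutive and each completion can be mapped back to a ring position with a modulo. A toy model of that mapping follows; the ring size and helper are invented for the example.

```c
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8	/* hypothetical ring size for the example */

static uint16_t prod_idx;	/* stands in for the chain producer index */

/* Post a ramrod: the echo is the producer index at the time of posting. */
static uint16_t post_ramrod(void)
{
	return prod_idx++;
}

int main(void)
{
	for (int i = 0; i < 10; i++) {
		uint16_t echo = post_ramrod();

		/* Ring position later used for the completion bitmap. */
		printf("echo %u -> ring slot %u\n",
		       (unsigned)echo, (unsigned)(echo % RING_SIZE));
	}
	return 0;
}
```

In the driver the same index is read with qed_chain_get_prod_idx() just before qed_chain_produce(), which is what keeps the echo values consecutive.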