Commit f5955a6c authored by Golan Ben Ami, committed by Luca Coelho

iwlwifi: cancel the injective function between hw pointers and tfd entry index

Nowadays, the tfd queue max size is 2^8, and the reserved size in the
command header sequence field for the tfd entry index is 8 bits,
allowing an injective function from the hw pointers to the tfd entry index
in the sequence field.

In 22560 devices the tfd queue max size is 2^16, meaning that
the hw pointers are 16 bits long (allowing them to point to each entry
in the tfd queue). However, the reserved space in the sequence field for
the tfd entry doesn't change, and we are limited to 8 bits.
This requires cancelling the injective function from hw pointer to
tfd entry in the sequence number.

Use iwl_pcie_get_cmd_index to wrap the hw pointers to the n_window
size, which is at most 256 in tx queues, and so keep the injective
function between the window-wrapped hw pointers and the tfd entry index
in the sequence.
Signed-off-by: Golan Ben Ami <golan.ben.ami@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent 9b58419e
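
As a rough, standalone illustration of the wrapping described in the message above: the power-of-two mask used by iwl_pcie_get_cmd_index folds any hw pointer into the n_window range, so the 8-bit field in the sequence stays sufficient once pointers are compared through the window. The sketch below is mock code, not driver source; struct txq_sketch and get_cmd_index_sketch are made-up stand-ins for the relevant part of struct iwl_txq.

/* Standalone sketch, assuming n_window is a power of two (as in the driver),
 * so "ptr & (n_window - 1)" behaves like "ptr % n_window".
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant field of struct iwl_txq. */
struct txq_sketch {
        uint16_t n_window;      /* at most 256 entries for tx queues */
};

static uint8_t get_cmd_index_sketch(const struct txq_sketch *q, uint32_t index)
{
        /* Keep only the low bits that address a slot in the window. */
        return index & (q->n_window - 1);
}

int main(void)
{
        struct txq_sketch q = { .n_window = 256 };

        /* 16-bit hw pointers (22560-style) wrap into the 8-bit window. */
        printf("%u\n", (unsigned)get_cmd_index_sketch(&q, 0x0101));    /* 1 */
        printf("%u\n", (unsigned)get_cmd_index_sketch(&q, 0xff05));    /* 5 */
        return 0;
}
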
@@ -749,7 +749,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
         }
 }
 
-static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
+static inline u8 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
 {
         return index & (q->n_window - 1);
 }
@@ -819,9 +819,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
 
 static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
 {
-        return q->write_ptr >= q->read_ptr ?
-                (i >= q->read_ptr && i < q->write_ptr) :
-                !(i < q->read_ptr && i >= q->write_ptr);
+        int index = iwl_pcie_get_cmd_index(q, i);
+        int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
+        int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
+
+        return w >= r ?
+                (index >= r && index < w) :
+                !(index < r && index >= w);
 }
 
 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
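
To see why comparing window-wrapped indices still answers "is this slot in flight?" once the hw pointers are 16 bits wide, here is a standalone sketch of the new iwl_queue_used logic. It is mock code under the assumption (as in the driver) that at most n_window entries are in flight at once; N_WINDOW, wrap and queue_used_sketch are local stand-ins, not driver symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define N_WINDOW 256u   /* tx queue window, at most 256 entries */

static unsigned wrap(uint32_t index)
{
        return index & (N_WINDOW - 1);
}

/* Mirrors the updated iwl_queue_used(): compare wrapped indices only. */
static bool queue_used_sketch(uint16_t read_ptr, uint16_t write_ptr, uint32_t i)
{
        unsigned index = wrap(i);
        unsigned r = wrap(read_ptr);
        unsigned w = wrap(write_ptr);

        return w >= r ? (index >= r && index < w)
                      : !(index < r && index >= w);
}

int main(void)
{
        /* write_ptr has crossed a 256-entry window boundary. */
        printf("%d\n", queue_used_sketch(0x00fe, 0x0102, 0x00ff)); /* 1: in flight */
        printf("%d\n", queue_used_sketch(0x00fe, 0x0102, 0x0105)); /* 0: not used */
        return 0;
}
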
@@ -1225,9 +1225,13 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
         struct iwl_txq *txq = trans_pcie->txq[txq_id];
         unsigned long flags;
         int nfreed = 0;
+        u16 r;
 
         lockdep_assert_held(&txq->lock);
 
+        idx = iwl_pcie_get_cmd_index(txq, idx);
+        r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
         if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
                 IWL_ERR(trans,
                         "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
@@ -1236,12 +1240,13 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
                 return;
         }
 
-        for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
-             txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+        for (idx = iwl_queue_inc_wrap(idx); r != idx;
+             r = iwl_queue_inc_wrap(r)) {
+                txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
 
                 if (nfreed++ > 0) {
                         IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
-                                idx, txq->write_ptr, txq->read_ptr);
+                                idx, txq->write_ptr, r);
                         iwl_force_nmi(trans);
                 }
         }
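
The reclaim change can be read the same way: the 8-bit index recovered from the sequence field is only comparable to a window-wrapped read pointer, so the loop advances a wrapped copy (r) for the comparison while the full 16-bit read pointer moves in lockstep. The sketch below is mock code; wrap_window, inc_wrap_window and inc_wrap_queue are assumed stand-ins for iwl_pcie_get_cmd_index and iwl_queue_inc_wrap, not driver functions.

#include <stdint.h>
#include <stdio.h>

#define N_WINDOW        256u    /* command queue window */
#define TFD_QUEUE_MAX   65536u  /* 2^16 entries on 22560-style queues */

static unsigned wrap_window(uint32_t index)
{
        return index & (N_WINDOW - 1);
}

static unsigned inc_wrap_window(unsigned index)
{
        return (index + 1) & (N_WINDOW - 1);
}

static uint32_t inc_wrap_queue(uint32_t index)
{
        return (index + 1) & (TFD_QUEUE_MAX - 1);
}

int main(void)
{
        uint32_t read_ptr = 0x0100;             /* full 16-bit hw pointer */
        unsigned idx = wrap_window(0x02);       /* 8-bit index from the sequence field */
        unsigned r = wrap_window(read_ptr);

        /* Advance until the wrapped read index passes the reclaimed entry,
         * moving the real read pointer in lockstep.
         */
        for (idx = inc_wrap_window(idx); r != idx; r = inc_wrap_window(r))
                read_ptr = inc_wrap_queue(read_ptr);

        printf("read_ptr=0x%04x r=%u\n", (unsigned)read_ptr, r);       /* 0x0103, 3 */
        return 0;
}
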