Commit fc4b6853 authored by Tomas Winkler, committed by David S. Miller

iwlwifi: renaming last_used and first_empty

This patch renames the queue pointers first_empty and last_used to
write_ptr and read_ptr, respectively. This is closer to the technical
terminology we use every day.
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 91e17473
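For orientation, the sketch below condenses the ring-buffer bookkeeping this rename touches. It is not part of the patch: the struct and helper carry a _sketch suffix to mark them as illustrative, and their bodies are simplified from iwl_queue and iwl_queue_space() as they appear in the diff that follows.

/*
 * Minimal sketch (simplified, not the driver's exact code):
 * write_ptr - first empty slot; the host advances it when queueing a TFD
 *             (formerly first_empty).
 * read_ptr  - last used slot; advanced as completed TFDs are reclaimed
 *             (formerly last_used).
 */
struct iwl_queue_sketch {
    int n_bd;      /* number of buffer descriptors in the ring */
    int write_ptr; /* next slot the host will fill */
    int read_ptr;  /* next slot to be reclaimed */
};

/* Free slots left in the ring, mirroring iwl_queue_space() in the diff below. */
static int iwl_queue_space_sketch(const struct iwl_queue_sketch *q)
{
    int s = q->read_ptr - q->write_ptr;

    if (q->read_ptr > q->write_ptr)
        s -= q->n_bd;
    if (s <= 0)
        s += q->n_bd; /* assumed continuation; the hunk below is truncated here */
    return s;
}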
@@ -526,14 +526,14 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
 }
 /**
- * iwl_hw_txq_free_tfd - Free one TFD, those at index [txq->q.last_used]
+ * iwl_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr]
  *
  * Does NOT advance any indexes
  */
 int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
     struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
-    struct iwl_tfd_frame *bd = &bd_tmp[txq->q.last_used];
+    struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
     struct pci_dev *dev = priv->pci_dev;
     int i;
     int counter;
@@ -556,12 +556,12 @@ int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
     for (i = 1; i < counter; i++) {
         pci_unmap_single(dev, le32_to_cpu(bd->pa[i].addr),
                          le32_to_cpu(bd->pa[i].len), PCI_DMA_TODEVICE);
-        if (txq->txb[txq->q.last_used].skb[0]) {
-            struct sk_buff *skb = txq->txb[txq->q.last_used].skb[0];
-            if (txq->txb[txq->q.last_used].skb[0]) {
+        if (txq->txb[txq->q.read_ptr].skb[0]) {
+            struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0];
+            if (txq->txb[txq->q.read_ptr].skb[0]) {
                 /* Can be called from interrupt context */
                 dev_kfree_skb_any(skb);
-                txq->txb[txq->q.last_used].skb[0] = NULL;
+                txq->txb[txq->q.read_ptr].skb[0] = NULL;
             }
         }
     }
...
@@ -1749,14 +1749,14 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 }
 /**
- * iwl_hw_txq_free_tfd - Free one TFD, those at index [txq->q.last_used]
+ * iwl_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr]
  *
  * Does NOT advance any indexes
  */
 int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
     struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
-    struct iwl_tfd_frame *bd = &bd_tmp[txq->q.last_used];
+    struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
     struct pci_dev *dev = priv->pci_dev;
     int i;
     int counter = 0;
@@ -1796,11 +1796,11 @@ int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
                          IWL_GET_BITS(bd->pa[index], tb1_len),
                          PCI_DMA_TODEVICE);
-        if (txq->txb[txq->q.last_used].skb[i]) {
-            struct sk_buff *skb = txq->txb[txq->q.last_used].skb[i];
+        if (txq->txb[txq->q.read_ptr].skb[i]) {
+            struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
             dev_kfree_skb(skb);
-            txq->txb[txq->q.last_used].skb[i] = NULL;
+            txq->txb[txq->q.read_ptr].skb[i] = NULL;
         }
     }
     return 0;
@@ -2776,11 +2776,11 @@ int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv,
     len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
     IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
-                   tfd_offset[txq->q.first_empty], byte_cnt, len);
-    if (txq->q.first_empty < IWL4965_MAX_WIN_SIZE)
+                   tfd_offset[txq->q.write_ptr], byte_cnt, len);
+    if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE)
         IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
-                       tfd_offset[IWL4965_QUEUE_SIZE + txq->q.first_empty],
+                       tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr],
                        byte_cnt, len);
     return 0;
@@ -4134,7 +4134,7 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
      */
     iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
     /* releases all the TFDs until the SSN */
-    if (txq->q.last_used != (ba_resp_scd_ssn & 0xff))
+    if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff))
         iwl_tx_queue_reclaim(priv, ba_resp_scd_flow, index);
 }
@@ -4205,8 +4205,8 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
     iwl_set_bits_restricted_reg(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
-    priv->txq[txq_id].q.last_used = (ssn_idx & 0xff);
-    priv->txq[txq_id].q.first_empty = (ssn_idx & 0xff);
+    priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+    priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
     /* supposes that ssn_idx is valid (!= 0xFFF) */
     iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
@@ -4257,8 +4257,8 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
     iwl_clear_bits_restricted_reg(priv, SCD_QUEUECHAIN_SEL, (1 << txq_id));
-    priv->txq[txq_id].q.last_used = (ssn_idx & 0xff);
-    priv->txq[txq_id].q.first_empty = (ssn_idx & 0xff);
+    priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+    priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
     /* supposes that ssn_idx is valid (!= 0xFFF) */
     iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
...
@@ -209,9 +209,9 @@ static void iwl_print_hex_dump(int level, void *p, u32 len)
 static int iwl_queue_space(const struct iwl_queue *q)
 {
-    int s = q->last_used - q->first_empty;
-    if (q->last_used > q->first_empty)
+    int s = q->read_ptr - q->write_ptr;
+    if (q->read_ptr > q->write_ptr)
         s -= q->n_bd;
     if (s <= 0)
@@ -237,9 +237,9 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
 static inline int x2_queue_used(const struct iwl_queue *q, int i)
 {
-    return q->first_empty > q->last_used ?
-        (i >= q->last_used && i < q->first_empty) :
-        !(i < q->last_used && i >= q->first_empty);
+    return q->write_ptr > q->read_ptr ?
+        (i >= q->read_ptr && i < q->write_ptr) :
+        !(i < q->read_ptr && i >= q->write_ptr);
 }
 static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
@@ -273,7 +273,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
     if (q->high_mark < 2)
         q->high_mark = 2;
-    q->first_empty = q->last_used = 0;
+    q->write_ptr = q->read_ptr = 0;
     return 0;
 }
@@ -369,8 +369,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
         return;
     /* first, empty all BD's */
-    for (; q->first_empty != q->last_used;
-         q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd))
+    for (; q->write_ptr != q->read_ptr;
+         q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
         iwl_hw_txq_free_tfd(priv, txq);
     len = sizeof(struct iwl_cmd) * q->n_window;
@@ -649,12 +649,12 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
     spin_lock_irqsave(&priv->hcmd_lock, flags);
-    tfd = &txq->bd[q->first_empty];
+    tfd = &txq->bd[q->write_ptr];
     memset(tfd, 0, sizeof(*tfd));
     control_flags = (u32 *) tfd;
-    idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE);
+    idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
     out_cmd = &txq->cmd[idx];
     out_cmd->hdr.cmd = cmd->id;
@@ -666,7 +666,7 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
     out_cmd->hdr.flags = 0;
     out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
-            INDEX_TO_SEQ(q->first_empty));
+            INDEX_TO_SEQ(q->write_ptr));
     if (out_cmd->meta.flags & CMD_SIZE_HUGE)
         out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
@@ -682,10 +682,10 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
             "%d bytes at %d[%d]:%d\n",
             get_cmd_string(out_cmd->hdr.cmd),
             out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
-            fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM);
+            fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
     txq->need_update = 1;
-    q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
+    q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
     ret = iwl_tx_queue_update_write_ptr(priv, txq);
     spin_unlock_irqrestore(&priv->hcmd_lock, flags);
@@ -2799,21 +2799,21 @@ static int iwl_tx_skb(struct iwl_priv *priv,
     spin_lock_irqsave(&priv->lock, flags);
-    tfd = &txq->bd[q->first_empty];
+    tfd = &txq->bd[q->write_ptr];
     memset(tfd, 0, sizeof(*tfd));
     control_flags = (u32 *) tfd;
-    idx = get_cmd_index(q, q->first_empty, 0);
-    memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info));
-    txq->txb[q->first_empty].skb[0] = skb;
-    memcpy(&(txq->txb[q->first_empty].status.control),
+    idx = get_cmd_index(q, q->write_ptr, 0);
+    memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+    txq->txb[q->write_ptr].skb[0] = skb;
+    memcpy(&(txq->txb[q->write_ptr].status.control),
            ctl, sizeof(struct ieee80211_tx_control));
     out_cmd = &txq->cmd[idx];
     memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
     memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
     out_cmd->hdr.cmd = REPLY_TX;
     out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-                INDEX_TO_SEQ(q->first_empty)));
+                INDEX_TO_SEQ(q->write_ptr)));
     /* copy frags header */
     memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
@@ -2881,7 +2881,7 @@ static int iwl_tx_skb(struct iwl_priv *priv,
     iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
                        ieee80211_get_hdrlen(fc));
-    q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
+    q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
     rc = iwl_tx_queue_update_write_ptr(priv, txq);
     spin_unlock_irqrestore(&priv->lock, flags);
@@ -3375,20 +3375,20 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
     if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
         IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
                   "is out of range [0-%d] %d %d.\n", txq_id,
-                  index, q->n_bd, q->first_empty, q->last_used);
+                  index, q->n_bd, q->write_ptr, q->read_ptr);
         return 0;
     }
     for (index = iwl_queue_inc_wrap(index, q->n_bd);
-         q->last_used != index;
-         q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) {
+         q->read_ptr != index;
+         q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
         if (txq_id != IWL_CMD_QUEUE_NUM) {
             iwl_txstatus_to_ieee(priv,
-                    &(txq->txb[txq->q.last_used]));
+                    &(txq->txb[txq->q.read_ptr]));
             iwl_hw_txq_free_tfd(priv, txq);
         } else if (nfreed > 1) {
             IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
-                    q->first_empty, q->last_used);
+                    q->write_ptr, q->read_ptr);
             queue_work(priv->workqueue, &priv->restart);
         }
         nfreed++;
@@ -3428,12 +3428,12 @@ static void iwl_rx_reply_tx(struct iwl_priv *priv,
     if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
         IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
                   "is out of range [0-%d] %d %d\n", txq_id,
-                  index, txq->q.n_bd, txq->q.first_empty,
-                  txq->q.last_used);
+                  index, txq->q.n_bd, txq->q.write_ptr,
+                  txq->q.read_ptr);
         return;
     }
-    tx_status = &(txq->txb[txq->q.last_used].status);
+    tx_status = &(txq->txb[txq->q.read_ptr].status);
     tx_status->retry_count = tx_resp->failure_frame;
     tx_status->queue_number = status;
@@ -4368,14 +4368,14 @@ int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv,
         if (rc)
             return rc;
         iwl_write_restricted(priv, HBUS_TARG_WRPTR,
-                             txq->q.first_empty | (txq_id << 8));
+                             txq->q.write_ptr | (txq_id << 8));
         iwl_release_restricted_access(priv);
     /* else not in power-save mode, uCode will never sleep when we're
      * trying to tx (during RFKILL, we're not trying to tx). */
     } else
         iwl_write32(priv, HBUS_TARG_WRPTR,
-                    txq->q.first_empty | (txq_id << 8));
+                    txq->q.write_ptr | (txq_id << 8));
     txq->need_update = 0;
...
@@ -208,9 +208,9 @@ static void iwl_print_hex_dump(int level, void *p, u32 len)
 static int iwl_queue_space(const struct iwl_queue *q)
 {
-    int s = q->last_used - q->first_empty;
-    if (q->last_used > q->first_empty)
+    int s = q->read_ptr - q->write_ptr;
+    if (q->read_ptr > q->write_ptr)
         s -= q->n_bd;
     if (s <= 0)
@@ -236,9 +236,9 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
 static inline int x2_queue_used(const struct iwl_queue *q, int i)
 {
-    return q->first_empty > q->last_used ?
-        (i >= q->last_used && i < q->first_empty) :
-        !(i < q->last_used && i >= q->first_empty);
+    return q->write_ptr > q->read_ptr ?
+        (i >= q->read_ptr && i < q->write_ptr) :
+        !(i < q->read_ptr && i >= q->write_ptr);
 }
 static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
@@ -272,7 +272,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
     if (q->high_mark < 2)
         q->high_mark = 2;
-    q->first_empty = q->last_used = 0;
+    q->write_ptr = q->read_ptr = 0;
     return 0;
 }
@@ -368,8 +368,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
         return;
     /* first, empty all BD's */
-    for (; q->first_empty != q->last_used;
-         q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd))
+    for (; q->write_ptr != q->read_ptr;
+         q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
         iwl_hw_txq_free_tfd(priv, txq);
     len = sizeof(struct iwl_cmd) * q->n_window;
@@ -652,12 +652,12 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
     spin_lock_irqsave(&priv->hcmd_lock, flags);
-    tfd = &txq->bd[q->first_empty];
+    tfd = &txq->bd[q->write_ptr];
     memset(tfd, 0, sizeof(*tfd));
     control_flags = (u32 *) tfd;
-    idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE);
+    idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
     out_cmd = &txq->cmd[idx];
     out_cmd->hdr.cmd = cmd->id;
@@ -669,7 +669,7 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
     out_cmd->hdr.flags = 0;
     out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
-            INDEX_TO_SEQ(q->first_empty));
+            INDEX_TO_SEQ(q->write_ptr));
     if (out_cmd->meta.flags & CMD_SIZE_HUGE)
         out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
@@ -681,11 +681,11 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
             "%d bytes at %d[%d]:%d\n",
             get_cmd_string(out_cmd->hdr.cmd),
             out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
-            fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM);
+            fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
     txq->need_update = 1;
     ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);
-    q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
+    q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
     iwl_tx_queue_update_write_ptr(priv, txq);
     spin_unlock_irqrestore(&priv->hcmd_lock, flags);
@@ -2889,21 +2889,21 @@ static int iwl_tx_skb(struct iwl_priv *priv,
     spin_lock_irqsave(&priv->lock, flags);
-    tfd = &txq->bd[q->first_empty];
+    tfd = &txq->bd[q->write_ptr];
     memset(tfd, 0, sizeof(*tfd));
     control_flags = (u32 *) tfd;
-    idx = get_cmd_index(q, q->first_empty, 0);
-    memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info));
-    txq->txb[q->first_empty].skb[0] = skb;
-    memcpy(&(txq->txb[q->first_empty].status.control),
+    idx = get_cmd_index(q, q->write_ptr, 0);
+    memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+    txq->txb[q->write_ptr].skb[0] = skb;
+    memcpy(&(txq->txb[q->write_ptr].status.control),
           ctl, sizeof(struct ieee80211_tx_control));
     out_cmd = &txq->cmd[idx];
     memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
     memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
     out_cmd->hdr.cmd = REPLY_TX;
     out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-                INDEX_TO_SEQ(q->first_empty)));
+                INDEX_TO_SEQ(q->write_ptr)));
     /* copy frags header */
     memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
@@ -2969,7 +2969,7 @@ static int iwl_tx_skb(struct iwl_priv *priv,
     iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
-    q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
+    q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
     rc = iwl_tx_queue_update_write_ptr(priv, txq);
     spin_unlock_irqrestore(&priv->lock, flags);
@@ -3463,20 +3463,20 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
     if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
         IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
                   "is out of range [0-%d] %d %d.\n", txq_id,
-                  index, q->n_bd, q->first_empty, q->last_used);
+                  index, q->n_bd, q->write_ptr, q->read_ptr);
         return 0;
     }
     for (index = iwl_queue_inc_wrap(index, q->n_bd);
-         q->last_used != index;
-         q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) {
+         q->read_ptr != index;
+         q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
         if (txq_id != IWL_CMD_QUEUE_NUM) {
             iwl_txstatus_to_ieee(priv,
-                    &(txq->txb[txq->q.last_used]));
+                    &(txq->txb[txq->q.read_ptr]));
             iwl_hw_txq_free_tfd(priv, txq);
         } else if (nfreed > 1) {
             IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
-                    q->first_empty, q->last_used);
+                    q->write_ptr, q->read_ptr);
             queue_work(priv->workqueue, &priv->restart);
         }
         nfreed++;
@@ -3564,7 +3564,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
         IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
                            agg->frame_count, agg->start_idx);
-        tx_status = &(priv->txq[txq_id].txb[txq->q.last_used].status);
+        tx_status = &(priv->txq[txq_id].txb[txq->q.read_ptr].status);
         tx_status->retry_count = tx_resp->failure_frame;
         tx_status->queue_number = status & 0xff;
         tx_status->queue_length = tx_resp->bt_kill_count;
@@ -3669,8 +3669,8 @@ static void iwl_rx_reply_tx(struct iwl_priv *priv,
     if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
         IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
                   "is out of range [0-%d] %d %d\n", txq_id,
-                  index, txq->q.n_bd, txq->q.first_empty,
-                  txq->q.last_used);
+                  index, txq->q.n_bd, txq->q.write_ptr,
+                  txq->q.read_ptr);
         return;
     }
@@ -3705,7 +3705,7 @@ static void iwl_rx_reply_tx(struct iwl_priv *priv,
             /* TODO: send BAR */
         }
-        if ((txq->q.last_used != (scd_ssn & 0xff))) {
+        if ((txq->q.read_ptr != (scd_ssn & 0xff))) {
             index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
             IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
                                "%d index %d\n", scd_ssn , index);
@@ -3714,7 +3714,7 @@ static void iwl_rx_reply_tx(struct iwl_priv *priv,
     } else {
 #endif /* CONFIG_IWLWIFI_HT_AGG */
 #endif /* CONFIG_IWLWIFI_HT */
-    tx_status = &(txq->txb[txq->q.last_used].status);
+    tx_status = &(txq->txb[txq->q.read_ptr].status);
     tx_status->retry_count = tx_resp->failure_frame;
     tx_status->queue_number = status;
@@ -4692,14 +4692,14 @@ int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv,
         if (rc)
             return rc;
         iwl_write_restricted(priv, HBUS_TARG_WRPTR,
-                             txq->q.first_empty | (txq_id << 8));
+                             txq->q.write_ptr | (txq_id << 8));
         iwl_release_restricted_access(priv);
     /* else not in power-save mode, uCode will never sleep when we're
      * trying to tx (during RFKILL, we're not trying to tx). */
     } else
         iwl_write32(priv, HBUS_TARG_WRPTR,
-                    txq->q.first_empty | (txq_id << 8));
+                    txq->q.write_ptr | (txq_id << 8));
     txq->need_update = 0;
...
@@ -144,8 +144,8 @@ struct iwl_rt_tx_hdr {
  */
 struct iwl_queue {
     int n_bd;              /* number of BDs in this queue */
-    int first_empty;       /* 1-st empty entry (index) host_w*/
-    int last_used;         /* last used entry (index) host_r*/
+    int write_ptr;         /* 1-st empty entry (index) host_w*/
+    int read_ptr;          /* last used entry (index) host_r*/
     dma_addr_t dma_addr;   /* physical addr for BD's */
     int n_window;          /* safe queue window */
     u32 id;
...
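A brief usage note, again illustrative rather than part of the patch: both pointers only ever advance forward modulo the ring size, which is what the iwl_queue_inc_wrap() calls in the hunks above provide. A minimal sketch of that wrap step, under the assumption that the helper simply resets the index once it reaches n_bd:

/* Assumed sketch of the wrap helper used when advancing write_ptr/read_ptr. */
static inline int iwl_queue_inc_wrap_sketch(int index, int n_bd)
{
    return ++index == n_bd ? 0 : index;
}

/* For example, after queueing a TFD:
 *     q->write_ptr = iwl_queue_inc_wrap_sketch(q->write_ptr, q->n_bd);
 * and after reclaiming one:
 *     q->read_ptr = iwl_queue_inc_wrap_sketch(q->read_ptr, q->n_bd);
 */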