Commit 7b3e42ea authored by Golan Ben Ami, committed by Luca Coelho

iwlwifi: support multiple tfd queue max sizes for different devices

On 22560 devices the TFD queue max size is 2^16 entries. Allow a configurable
max size in the driver to support different devices.
Signed-off-by: Golan Ben Ami <golan.ben.ami@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent f5955a6c
@@ -63,6 +63,7 @@
static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
...
@@ -102,6 +102,19 @@
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
.num_of_queues = 512,
+	.max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
};
+static const struct iwl_base_params iwl_22560_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+	.num_of_queues = 512,
+	.max_tfd_queue_size = 65536,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -119,7 +132,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
#define IWL_DEVICE_22000_COMMON \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
-	.base_params = &iwl_22000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
.non_shared_ant = ANT_A, \
@@ -148,11 +160,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
#define IWL_DEVICE_22500 \
IWL_DEVICE_22000_COMMON, \
.device_family = IWL_DEVICE_FAMILY_22000, \
+	.base_params = &iwl_22000_base_params, \
.csr = &iwl_csr_v1
#define IWL_DEVICE_22560 \
IWL_DEVICE_22000_COMMON, \
.device_family = IWL_DEVICE_FAMILY_22560, \
+	.base_params = &iwl_22560_base_params, \
.csr = &iwl_csr_v2
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
...
@@ -53,6 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
.pll_cfg = true,
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
...
@@ -72,6 +72,7 @@
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
...
@@ -123,6 +123,7 @@
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = 31,
+	.max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
...
@@ -104,6 +104,7 @@
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
+	.max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
...
@@ -95,6 +95,7 @@
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
.num_of_queues = 31,
+	.max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
...
@@ -177,6 +177,7 @@ static inline u8 num_of_ant(u8 mask)
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
+ * @max_tfd_queue_size: max number of entries in tfd queue.
*/
struct iwl_base_params {
unsigned int wd_timeout;
@@ -192,6 +193,7 @@ struct iwl_base_params {
scd_chain_ext_wa:1;
u16 num_of_queues; /* def: HW dependent */
+	u32 max_tfd_queue_size; /* def: HW dependent */
u8 max_ll_items;
u8 led_compensation;
...
@@ -7,6 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -643,10 +643,13 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \
+				TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
@@ -753,7 +756,7 @@ struct iwl_tfh_tfd {
* For devices up to 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
- * For 22000 and on:
+ * For 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
@@ -762,4 +765,15 @@ struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;
+/**
+ * struct iwl_gen3_bc_tbl scheduler byte count table gen3
+ * For 22560 and on:
+ * @tfd_offset: 0-12 - tx command byte count
+ *		12-13 - number of 64 byte chunks
+ *		14-16 - reserved
+ */
+struct iwl_gen3_bc_tbl {
+	__le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+} __packed;
#endif /* !__iwl_fh_h__ */
@@ -184,18 +184,18 @@ struct iwl_dma_ptr {
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
*/
-static inline int iwl_queue_inc_wrap(int index)
+static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
-	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
+	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
*/
-static inline int iwl_queue_dec_wrap(int index)
+static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
-	return --index & (TFD_QUEUE_SIZE_MAX - 1);
+	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
struct iwl_cmd_meta {
@@ -749,7 +749,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
-static inline u8 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
+static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
@@ -894,7 +894,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_queue_space(const struct iwl_txq *q);
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
...
@@ -2259,9 +2259,9 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
-		    (TFD_QUEUE_SIZE_MAX - 1),
+		    (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
-		    (TFD_QUEUE_SIZE_MAX - 1),
+		    (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
@@ -3080,7 +3080,7 @@ static struct iwl_trans_dump_data
txcmd = (void *)((u8 *)txcmd->data + caplen);
}
-		ptr = iwl_queue_dec_wrap(ptr);
+		ptr = iwl_queue_dec_wrap(trans, ptr);
}
spin_unlock_bh(&cmdq->lock);
...
@@ -91,6 +91,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+	struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
@@ -115,7 +117,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-	scd_bc_tbl->tfd_offset[idx] = bc_ent;
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+	else
+		scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
/*
@@ -492,11 +497,11 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
-	if (iwl_queue_space(txq) < txq->high_mark) {
+	if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
-		if (unlikely(iwl_queue_space(txq) < 3)) {
+		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -542,7 +547,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
@@ -654,7 +659,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
-	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -791,7 +796,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
iwl_trans_ref(trans);
}
/* Increment and update queue's write index */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -958,7 +963,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_gen2_free_tfd(trans, txq);
-	txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+	txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -1066,6 +1071,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
if (!txq)
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+				     (trans->cfg->device_family >=
+				      IWL_DEVICE_FAMILY_22560) ?
+				     sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1117,7 +1125,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txq[qid] = txq;
-	wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);
+	wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
...
@@ -71,27 +71,28 @@
*
***************************************************/
-int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
/*
* To avoid ambiguity between empty and completely full queues, there
-	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
-	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+	 * should always be less than max_tfd_queue_size elements in the queue.
+	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
-	if (q->n_window < TFD_QUEUE_SIZE_MAX)
+	if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
max = q->n_window;
else
-		max = TFD_QUEUE_SIZE_MAX - 1;
+		max = trans->cfg->base_params->max_tfd_queue_size - 1;
/*
-	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
-	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
+	 * modulo by max_tfd_queue_size and is well defined.
*/
-	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+	used = (q->write_ptr - q->read_ptr) &
+		(trans->cfg->base_params->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
@@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
+	size_t tfd_sz = trans_pcie->tfd_size *
+		trans->cfg->base_params->max_tfd_queue_size;
size_t tb0_buf_sz;
int i;
@@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
int ret;
+	u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
txq->need_update = false;
-	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
-	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+		      "Max tfd queue size must be a power of two, but is %d",
+		      tfd_queue_max_size))
+		return -EINVAL;
/* Initialize queue's high/low-water marks, and head/tail indexes */
ret = iwl_queue_init(txq, slots_num);
@@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
-	txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+	txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
-				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+				  trans_pcie->tfd_size *
+				  trans->cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
@@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
-	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
-		sizeof(struct iwlagn_scd_bc_tbl);
+	bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+			sizeof(struct iwl_gen3_bc_tbl) :
+			sizeof(struct iwlagn_scd_bc_tbl);
/*It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
@@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
-				     scd_bc_tbls_size);
+				     bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
goto error;
@@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
-	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
+	int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
+	int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue*/
@@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
}
-	if (txq->read_ptr == tfd_num)
+	if (read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
@@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
-	last_to_free = iwl_queue_dec_wrap(tfd_num);
+	last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
+			__func__, txq_id, last_to_free,
+			trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
for (;
-	     txq->read_ptr != tfd_num;
-	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
-		int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
-		struct sk_buff *skb = txq->entries[idx].skb;
+	     read_ptr != tfd_num;
+	     txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
+	     read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+		struct sk_buff *skb = txq->entries[read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__skb_queue_tail(skbs, skb);
-		txq->entries[idx].skb = NULL;
+		txq->entries[read_ptr].skb = NULL;
if (!trans->cfg->use_tfh)
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
@@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);
-	if (iwl_queue_space(txq) > txq->low_mark &&
+	if (iwl_queue_space(trans, txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
}
spin_lock_bh(&txq->lock);
-	if (iwl_queue_space(txq) > txq->low_mark)
+	if (iwl_queue_space(trans, txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
@@ -1232,17 +1243,19 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
idx = iwl_pcie_get_cmd_index(txq, idx);
r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
-	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
+	if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+	    (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
+			__func__, txq_id, idx,
+			trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
-	for (idx = iwl_queue_inc_wrap(idx); r != idx;
-	     r = iwl_queue_inc_wrap(r)) {
-		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+	for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
+	     r = iwl_queue_inc_wrap(trans, r)) {
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
@@ -1560,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
-	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1716,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -2316,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
-	if (iwl_queue_space(txq) < txq->high_mark) {
+	if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
-		if (unlikely(iwl_queue_space(txq) < 3)) {
+		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2449,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
...