Commit 4f4822b7 authored by Mordechay Goodstein, committed by Luca Coelho

iwlwifi: move txq-specific from trans_pcie to common trans

We don't want to have txq code in the PCIe transport code, so move all
the relevant elements to a new iwl_txq structure and store it in
iwl_trans.

spatch

@ replace_pcie @
struct iwl_trans_pcie *trans_pcie;
@@

(
-trans_pcie->queue_stopped
+trans->txqs.queue_stopped
|
-trans_pcie->queue_used
+trans->txqs.queue_used
|
-trans_pcie->txq
+trans->txqs.txq
|
-trans_pcie->txq
+trans->txqs.txq
|
-trans_pcie->cmd_queue
+trans->txqs.cmd.q_id
|
-trans_pcie->cmd_fifo
+trans->txqs.cmd.fifo
|
-trans_pcie->cmd_q_wdg_timeout
+trans->txqs.cmd.wdg_timeout
)

// clean all new unused variables
@ depends on replace_pcie @
type T;
identifier i;
expression E;
@@
- T i = E;
 ... when != i
Signed-off-by: Mordechay Goodstein <mordechay.goodstein@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/iwlwifi.20200529092401.a428d3c9d66f.Ie04ae55f33954636a39c98e7ae1e739c0507435b@changeid
parent 4807e736
...@@ -902,6 +902,25 @@ struct iwl_txq { ...@@ -902,6 +902,25 @@ struct iwl_txq {
bool overflow_tx; bool overflow_tx;
}; };
/**
* struct iwl_trans_txqs - transport tx queues data
*
* @queue_used: bit mask of used queues
* @queue_stopped: bit mask of stopped queues
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
struct {
u8 fifo;
u8 q_id;
unsigned int wdg_timeout;
} cmd;
};
/** /**
* struct iwl_trans - transport common data * struct iwl_trans - transport common data
* *
...@@ -935,6 +954,7 @@ struct iwl_txq { ...@@ -935,6 +954,7 @@ struct iwl_txq {
* @system_pm_mode: the system-wide power management mode in use. * @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values * This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime. * configured from the userspace at runtime.
* @txqs: transport tx queues data.
*/ */
struct iwl_trans { struct iwl_trans {
const struct iwl_trans_ops *ops; const struct iwl_trans_ops *ops;
...@@ -982,6 +1002,7 @@ struct iwl_trans { ...@@ -982,6 +1002,7 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode; enum iwl_plat_pm_mode system_pm_mode;
const char *name; const char *name;
struct iwl_trans_txqs txqs;
/* pointer to trans specific struct */ /* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */ /*Ensure that this pointer will always be aligned to sizeof pointer */
......
...@@ -221,7 +221,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, ...@@ -221,7 +221,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->tr_idx_arr_size = ctxt_info_gen3->tr_idx_arr_size =
cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS); cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
ctxt_info_gen3->mtr_base_addr = ctxt_info_gen3->mtr_base_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr = ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma); cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size = ctxt_info_gen3->mtr_size =
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY * GPL LICENSE SUMMARY
* *
* Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation * Copyright(c) 2018 - 2020 Intel Corporation
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -263,7 +263,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, ...@@ -263,7 +263,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */ /* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr = ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size = ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE); TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
......
...@@ -454,9 +454,6 @@ struct iwl_trans_pcie { ...@@ -454,9 +454,6 @@ struct iwl_trans_pcie {
struct dma_pool *bc_pool; struct dma_pool *bc_pool;
struct iwl_txq *txq_memory; struct iwl_txq *txq_memory;
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
/* PCI bus related data */ /* PCI bus related data */
struct pci_dev *pci_dev; struct pci_dev *pci_dev;
...@@ -470,10 +467,7 @@ struct iwl_trans_pcie { ...@@ -470,10 +467,7 @@ struct iwl_trans_pcie {
u8 page_offs, dev_cmd_offs; u8 page_offs, dev_cmd_offs;
u8 cmd_queue;
u8 def_rx_queue; u8 def_rx_queue;
u8 cmd_fifo;
unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds; u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs; u8 max_tbs;
...@@ -876,9 +870,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans); ...@@ -876,9 +870,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
static inline void iwl_wake_queue(struct iwl_trans *trans, static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_txq *txq) struct iwl_txq *txq)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
iwl_op_mode_queue_not_full(trans->op_mode, txq->id); iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
} }
...@@ -887,9 +879,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans, ...@@ -887,9 +879,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
static inline void iwl_stop_queue(struct iwl_trans *trans, static inline void iwl_stop_queue(struct iwl_trans *trans,
struct iwl_txq *txq) struct iwl_txq *txq)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
iwl_op_mode_queue_full(trans->op_mode, txq->id); iwl_op_mode_queue_full(trans->op_mode, txq->id);
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else } else
......
...@@ -1284,7 +1284,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, ...@@ -1284,7 +1284,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i) int i)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
bool page_stolen = false; bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes; int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0; u32 offset = 0;
...@@ -1671,9 +1671,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) ...@@ -1671,9 +1671,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
} }
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
if (!trans_pcie->txq[i]) if (!trans->txqs.txq[i])
continue; continue;
del_timer(&trans_pcie->txq[i]->stuck_timer); del_timer(&trans->txqs.txq[i]->stuck_timer);
} }
/* The STATUS_FW_ERROR bit is set in this function. This must happen /* The STATUS_FW_ERROR bit is set in this function. This must happen
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY * GPL LICENSE SUMMARY
* *
* Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation * Copyright(c) 2018 - 2020 Intel Corporation
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) ...@@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM; return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */ /* Allocate or reset and init all Tx and Command queues */
if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size)) if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size))
return -ENOMEM; return -ENOMEM;
/* enable shadow regs in HW */ /* enable shadow regs in HW */
...@@ -262,8 +262,9 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) ...@@ -262,8 +262,9 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
iwl_pcie_reset_ict(trans); iwl_pcie_reset_ict(trans);
/* make sure all queue are not stopped/used */ /* make sure all queue are not stopped/used */
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans->txqs.queue_stopped, 0,
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); sizeof(trans->txqs.queue_stopped));
memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* now that we got alive we can free the fw image & the context info. /* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed included since FW will still use it * paging memory cannot be freed included since FW will still use it
......
...@@ -1904,9 +1904,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, ...@@ -1904,9 +1904,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->cmd_queue = trans_cfg->cmd_queue; trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans_pcie->cmd_fifo = trans_cfg->cmd_fifo; trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout; trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0; trans_pcie->n_no_reclaim_cmds = 0;
else else
...@@ -2199,11 +2199,10 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, ...@@ -2199,11 +2199,10 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
unsigned long txqs, unsigned long txqs,
bool freeze) bool freeze)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int queue; int queue;
for_each_set_bit(queue, &txqs, BITS_PER_LONG) { for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
struct iwl_txq *txq = trans_pcie->txq[queue]; struct iwl_txq *txq = trans->txqs.txq[queue];
unsigned long now; unsigned long now;
spin_lock_bh(&txq->lock); spin_lock_bh(&txq->lock);
...@@ -2251,13 +2250,12 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, ...@@ -2251,13 +2250,12 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i; int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txq[i]; struct iwl_txq *txq = trans->txqs.txq[i];
if (i == trans_pcie->cmd_queue) if (i == trans->txqs.cmd.q_id)
continue; continue;
spin_lock_bh(&txq->lock); spin_lock_bh(&txq->lock);
...@@ -2326,7 +2324,6 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, ...@@ -2326,7 +2324,6 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq; struct iwl_txq *txq;
unsigned long now = jiffies; unsigned long now = jiffies;
bool overflow_tx; bool overflow_tx;
...@@ -2336,11 +2333,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) ...@@ -2336,11 +2333,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
if (test_bit(STATUS_TRANS_DEAD, &trans->status)) if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV; return -ENODEV;
if (!test_bit(txq_idx, trans_pcie->queue_used)) if (!test_bit(txq_idx, trans->txqs.queue_used))
return -EINVAL; return -EINVAL;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
txq = trans_pcie->txq[txq_idx]; txq = trans->txqs.txq[txq_idx];
spin_lock_bh(&txq->lock); spin_lock_bh(&txq->lock);
overflow_tx = txq->overflow_tx || overflow_tx = txq->overflow_tx ||
...@@ -2388,7 +2385,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) ...@@ -2388,7 +2385,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cnt; int cnt;
int ret = 0; int ret = 0;
...@@ -2397,9 +2393,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) ...@@ -2397,9 +2393,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
cnt < trans->trans_cfg->base_params->num_of_queues; cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) { cnt++) {
if (cnt == trans_pcie->cmd_queue) if (cnt == trans->txqs.cmd.q_id)
continue; continue;
if (!test_bit(cnt, trans_pcie->queue_used)) if (!test_bit(cnt, trans->txqs.queue_used))
continue; continue;
if (!(BIT(cnt) & txq_bm)) if (!(BIT(cnt) & txq_bm))
continue; continue;
...@@ -2573,13 +2569,12 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) ...@@ -2573,13 +2569,12 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state = v; struct iwl_dbgfs_tx_queue_state *state = v;
struct iwl_trans *trans = priv->trans; struct iwl_trans *trans = priv->trans;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[state->pos];
struct iwl_txq *txq = trans_pcie->txq[state->pos];
seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
(unsigned int)state->pos, (unsigned int)state->pos,
!!test_bit(state->pos, trans_pcie->queue_used), !!test_bit(state->pos, trans->txqs.queue_used),
!!test_bit(state->pos, trans_pcie->queue_stopped)); !!test_bit(state->pos, trans->txqs.queue_stopped));
if (txq) if (txq)
seq_printf(seq, seq_printf(seq,
"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
...@@ -2589,7 +2584,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) ...@@ -2589,7 +2584,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
else else
seq_puts(seq, "(unallocated)"); seq_puts(seq, "(unallocated)");
if (state->pos == trans_pcie->cmd_queue) if (state->pos == trans->txqs.cmd.q_id)
seq_puts(seq, " (HCMD)"); seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n"); seq_puts(seq, "\n");
...@@ -3265,7 +3260,7 @@ static struct iwl_trans_dump_data ...@@ -3265,7 +3260,7 @@ static struct iwl_trans_dump_data
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data; struct iwl_fw_error_dump_data *data;
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data; struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0; u32 len, num_rbs = 0, monitor_len = 0;
......
...@@ -64,7 +64,6 @@ ...@@ -64,7 +64,6 @@
*/ */
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id; int txq_id;
/* /*
...@@ -72,12 +71,13 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) ...@@ -72,12 +71,13 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt. * queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped. * Since we stop Tx altogether - mark the queues as stopped.
*/ */
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans->txqs.queue_stopped, 0,
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); sizeof(trans->txqs.queue_stopped));
memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Unmap DMA from host system and free skb's */ /* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) { for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
if (!trans_pcie->txq[txq_id]) if (!trans->txqs.txq[txq_id])
continue; continue;
iwl_pcie_gen2_txq_unmap(trans, txq_id); iwl_pcie_gen2_txq_unmap(trans, txq_id);
} }
...@@ -716,7 +716,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -716,7 +716,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta; struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq = trans_pcie->txq[txq_id]; struct iwl_txq *txq = trans->txqs.txq[txq_id];
u16 cmd_len; u16 cmd_len;
int idx; int idx;
void *tfd; void *tfd;
...@@ -725,7 +725,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -725,7 +725,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
"queue %d out of range", txq_id)) "queue %d out of range", txq_id))
return -EINVAL; return -EINVAL;
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
"TX on unused queue %d\n", txq_id)) "TX on unused queue %d\n", txq_id))
return -EINVAL; return -EINVAL;
...@@ -819,7 +819,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, ...@@ -819,7 +819,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd) struct iwl_host_cmd *cmd)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd; struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta; struct iwl_cmd_meta *out_meta;
unsigned long flags; unsigned long flags;
...@@ -931,7 +931,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, ...@@ -931,7 +931,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence = out_cmd->hdr_wide.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr)); INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide); cmd_pos = sizeof(struct iwl_cmd_header_wide);
...@@ -979,7 +979,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, ...@@ -979,7 +979,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id, iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */ /* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
...@@ -1056,7 +1056,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, ...@@ -1056,7 +1056,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx; int cmd_idx;
int ret; int ret;
...@@ -1175,14 +1175,14 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, ...@@ -1175,14 +1175,14 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id]; struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock); spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) { while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr); txq_id, txq->read_ptr);
if (txq_id != trans_pcie->cmd_queue) { if (txq_id != trans->txqs.cmd.q_id) {
int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb; struct sk_buff *skb = txq->entries[idx].skb;
...@@ -1240,7 +1240,6 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, ...@@ -1240,7 +1240,6 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
*/ */
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq; struct iwl_txq *txq;
int i; int i;
...@@ -1248,7 +1247,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) ...@@ -1248,7 +1247,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
"queue %d out of range", txq_id)) "queue %d out of range", txq_id))
return; return;
txq = trans_pcie->txq[txq_id]; txq = trans->txqs.txq[txq_id];
if (WARN_ON(!txq)) if (WARN_ON(!txq))
return; return;
...@@ -1256,7 +1255,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) ...@@ -1256,7 +1255,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_gen2_txq_unmap(trans, txq_id); iwl_pcie_gen2_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */ /* De-alloc array of command/tx buffers */
if (txq_id == trans_pcie->cmd_queue) if (txq_id == trans->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) { for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd); kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf); kzfree(txq->entries[i].free_buf);
...@@ -1265,9 +1264,9 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) ...@@ -1265,9 +1264,9 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_gen2_txq_free_memory(trans, txq); iwl_pcie_gen2_txq_free_memory(trans, txq);
trans_pcie->txq[txq_id] = NULL; trans->txqs.txq[txq_id] = NULL;
clear_bit(txq_id, trans_pcie->queue_used); clear_bit(txq_id, trans->txqs.queue_used);
} }
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
...@@ -1327,7 +1326,6 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, ...@@ -1327,7 +1326,6 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
struct iwl_txq *txq, struct iwl_txq *txq,
struct iwl_host_cmd *hcmd) struct iwl_host_cmd *hcmd)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue_cfg_rsp *rsp; struct iwl_tx_queue_cfg_rsp *rsp;
int ret, qid; int ret, qid;
u32 wr_ptr; u32 wr_ptr;
...@@ -1342,20 +1340,20 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, ...@@ -1342,20 +1340,20 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
qid = le16_to_cpu(rsp->queue_number); qid = le16_to_cpu(rsp->queue_number);
wr_ptr = le16_to_cpu(rsp->write_pointer); wr_ptr = le16_to_cpu(rsp->write_pointer);
if (qid >= ARRAY_SIZE(trans_pcie->txq)) { if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
WARN_ONCE(1, "queue index %d unsupported", qid); WARN_ONCE(1, "queue index %d unsupported", qid);
ret = -EIO; ret = -EIO;
goto error_free_resp; goto error_free_resp;
} }
if (test_and_set_bit(qid, trans_pcie->queue_used)) { if (test_and_set_bit(qid, trans->txqs.queue_used)) {
WARN_ONCE(1, "queue %d already used", qid); WARN_ONCE(1, "queue %d already used", qid);
ret = -EIO; ret = -EIO;
goto error_free_resp; goto error_free_resp;
} }
txq->id = qid; txq->id = qid;
trans_pcie->txq[qid] = txq; trans->txqs.txq[qid] = txq;
wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */ /* Place first TFD at index corresponding to start sequence number */
...@@ -1413,8 +1411,6 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, ...@@ -1413,8 +1411,6 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (WARN(queue >= IWL_MAX_TVQM_QUEUES, if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
"queue %d out of range", queue)) "queue %d out of range", queue))
return; return;
...@@ -1425,7 +1421,7 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) ...@@ -1425,7 +1421,7 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
* allow the op_mode to call txq_disable after it already called * allow the op_mode to call txq_disable after it already called
* stop_device. * stop_device.
*/ */
if (!test_and_clear_bit(queue, trans_pcie->queue_used)) { if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", queue); "queue %d not used", queue);
return; return;
...@@ -1433,22 +1429,21 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) ...@@ -1433,22 +1429,21 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
iwl_pcie_gen2_txq_unmap(trans, queue); iwl_pcie_gen2_txq_unmap(trans, queue);
iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]); iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]);
trans_pcie->txq[queue] = NULL; trans->txqs.txq[queue] = NULL;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
} }
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i; int i;
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Free all TX queues */ /* Free all TX queues */
for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) { for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
if (!trans_pcie->txq[i]) if (!trans->txqs.txq[i])
continue; continue;
iwl_pcie_gen2_txq_free(trans, i); iwl_pcie_gen2_txq_free(trans, i);
...@@ -1457,35 +1452,34 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) ...@@ -1457,35 +1452,34 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size) int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *queue; struct iwl_txq *queue;
int ret; int ret;
/* alloc and init the tx queue */ /* alloc and init the tx queue */
if (!trans_pcie->txq[txq_id]) { if (!trans->txqs.txq[txq_id]) {
queue = kzalloc(sizeof(*queue), GFP_KERNEL); queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue) { if (!queue) {
IWL_ERR(trans, "Not enough memory for tx queue\n"); IWL_ERR(trans, "Not enough memory for tx queue\n");
return -ENOMEM; return -ENOMEM;
} }
trans_pcie->txq[txq_id] = queue; trans->txqs.txq[txq_id] = queue;
ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true); ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
if (ret) { if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error; goto error;
} }
} else { } else {
queue = trans_pcie->txq[txq_id]; queue = trans->txqs.txq[txq_id];
} }
ret = iwl_pcie_txq_init(trans, queue, queue_size, ret = iwl_pcie_txq_init(trans, queue, queue_size,
(txq_id == trans_pcie->cmd_queue)); (txq_id == trans->txqs.cmd.q_id));
if (ret) { if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error; goto error;
} }
trans_pcie->txq[txq_id]->id = txq_id; trans->txqs.txq[txq_id]->id = txq_id;
set_bit(txq_id, trans_pcie->queue_used); set_bit(txq_id, trans->txqs.queue_used);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment