Commit 22852fad authored by Mordechay Goodstein, committed by Luca Coelho

iwl-trans: move dev_cmd_offs, page_offs to a common trans header

The dev_cmd_offs and page_offs fields are not directly related to the PCIe
transport, so move them to the common iwl-trans.h header.
Signed-off-by: Mordechay Goodstein <mordechay.goodstein@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/iwlwifi.20200926002540.83b41765961f.Icd12bfb2a736ccf4cbe080973c746fb70a3c4a50@changeid
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent e27c506a
...@@ -914,6 +914,8 @@ struct iwl_txq { ...@@ -914,6 +914,8 @@ struct iwl_txq {
* struct iwl_trans_txqs - transport tx queues data * struct iwl_trans_txqs - transport tx queues data
* *
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @page_offs: offset from skb->cb to mac header page pointer
* @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
* @queue_used - bit mask of used queues * @queue_used - bit mask of used queues
* @queue_stopped - bit mask of stopped queues * @queue_stopped - bit mask of stopped queues
*/ */
...@@ -924,6 +926,8 @@ struct iwl_trans_txqs { ...@@ -924,6 +926,8 @@ struct iwl_trans_txqs {
struct dma_pool *bc_pool; struct dma_pool *bc_pool;
size_t bc_tbl_size; size_t bc_tbl_size;
bool bc_table_dword; bool bc_table_dword;
u8 page_offs;
u8 dev_cmd_offs;
struct { struct {
u8 fifo; u8 fifo;
......
...@@ -457,8 +457,6 @@ struct iwl_trans_pcie { ...@@ -457,8 +457,6 @@ struct iwl_trans_pcie {
wait_queue_head_t wait_command_queue; wait_queue_head_t wait_command_queue;
wait_queue_head_t sx_waitq; wait_queue_head_t sx_waitq;
u8 page_offs, dev_cmd_offs;
u8 def_rx_queue; u8 def_rx_queue;
u8 n_no_reclaim_cmds; u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
...@@ -962,7 +960,7 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, ...@@ -962,7 +960,7 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size); struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans); void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, void iwl_pcie_free_tso_page(struct iwl_trans *trans,
struct sk_buff *skb); struct sk_buff *skb);
#ifdef CONFIG_INET #ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
......
...@@ -1911,6 +1911,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, ...@@ -1911,6 +1911,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans->txqs.cmd.q_id = trans_cfg->cmd_queue; trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
trans->txqs.page_offs = trans_cfg->cb_data_offs;
trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0; trans_pcie->n_no_reclaim_cmds = 0;
else else
...@@ -1932,9 +1935,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, ...@@ -1932,9 +1935,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
trans_pcie->page_offs = trans_cfg->cb_data_offs;
trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
trans->command_groups = trans_cfg->command_groups; trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size; trans->command_groups_size = trans_cfg->command_groups_size;
......
...@@ -253,11 +253,10 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, ...@@ -253,11 +253,10 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
static struct page *get_workaround_page(struct iwl_trans *trans, static struct page *get_workaround_page(struct iwl_trans *trans,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page **page_ptr; struct page **page_ptr;
struct page *ret; struct page *ret;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
ret = alloc_page(GFP_ATOMIC); ret = alloc_page(GFP_ATOMIC);
if (!ret) if (!ret)
...@@ -711,7 +710,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, ...@@ -711,7 +710,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id) struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta; struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq = trans->txqs.txq[txq_id]; struct iwl_txq *txq = trans->txqs.txq[txq_id];
u16 cmd_len; u16 cmd_len;
...@@ -741,7 +739,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -741,7 +739,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr; struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb + dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs); trans->txqs.dev_cmd_offs);
*dev_cmd_ptr = dev_cmd; *dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb); __skb_queue_tail(&txq->overflow_q, skb);
...@@ -1171,7 +1169,6 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, ...@@ -1171,7 +1169,6 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
*/ */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id]; struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock); spin_lock_bh(&txq->lock);
...@@ -1186,7 +1183,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) ...@@ -1186,7 +1183,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
if (WARN_ON_ONCE(!skb)) if (WARN_ON_ONCE(!skb))
continue; continue;
iwl_pcie_free_tso_page(trans_pcie, skb); iwl_pcie_free_tso_page(trans, skb);
} }
iwl_pcie_gen2_free_tfd(trans, txq); iwl_pcie_gen2_free_tfd(trans, txq);
txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
......
...@@ -614,13 +614,13 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, ...@@ -614,13 +614,13 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
return 0; return 0;
} }
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, void iwl_pcie_free_tso_page(struct iwl_trans *trans,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct page **page_ptr; struct page **page_ptr;
struct page *next; struct page *next;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
next = *page_ptr; next = *page_ptr;
*page_ptr = NULL; *page_ptr = NULL;
...@@ -668,7 +668,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) ...@@ -668,7 +668,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
if (WARN_ON_ONCE(!skb)) if (WARN_ON_ONCE(!skb))
continue; continue;
iwl_pcie_free_tso_page(trans_pcie, skb); iwl_pcie_free_tso_page(trans, skb);
} }
iwl_pcie_txq_free_tfd(trans, txq); iwl_pcie_txq_free_tfd(trans, txq);
txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
...@@ -1107,7 +1107,6 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) ...@@ -1107,7 +1107,6 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs) struct sk_buff_head *skbs)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id]; struct iwl_txq *txq = trans->txqs.txq[txq_id];
int tfd_num = iwl_pcie_get_cmd_index(txq, ssn); int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr); int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
...@@ -1156,7 +1155,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, ...@@ -1156,7 +1155,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (WARN_ON_ONCE(!skb)) if (WARN_ON_ONCE(!skb))
continue; continue;
iwl_pcie_free_tso_page(trans_pcie, skb); iwl_pcie_free_tso_page(trans, skb);
__skb_queue_tail(skbs, skb); __skb_queue_tail(skbs, skb);
...@@ -1200,7 +1199,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, ...@@ -1200,7 +1199,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_device_tx_cmd *dev_cmd_ptr; struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb + dev_cmd_ptr = *(void **)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs); trans->txqs.dev_cmd_offs);
/* /*
* Note that we can very well be overflowing again. * Note that we can very well be overflowing again.
...@@ -2058,7 +2057,7 @@ struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, ...@@ -2058,7 +2057,7 @@ struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
struct page **page_ptr; struct page **page_ptr;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
if (WARN_ON(*page_ptr)) if (WARN_ON(*page_ptr))
return NULL; return NULL;
...@@ -2369,7 +2368,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -2369,7 +2368,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr; struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb + dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs); trans->txqs.dev_cmd_offs);
*dev_cmd_ptr = dev_cmd; *dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb); __skb_queue_tail(&txq->overflow_q, skb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment