Commit 5d19e208 authored by Johannes Berg, committed by Luca Coelho

iwlwifi: pcie: adjust to Bz completion descriptor

The Bz devices got a new completion descriptor again, since we only
ever really used 4 out of 32 bytes anyway. Adjust the code to deal
with that. Note that the intention was to reduce the size, but the
hardware was implemented wrongly.

While at it, do some cleanups: remove the union to simplify the code,
clean up iwl_pcie_free_bd_size() to no longer need an argument, and
add iwl_pcie_used_bd_size() with the logic to select the completion
descriptor size.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/iwlwifi.20220204122220.bef461a04110.I90c8885550fa54eb0aaa4363d322f50e301175a6@changeid
parent 9966904e
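
For reference, a minimal userspace sketch of the descriptor change and the new size-selection logic. The struct layouts and the size-selection decision mirror the diff below, but the device-family enum and the 512-entry queue size are simplified stand-ins rather than the driver's real definitions:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum device_family { FAMILY_9000, FAMILY_AX210, FAMILY_BZ };

/* Pre-Bz completion descriptor: 32 bytes, mostly reserved. */
struct iwl_rx_completion_desc {
	uint32_t reserved1;
	uint16_t rbid;
	uint8_t flags;
	uint8_t reserved2[25];
} __attribute__((packed));

/* Bz keeps only the 4 bytes that were ever actually used. */
struct iwl_rx_completion_desc_bz {
	uint16_t rbid;
	uint8_t flags;
	uint8_t reserved[1];
} __attribute__((packed));

/* Mirrors iwl_pcie_used_bd_size(): newest family wins, checked first. */
static size_t used_bd_size(enum device_family family)
{
	if (family >= FAMILY_BZ)
		return sizeof(struct iwl_rx_completion_desc_bz);
	if (family >= FAMILY_AX210)
		return sizeof(struct iwl_rx_completion_desc);
	return sizeof(uint32_t); /* older multi-queue: bare 32-bit word */
}

int main(void)
{
	/* Same invariants the driver checks with BUILD_BUG_ON(). */
	assert(sizeof(struct iwl_rx_completion_desc) == 32);
	assert(sizeof(struct iwl_rx_completion_desc_bz) == 4);

	/* Per-queue used-BD ring cost, assuming a 512-entry queue. */
	printf("AX210 ring: %zu bytes\n", 512 * used_bd_size(FAMILY_AX210));
	printf("Bz ring:    %zu bytes\n", 512 * used_bd_size(FAMILY_BZ));
	return 0;
}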
drivers/net/wireless/intel/iwlwifi/pcie/internal.h

 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -103,6 +103,18 @@ struct iwl_rx_completion_desc {
 	u8 reserved2[25];
 } __packed;
 
+/**
+ * struct iwl_rx_completion_desc_bz - Bz completion descriptor
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (0: fragmented, all others: reserved)
+ * @reserved: reserved
+ */
+struct iwl_rx_completion_desc_bz {
+	__le16 rbid;
+	u8 flags;
+	u8 reserved[1];
+} __packed;
+
 /**
  * struct iwl_rxq - Rx queue
  * @id: queue index
@@ -133,11 +145,7 @@ struct iwl_rxq {
 	int id;
 	void *bd;
 	dma_addr_t bd_dma;
-	union {
-		void *used_bd;
-		__le32 *bd_32;
-		struct iwl_rx_completion_desc *cd;
-	};
+	void *used_bd;
 	dma_addr_t used_bd_dma;
 	u32 read;
 	u32 write;
...
drivers/net/wireless/intel/iwlwifi/pcie/rx.c

 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -652,23 +652,30 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
 	iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
-static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
 {
-	struct iwl_rx_transfer_desc *rx_td;
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		return sizeof(struct iwl_rx_transfer_desc);
 
-	if (use_rx_td)
-		return sizeof(*rx_td);
-	else
-		return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
-			sizeof(__le32);
+	return trans->trans_cfg->mq_rx_supported ?
+		sizeof(__le64) : sizeof(__le32);
+}
+
+static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
+{
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+		return sizeof(struct iwl_rx_completion_desc_bz);
+
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		return sizeof(struct iwl_rx_completion_desc);
+
+	return sizeof(__le32);
 }
 
 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
 				  struct iwl_rxq *rxq)
 {
-	bool use_rx_td = (trans->trans_cfg->device_family >=
-			  IWL_DEVICE_FAMILY_AX210);
-	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+	int free_size = iwl_pcie_free_bd_size(trans);
 
 	if (rxq->bd)
 		dma_free_coherent(trans->dev,
@@ -682,8 +689,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
 
 	if (rxq->used_bd)
 		dma_free_coherent(trans->dev,
-				  (use_rx_td ? sizeof(*rxq->cd) :
-				   sizeof(__le32)) * rxq->queue_size,
+				  iwl_pcie_used_bd_size(trans) *
+				  rxq->queue_size,
 				  rxq->used_bd, rxq->used_bd_dma);
 	rxq->used_bd_dma = 0;
 	rxq->used_bd = NULL;
@@ -707,7 +714,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	else
 		rxq->queue_size = RX_QUEUE_SIZE;
 
-	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+	free_size = iwl_pcie_free_bd_size(trans);
 
 	/*
 	 * Allocate the circular buffer of Read Buffer Descriptors
@@ -720,7 +727,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 
 	if (trans->trans_cfg->mq_rx_supported) {
 		rxq->used_bd = dma_alloc_coherent(dev,
-						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+						  iwl_pcie_used_bd_size(trans) *
+						  rxq->queue_size,
 						  &rxq->used_bd_dma,
 						  GFP_KERNEL);
 		if (!rxq->used_bd)
@@ -1417,6 +1425,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
 	u16 vid;
 
 	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
 
 	if (!trans->trans_cfg->mq_rx_supported) {
 		rxb = rxq->queue[i];
@@ -1424,11 +1433,20 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
 		return rxb;
 	}
 
-	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
-		vid = le16_to_cpu(rxq->cd[i].rbid);
-		*join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+		struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
+
+		vid = le16_to_cpu(cd[i].rbid);
+		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+		struct iwl_rx_completion_desc *cd = rxq->used_bd;
+
+		vid = le16_to_cpu(cd[i].rbid);
+		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
 	} else {
-		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+		__le32 *cd = rxq->used_bd;
+
+		vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
 	}
 
 	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
...
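
A note on the union removal: struct iwl_rxq now keeps used_bd as a bare void *, and each consumer casts it to the layout that matches the device family, as iwl_pcie_get_rxb() does above. A hedged illustration of that cast-at-use-site pattern, reusing the model types from the sketch near the top (read_rbid() is a hypothetical helper, not a driver function):

/* Pick the right view of the used-BD ring at the point of use. */
static uint16_t read_rbid(void *used_bd, enum device_family family, int i)
{
	if (family >= FAMILY_BZ) {
		struct iwl_rx_completion_desc_bz *cd = used_bd;

		return cd[i].rbid; /* le16_to_cpu() in the real driver */
	}
	if (family >= FAMILY_AX210) {
		struct iwl_rx_completion_desc *cd = used_bd;

		return cd[i].rbid;
	}
	/* pre-AX210 multi-queue: bare 32-bit words, low 12 bits are the VID */
	uint32_t *cd = used_bd;

	return cd[i] & 0x0FFF;
}

Since the allocation size (iwl_pcie_used_bd_size()) and the read-side cast are now driven by the same device-family checks, the old anonymous union no longer bought anything and only obscured which member was valid at any given time.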