Commit 66c27e3b authored by Alexander Lobakin, committed by Tony Nguyen

idpf: stop using macros for accessing queue descriptors

In C, we have structures and unions.
Casting `void *` via macros is not only error-prone, but also looks
confusing and awful in general.
In preparation for splitting the queue structs, replace it with a
union and direct array dereferences.
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 62c88425
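To make the refactor concrete outside the kernel sources, here is a minimal sketch of the before/after access pattern. The queue_old/queue_new/tx_desc names below are illustrative stand-ins, not the driver's real definitions; only the technique mirrors the commit: a type-erased void * ring hidden behind a cast macro versus an anonymous union of typed ring pointers indexed directly.

/* Sketch only, not driver code: illustrative types for the
 * macro-vs-union descriptor access pattern.
 */
#include <stddef.h>

struct tx_desc {
    unsigned long long qw0;
    unsigned long long qw1;
};

/* Before: one type-erased ring, casts hidden behind a macro. */
struct queue_old {
    void *desc_ring;
};

#define TX_DESC(q, i)   (&(((struct tx_desc *)((q)->desc_ring))[i]))

/* After: an anonymous union of typed views of the same ring memory. */
struct queue_new {
    union {
        struct tx_desc *tx;     /* typed datapath view */
        void *desc_ring;        /* kept for generic alloc/free paths */
    };
};

static struct tx_desc *get_old(struct queue_old *q, size_t i)
{
    return TX_DESC(q, i);       /* compiles no matter what desc_ring holds */
}

static struct tx_desc *get_new(struct queue_new *q, size_t i)
{
    return &q->tx[i];           /* ordinary, type-checked array access */
}

With the union, using the wrong descriptor type at a call site is visible in the code rather than silently reinterpreted behind a macro, and the void *desc_ring member survives for paths that treat the ring as raw memory.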
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -20,7 +20,6 @@ struct idpf_vport_max_q;
 #include <linux/dim.h>
 #include "virtchnl2.h"
-#include "idpf_lan_txrx.h"
 #include "idpf_txrx.h"
 #include "idpf_controlq.h"
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -4,6 +4,8 @@
 #ifndef _IDPF_LAN_TXRX_H_
 #define _IDPF_LAN_TXRX_H_
+#include <linux/bits.h>
 enum idpf_rss_hash {
     IDPF_HASH_INVALID           = 0,
     /* Values 1 - 28 are reserved for future use */
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -205,7 +205,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
     data_len = skb->data_len;
     size = skb_headlen(skb);
-    tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+    tx_desc = &tx_q->base_tx[i];
     dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
@@ -239,7 +239,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
             i++;
             if (i == tx_q->desc_count) {
-                tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+                tx_desc = &tx_q->base_tx[0];
                 i = 0;
             }
@@ -259,7 +259,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
         i++;
         if (i == tx_q->desc_count) {
-            tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+            tx_desc = &tx_q->base_tx[0];
             i = 0;
         }
@@ -307,7 +307,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
     memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
     txq->tx_buf[ntu].ctx_entry = true;
-    ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+    ctx_desc = &txq->base_ctx[ntu];
     IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
     txq->next_to_use = ntu;
@@ -455,7 +455,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
     struct netdev_queue *nq;
     bool dont_wake;
-    tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+    tx_desc = &tx_q->base_tx[ntc];
     tx_buf = &tx_q->tx_buf[ntc];
     ntc -= tx_q->desc_count;
@@ -517,7 +517,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
             if (unlikely(!ntc)) {
                 ntc -= tx_q->desc_count;
                 tx_buf = tx_q->tx_buf;
-                tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+                tx_desc = &tx_q->base_tx[0];
             }
             /* unmap any remaining paged data */
@@ -540,7 +540,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
         if (unlikely(!ntc)) {
             ntc -= tx_q->desc_count;
             tx_buf = tx_q->tx_buf;
-            tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+            tx_desc = &tx_q->base_tx[0];
         }
     } while (likely(budget));
@@ -895,7 +895,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
     if (!cleaned_count)
         return false;
-    desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
+    desc = &rx_q->single_buf[nta];
     buf = &rx_q->rx_buf.buf[nta];
     do {
@@ -915,7 +915,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
         buf++;
         nta++;
         if (unlikely(nta == rx_q->desc_count)) {
-            desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
+            desc = &rx_q->single_buf[0];
             buf = rx_q->rx_buf.buf;
             nta = 0;
         }
@@ -1016,7 +1016,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
         struct idpf_rx_buf *rx_buf;
         /* get the Rx desc from Rx queue based on 'next_to_clean' */
-        rx_desc = IDPF_RX_DESC(rx_q, ntc);
+        rx_desc = &rx_q->rx[ntc];
         /* status_error_ptype_len will always be zero for unused
          * descriptors because it's cleared in cleanup, and overlaps
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -531,7 +531,7 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
     struct idpf_rx_buf *buf;
     dma_addr_t addr;
-    splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
+    splitq_rx_desc = &bufq->split_buf[nta];
     buf = &bufq->rx_buf.buf[buf_id];
     if (bufq->rx_hsplit_en) {
@@ -1584,7 +1584,7 @@ do { \
     if (unlikely(!(ntc))) {                     \
         ntc -= (txq)->desc_count;               \
         buf = (txq)->tx_buf;                    \
-        desc = IDPF_FLEX_TX_DESC(txq, 0);       \
+        desc = &(txq)->flex_tx[0];              \
     } else {                                    \
         (buf)++;                                \
         (desc)++;                               \
@@ -1617,8 +1617,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
     s16 ntc = tx_q->next_to_clean;
     struct idpf_tx_buf *tx_buf;
-    tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
-    next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+    tx_desc = &tx_q->flex_tx[ntc];
+    next_pending_desc = &tx_q->flex_tx[end];
     tx_buf = &tx_q->tx_buf[ntc];
     ntc -= tx_q->desc_count;
@@ -1814,7 +1814,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
     int i;
     complq_budget = vport->compln_clean_budget;
-    tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+    tx_desc = &complq->comp[ntc];
     ntc -= complq->desc_count;
     do {
@@ -1879,7 +1879,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
         ntc++;
         if (unlikely(!ntc)) {
             ntc -= complq->desc_count;
-            tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
+            tx_desc = &complq->comp[0];
             change_bit(__IDPF_Q_GEN_CHK, complq->flags);
         }
@@ -2143,7 +2143,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
         * used one additional descriptor for a context
         * descriptor. Reset that here.
         */
-        tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
+        tx_desc = &txq->flex_tx[idx];
        memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
        if (idx == 0)
            idx = txq->desc_count;
@@ -2202,7 +2202,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
     data_len = skb->data_len;
     size = skb_headlen(skb);
-    tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+    tx_desc = &tx_q->flex_tx[i];
     dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2275,7 +2275,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
             i++;
             if (i == tx_q->desc_count) {
-                tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+                tx_desc = &tx_q->flex_tx[0];
                 i = 0;
                 tx_q->compl_tag_cur_gen =
                     IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
@@ -2320,7 +2320,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
         i++;
         if (i == tx_q->desc_count) {
-            tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+            tx_desc = &tx_q->flex_tx[0];
             i = 0;
             tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
         }
@@ -2553,7 +2553,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
     txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
     /* grab the next descriptor */
-    desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+    desc = &txq->flex_ctx[i];
     txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
     return desc;
@@ -3128,7 +3128,6 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
     struct idpf_sw_queue *refillq = NULL;
     struct idpf_rxq_set *rxq_set = NULL;
     struct idpf_rx_buf *rx_buf = NULL;
-    union virtchnl2_rx_desc *desc;
     unsigned int pkt_len = 0;
     unsigned int hdr_len = 0;
     u16 gen_id, buf_id = 0;
@@ -3138,8 +3137,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
         u8 rxdid;
         /* get the Rx desc from Rx queue based on 'next_to_clean' */
-        desc = IDPF_RX_DESC(rxq, ntc);
-        rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
+        rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
         /* This memory barrier is needed to keep us from reading
          * any other fields out of the rx_desc
@@ -3320,11 +3318,11 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
     int cleaned = 0;
     u16 gen;
-    buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+    buf_desc = &bufq->split_buf[bufq_nta];
     /* make sure we stop at ring wrap in the unlikely case ring is full */
     while (likely(cleaned < refillq->desc_count)) {
-        u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+        u16 refill_desc = refillq->ring[ntc];
         bool failure;
         gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
@@ -3342,7 +3340,7 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
         }
         if (unlikely(++bufq_nta == bufq->desc_count)) {
-            buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+            buf_desc = &bufq->split_buf[0];
             bufq_nta = 0;
         } else {
             buf_desc++;
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -8,6 +8,7 @@
 #include <net/tcp.h>
 #include <net/netdev_queues.h>
+#include "idpf_lan_txrx.h"
 #include "virtchnl2_lan_desc.h"
 #define IDPF_LARGE_MAX_Q            256
@@ -117,24 +118,6 @@ do { \
 #define IDPF_RXD_EOF_SPLITQ     VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
 #define IDPF_RXD_EOF_SINGLEQ    VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
-#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i)    \
-    (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \
-    (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
-#define IDPF_BASE_TX_DESC(txq, i)   \
-    (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_BASE_TX_CTX_DESC(txq, i)   \
-    (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \
-    (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
-#define IDPF_FLEX_TX_DESC(txq, i) \
-    (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
-#define IDPF_FLEX_TX_CTX_DESC(txq, i)   \
-    (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
 #define IDPF_DESC_UNUSED(txq)   \
     ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
     (txq)->next_to_clean - (txq)->next_to_use - 1)
@@ -317,8 +300,6 @@ struct idpf_rx_extracted {
 #define IDPF_RX_DMA_ATTR \
     (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-#define IDPF_RX_DESC(rxq, i)    \
-    (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
 struct idpf_rx_buf {
     struct page *page;
@@ -655,7 +636,15 @@ union idpf_queue_stats {
  * @q_vector: Backreference to associated vector
  * @size: Length of descriptor ring in bytes
  * @dma: Physical address of ring
- * @desc_ring: Descriptor ring memory
+ * @rx: universal receive descriptor array
+ * @single_buf: Rx buffer descriptor array in singleq
+ * @split_buf: Rx buffer descriptor array in splitq
+ * @base_tx: basic Tx descriptor array
+ * @base_ctx: basic Tx context descriptor array
+ * @flex_tx: flex Tx descriptor array
+ * @flex_ctx: flex Tx context descriptor array
+ * @comp: completion descriptor array
+ * @desc_ring: virtual descriptor ring address
  * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
  * @tx_min_pkt_len: Min supported packet length
  * @num_completions: Only relevant for TX completion queue. It tracks the
@@ -733,7 +722,21 @@ struct idpf_queue {
     struct idpf_q_vector *q_vector;
     unsigned int size;
     dma_addr_t dma;
-    void *desc_ring;
+    union {
+        union virtchnl2_rx_desc *rx;
+        struct virtchnl2_singleq_rx_buf_desc *single_buf;
+        struct virtchnl2_splitq_rx_buf_desc *split_buf;
+        struct idpf_base_tx_desc *base_tx;
+        struct idpf_base_tx_ctx_desc *base_ctx;
+        union idpf_tx_flex_desc *flex_tx;
+        struct idpf_flex_tx_ctx_desc *flex_ctx;
+        struct idpf_splitq_tx_compl_desc *comp;
+        void *desc_ring;
+    };
     u16 tx_max_bufs;
     u8 tx_min_pkt_len;
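A note on why void *desc_ring stays inside the new union: generic allocation and free code can keep treating the ring as raw memory while the datapath indexes a typed member of the same union. The sketch below illustrates that split under simplified, assumed surroundings; idpf_queue_sketch, sketch_alloc and sketch_next_desc are hypothetical names, and only dma_alloc_coherent() and the union layout come from the kernel and the diff above.

/* Hypothetical sketch; the real struct idpf_queue has many more fields. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

struct idpf_base_tx_desc;       /* pointer-only use, no definition needed */

struct idpf_queue_sketch {
    union {
        struct idpf_base_tx_desc *base_tx;  /* typed datapath view */
        void *desc_ring;                    /* raw view for setup/teardown */
    };
    u16 desc_count;
};

/* Setup path: descriptor layout is irrelevant, so the raw view is used. */
static int sketch_alloc(struct idpf_queue_sketch *q, struct device *dev,
                        size_t size, dma_addr_t *dma)
{
    q->desc_ring = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

    return q->desc_ring ? 0 : -ENOMEM;
}

/* Datapath: the typed view replaces IDPF_BASE_TX_DESC() with no cast. */
static struct idpf_base_tx_desc *
sketch_next_desc(const struct idpf_queue_sketch *q, u16 i)
{
    return &q->base_tx[i % q->desc_count];  /* wrap at ring end */
}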