Commit 4123c3fb authored by Niklas Söderlund, committed by David S. Miller

ravb: Group descriptor types used in Rx ring

The Rx ring can either be made up of normal or extended descriptors, not
a mix of the two at the same time. Make this explicit by grouping the
two variables in a rx_ring union.

Extending the storage for normal descriptors from a single queue to
NUM_RX_QUEUE queues has no practical effect, but it aids readability: the
code that uses it already piggybacks on other members of struct
ravb_private that are arrays of max length NUM_RX_QUEUE, e.g. rx_desc_dma.
This will also make further refactoring easier.

While at it, rename the normal descriptor Rx ring to make it clear it's
not strictly related to the GbEthernet E-MAC IP found in RZ/G2L; normal
descriptors could be used on R-Car SoCs too.
Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Reviewed-by: Paul Barker <paul.barker.ct@bp.renesas.com>
Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dbb0b6ca
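For orientation, here is a minimal, self-contained sketch of the data-structure change described above. It is illustrative only: the descriptor layouts are simplified stand-ins and the struct name ravb_private_sketch is hypothetical; only the rx_ring union mirrors the patch. The diff below applies exactly this grouping to struct ravb_private and converts all users.

/* Illustrative sketch, not driver source: simplified descriptor layouts and
 * a hypothetical container struct; only the rx_ring union mirrors the patch. */
#include <stdint.h>
#include <stdio.h>

#define NUM_RX_QUEUE 2

struct ravb_rx_desc    { uint16_t ds_cc; uint8_t die_dt; uint32_t dptr; };              /* "normal" */
struct ravb_ex_rx_desc { uint16_t ds_cc; uint8_t die_dt; uint32_t dptr; uint64_t ts; }; /* "extended" */

struct ravb_private_sketch {
        /* Before the patch: two unrelated members, only one ever in use:
         *   struct ravb_rx_desc *gbeth_rx_ring;
         *   struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
         * After: one per-queue slot holding either view of the ring. */
        union {
                struct ravb_rx_desc *desc;
                struct ravb_ex_rx_desc *ex_desc;
        } rx_ring[NUM_RX_QUEUE];
};

int main(void)
{
        static struct ravb_rx_desc ring[4];
        struct ravb_private_sketch priv = { 0 };

        /* A queue is driven through exactly one union member at a time;
         * here queue 0 uses the normal-descriptor view. */
        priv.rx_ring[0].desc = ring;
        priv.rx_ring[0].desc[0].ds_cc = 2048; /* arbitrary demo value */

        printf("queue 0, desc 0: ds_cc=%u\n", (unsigned)priv.rx_ring[0].desc[0].ds_cc);
        return 0;
}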
@@ -1092,8 +1092,10 @@ struct ravb_private {
         struct ravb_desc *desc_bat;
         dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
         dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
-        struct ravb_rx_desc *gbeth_rx_ring;
-        struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+        union {
+                struct ravb_rx_desc *desc;
+                struct ravb_ex_rx_desc *ex_desc;
+        } rx_ring[NUM_RX_QUEUE];
         struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
         void *tx_align[NUM_TX_QUEUE];
         struct sk_buff *rx_1st_skb;
@@ -241,11 +241,11 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
         unsigned int ring_size;
         unsigned int i;
 
-        if (!priv->gbeth_rx_ring)
+        if (!priv->rx_ring[q].desc)
                 return;
 
         for (i = 0; i < priv->num_rx_ring[q]; i++) {
-                struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+                struct ravb_rx_desc *desc = &priv->rx_ring[q].desc[i];
 
                 if (!dma_mapping_error(ndev->dev.parent,
                                        le32_to_cpu(desc->dptr)))
@@ -255,9 +255,9 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
                                  DMA_FROM_DEVICE);
         }
         ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
-        dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+        dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].desc,
                           priv->rx_desc_dma[q]);
-        priv->gbeth_rx_ring = NULL;
+        priv->rx_ring[q].desc = NULL;
 }
 
 static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
@@ -266,11 +266,11 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
         unsigned int ring_size;
         unsigned int i;
 
-        if (!priv->rx_ring[q])
+        if (!priv->rx_ring[q].ex_desc)
                 return;
 
         for (i = 0; i < priv->num_rx_ring[q]; i++) {
-                struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+                struct ravb_ex_rx_desc *desc = &priv->rx_ring[q].ex_desc[i];
 
                 if (!dma_mapping_error(ndev->dev.parent,
                                        le32_to_cpu(desc->dptr)))
@@ -281,9 +281,9 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
         }
         ring_size = sizeof(struct ravb_ex_rx_desc) *
                     (priv->num_rx_ring[q] + 1);
-        dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+        dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].ex_desc,
                           priv->rx_desc_dma[q]);
-        priv->rx_ring[q] = NULL;
+        priv->rx_ring[q].ex_desc = NULL;
 }
 
 /* Free skb's and DMA buffers for Ethernet AVB */
@@ -335,11 +335,11 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
         unsigned int i;
 
         rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-        memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+        memset(priv->rx_ring[q].desc, 0, rx_ring_size);
         /* Build RX ring buffer */
         for (i = 0; i < priv->num_rx_ring[q]; i++) {
                 /* RX descriptor */
-                rx_desc = &priv->gbeth_rx_ring[i];
+                rx_desc = &priv->rx_ring[q].desc[i];
                 rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
                 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
                                           GBETH_RX_BUFF_MAX,
@@ -352,7 +352,7 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
                 rx_desc->dptr = cpu_to_le32(dma_addr);
                 rx_desc->die_dt = DT_FEMPTY;
         }
-        rx_desc = &priv->gbeth_rx_ring[i];
+        rx_desc = &priv->rx_ring[q].desc[i];
         rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
         rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -365,11 +365,11 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
         dma_addr_t dma_addr;
         unsigned int i;
 
-        memset(priv->rx_ring[q], 0, rx_ring_size);
+        memset(priv->rx_ring[q].ex_desc, 0, rx_ring_size);
         /* Build RX ring buffer */
         for (i = 0; i < priv->num_rx_ring[q]; i++) {
                 /* RX descriptor */
-                rx_desc = &priv->rx_ring[q][i];
+                rx_desc = &priv->rx_ring[q].ex_desc[i];
                 rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
                 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
                                           RX_BUF_SZ,
@@ -382,7 +382,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
                 rx_desc->dptr = cpu_to_le32(dma_addr);
                 rx_desc->die_dt = DT_FEMPTY;
         }
-        rx_desc = &priv->rx_ring[q][i];
+        rx_desc = &priv->rx_ring[q].ex_desc[i];
         rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
         rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -437,10 +437,10 @@ static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
 
         ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-        priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
-                                                 &priv->rx_desc_dma[q],
-                                                 GFP_KERNEL);
-        return priv->gbeth_rx_ring;
+        priv->rx_ring[q].desc = dma_alloc_coherent(ndev->dev.parent, ring_size,
+                                                   &priv->rx_desc_dma[q],
+                                                   GFP_KERNEL);
+        return priv->rx_ring[q].desc;
 }
 
 static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
@@ -450,10 +450,11 @@ static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
 
         ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-        priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
-                                              &priv->rx_desc_dma[q],
-                                              GFP_KERNEL);
-        return priv->rx_ring[q];
+        priv->rx_ring[q].ex_desc = dma_alloc_coherent(ndev->dev.parent,
+                                                      ring_size,
+                                                      &priv->rx_desc_dma[q],
+                                                      GFP_KERNEL);
+        return priv->rx_ring[q].ex_desc;
 }
 
 /* Init skb and descriptor buffer for Ethernet AVB */
@@ -830,7 +831,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
         limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
         stats = &priv->stats[q];
 
-        desc = &priv->gbeth_rx_ring[entry];
+        desc = &priv->rx_ring[q].desc[entry];
         for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
                 /* Descriptor type must be checked before all other reads */
                 dma_rmb();
@@ -901,13 +902,13 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
                 }
 
                 entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-                desc = &priv->gbeth_rx_ring[entry];
+                desc = &priv->rx_ring[q].desc[entry];
         }
 
         /* Refill the RX ring buffers. */
         for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                 entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-                desc = &priv->gbeth_rx_ring[entry];
+                desc = &priv->rx_ring[q].desc[entry];
                 desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 
                 if (!priv->rx_skb[q][entry]) {
@@ -957,7 +958,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
         boguscnt = min(boguscnt, *quota);
         limit = boguscnt;
 
-        desc = &priv->rx_ring[q][entry];
+        desc = &priv->rx_ring[q].ex_desc[entry];
         while (desc->die_dt != DT_FEMPTY) {
                 /* Descriptor type must be checked before all other reads */
                 dma_rmb();
@@ -1017,13 +1018,13 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
                 }
 
                 entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-                desc = &priv->rx_ring[q][entry];
+                desc = &priv->rx_ring[q].ex_desc[entry];
         }
 
         /* Refill the RX ring buffers. */
         for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                 entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-                desc = &priv->rx_ring[q][entry];
+                desc = &priv->rx_ring[q].ex_desc[entry];
                 desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
                 if (!priv->rx_skb[q][entry]) {
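As a closing illustration, a hedged sketch (names invented for this example, not driver code) of why grouping the two descriptor types in one union keeps ring handling uniform: each hardware variant sizes, allocates, and walks its ring through exactly one union member, mirroring the per-variant ring_size computations in the hunks above.

/* Hypothetical sketch: per-variant ring sizing through one descriptor view. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rx_desc    { unsigned short ds_cc; unsigned char die_dt; };
struct ex_rx_desc { unsigned short ds_cc; unsigned char die_dt; unsigned long long ts; };

/* Pick the per-descriptor size for the view a given variant uses. */
static size_t rx_desc_size(bool extended)
{
        return extended ? sizeof(struct ex_rx_desc) : sizeof(struct rx_desc);
}

int main(void)
{
        unsigned int num_rx_ring = 1024; /* arbitrary ring length for the demo */

        /* One extra descriptor terminates the ring, as in the driver's
         * "num_rx_ring[q] + 1" sizing above. */
        printf("normal ring:   %zu bytes\n", rx_desc_size(false) * (num_rx_ring + 1));
        printf("extended ring: %zu bytes\n", rx_desc_size(true) * (num_rx_ring + 1));
        return 0;
}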