Commit 770f52d5 authored by Shailend Chand, committed by David S. Miller

gve: Reset Rx ring state in the ring-stop funcs

This does not fix any existing bug. In anticipation of the ndo queue API
hooks that alloc/free/start/stop a single Rx queue, the existing
per-queue stop functions are made more robust, specifically for
this use case: rx_queue_n.stop() + rx_queue_n.start()

Note that this is not the sequence used by devmem tcp (the first
consumer of these new ndo hooks); there the sequence is:
new_queue.alloc() + old_queue.stop() + new_queue.start() + old_queue.free()
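
For illustration only, a minimal sketch of the rx_queue_n.stop() +
rx_queue_n.start() sequence using the per-queue GQI helpers touched by
this patch. The wrapper function is hypothetical (not part of this
patch), and device reprogramming through the admin queue is omitted:

static void gve_rx_restart_ring_gqi(struct gve_priv *priv, int idx)
{
	/* rx_queue_n.stop(): detach NAPI and the queue's block and, with
	 * this patch, also reset the descriptor ring, counters and skb
	 * context via gve_rx_reset_ring_gqi().
	 */
	gve_rx_stop_ring_gqi(priv, idx);

	/* rx_queue_n.start(): re-attach the queue, which now begins from
	 * a freshly initialized ring state.
	 */
	gve_rx_start_ring_gqi(priv, idx);
}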
Tested-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Shailend Chand <shailend@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9a5e0776
drivers/net/ethernet/google/gve/gve_rx.c
@@ -53,6 +53,41 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
 	rx->data.page_info = NULL;
 }
 
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+	ctx->skb_head = NULL;
+	ctx->skb_tail = NULL;
+	ctx->total_size = 0;
+	ctx->frag_cnt = 0;
+	ctx->drop_pkt = false;
+}
+
+static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
+{
+	rx->desc.seqno = 1;
+	rx->cnt = 0;
+	gve_rx_ctx_clear(&rx->ctx);
+}
+
+static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
+{
+	struct gve_rx_ring *rx = &priv->rx[idx];
+	const u32 slots = priv->rx_desc_cnt;
+	size_t size;
+
+	/* Reset desc ring */
+	if (rx->desc.desc_ring) {
+		size = slots * sizeof(rx->desc.desc_ring[0]);
+		memset(rx->desc.desc_ring, 0, size);
+	}
+
+	/* Reset q_resources */
+	if (rx->q_resources)
+		memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+	gve_rx_init_ring_state_gqi(rx);
+}
+
 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -62,6 +97,7 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
 
 	gve_remove_napi(priv, ntfy_idx);
 	gve_rx_remove_from_block(priv, idx);
+	gve_rx_reset_ring_gqi(priv, idx);
 }
 
 static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
@@ -222,15 +258,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
 	return err;
 }
 
-static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
-{
-	ctx->skb_head = NULL;
-	ctx->skb_tail = NULL;
-	ctx->total_size = 0;
-	ctx->frag_cnt = 0;
-	ctx->drop_pkt = false;
-}
-
 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -309,9 +336,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 		err = -ENOMEM;
 		goto abort_with_q_resources;
 	}
-	rx->cnt = 0;
 	rx->db_threshold = slots / 2;
-	rx->desc.seqno = 1;
+	gve_rx_init_ring_state_gqi(rx);
 
 	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_rx_ctx_clear(&rx->ctx);
drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -211,6 +211,82 @@ static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
 	}
 }
 
+static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx,
+				       const u32 buffer_queue_slots,
+				       const u32 completion_queue_slots)
+{
+	int i;
+
+	/* Set buffer queue state */
+	rx->dqo.bufq.mask = buffer_queue_slots - 1;
+	rx->dqo.bufq.head = 0;
+	rx->dqo.bufq.tail = 0;
+
+	/* Set completion queue state */
+	rx->dqo.complq.num_free_slots = completion_queue_slots;
+	rx->dqo.complq.mask = completion_queue_slots - 1;
+	rx->dqo.complq.cur_gen_bit = 0;
+	rx->dqo.complq.head = 0;
+
+	/* Set RX SKB context */
+	rx->ctx.skb_head = NULL;
+	rx->ctx.skb_tail = NULL;
+
+	/* Set up linked list of buffer IDs */
+	if (rx->dqo.buf_states) {
+		for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+			rx->dqo.buf_states[i].next = i + 1;
+		rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+	}
+
+	rx->dqo.free_buf_states = 0;
+	rx->dqo.recycled_buf_states.head = -1;
+	rx->dqo.recycled_buf_states.tail = -1;
+	rx->dqo.used_buf_states.head = -1;
+	rx->dqo.used_buf_states.tail = -1;
+}
+
+static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
+{
+	struct gve_rx_ring *rx = &priv->rx[idx];
+	size_t size;
+	int i;
+
+	const u32 buffer_queue_slots = priv->rx_desc_cnt;
+	const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+	/* Reset buffer queue */
+	if (rx->dqo.bufq.desc_ring) {
+		size = sizeof(rx->dqo.bufq.desc_ring[0]) *
+			buffer_queue_slots;
+		memset(rx->dqo.bufq.desc_ring, 0, size);
+	}
+
+	/* Reset completion queue */
+	if (rx->dqo.complq.desc_ring) {
+		size = sizeof(rx->dqo.complq.desc_ring[0]) *
+			completion_queue_slots;
+		memset(rx->dqo.complq.desc_ring, 0, size);
+	}
+
+	/* Reset q_resources */
+	if (rx->q_resources)
+		memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+	/* Reset buf states */
+	if (rx->dqo.buf_states) {
+		for (i = 0; i < rx->dqo.num_buf_states; i++) {
+			struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+			if (bs->page_info.page)
+				gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+		}
+	}
+
+	gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+				   completion_queue_slots);
+}
+
 void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -220,6 +296,7 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
 
 	gve_remove_napi(priv, ntfy_idx);
 	gve_rx_remove_from_block(priv, idx);
+	gve_rx_reset_ring_dqo(priv, idx);
 }
 
 static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
@@ -273,10 +350,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
 	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
 }
 
-static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx,
+				 const u32 buf_count)
 {
 	struct device *hdev = &priv->pdev->dev;
-	int buf_count = rx->dqo.bufq.mask + 1;
 
 	rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
 						   &rx->dqo.hdr_bufs.addr, GFP_KERNEL);
@@ -301,7 +378,6 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 {
 	struct device *hdev = &priv->pdev->dev;
 	size_t size;
-	int i;
 
 	const u32 buffer_queue_slots = cfg->ring_size;
 	const u32 completion_queue_slots = cfg->ring_size;
@@ -311,11 +387,6 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 	memset(rx, 0, sizeof(*rx));
 	rx->gve = priv;
 	rx->q_num = idx;
-	rx->dqo.bufq.mask = buffer_queue_slots - 1;
-	rx->dqo.complq.num_free_slots = completion_queue_slots;
-	rx->dqo.complq.mask = completion_queue_slots - 1;
-	rx->ctx.skb_head = NULL;
-	rx->ctx.skb_tail = NULL;
 
 	rx->dqo.num_buf_states = cfg->raw_addressing ?
 		min_t(s16, S16_MAX, buffer_queue_slots * 4) :
@@ -328,19 +399,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 
 	/* Allocate header buffers for header-split */
 	if (cfg->enable_header_split)
-		if (gve_rx_alloc_hdr_bufs(priv, rx))
+		if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots))
 			goto err;
 
-	/* Set up linked list of buffer IDs */
-	for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
-		rx->dqo.buf_states[i].next = i + 1;
-	rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
-	rx->dqo.recycled_buf_states.head = -1;
-	rx->dqo.recycled_buf_states.tail = -1;
-	rx->dqo.used_buf_states.head = -1;
-	rx->dqo.used_buf_states.tail = -1;
-
 	/* Allocate RX completion queue */
 	size = sizeof(rx->dqo.complq.desc_ring[0]) *
 		completion_queue_slots;
@@ -368,6 +429,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 	if (!rx->q_resources)
 		goto err;
 
+	gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+				   completion_queue_slots);
+
 	return 0;
 
 err: