Commit 290f1c03 authored by Alexander Lobakin, committed by Jakub Kicinski

idpf: fix UAFs when destroying the queues

The second tagged commit started sometimes (very rarely, but still
possible) triggering WARNs from
net/core/page_pool.c:page_pool_disable_direct_recycling().
It turned out idpf frees the interrupt vectors, with their embedded
NAPIs, *before* freeing the queues, so the page_pools' NAPI pointers
point to freed memory before libeth destroys those pools.
It's not clear whether the freed vectors are accessed anywhere else
while the queues are being destroyed, but in any case queue/interrupt
vectors should be freed only after the queues are destroyed, when the
NAPIs are guaranteed not to be referenced anywhere.

Invert the allocation and freeing order so that the queue/interrupt
vectors are allocated first and freed last. Vectors don't require the
queues to be present, so this is safe. Additionally, this change allows
removing the now-useless queue->q_vector pointer cleanup: the vectors
are still valid when the queues are freed (and both are freed within
one function, so there was no reason to nullify the pointers in the
first place).

Fixes: 1c325aac ("idpf: configure resources for TX queues")
Fixes: 90912f9f ("idpf: convert header split mode to libeth + napi_build_skb()")
Reported-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Link: https://patch.msgid.link/20240806220923.3359860-4-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 3cc88e84
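
For illustration only, before the diff itself: a minimal userspace C sketch of the lifetime rule the patch enforces. None of this is driver code; fake_vector, fake_queue and the alloc/rel helpers are hypothetical stand-ins for the idpf vectors (with their embedded NAPIs) and the queues whose page_pools cache a pointer to those NAPIs. The point is only that the object others point into is allocated first and freed last.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for an idpf_q_vector with its embedded NAPI. */
struct fake_vector {
        int napi_valid;                 /* 1 while the NAPI may still be referenced */
};

/* Stand-in for an RX queue whose page_pool caches a NAPI pointer. */
struct fake_queue {
        struct fake_vector *vec;        /* models the page_pool's cached NAPI pointer */
};

static struct fake_vector *vectors_alloc(void)
{
        struct fake_vector *v = calloc(1, sizeof(*v));

        if (v)
                v->napi_valid = 1;
        return v;
}

static struct fake_queue *queues_alloc(struct fake_vector *v)
{
        struct fake_queue *q = calloc(1, sizeof(*q));

        if (q)
                q->vec = v;
        return q;
}

/* Models queue destruction: it dereferences the cached vector/NAPI
 * pointer, so the vector must still be alive at this point. */
static void queues_rel(struct fake_queue *q)
{
        printf("destroying queue, napi_valid=%d\n", q->vec->napi_valid);
        free(q);
}

static void vectors_rel(struct fake_vector *v)
{
        v->napi_valid = 0;
        free(v);
}

int main(void)
{
        /* open path after the patch: vectors first, then queues */
        struct fake_vector *v = vectors_alloc();
        struct fake_queue *q = queues_alloc(v);

        if (!v || !q)
                return 1;

        /* stop path after the patch: queues first, vectors last */
        queues_rel(q);
        vectors_rel(v);
        return 0;
}

Running this prints napi_valid=1 during queue teardown; swapping the two release calls in main() would make queues_rel() dereference freed memory, which is exactly the use-after-free ordering the patch removes.
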
@@ -900,8 +900,8 @@ static void idpf_vport_stop(struct idpf_vport *vport)
         vport->link_up = false;
 
         idpf_vport_intr_deinit(vport);
-        idpf_vport_intr_rel(vport);
         idpf_vport_queues_rel(vport);
+        idpf_vport_intr_rel(vport);
 
         np->state = __IDPF_VPORT_DOWN;
 }

@@ -1349,43 +1349,43 @@ static int idpf_vport_open(struct idpf_vport *vport)
         /* we do not allow interface up just yet */
         netif_carrier_off(vport->netdev);
 
-        err = idpf_vport_queues_alloc(vport);
-        if (err)
-                return err;
-
         err = idpf_vport_intr_alloc(vport);
         if (err) {
                 dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
                         vport->vport_id, err);
-                goto queues_rel;
+                return err;
         }
 
+        err = idpf_vport_queues_alloc(vport);
+        if (err)
+                goto intr_rel;
+
         err = idpf_vport_queue_ids_init(vport);
         if (err) {
                 dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
                         vport->vport_id, err);
-                goto intr_rel;
+                goto queues_rel;
         }
 
         err = idpf_vport_intr_init(vport);
         if (err) {
                 dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
                         vport->vport_id, err);
-                goto intr_rel;
+                goto queues_rel;
         }
 
         err = idpf_rx_bufs_init_all(vport);
         if (err) {
                 dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
                         vport->vport_id, err);
-                goto intr_rel;
+                goto queues_rel;
         }
 
         err = idpf_queue_reg_init(vport);
         if (err) {
                 dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
                         vport->vport_id, err);
-                goto intr_rel;
+                goto queues_rel;
         }
 
         idpf_rx_init_buf_tail(vport);

@@ -1452,10 +1452,10 @@ static int idpf_vport_open(struct idpf_vport *vport)
         idpf_send_map_unmap_queue_vector_msg(vport, false);
 intr_deinit:
         idpf_vport_intr_deinit(vport);
-intr_rel:
-        idpf_vport_intr_rel(vport);
 queues_rel:
         idpf_vport_queues_rel(vport);
+intr_rel:
+        idpf_vport_intr_rel(vport);
 
         return err;
 }

@@ -3576,9 +3576,7 @@ static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
  */
 void idpf_vport_intr_rel(struct idpf_vport *vport)
 {
-        int i, j, v_idx;
-
-        for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+        for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
                 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
 
                 kfree(q_vector->complq);

@@ -3593,26 +3591,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
                 free_cpumask_var(q_vector->affinity_mask);
         }
 
-        /* Clean up the mapping of queues to vectors */
-        for (i = 0; i < vport->num_rxq_grp; i++) {
-                struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
-
-                if (idpf_is_queue_model_split(vport->rxq_model))
-                        for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++)
-                                rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL;
-                else
-                        for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
-                                rx_qgrp->singleq.rxqs[j]->q_vector = NULL;
-        }
-
-        if (idpf_is_queue_model_split(vport->txq_model))
-                for (i = 0; i < vport->num_txq_grp; i++)
-                        vport->txq_grps[i].complq->q_vector = NULL;
-        else
-                for (i = 0; i < vport->num_txq_grp; i++)
-                        for (j = 0; j < vport->txq_grps[i].num_txq; j++)
-                                vport->txq_grps[i].txqs[j]->q_vector = NULL;
-
         kfree(vport->q_vectors);
         vport->q_vectors = NULL;
 }