Commit a292ba98 authored by Maciej Fijalkowski, committed by Tony Nguyen

ice: make ice_vsi_cfg_txq() static

Currently, the XSK control path in the ice driver calls ice_vsi_cfg_txq()
directly, whereas ice_vsi_cfg_single_txq() exists for exactly that purpose.
Use the latter from the XSK side and make ice_vsi_cfg_txq() static.

ice_vsi_cfg_txq() resides in ice_base.c and is rather big, so to reduce
code churn let us move its callers from ice_lib.c to ice_base.c.

This change puts ice_qp_ena() on a nice diet, thanks to the checks and
operations that ice_vsi_cfg_single_{r,t}xq() perform internally.
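
For illustration, the net effect at the XSK call site (distilled from the
ice_xsk.c hunk below; all names are taken from the driver) is:

	/* before: open-coded queue-group buffer handling per queue */
	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);

	qg_buf->num_txqs = 1;
	err = ice_vsi_cfg_txq(vsi, vsi->tx_rings[q_idx], qg_buf);

	/* after: the helper bounds-checks q_idx and owns the qg_buf setup */
	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);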

add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-182 (-182)
Function                                     old     new   delta
ice_xsk_pool_setup                          2165    1983    -182
Total: Before=472597, After=472415, chg -0.04%
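
The size summary above follows the output format of the kernel's
scripts/bloat-o-meter, which compares symbol sizes between two builds of
the same object, e.g. ./scripts/bloat-o-meter old/ice.ko new/ice.ko
(illustrative paths).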
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 3e5fb691
drivers/net/ethernet/intel/ice/ice_base.c

@@ -884,7 +884,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
  * @ring: Tx ring to be configured
  * @qg_buf: queue group buffer
  */
-int
+static int
 ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 		struct ice_aqc_add_tx_qgrp *qg_buf)
 {
@@ -955,6 +955,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 	return 0;
 }
 
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+			   u16 q_idx)
+{
+	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+
+	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+		return -EINVAL;
+
+	qg_buf->num_txqs = 1;
+
+	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
+{
+	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+	int err = 0;
+	u16 q_idx;
+
+	qg_buf->num_txqs = 1;
+
+	for (q_idx = 0; q_idx < count; q_idx++) {
+		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+}
+
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+	int ret;
+	int i;
+
+	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+	if (ret)
+		return ret;
+
+	ice_for_each_rxq(vsi, i)
+		ice_tx_xsk_pool(vsi, i);
+
+	return 0;
+}
+
 /**
  * ice_cfg_itr - configure the initial interrupt throttle values
  * @hw: pointer to the HW structure
...
drivers/net/ethernet/intel/ice/ice_base.h

@@ -15,9 +15,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
 void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
-		struct ice_aqc_add_tx_qgrp *qg_buf);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+			   u16 q_idx);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
 void
 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
...
drivers/net/ethernet/intel/ice/ice_lib.c

@@ -1774,79 +1774,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
 	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
 }
 
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
-			   u16 q_idx)
-{
-	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-
-	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
-		return -EINVAL;
-
-	qg_buf->num_txqs = 1;
-
-	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- * @rings: Tx ring array to be configured
- * @count: number of Tx ring array elements
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
-{
-	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-	int err = 0;
-	u16 q_idx;
-
-	qg_buf->num_txqs = 1;
-
-	for (q_idx = 0; q_idx < count; q_idx++) {
-		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-/**
- * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
-{
-	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
-}
-
-/**
- * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx queues dedicated for XDP in given VSI for operation.
- */
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
-{
-	int ret;
-	int i;
-
-	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
-	if (ret)
-		return ret;
-
-	ice_for_each_rxq(vsi, i)
-		ice_tx_xsk_pool(vsi, i);
-
-	return 0;
-}
-
 /**
  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
  * @intrl: interrupt rate limit in usecs
...
drivers/net/ethernet/intel/ice/ice_lib.h

@@ -54,10 +54,6 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
 
 void ice_update_eth_stats(struct ice_vsi *vsi);
 
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
-			   u16 q_idx);
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
-
 void ice_vsi_cfg_msix(struct ice_vsi *vsi);
 
 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
@@ -68,8 +64,6 @@ int
 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 			  u16 rel_vmvf_num);
 
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
-
 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
 
 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
...
drivers/net/ethernet/intel/ice/ice_xsk.c

@@ -217,32 +217,17 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
  */
 static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 {
-	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-	u16 size = __struct_size(qg_buf);
 	struct ice_q_vector *q_vector;
-	struct ice_tx_ring *tx_ring;
-	struct ice_rx_ring *rx_ring;
 	int err;
 
-	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
-		return -EINVAL;
-
-	qg_buf->num_txqs = 1;
-
-	tx_ring = vsi->tx_rings[q_idx];
-	rx_ring = vsi->rx_rings[q_idx];
-	q_vector = rx_ring->q_vector;
-
-	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
 	if (err)
 		return err;
 
 	if (ice_is_xdp_ena_vsi(vsi)) {
 		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
 
-		memset(qg_buf, 0, size);
-		qg_buf->num_txqs = 1;
-
-		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+		err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
 		if (err)
 			return err;
 
 		ice_set_ring_xdp(xdp_ring);
@@ -253,6 +238,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 	if (err)
 		return err;
 
+	q_vector = vsi->rx_rings[q_idx]->q_vector;
 	ice_qvec_cfg_msix(vsi, q_vector);
 
 	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
...
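
Taken together, callers outside ice_base.c now configure Tx queues only
through the three wrappers. A minimal sketch of how they compose in a VSI
bring-up path (hypothetical caller shown for illustration; the error
handling mirrors the driver's usual pattern):

	int err;

	/* configure all LAN Tx queues of the VSI */
	err = ice_vsi_cfg_lan_txqs(vsi);
	if (err)
		return err;

	/* configure the XDP Tx queues only when XDP is enabled */
	if (ice_is_xdp_ena_vsi(vsi)) {
		err = ice_vsi_cfg_xdp_txqs(vsi);
		if (err)
			return err;
	}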