Commit 3e5fb691 authored by Maciej Fijalkowski, committed by Tony Nguyen

ice: make ice_vsi_cfg_rxq() static

Currently, the XSK control path in the ice driver calls
ice_vsi_cfg_rxq() directly, even though ice_vsi_cfg_single_rxq() exists
for exactly that purpose. Use the latter on the XSK side and make
ice_vsi_cfg_rxq() static.

ice_vsi_cfg_rxq() resides in ice_base.c and is rather big, so to reduce
code churn, move its two callers from ice_lib.c to ice_base.c.
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent d81c0792
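For illustration only, below is a small stand-alone sketch of the pattern this patch applies: the per-ring helper becomes file-local (static) and other code reaches it only through a bounds-checked wrapper that takes the VSI plus a queue index. The names cfg_rxq/cfg_single_rxq and the toy structures are hypothetical stand-ins, not the driver's code; the actual call-site change is in the ice_qp_ena() hunk further down.

/* Stand-alone model (hypothetical, not driver code) of the wrapper pattern. */
#include <stdio.h>

#define NUM_RXQ 4

struct rx_ring {
	int id;
};

struct vsi {
	unsigned int num_rxq;
	struct rx_ring rx_rings[NUM_RXQ];
};

/* analogous to ice_vsi_cfg_rxq(): file-local, operates on one ring */
static int cfg_rxq(struct rx_ring *ring)
{
	printf("configuring ring %d\n", ring->id);
	return 0;
}

/* analogous to ice_vsi_cfg_single_rxq(): the exported entry point,
 * validates the queue index before delegating to the static helper
 */
int cfg_single_rxq(struct vsi *vsi, unsigned int q_idx)
{
	if (q_idx >= vsi->num_rxq)
		return -1;	/* the driver returns -EINVAL here */

	return cfg_rxq(&vsi->rx_rings[q_idx]);
}

int main(void)
{
	struct vsi vsi = {
		.num_rxq = NUM_RXQ,
		.rx_rings = { { 0 }, { 1 }, { 2 }, { 3 } },
	};

	cfg_single_rxq(&vsi, 2);	/* in range: ring gets configured */
	if (cfg_single_rxq(&vsi, 7))	/* out of range: rejected by the wrapper */
		printf("index 7 rejected\n");

	return 0;
}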
@@ -538,7 +538,7 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
  *
  * Return 0 on success and a negative value on error.
  */
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 {
 	struct device *dev = ice_pf_to_dev(ring->vsi->back);
 	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
@@ -633,6 +633,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 	return 0;
 }
 
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+	if (q_idx >= vsi->num_rxq)
+		return -EINVAL;
+
+	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+		vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+	} else {
+		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+		vsi->rx_buf_len = ICE_RXBUF_3072;
+	}
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+	u16 i;
+
+	if (vsi->type == ICE_VSI_VF)
+		goto setup_rings;
+
+	ice_vsi_cfg_frame_size(vsi);
+setup_rings:
+	/* set up individual rings */
+	ice_for_each_rxq(vsi, i) {
+		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 /**
  * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
  * @qs_cfg: gathered variables needed for pf->vsi queues assignment
...
@@ -6,7 +6,8 @@
 
 #include "ice.h"
 
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
 int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
 int
 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
...
@@ -1671,27 +1671,6 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
 	}
 }
 
-/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
-	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
-		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
-		vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
-	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
-		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
-		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
-		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
-	} else {
-		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-		vsi->rx_buf_len = ICE_RXBUF_3072;
-	}
-}
-
 /**
  * ice_pf_state_is_nominal - checks the PF for nominal state
  * @pf: pointer to PF to check
@@ -1795,14 +1774,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
 	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
 }
 
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
-{
-	if (q_idx >= vsi->num_rxq)
-		return -EINVAL;
-
-	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
-}
-
 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
 {
 	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
@@ -1815,33 +1786,6 @@ int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u
 	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
 }
 
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
-	u16 i;
-
-	if (vsi->type == ICE_VSI_VF)
-		goto setup_rings;
-
-	ice_vsi_cfg_frame_size(vsi);
-setup_rings:
-	/* set up individual rings */
-	ice_for_each_rxq(vsi, i) {
-		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
-
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
 /**
  * ice_vsi_cfg_txqs - Configure the VSI for Tx
  * @vsi: the VSI being configured
...
@@ -54,12 +54,8 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
 
 void ice_update_eth_stats(struct ice_vsi *vsi);
 
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-
 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
 
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-
 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
 
 void ice_vsi_cfg_msix(struct ice_vsi *vsi);
...
@@ -249,7 +249,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 		ice_tx_xsk_pool(vsi, q_idx);
 	}
 
-	err = ice_vsi_cfg_rxq(rx_ring);
+	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
 	if (err)
 		return err;
...