Commit 109eb291 authored by Lukasz Czapnik, committed by Tony Nguyen

ice: Add tx_scheduling_layers devlink param

It was observed that Tx performance was inconsistent across queues and/or
VSIs, and that this was directly connected to the existing 9-layer topology
of the Tx scheduler.

Introduce a new private devlink param - tx_scheduling_layers. This parameter
gives the user the flexibility to select the 5-layer transmit scheduler
topology, which helps to smooth out transmit performance.

Allowed parameter values are 5 and 9.

Example usage:

Show:
devlink dev param show pci/0000:4b:00.0 name tx_scheduling_layers
pci/0000:4b:00.0:
  name tx_scheduling_layers type driver-specific
    values:
      cmode permanent value 9

Set:
devlink dev param set pci/0000:4b:00.0 name tx_scheduling_layers value 5 cmode permanent

devlink dev param set pci/0000:4b:00.0 name tx_scheduling_layers value 9 cmode permanent
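
The parameter is stored in the permanent cmode, so a new value takes effect
only after the NVM is re-read during init, which requires a PCI slot
powercycle. A minimal sketch of the full flow, assuming the adapter sits in
hot-pluggable slot 4 (the slot number is hypothetical; list
/sys/bus/pci/slots/ to find the right one):

devlink dev param set pci/0000:4b:00.0 name tx_scheduling_layers value 5 cmode permanent
echo 0 > /sys/bus/pci/slots/4/power
echo 1 > /sys/bus/pci/slots/4/power
devlink dev param show pci/0000:4b:00.0 name tx_scheduling_layers
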
Signed-off-by: Lukasz Czapnik <lukasz.czapnik@intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Co-developed-by: Mateusz Polchlopek <mateusz.polchlopek@intel.com>
Signed-off-by: Mateusz Polchlopek <mateusz.polchlopek@intel.com>
Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent cc5776fe
@@ -523,6 +523,156 @@ ice_devlink_reload_empr_finish(struct ice_pf *pf,
 	return 0;
 }
 
+/**
+ * ice_get_tx_topo_user_sel - Read user's choice from flash
+ * @pf: pointer to pf structure
+ * @layers: value read from flash will be saved here
+ *
+ * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV.
+ *
+ * Return: zero when read was successful, negative values otherwise.
+ */
+static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers)
+{
+	struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+	struct ice_hw *hw = &pf->hw;
+	int err;
+
+	err = ice_acquire_nvm(hw, ICE_RES_READ);
+	if (err)
+		return err;
+
+	err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+			      sizeof(usr_sel), &usr_sel, true, true, NULL);
+	if (err)
+		goto exit_release_res;
+
+	if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL)
+		*layers = ICE_SCHED_5_LAYERS;
+	else
+		*layers = ICE_SCHED_9_LAYERS;
+
+exit_release_res:
+	ice_release_nvm(hw);
+
+	return err;
+}
+
+/**
+ * ice_update_tx_topo_user_sel - Save user's preference in flash
+ * @pf: pointer to pf structure
+ * @layers: value to be saved in flash
+ *
+ * Variable "layers" defines the user's preference for the number of layers
+ * in the Tx Scheduler Topology Tree. This choice is stored in the PFA TLV
+ * field and picked up by the driver on the next init.
+ *
+ * Return: zero when save was successful, negative values otherwise.
+ */
+static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
+{
+	struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+	struct ice_hw *hw = &pf->hw;
+	int err;
+
+	err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+	if (err)
+		return err;
+
+	err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+			      sizeof(usr_sel), &usr_sel, true, true, NULL);
+	if (err)
+		goto exit_release_res;
+
+	if (layers == ICE_SCHED_5_LAYERS)
+		usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL;
+	else
+		usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL;
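+
+	/* write at offset 2 to skip the __le16 length word and update only
+	 * the TLV data byte
+	 */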
+	err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2,
+				      sizeof(usr_sel.data), &usr_sel.data,
+				      true, NULL, NULL);
+
+exit_release_res:
+	ice_release_nvm(hw);
+
+	return err;
+}
+
+/**
+ * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to get
+ * @ctx: context to store the parameter value
+ *
+ * Return: zero on success and negative value on failure.
+ */
+static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
+					   struct devlink_param_gset_ctx *ctx)
+{
+	struct ice_pf *pf = devlink_priv(devlink);
+	int err;
+
+	err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/**
+ * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to set
+ * @ctx: context to get the parameter value
+ * @extack: netlink extended ACK structure
+ *
+ * Return: zero on success and negative value on failure.
+ */
+static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
+					   struct devlink_param_gset_ctx *ctx,
+					   struct netlink_ext_ack *extack)
+{
+	struct ice_pf *pf = devlink_priv(devlink);
+	int err;
+
+	err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
+	if (err)
+		return err;
+
+	NL_SET_ERR_MSG_MOD(extack,
+			   "Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect.");
+
+	return 0;
+}
+
+/**
+ * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
+ *                                        parameter value
+ * @devlink: unused pointer to devlink instance
+ * @id: the parameter ID to validate
+ * @val: value to validate
+ * @extack: netlink extended ACK structure
+ *
+ * Supported values are:
+ * - 5 - five layers Tx Scheduler Topology Tree
+ * - 9 - nine layers Tx Scheduler Topology Tree
+ *
+ * Return: zero when passed parameter value is supported. Negative value on
+ * error.
+ */
+static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
+						union devlink_param_value val,
+						struct netlink_ext_ack *extack)
+{
+	if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Wrong number of tx scheduler layers provided.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree
  * @pf: pf struct
@@ -1235,6 +1385,11 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
 	return 0;
 }
 
+enum ice_param_id {
+	ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+	ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+};
+
 static const struct devlink_param ice_devlink_params[] = {
 	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
 			      ice_devlink_enable_roce_get,
@@ -1244,7 +1399,13 @@ static const struct devlink_param ice_devlink_params[] = {
 			      ice_devlink_enable_iw_get,
 			      ice_devlink_enable_iw_set,
 			      ice_devlink_enable_iw_validate),
-
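+	/* value kept in NVM (permanent cmode); a new value takes effect
+	 * on the next driver init, after a PCI slot powercycle
+	 */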
+	DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+			     "tx_scheduling_layers",
+			     DEVLINK_PARAM_TYPE_U8,
+			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+			     ice_devlink_tx_sched_layers_get,
+			     ice_devlink_tx_sched_layers_set,
+			     ice_devlink_tx_sched_layers_validate),
 };
 
 static void ice_devlink_free(void *devlink_ptr)
@@ -1304,9 +1465,16 @@ void ice_devlink_unregister(struct ice_pf *pf)
 int ice_devlink_register_params(struct ice_pf *pf)
 {
 	struct devlink *devlink = priv_to_devlink(pf);
+	struct ice_hw *hw = &pf->hw;
+	size_t params_size;
+
+	params_size = ARRAY_SIZE(ice_devlink_params);
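+
+	/* tx_scheduling_layers is the last entry of ice_devlink_params[],
+	 * so trimming the size by one hides exactly that parameter
+	 */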
+	if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+		params_size--;
 
 	return devl_params_register(devlink, ice_devlink_params,
-				    ARRAY_SIZE(ice_devlink_params));
+				    params_size);
 }
 
 void ice_devlink_unregister_params(struct ice_pf *pf)
@@ -1684,6 +1684,15 @@ struct ice_aqc_nvm
 #define ICE_AQC_NVM_START_POINT	0
 
+#define ICE_AQC_NVM_TX_TOPO_MOD_ID	0x14B
+
+struct ice_aqc_nvm_tx_topo_user_sel {
+	__le16 length;
+	u8 data;
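+	/* bit 4 of @data: set = user selected the 5-layer Tx scheduler
+	 * topology, clear = default 9-layer topology
+	 */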
+#define ICE_AQC_NVM_TX_TOPO_USER_SEL	BIT(4)
+	u8 reserved;
+};
+
 /* NVM Checksum Command (direct, 0x0706) */
 struct ice_aqc_nvm_checksum {
 	u8 flags;
@@ -286,10 +286,9 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
  *
  * Returns: zero on success, or a negative error code on failure.
  */
-static int
-ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
-			u16 block_size, u8 *block, bool last_cmd,
-			u8 *reset_level, struct netlink_ext_ack *extack)
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+			    u16 block_size, u8 *block, bool last_cmd,
+			    u8 *reset_level, struct netlink_ext_ack *extack)
 {
 	u16 completion_module, completion_retval;
 	struct device *dev = ice_pf_to_dev(pf);
@@ -9,5 +9,8 @@ int ice_devlink_flash_update(struct devlink *devlink,
 			    struct netlink_ext_ack *extack);
 int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
 			    struct netlink_ext_ack *extack);
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+			    u16 block_size, u8 *block, bool last_cmd,
+			    u8 *reset_level, struct netlink_ext_ack *extack);
 
 #endif
@@ -18,10 +18,9 @@
  *
  * Read the NVM using the admin queue commands (0x0701)
  */
-static int
-ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
-		void *data, bool last_command, bool read_shadow_ram,
-		struct ice_sq_cd *cd)
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+		    u16 length, void *data, bool last_command,
+		    bool read_shadow_ram, struct ice_sq_cd *cd)
 {
 	struct ice_aq_desc desc;
 	struct ice_aqc_nvm *cmd;
@@ -14,6 +14,9 @@ struct ice_orom_civd_info {
 int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
 void ice_release_nvm(struct ice_hw *hw);
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+		    u16 length, void *data, bool last_command,
+		    bool read_shadow_ram, struct ice_sq_cd *cd);
 int
 ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
 		  bool read_shadow_ram);