Commit f5fbd324 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: Implement support for SRIOV + LAG

Dave Ertman says:

Implement support for SRIOV VFs on interfaces that are in an
aggregate interface.

The first interface added to the aggregate will be flagged as the
primary interface, and this primary interface will be responsible
for managing the VFs' resources. VFs created on the primary are the
only VFs supported on the aggregate. Only active-backup mode is
supported, and only aggregates whose primary interface is in
switchdev mode are supported.

The ice-lag DDP package must be loaded to support this feature.

Additional restrictions on the interfaces that can be added to the
aggregate while still supporting SRIOV VFs (a hypothetical sketch of
these checks follows the list):
- all interfaces must be on the same physical NIC
- all interfaces must have the same QoS settings
- all interfaces must have the FW LLDP agent disabled
- only the primary interface may be put into switchdev mode
- no more than two interfaces may be in the aggregate
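As a rough, hypothetical sketch of these checks (ice_lag_bond_ok() and
its signature are illustrative only, not code from this series; the QoS
comparison is elided):

  static bool ice_lag_bond_ok(struct ice_pf *primary, struct ice_pf *peer,
                              int num_members)
  {
          /* no more than two interfaces in the aggregate */
          if (num_members > 2)
                  return false;
          /* all members must sit on the same physical NIC (same SWID) */
          if (primary->hw.port_info->sw_id != peer->hw.port_info->sw_id)
                  return false;
          /* the FW LLDP agent must be disabled on every member */
          if (test_bit(ICE_FLAG_FW_LLDP_AGENT, primary->flags) ||
              test_bit(ICE_FLAG_FW_LLDP_AGENT, peer->flags))
                  return false;
          /* only the primary interface is placed in switchdev mode */
          if (!ice_is_switchdev_running(primary))
                  return false;
          return true;
  }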
---
v2:
- Move NULL check for q_ctx in ice_lag_qbuf_recfg() earlier (patch 6)

v1: https://lore.kernel.org/netdev/20230726182141.3797928-1-anthony.l.nguyen@intel.com/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7f6c4039 3579aa86
@@ -200,6 +200,8 @@ enum ice_feature {
ICE_F_PTP_EXTTS,
ICE_F_SMA_CTRL,
ICE_F_GNSS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
ICE_F_MAX
};
@@ -569,6 +571,7 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
struct mutex adev_mutex; /* lock to protect aux device access */
struct mutex lag_mutex; /* protect ice_lag struct in PF */
u32 msg_enable;
struct ice_ptp ptp;
struct gnss_serial *gnss_serial;
@@ -639,6 +642,8 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
};
extern struct workqueue_struct *ice_lag_wq;
struct ice_netdev_priv {
struct ice_vsi *vsi;
struct ice_repr *repr;
......
@@ -120,6 +120,9 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
u8 major_ver;
u8 minor_ver;
@@ -232,6 +235,8 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2)
__le16 bad_frame_vsi;
__le16 swid;
#define ICE_AQC_PORT_SWID_VALID BIT(15)
#define ICE_AQC_PORT_SWID_M 0xFF
u8 reserved[10];
};
@@ -241,10 +246,12 @@ struct ice_aqc_set_port_params {
* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
* Get Allocated Resource Descriptors Command (indirect 0x020A)
* Share Resource command (indirect 0x020B)
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
#define ICE_AQC_RES_TYPE_RECIPE 0x05
#define ICE_AQC_RES_TYPE_SWID 0x07
#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
@@ -264,6 +271,7 @@ struct ice_aqc_set_port_params {
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
* Share Resource command (indirect 0x020B)
*/
struct ice_aqc_alloc_free_res_cmd {
__le16 num_entries; /* Number of Resource entries */
@@ -818,7 +826,11 @@ struct ice_aqc_txsched_move_grp_info_hdr {
__le32 src_parent_teid;
__le32 dest_parent_teid;
__le16 num_elems;
__le16 reserved;
u8 mode;
#define ICE_AQC_MOVE_ELEM_MODE_SAME_PF 0x0
#define ICE_AQC_MOVE_ELEM_MODE_GIVE_OWN 0x1
#define ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN 0x2
u8 reserved;
};
struct ice_aqc_move_elem {
@@ -1923,6 +1935,42 @@ struct ice_aqc_dis_txq_item {
__le16 q_id[];
} __packed;
/* Move/Reconfigure Tx queue (indirect 0x0C32) */
struct ice_aqc_cfg_txqs {
u8 cmd_type;
#define ICE_AQC_Q_CFG_MOVE_NODE 0x1
#define ICE_AQC_Q_CFG_TC_CHNG 0x2
#define ICE_AQC_Q_CFG_MOVE_TC_CHNG 0x3
#define ICE_AQC_Q_CFG_SUBSEQ_CALL BIT(2)
#define ICE_AQC_Q_CFG_FLUSH BIT(3)
u8 num_qs;
u8 port_num_chng;
#define ICE_AQC_Q_CFG_SRC_PRT_M 0x7
#define ICE_AQC_Q_CFG_DST_PRT_S 3
#define ICE_AQC_Q_CFG_DST_PRT_M (0x7 << ICE_AQC_Q_CFG_DST_PRT_S)
u8 time_out;
#define ICE_AQC_Q_CFG_TIMEOUT_S 2
#define ICE_AQC_Q_CFG_TIMEOUT_M (0x1F << ICE_AQC_Q_CFG_TIMEOUT_S)
__le32 blocked_cgds;
__le32 addr_high;
__le32 addr_low;
};
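/* Worked example (illustrative, derived from the masks above): moving a
 * queue from lport 0 to lport 1 packs port_num_chng as
 * (0 & ICE_AQC_Q_CFG_SRC_PRT_M) |
 * ((1 << ICE_AQC_Q_CFG_DST_PRT_S) & ICE_AQC_Q_CFG_DST_PRT_M) = 0x08.
 */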
/* Per Q struct for Move/Reconfigure Tx LAN Queues (indirect 0x0C32) */
struct ice_aqc_cfg_txq_perq {
__le16 q_handle;
u8 tc;
u8 rsvd;
__le32 q_teid;
};
/* The buffer for Move/Reconfigure Tx LAN Queues (indirect 0x0C32) */
struct ice_aqc_cfg_txqs_buf {
__le32 src_parent_teid;
__le32 dst_parent_teid;
struct ice_aqc_cfg_txq_perq queue_info[];
};
/* Add Tx RDMA Queue Set (indirect 0x0C33) */
struct ice_aqc_add_rdma_qset {
u8 num_qset_grps;
@@ -2181,6 +2229,7 @@ struct ice_aq_desc {
struct ice_aqc_neigh_dev_req neigh_dev;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_cfg_txqs cfg_txqs;
struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
@@ -2263,6 +2312,7 @@ enum ice_adminq_opc {
/* Alloc/Free/Get Resources */
ice_aqc_opc_alloc_res = 0x0208,
ice_aqc_opc_free_res = 0x0209,
ice_aqc_opc_share_res = 0x020B,
ice_aqc_opc_set_vlan_mode_parameters = 0x020C,
ice_aqc_opc_get_vlan_mode_parameters = 0x020D,
@@ -2356,6 +2406,7 @@ enum ice_adminq_opc {
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
ice_aqc_opc_cfg_txqs = 0x0C32,
ice_aqc_opc_add_rdma_qset = 0x0C33,
/* package commands */
......
@@ -2241,6 +2241,14 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
"%s: reset_restrict_support = %d\n", prefix,
caps->reset_restrict_support);
break;
case ICE_AQC_CAPS_FW_LAG_SUPPORT:
caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
prefix, caps->roce_lag);
caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -4221,6 +4229,53 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
return status;
}
/**
* ice_aq_cfg_lan_txq
* @hw: pointer to the hardware structure
* @buf: buffer for command
* @buf_size: size of buffer in bytes
* @num_qs: number of queues being configured
* @oldport: origination lport
* @newport: destination lport
* @cd: pointer to command details structure or NULL
*
* Move/Configure LAN Tx queue (0x0C32)
*
* There is a better AQ command to use for moving nodes, so only coding
* this one for configuring the node.
*/
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
struct ice_sq_cd *cd)
{
struct ice_aqc_cfg_txqs *cmd;
struct ice_aq_desc desc;
int status;
cmd = &desc.params.cfg_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
if (!buf)
return -EINVAL;
cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
cmd->num_qs = num_qs;
cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) &
ICE_AQC_Q_CFG_DST_PRT_M;
cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) &
ICE_AQC_Q_CFG_TIMEOUT_M;
cmd->blocked_cgds = 0;
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)
ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
hw->adminq.sq_last_status);
return status;
}
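/* Hypothetical usage sketch, not part of this patch: reconfiguring a
 * single LAN Tx queue onto a new parent node with the new 0x0C32
 * command. example_cfg_one_txq() and its parameters are illustrative.
 */
static int
example_cfg_one_txq(struct ice_hw *hw, __le32 src_parent, __le32 dst_parent,
		    u16 q_handle, u8 tc, __le32 q_teid, u8 oldport, u8 newport)
{
	struct ice_aqc_cfg_txqs_buf *qbuf;
	u16 qbuf_size;
	int err;

	qbuf_size = struct_size(qbuf, queue_info, 1);
	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
	if (!qbuf)
		return -ENOMEM;

	qbuf->src_parent_teid = src_parent;
	qbuf->dst_parent_teid = dst_parent;
	qbuf->queue_info[0].q_handle = cpu_to_le16(q_handle);
	qbuf->queue_info[0].tc = tc;
	qbuf->queue_info[0].q_teid = q_teid;

	err = ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, 1, oldport, newport,
				 NULL);
	kfree(qbuf);
	return err;
}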
/**
* ice_aq_add_rdma_qsets
* @hw: pointer to the hardware structure
@@ -4700,6 +4755,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
break;
ice_free_sched_node(pi, node);
q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
q_ctx->q_teid = ICE_INVAL_TEID;
}
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
......
@@ -186,6 +186,10 @@ int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
......
@@ -70,6 +70,11 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
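/* QoS settings must stay identical across a bond's members, so runtime
 * DCB changes are rejected while the interface is bonded; the same guard
 * recurs in each dcbnl handler below.
 */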
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return -EINVAL;
}
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
@@ -170,6 +175,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
if (mode == pf->dcbx_cap)
return ICE_DCB_NO_HW_CHG;
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return ICE_DCB_NO_HW_CHG;
}
qos_cfg = &pf->hw.port_info->qos_cfg;
/* DSCP configuration is not DCBx negotiated */
@@ -261,6 +271,11 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return -EINVAL;
}
mutex_lock(&pf->tc_mutex);
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
@@ -323,6 +338,11 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
if (prio >= ICE_MAX_USER_PRIORITY)
return;
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return;
}
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
@@ -379,6 +399,11 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ICE_DCB_NO_HW_CHG;
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return ICE_DCB_NO_HW_CHG;
}
/* Nothing to do */
if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return ICE_DCB_NO_HW_CHG;
@@ -451,6 +476,11 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return;
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return;
}
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
/* prio_type, bwg_id and bw_pct per UP are not supported */
@@ -505,6 +535,11 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
if (pgid >= ICE_MAX_TRAFFIC_CLASS)
return;
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return;
}
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
@@ -725,6 +760,11 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
}
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return -EINVAL;
}
max_tc = pf->hw.func_caps.common_cap.maxtc;
if (app->priority >= max_tc) {
netdev_err(netdev, "TC %d out of range, max TC %d\n",
@@ -836,6 +876,11 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
}
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return -EINVAL;
}
mutex_lock(&pf->tc_mutex);
old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -937,6 +982,11 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ICE_DCB_NO_HW_CHG;
if (pf->lag && pf->lag->bonded) {
netdev_err(netdev, "DCB changes not allowed when in a bond\n");
return ICE_DCB_NO_HW_CHG;
}
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
......
This diff is collapsed.
@@ -14,20 +14,52 @@ enum ice_lag_role {
ICE_LAG_UNSET
};
#define ICE_LAG_INVALID_PORT 0xFF
#define ICE_LAG_RESET_RETRIES 5
struct ice_pf;
struct ice_vf;
struct ice_lag_netdev_list {
struct list_head node;
struct net_device *netdev;
};
/* LAG info struct */
struct ice_lag {
struct ice_pf *pf; /* backlink to PF struct */
struct net_device *netdev; /* this PF's netdev */
struct net_device *peer_netdev;
struct net_device *upper_netdev; /* upper bonding netdev */
struct list_head *netdev_head;
struct notifier_block notif_block;
s32 bond_mode; /* bonding mode (BOND_MODE_*) reported via bonding_info */
u16 bond_swid; /* swid for primary interface */
u8 active_port; /* lport value for the current active port */
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
u16 pf_recipe; /* switch recipe allocated for LAG filter rules */
u16 pf_rule_id; /* id of the bond's default forwarding rule */
u16 cp_rule_idx; /* index of the control-plane packet steering rule */
u8 role; /* current enum ice_lag_role for this port */
};
/* LAG workqueue struct */
struct ice_lag_work {
struct work_struct lag_task;
struct ice_lag_netdev_list netdev_list;
struct ice_lag *lag;
unsigned long event;
struct net_device *event_netdev;
union {
struct netdev_notifier_changeupper_info changeupper_info;
struct netdev_notifier_bonding_info bonding_info;
struct netdev_notifier_info notifier_info;
} info;
};
void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
#endif /* _ICE_LAG_H_ */
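How these hooks are armed lives in ice_lag.c (its diff is collapsed
above); a plausible sketch, assuming the usual wiring from the FW
capability parsed in ice_common.c to the feature flag, is:

  /* assumed to run during ice_init_lag() or a similar init path */
  if (pf->hw.dev_caps.common_cap.sriov_lag)
          ice_set_feature_support(pf, ICE_F_SRIOV_LAG);

which matches the ice_set_feature_support() export in ice_lib.c just
below.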
@@ -3970,7 +3970,7 @@ bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
* @pf: pointer to the struct ice_pf instance
* @f: feature enum to set
*/
static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
if (f < 0 || f >= ICE_F_MAX)
return;
......
@@ -163,6 +163,7 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
......
@@ -64,6 +64,7 @@ struct device *ice_hw_to_dev(struct ice_hw *hw)
}
static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -635,6 +636,11 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
reset_type = ICE_RESET_CORER;
}
ice_prepare_for_reset(pf, reset_type);
/* trigger the reset */
@@ -718,8 +724,13 @@ static void ice_reset_subtask(struct ice_pf *pf)
}
/* No pending resets to finish processing. Check for new resets */
if (test_bit(ICE_PFR_REQ, pf->state))
if (test_bit(ICE_PFR_REQ, pf->state)) {
reset_type = ICE_RESET_PFR;
if (pf->lag && pf->lag->bonded) {
dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
reset_type = ICE_RESET_CORER;
}
}
if (test_bit(ICE_CORER_REQ, pf->state))
reset_type = ICE_RESET_CORER;
if (test_bit(ICE_GLOBR_REQ, pf->state))
@@ -3795,6 +3806,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
mutex_destroy(&pf->lag_mutex);
mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
@@ -3875,6 +3887,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
mutex_init(&pf->adev_mutex);
mutex_init(&pf->lag_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);
@@ -5571,7 +5584,7 @@ static struct pci_driver ice_driver = {
*/
static int __init ice_module_init(void)
{
int status;
int status = -ENOMEM;
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
@@ -5579,15 +5592,27 @@ static int __init ice_module_init(void)
ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
return status;
}
ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
if (!ice_lag_wq) {
pr_err("Failed to create LAG workqueue\n");
goto err_dest_wq;
}
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register PCI driver, err %d\n", status);
destroy_workqueue(ice_wq);
goto err_dest_lag_wq;
}
return 0;
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
err_dest_wq:
destroy_workqueue(ice_wq);
return status;
}
module_init(ice_module_init);
@@ -5602,6 +5627,7 @@ static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
destroy_workqueue(ice_wq);
destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
}
module_exit(ice_module_exit);
@@ -7357,6 +7383,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_RESET_FAILED, pf->state);
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
return;
err_vsi_rebuild:
......
@@ -447,7 +447,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
static int
int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@@ -526,7 +526,7 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
*
* This function suspends or resumes HW nodes
*/
static int
int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
@@ -569,18 +569,24 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
struct ice_q_ctx *q_ctx;
u16 idx;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
return -EINVAL;
/* allocate LAN queue contexts */
if (!vsi_ctx->lan_q_ctx[tc]) {
vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
new_numqs,
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->lan_q_ctx[tc])
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
return -ENOMEM;
for (idx = 0; idx < new_numqs; idx++) {
q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
q_ctx[idx].q_teid = ICE_INVAL_TEID;
}
vsi_ctx->lan_q_ctx[tc] = q_ctx;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
return 0;
}
@@ -592,9 +598,16 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
return -ENOMEM;
memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
for (idx = prev_num; idx < new_numqs; idx++) {
q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
q_ctx[idx].q_teid = ICE_INVAL_TEID;
}
vsi_ctx->lan_q_ctx[tc] = q_ctx;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
}
@@ -1044,7 +1057,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*
* This function add nodes to a given layer.
*/
static int
int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1119,7 +1132,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
*
* This function returns the current VSI layer number
*/
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
/* Num Layers VSI layer
* 9 6
@@ -1142,7 +1155,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
*
* This function returns the current aggregator layer number
*/
static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
/* Num Layers aggregator layer
* 9 4
@@ -1577,7 +1590,7 @@ ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
* This function retrieves an aggregator node for a given aggregator ID from
* a given TC branch
*/
static struct ice_sched_node *
struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u32 agg_id)
{
@@ -2139,7 +2152,7 @@ ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
* This function walks through the aggregator subtree to find a free parent
* node
*/
static struct ice_sched_node *
struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
u16 *num_nodes)
{
......
@@ -146,8 +146,29 @@ ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw);
int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend);
struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u32 agg_id);
u8 ice_sched_get_agg_layer(struct ice_hw *hw);
u8 ice_sched_get_vsi_layer(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
u16 *num_nodes);
int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
u16 num_nodes, u32 *first_node_teid,
u16 *num_nodes_added);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd);
int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
@@ -20,12 +20,11 @@
* byte 0 = 0x2: to identify it as locally administered DA MAC
* byte 6 = 0x2: to identify it as locally administered SA MAC
* byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
*/
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
@@ -1369,14 +1368,6 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(tcp, 0),
};
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
ICE_MAX_NUM_PROFILES);
@@ -1841,8 +1832,13 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_DFLT) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
if (opc == ice_aqc_opc_alloc_res)
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE |
ICE_AQC_RES_TYPE_FLAG_SHARED);
else
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
status = -EINVAL;
goto ice_aq_alloc_free_vsi_list_exit;
@@ -1910,7 +1906,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
*
* Add(0x0290)
*/
static int
int
ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd)
@@ -1947,7 +1943,7 @@ ice_aq_add_recipe(struct ice_hw *hw,
* The caller must supply enough space in s_recipe_list to hold all possible
* recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
*/
static int
int
ice_aq_get_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
@@ -2040,7 +2036,7 @@ ice_update_recipe_lkup_idx(struct ice_hw *hw,
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
static int
int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -2066,7 +2062,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
static int
int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -2090,7 +2086,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
*/
static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
u16 buf_len;
@@ -2463,6 +2459,15 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
}
}
/**
* ice_fill_eth_hdr - helper to copy dummy_eth_hdr into supplied buffer
* @eth_hdr: pointer to buffer to populate
*/
void ice_fill_eth_hdr(u8 *eth_hdr)
{
memcpy(eth_hdr, dummy_eth_header, DUMMY_ETH_HDR_LEN);
}
/**
* ice_fill_sw_rule - Helper function to fill switch rule structure
* @hw: pointer to the hardware structure
@@ -3122,7 +3127,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
* handle element. This can be extended further to search VSI list with more
* than 1 vsi_count. Returns pointer to VSI list entry if found.
*/
static struct ice_vsi_list_map_info *
struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
u16 *vsi_list_id)
{
@@ -3133,7 +3138,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
list_head = &sw->recp_list[recp_id].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
if (list_itr->vsi_list_info) {
map_info = list_itr->vsi_list_info;
if (test_bit(vsi_handle, map_info->vsi_map)) {
*vsi_list_id = map_info->vsi_list_id;
@@ -4544,6 +4549,45 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
.offs = {__VA_ARGS__}, \
}
/**
* ice_share_res - set a resource as shared or dedicated
* @hw: hw struct of original owner of resource
* @type: resource type
* @shared: is the resource being set to shared
* @res_id: resource id (descriptor)
*/
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
u16 buf_len;
int status;
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->num_elems = cpu_to_le16(1);
if (shared)
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) |
ICE_AQC_RES_TYPE_FLAG_SHARED);
else
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) &
~ICE_AQC_RES_TYPE_FLAG_SHARED);
buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
ice_aqc_opc_share_res, NULL);
if (status)
ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
type, res_id, shared ? "SHARED" : "DEDICATED");
kfree(buf);
return status;
}
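/* Hypothetical caller sketch (not from this patch): the LAG code can
 * mark the primary port's switch ID as shared so the secondary port may
 * attach to it, and flip it back to dedicated on unbond, e.g.:
 *
 *	err = ice_share_res(&pf->hw, ICE_AQC_RES_TYPE_SWID, true,
 *			    pf->hw.port_info->sw_id);
 */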
/* This is mapping table entry that maps every word within a given protocol
* structure to the real byte offset as per the specification of that
* protocol header.
......
@@ -22,6 +22,16 @@
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
#define DUMMY_ETH_HDR_LEN 16
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
@@ -345,6 +355,7 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id);
/* Switch/bridge related commands */
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup);
@@ -393,6 +404,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
void ice_fill_eth_hdr(u8 *eth_hdr);
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
@@ -401,4 +413,21 @@ int
ice_update_recipe_lkup_idx(struct ice_hw *hw,
struct ice_update_recipe_lkup_idx_params *params);
void ice_change_proto_id_to_dvm(void);
struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
u16 *vsi_list_id);
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid);
int ice_aq_get_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd);
int ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd);
int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd);
int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
@@ -277,6 +277,8 @@ struct ice_hw_common_caps {
u8 dcb;
u8 ieee_1588;
u8 rdma;
u8 roce_lag;
u8 sriov_lag;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
......
@@ -1724,6 +1724,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vf->vf_id, i);
}
ice_lag_move_new_vf_nodes(vf);
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
......