Commit 9e4e1dd4 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2021-06-11

Extend the ice driver to support basic PTP clock functionality for E810
devices.

This includes some tangential work required to set up the sideband queue and
driver shared parameters as well.

This series only supports E810-based devices. This is because other devices
based on the E822 MAC use a different and more complex PHY.

The low level device functionality is kept within ice_ptp_hw.c and is
designed to be extensible for supporting E822 devices in a future series.

This series also supports only basic functionality, including the ptp_clock
device and timestamping. Support for configuring periodic outputs and
external input timestamps will be implemented in a future series.

There are a couple of potential "what? why?" bits in this series I want to
point out:

1) the PTP hardware functionality is shared between multiple functions. This
means that the same clock registers are shared across multiple PFs. To avoid
contention or clashes between PFs, firmware assigns "ownership" to one PF,
while other PFs are merely "associated" with the timer. Because the hardware
resource is shared, only the clock owner will allocate and register a PTP
clock device. Other PFs determine the appropriate PTP clock index to report
by using a firmware interface to read a shared parameter that is set by the
owning PF (see the first sketch after this list).

2) the ice driver uses its own kthread instead of using do_aux_work. This is
because the periodic and asynchronous tasks are necessary for all PFs, but
only one PF will allocate the clock (see the second sketch after this list).
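
As an aside, here is a minimal sketch (not code from this series) of how a
non-owner PF could recover the clock index published by the owner, using the
ice_aq_get_driver_param() helper and PTP_SHARED_CLK_IDX_VALID flag that this
series adds (the owner would publish it with ice_aq_set_driver_param()); the
function name is illustrative and timer domain 0 is assumed:

static int ice_example_get_shared_clock_index(struct ice_pf *pf)
{
	u32 value;
	int err;

	/* Read the shared parameter the clock-owning PF published */
	err = ice_aq_get_driver_param(&pf->hw,
				      ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0,
				      &value, NULL);
	if (err)
		return err;

	/* The owner sets bit 31 once its registered clock index is valid */
	if (!(value & PTP_SHARED_CLK_IDX_VALID))
		return -EBUSY;

	return value & ~PTP_SHARED_CLK_IDX_VALID;
}

And a sketch of the kthread pattern from point 2, built on the standard
kthread_worker API from <linux/kthread.h>; ice_example_periodic_work() and
the 500 ms interval are illustrative, not the driver's actual names or
values:

static void ice_example_periodic_work(struct kthread_work *work)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct ice_ptp *ptp = container_of(dwork, struct ice_ptp, work);

	/* ... periodic tasks that every PF runs, clock owner or not ... */

	/* re-arm ourselves; the PTP core's do_aux_work is not involved */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(500));
}

static int ice_example_start_worker(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;

	kthread_init_delayed_work(&ptp->work, ice_example_periodic_work);

	ptp->kworker = kthread_create_worker(0, "ice-ptp-%s",
					     dev_name(ice_pf_to_dev(pf)));
	if (IS_ERR(ptp->kworker))
		return PTR_ERR(ptp->kworker);

	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
	return 0;
}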

The series is broken up into functional pieces to allow easy review.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5aa3bd9b ea9b847c
......@@ -299,6 +299,7 @@ config ICE
select DIMLIB
select NET_DEVLINK
select PLDMFW
imply PTP_1588_CLOCK
help
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
......
......@@ -29,6 +29,7 @@ ice-y := ice_main.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
......@@ -59,6 +59,7 @@
#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
......@@ -74,8 +75,9 @@
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
-#define ICE_AQ_LEN 64
+#define ICE_AQ_LEN 192
#define ICE_MBXSQ_LEN 64
#define ICE_SBQ_LEN 64
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
......@@ -227,6 +229,7 @@ enum ice_pf_state {
ICE_STATE_NOMINAL_CHECK_BITS,
ICE_ADMINQ_EVENT_PENDING,
ICE_MAILBOXQ_EVENT_PENDING,
ICE_SIDEBANDQ_EVENT_PENDING,
ICE_MDD_EVENT_PENDING,
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
......@@ -387,6 +390,8 @@ enum ice_pf_flags {
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
......@@ -449,6 +454,7 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
struct ice_ptp ptp;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
......
......@@ -108,6 +108,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_TXQS 0x0042
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_FD 0x0045
#define ICE_AQC_CAPS_1588 0x0046
#define ICE_AQC_CAPS_MAX_MTU 0x0047
#define ICE_AQC_CAPS_NVM_VER 0x0048
#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
......@@ -1611,6 +1612,15 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
/* Sideband Control Interface Commands */
/* Neighbor Device Request (indirect 0x0C00); also used for the response. */
struct ice_aqc_neigh_dev_req {
__le16 sb_data_len;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
......@@ -1843,6 +1853,30 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
/* Driver Shared Parameters (direct, 0x0C90) */
struct ice_aqc_driver_shared_params {
u8 set_or_get_op;
#define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0)
#define ICE_AQC_DRIVER_PARAM_SET 0
#define ICE_AQC_DRIVER_PARAM_GET 1
u8 param_indx;
#define ICE_AQC_DRIVER_PARAM_MAX_IDX 15
u8 rsvd[2];
__le32 param_val;
__le32 addr_high;
__le32 addr_low;
};
enum ice_aqc_driver_params {
/* OS clock index for PTP timer Domain 0 */
ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0,
/* OS clock index for PTP timer Domain 1 */
ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1,
/* Add new parameters above */
ICE_AQC_DRIVER_PARAM_MAX = 16,
};
/* Lan Queue Overflow Event (direct, 0x1001) */
struct ice_aqc_event_lan_overflow {
__le32 prtdcb_ruptq;
......@@ -1911,6 +1945,7 @@ struct ice_aq_desc {
struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_neigh_dev_req neigh_dev;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_add_rdma_qset add_rdma_qset;
......@@ -1919,6 +1954,7 @@ struct ice_aq_desc {
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_driver_shared_params drv_shared_params;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_mac_cfg set_mac_cfg;
......@@ -2059,6 +2095,9 @@ enum ice_adminq_opc {
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
/* Sideband Control Interface commands */
ice_aqc_opc_neighbour_device_request = 0x0C00,
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
......@@ -2069,6 +2108,8 @@ enum ice_adminq_opc {
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
ice_aqc_opc_driver_shared_params = 0x0C90,
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
......
......@@ -287,6 +287,15 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* make sure the context is associated with the right VSI */
tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
/* Restrict Tx timestamps to the PF VSI */
switch (vsi->type) {
case ICE_VSI_PF:
tlan_ctx->tsyn_ena = 1;
break;
default:
break;
}
tlan_ctx->tso_ena = ICE_TX_LEGACY;
tlan_ctx->tso_qnum = pf_q;
......@@ -393,9 +402,10 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
* of same priority
*/
if (vsi->type != ICE_VSI_VF)
-ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
+ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
else
-ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);
+ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
+false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
......
......@@ -58,6 +58,17 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
return 0;
}
/**
* ice_is_e810
* @hw: pointer to the hardware structure
*
* returns true if the device is E810 based, false if not.
*/
bool ice_is_e810(struct ice_hw *hw)
{
return hw->mac_type == ICE_MAC_E810;
}
/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
......@@ -1293,6 +1304,64 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
{ 0 }
};
/* Sideband Queue command wrappers */
/**
* ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
* @hw: pointer to the HW struct
* @desc: descriptor describing the command
* @buf: buffer to use for indirect commands (NULL for direct commands)
* @buf_size: size of buffer for indirect commands (0 for direct commands)
* @cd: pointer to command details structure
*/
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
(struct ice_aq_desc *)desc,
buf, buf_size, cd));
}
/**
* ice_sbq_rw_reg - Fill Sideband Queue command
* @hw: pointer to the HW struct
* @in: message info to be filled in descriptor
*/
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
struct ice_sbq_cmd_desc desc = {0};
struct ice_sbq_msg_req msg = {0};
u16 msg_len;
int status;
msg_len = sizeof(msg);
msg.dest_dev = in->dest_dev;
msg.opcode = in->opcode;
msg.flags = ICE_SBQ_MSG_FLAGS;
msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
if (in->opcode)
msg.data = cpu_to_le32(in->data);
else
/* data read comes back in completion, so shorten the struct by
* sizeof(msg.data)
*/
msg_len -= sizeof(msg.data);
desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
desc.param0.cmd_len = cpu_to_le16(msg_len);
status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
if (!status && !in->opcode)
in->data = le32_to_cpu
(((struct ice_sbq_msg_cmpl *)&msg)->data);
return status;
}
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
......@@ -2034,6 +2103,48 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
func_p->guar_num_vsi);
}
/**
* ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
* @hw: pointer to the HW struct
* @func_p: pointer to function capabilities structure
* @cap: pointer to the capability element to parse
*
* Extract function capabilities for ICE_AQC_CAPS_1588.
*/
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
struct ice_aqc_list_caps_elem *cap)
{
struct ice_ts_func_info *info = &func_p->ts_func_info;
u32 number = le32_to_cpu(cap->number);
info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
func_p->common_cap.ieee_1588 = info->ena;
info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
func_p->common_cap.ieee_1588);
ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
info->src_tmr_owned);
ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
info->tmr_ena);
ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
info->tmr_index_owned);
ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
info->tmr_index_assoc);
ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
info->clk_freq);
ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
info->clk_src);
}
/**
* ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
* @hw: pointer to the HW struct
......@@ -2100,6 +2211,9 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
case ICE_AQC_CAPS_1588:
ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
break;
case ICE_AQC_CAPS_FD:
ice_parse_fdir_func_caps(hw, func_p);
break;
......@@ -2172,6 +2286,57 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
dev_p->num_vsi_allocd_to_host);
}
/**
* ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
* @cap: capability element to parse
*
* Parse ICE_AQC_CAPS_1588 for device capabilities.
*/
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
struct ice_aqc_list_caps_elem *cap)
{
struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
u32 logical_id = le32_to_cpu(cap->logical_id);
u32 phys_id = le32_to_cpu(cap->phys_id);
u32 number = le32_to_cpu(cap->number);
info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
dev_p->common_cap.ieee_1588 = info->ena;
info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
dev_p->common_cap.ieee_1588);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
info->tmr0_owner);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
info->tmr0_owned);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
info->tmr0_ena);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
info->tmr1_owner);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
info->tmr1_owned);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
info->tmr1_ena);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
info->tmr_own_map);
}
/**
* ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
* @hw: pointer to the HW struct
......@@ -2233,6 +2398,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
case ICE_AQC_CAPS_1588:
ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
break;
case ICE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
......@@ -4523,6 +4691,81 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
return status;
}
/**
* ice_aq_set_driver_param - Set driver parameter to share via firmware
* @hw: pointer to the HW struct
* @idx: parameter index to set
* @value: the value to set the parameter to
* @cd: pointer to command details structure or NULL
*
* Set the value of one of the software defined parameters. All PFs connected
* to this device can read the value using ice_aq_get_driver_param.
*
* Note that firmware provides no synchronization or locking, and will not
* save the parameter value during a device reset. It is expected that
* a single PF will write the parameter value, while all other PFs will only
* read it.
*/
int
ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
u32 value, struct ice_sq_cd *cd)
{
struct ice_aqc_driver_shared_params *cmd;
struct ice_aq_desc desc;
if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
return -EIO;
cmd = &desc.params.drv_shared_params;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
cmd->param_indx = idx;
cmd->param_val = cpu_to_le32(value);
return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
}
/**
* ice_aq_get_driver_param - Get driver parameter shared via firmware
* @hw: pointer to the HW struct
* @idx: parameter index to retrieve
* @value: storage to return the shared parameter
* @cd: pointer to command details structure or NULL
*
* Get the value of one of the software defined parameters.
*
* Note that firmware provides no synchronization or locking. It is expected
* that only a single PF will write a given parameter.
*/
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
u32 *value, struct ice_sq_cd *cd)
{
struct ice_aqc_driver_shared_params *cmd;
struct ice_aq_desc desc;
enum ice_status status;
if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
return -EIO;
cmd = &desc.params.drv_shared_params;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
cmd->param_indx = idx;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
return ice_status_to_errno(status);
*value = le32_to_cpu(cmd->param_val);
return 0;
}
/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
......
......@@ -40,6 +40,8 @@ enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
......@@ -97,6 +99,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_e810(struct ice_hw *hw);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
......@@ -173,6 +176,7 @@ void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
......@@ -182,6 +186,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
int
ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
u32 value, struct ice_sq_cd *cd);
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
u32 *value, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
......
......@@ -51,6 +51,19 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
ICE_CQ_INIT_REGS(cq, PF_MBX);
}
/**
* ice_sb_init_regs - Initialize Sideband registers
* @hw: pointer to the hardware structure
*
* This assumes the alloc_sq and alloc_rq functions have already been called
*/
static void ice_sb_init_regs(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->sbq;
ICE_CQ_INIT_REGS(cq, PF_SB);
}
/**
* ice_check_sq_alive
* @hw: pointer to the HW struct
......@@ -609,6 +622,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
case ICE_CTL_Q_SB:
ice_sb_init_regs(hw);
cq = &hw->sbq;
break;
case ICE_CTL_Q_MAILBOX:
ice_mailbox_init_regs(hw);
cq = &hw->mailboxq;
......@@ -645,6 +662,32 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
return ret_code;
}
/**
* ice_is_sbq_supported - is the sideband queue supported
* @hw: pointer to the hardware structure
*
* Returns true if the sideband control queue interface is
* supported for the device, false otherwise
*/
bool ice_is_sbq_supported(struct ice_hw *hw)
{
/* The device sideband queue is only supported on devices with the
* generic MAC type.
*/
return hw->mac_type == ICE_MAC_GENERIC;
}
/**
* ice_get_sbq - returns the right control queue to use for sideband
* @hw: pointer to the hardware structure
*/
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
{
if (ice_is_sbq_supported(hw))
return &hw->sbq;
return &hw->adminq;
}
/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
......@@ -662,6 +705,9 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, true);
break;
case ICE_CTL_Q_SB:
cq = &hw->sbq;
break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
break;
......@@ -685,6 +731,9 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
/* Shutdown PHY Sideband */
if (ice_is_sbq_supported(hw))
ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
/* Shutdown PF-VF Mailbox */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
......@@ -724,6 +773,15 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (status)
return status;
/* sideband control queue (SBQ) interface is not supported on some
* devices. Initialize if supported, else fall back to the admin queue
* interface
*/
if (ice_is_sbq_supported(hw)) {
status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
if (status)
return status;
}
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
......@@ -759,6 +817,8 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
ice_init_ctrlq_locks(&hw->sbq);
ice_init_ctrlq_locks(&hw->mailboxq);
return ice_init_all_ctrlq(hw);
......@@ -791,6 +851,8 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
ice_shutdown_all_ctrlq(hw);
ice_destroy_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
ice_destroy_ctrlq_locks(&hw->sbq);
ice_destroy_ctrlq_locks(&hw->mailboxq);
}
......
......@@ -9,6 +9,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_MBXQ_MAX_BUF_LEN 4096
#define ICE_SBQ_MAX_BUF_LEN 512
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
......@@ -29,6 +30,7 @@ enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
ICE_CTL_Q_MAILBOX,
ICE_CTL_Q_SB,
};
/* Control Queue timeout settings - max delay 1s */
......
......@@ -3195,6 +3195,31 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return 0;
}
static int
ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
struct ice_pf *pf = ice_netdev_to_pf(dev);
/* only report timestamping if PTP is enabled */
if (!test_bit(ICE_FLAG_PTP, pf->flags))
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = ice_get_ptp_clock_index(pf);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
/**
* ice_get_max_txq - return the maximum number of Tx queues in a PF
* @pf: PF structure
......@@ -3986,7 +4011,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
-.get_ts_info = ethtool_op_get_ts_info,
+.get_ts_info = ice_get_ts_info,
.get_per_queue_coalesce = ice_get_per_q_coalesce,
.set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
......
......@@ -52,6 +52,54 @@
#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
#define PF_SB_ARQBAH 0x0022FF00
#define PF_SB_ARQBAH_ARQBAH_S 0
#define PF_SB_ARQBAH_ARQBAH_M ICE_M(0xFFFFFFFF, 0)
#define PF_SB_ARQBAL 0x0022FE80
#define PF_SB_ARQBAL_ARQBAL_LSB_S 0
#define PF_SB_ARQBAL_ARQBAL_LSB_M ICE_M(0x3F, 0)
#define PF_SB_ARQBAL_ARQBAL_S 6
#define PF_SB_ARQBAL_ARQBAL_M ICE_M(0x3FFFFFF, 6)
#define PF_SB_ARQH 0x00230000
#define PF_SB_ARQH_ARQH_S 0
#define PF_SB_ARQH_ARQH_M ICE_M(0x3FF, 0)
#define PF_SB_ARQLEN 0x0022FF80
#define PF_SB_ARQLEN_ARQLEN_S 0
#define PF_SB_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
#define PF_SB_ARQLEN_ARQVFE_S 28
#define PF_SB_ARQLEN_ARQVFE_M BIT(28)
#define PF_SB_ARQLEN_ARQOVFL_S 29
#define PF_SB_ARQLEN_ARQOVFL_M BIT(29)
#define PF_SB_ARQLEN_ARQCRIT_S 30
#define PF_SB_ARQLEN_ARQCRIT_M BIT(30)
#define PF_SB_ARQLEN_ARQENABLE_S 31
#define PF_SB_ARQLEN_ARQENABLE_M BIT(31)
#define PF_SB_ARQT 0x00230080
#define PF_SB_ARQT_ARQT_S 0
#define PF_SB_ARQT_ARQT_M ICE_M(0x3FF, 0)
#define PF_SB_ATQBAH 0x0022FC80
#define PF_SB_ATQBAH_ATQBAH_S 0
#define PF_SB_ATQBAH_ATQBAH_M ICE_M(0xFFFFFFFF, 0)
#define PF_SB_ATQBAL 0x0022FC00
#define PF_SB_ATQBAL_ATQBAL_S 6
#define PF_SB_ATQBAL_ATQBAL_M ICE_M(0x3FFFFFF, 6)
#define PF_SB_ATQH 0x0022FD80
#define PF_SB_ATQH_ATQH_S 0
#define PF_SB_ATQH_ATQH_M ICE_M(0x3FF, 0)
#define PF_SB_ATQLEN 0x0022FD00
#define PF_SB_ATQLEN_ATQLEN_S 0
#define PF_SB_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_SB_ATQLEN_ATQVFE_S 28
#define PF_SB_ATQLEN_ATQVFE_M BIT(28)
#define PF_SB_ATQLEN_ATQOVFL_S 29
#define PF_SB_ATQLEN_ATQOVFL_M BIT(29)
#define PF_SB_ATQLEN_ATQCRIT_S 30
#define PF_SB_ATQLEN_ATQCRIT_M BIT(30)
#define PF_SB_ATQLEN_ATQENABLE_S 31
#define PF_SB_ATQLEN_ATQENABLE_M BIT(31)
#define PF_SB_ATQT 0x0022FE00
#define PF_SB_ATQT_ATQT_S 0
#define PF_SB_ATQT_ATQT_M ICE_M(0x3FF, 0)
#define PRTDCB_GENC 0x00083000
#define PRTDCB_GENC_PFCLDA_S 16
#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
......@@ -154,6 +202,7 @@
#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_OICR 0x0016CA00
#define PFINT_OICR_TSYN_TX_M BIT(11)
#define PFINT_OICR_ECC_ERR_M BIT(16)
#define PFINT_OICR_MAL_DETECT_M BIT(19)
#define PFINT_OICR_GRST_M BIT(20)
......@@ -169,6 +218,9 @@
#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_OICR_ENA 0x0016C900
#define PFINT_SB_CTL 0x0016B600
#define PFINT_SB_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_SB_CTL_CAUSE_ENA_M BIT(30)
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
......@@ -382,6 +434,23 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PRTRPB_RDPC 0x000AC260
#define GLTSYN_CMD 0x00088810
#define GLTSYN_CMD_SYNC 0x00088814
#define GLTSYN_ENA(_i) (0x00088808 + ((_i) * 4))
#define GLTSYN_ENA_TSYN_ENA_M BIT(0)
#define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4))
#define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4))
#define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4))
#define GLTSYN_SHADJ_L(_i) (0x00088908 + ((_i) * 4))
#define GLTSYN_SHTIME_0(_i) (0x000888E0 + ((_i) * 4))
#define GLTSYN_SHTIME_H(_i) (0x000888F0 + ((_i) * 4))
#define GLTSYN_SHTIME_L(_i) (0x000888E8 + ((_i) * 4))
#define GLTSYN_STAT(_i) (0x000888C0 + ((_i) * 4))
#define GLTSYN_SYNC_DLAY 0x00088818
#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4))
#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4))
#define PFTSYN_SEM 0x00088880
#define PFTSYN_SEM_BUSY_M BIT(0)
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
......
......@@ -1298,6 +1298,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->reg_idx = vsi->txq_map[i];
ring->ring_active = false;
ring->vsi = vsi;
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
WRITE_ONCE(vsi->tx_rings[i], ring);
......@@ -1675,9 +1676,11 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
* @pf_q: index of the Rx queue in the PF's queue space
* @rxdid: flexible descriptor RXDID
* @prio: priority for the RXDID for this queue
* @ena_ts: true to enable timestamp and false to disable timestamp
*/
void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
......@@ -1692,6 +1695,10 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
if (ena_ts)
/* Enable TimeSync on this queue */
regval |= QRXFLXP_CNTXT_TS_M;
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
......@@ -3392,13 +3399,22 @@ int ice_status_to_errno(enum ice_status err)
case ICE_ERR_DOES_NOT_EXIST:
return -ENOENT;
case ICE_ERR_OUT_OF_RANGE:
return -ENOTTY;
case ICE_ERR_AQ_ERROR:
case ICE_ERR_AQ_TIMEOUT:
case ICE_ERR_AQ_EMPTY:
case ICE_ERR_AQ_FW_CRITICAL:
return -EIO;
case ICE_ERR_PARAM:
case ICE_ERR_INVAL_SIZE:
return -EINVAL;
case ICE_ERR_NO_MEMORY:
return -ENOMEM;
case ICE_ERR_MAX_LIMIT:
return -EAGAIN;
case ICE_ERR_RESET_ONGOING:
return -EBUSY;
case ICE_ERR_AQ_FULL:
return -ENOSPC;
default:
return -EINVAL;
}
......
......@@ -80,7 +80,8 @@ bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio);
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+bool ena_ts);
void ice_vsi_dis_irq(struct ice_vsi *vsi);
......
......@@ -471,6 +471,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
......@@ -1231,6 +1234,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
cq = &hw->adminq;
qtype = "Admin";
break;
case ICE_CTL_Q_SB:
cq = &hw->sbq;
qtype = "Sideband";
break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
qtype = "Mailbox";
......@@ -1404,6 +1411,34 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
ice_flush(hw);
}
/**
* ice_clean_sbq_subtask - clean the Sideband Queue rings
* @pf: board private structure
*/
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
/* Nothing to do here if sideband queue is not supported */
if (!ice_is_sbq_supported(hw)) {
clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
return;
}
if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
return;
if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
return;
clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
if (ice_ctrlq_pending(hw, &hw->sbq))
__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
ice_flush(hw);
}
/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
......@@ -2106,6 +2141,7 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
ice_clean_sbq_subtask(pf);
ice_sync_arfs_fltrs(pf);
ice_flush_fdir_ctx(pf);
......@@ -2121,6 +2157,7 @@ static void ice_service_task(struct work_struct *work)
test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
......@@ -2139,6 +2176,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->sbq.num_rq_entries = ICE_SBQ_LEN;
hw->sbq.num_sq_entries = ICE_SBQ_LEN;
hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}
/**
......@@ -2679,6 +2720,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
dev = ice_pf_to_dev(pf);
set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
......@@ -2750,6 +2792,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
}
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
ice_ptp_process_ts(pf);
}
#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
if (oicr & ICE_AUX_CRIT_ERR) {
struct iidc_event *event;
......@@ -2800,6 +2847,9 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
wr32(hw, PFINT_MBX_CTL,
rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_SB_CTL,
rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
/* disable Control queue Interrupt causes */
wr32(hw, PFINT_OICR_CTL,
rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
......@@ -2854,6 +2904,11 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
/* This enables Sideband queue Interrupt causes */
val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
PFINT_SB_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_SB_CTL, val);
ice_flush(hw);
}
......@@ -3317,6 +3372,9 @@ static void ice_deinit_pf(struct ice_pf *pf)
bitmap_free(pf->avail_rxqs);
pf->avail_rxqs = NULL;
}
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
}
/**
......@@ -3363,6 +3421,10 @@ static void ice_set_pf_caps(struct ice_pf *pf)
func_caps->fd_fltr_best_effort);
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
if (func_caps->common_cap.ieee_1588)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
......@@ -4345,6 +4407,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
/* initialize DDP driven features */
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_init(pf);
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
......@@ -4512,6 +4576,8 @@ static void ice_remove(struct pci_dev *pdev)
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_deinit_lag(pf);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
......@@ -6303,6 +6369,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
/* If the PF previously had enabled PTP, PTP init needs to happen before
* the VSI rebuild. If not, this causes the PTP link status events to
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_init(pf);
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
if (err) {
......@@ -6451,6 +6524,27 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
/**
* ice_do_ioctl - Access the hwtstamp interface
* @netdev: network interface device structure
* @ifr: interface request data
* @cmd: ioctl command
*/
static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
switch (cmd) {
case SIOCGHWTSTAMP:
return ice_ptp_get_ts_config(pf, ifr);
case SIOCSHWTSTAMP:
return ice_ptp_set_ts_config(pf, ifr);
default:
return -EOPNOTSUPP;
}
}
/**
* ice_aq_str - convert AQ err code to a string
* @aq_err: the AQ error code to convert
......@@ -7101,6 +7195,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
.ndo_do_ioctl = ice_do_ioctl,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
......
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021, Intel Corporation. */
#ifndef _ICE_PTP_H_
#define _ICE_PTP_H_
#include <linux/ptp_clock_kernel.h>
#include <linux/kthread.h>
#include "ice_ptp_hw.h"
/* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
* is stored in a buffer of registers. Depending on the specific hardware,
* this buffer might be shared across multiple PHY ports.
*
* On transmit of a packet to be timestamped, software is responsible for
* selecting an open index. Hardware makes no attempt to lock or prevent
* re-use of an index for multiple packets.
*
* To handle this, timestamp indexes must be tracked by software to ensure
* that an index is not re-used for multiple transmitted packets. The
* structures and functions declared in this file track the available Tx
* register indexes, as well as provide storage for the SKB pointers.
*
* To allow multiple ports to access the shared register block independently,
* the blocks are split up so that indexes are assigned to each port based on
* hardware logical port number.
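*
* For example, on hardware where one quad of four ports shares a block of 64
* timestamp indexes, logical port 2 within the quad would be assigned
* indexes 32-47 (that is, quad_offset 32 and len INDEX_PER_PORT = 16).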
*/
/**
* struct ice_tx_tstamp - Tracking for a single Tx timestamp
* @skb: pointer to the SKB for this timestamp request
* @start: jiffies when the timestamp was first requested
*
* This structure tracks a single timestamp request. The SKB pointer is
* provided when initiating a request. The start time is used to ensure that
* we discard old requests that were not fulfilled within a 2 second time
* window.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
unsigned long start;
};
/**
* struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port
* @work: work function to handle processing of Tx timestamps
* @lock: lock to prevent concurrent write to in_use bitmap
* @tstamps: array of len to store outstanding requests
* @in_use: bitmap of len to indicate which slots are in use
* @quad: which quad the timestamps are captured in
* @quad_offset: offset into timestamp block of the quad to get the real index
* @len: length of the tstamps and in_use fields.
* @init: if true, the tracker is initialized
*/
struct ice_ptp_tx {
struct kthread_work work;
spinlock_t lock; /* lock protecting in_use bitmap */
struct ice_tx_tstamp *tstamps;
unsigned long *in_use;
u8 quad;
u8 quad_offset;
u8 len;
u8 init;
};
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT (INDEX_PER_QUAD / ICE_PORTS_PER_QUAD)
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
*
* This structure contains PTP data related to the external ports. Currently
* it is used for tracking the Tx timestamps of a port. In the future this
* structure will also hold information for the E822 port initialization
* logic.
*
* @tx: Tx timestamp tracking for this port
*/
struct ice_ptp_port {
struct ice_ptp_tx tx;
};
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
* @port: data for the PHY port initialization procedure
* @work: delayed work function for periodic tasks
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
* @kworker: kwork thread for handling periodic work
* @info: structure defining PTP hardware capabilities
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
*/
struct ice_ptp {
struct ice_ptp_port port;
struct kthread_delayed_work work;
u64 cached_phc_time;
struct kthread_worker *kworker;
struct ptp_clock_info info;
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
};
#define __ptp_port_to_ptp(p) \
container_of((p), struct ice_ptp, port)
#define ptp_port_to_pf(p) \
container_of(__ptp_port_to_ptp((p)), struct ice_pf, ptp)
#define __ptp_info_to_ptp(i) \
container_of((i), struct ice_ptp, info)
#define ptp_info_to_pf(i) \
container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp)
#define PTP_SHARED_CLK_IDX_VALID BIT(31)
#define ICE_PTP_TS_VALID BIT(0)
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_get_ptp_clock_index(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
return -EOPNOTSUPP;
}
static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
return -EOPNOTSUPP;
}
static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
{
return -1;
}
static inline s8
ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
return -1;
}
static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline void
ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_PTP_H_ */
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021, Intel Corporation. */
#ifndef _ICE_PTP_HW_H_
#define _ICE_PTP_HW_H_
enum ice_ptp_tmr_cmd {
INIT_TIME,
INIT_INCVAL,
ADJ_TIME,
ADJ_TIME_AT_TIME,
READ_TIME
};
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based on a PLL with an 812.5 MHz frequency: the clock
* period is 1 / 812.5 MHz = 16/13 ns, and the value below is that period in
* 32.32 fixed point, i.e. round((16/13) * 2^32) = 0x13B13B13B.
*/
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
bool ice_ptp_lock(struct ice_hw *hw);
void ice_ptp_unlock(struct ice_hw *hw);
int ice_ptp_init_time(struct ice_hw *hw, u64 time);
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
/* PHY timer commands */
#define SEL_CPK_SRC 8
/* Time Sync command Definitions */
#define GLTSYN_CMD_INIT_TIME BIT(0)
#define GLTSYN_CMD_INIT_INCVAL BIT(1)
#define GLTSYN_CMD_ADJ_TIME BIT(2)
#define GLTSYN_CMD_ADJ_INIT_TIME (BIT(2) | BIT(3))
#define GLTSYN_CMD_READ_TIME BIT(7)
#define TS_CMD_MASK_E810 0xFF
#define SYNC_EXEC_CMD 0x3
/* E810 timesync enable register */
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
/* E810 shadow init time registers */
#define ETH_GLTSYN_SHTIME_0(i) (0x03000368 + ((i) * 32))
#define ETH_GLTSYN_SHTIME_L(i) (0x0300036C + ((i) * 32))
/* E810 shadow time adjust registers */
#define ETH_GLTSYN_SHADJ_L(_i) (0x03000378 + ((_i) * 32))
#define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32))
/* E810 timer command register */
#define ETH_GLTSYN_CMD 0x03000344
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
/* Timestamp block macros */
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_S 32
#define BYTES_PER_IDX_ADDR_L_U 8
/* External PHY timestamp address */
#define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) + \
((idx) * BYTES_PER_IDX_ADDR_L_U))
#define LOW_TX_MEMORY_BANK_START 0x03090000
#define HIGH_TX_MEMORY_BANK_START 0x03090004
#endif /* _ICE_PTP_HW_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021, Intel Corporation. */
#ifndef _ICE_SBQ_CMD_H_
#define _ICE_SBQ_CMD_H_
/* This header file defines the Sideband Queue commands, error codes and
* descriptor format. It is shared between Firmware and Software.
*/
/* Sideband Queue command structure and opcodes */
enum ice_sbq_opc {
/* Sideband Queue commands */
ice_sbq_opc_neigh_dev_req = 0x0C00,
ice_sbq_opc_neigh_dev_ev = 0x0C01
};
/* Sideband Queue descriptor. Indirect command
* and non-posted
*/
struct ice_sbq_cmd_desc {
__le16 flags;
__le16 opcode;
__le16 datalen;
__le16 cmd_retval;
/* Opaque message data */
__le32 cookie_high;
__le32 cookie_low;
union {
__le16 cmd_len;
__le16 cmpl_len;
} param0;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
struct ice_sbq_evt_desc {
__le16 flags;
__le16 opcode;
__le16 datalen;
__le16 cmd_retval;
u8 data[24];
};
enum ice_sbq_msg_dev {
rmn_0 = 0x02,
rmn_1 = 0x03,
rmn_2 = 0x04,
cgu = 0x06
};
enum ice_sbq_msg_opcode {
ice_sbq_msg_rd = 0x00,
ice_sbq_msg_wr = 0x01
};
#define ICE_SBQ_MSG_FLAGS 0x40
#define ICE_SBQ_MSG_SBE_FBE 0x0F
struct ice_sbq_msg_req {
u8 dest_dev;
u8 src_dev;
u8 opcode;
u8 flags;
u8 sbe_fbe;
u8 func_id;
__le16 msg_addr_low;
__le32 msg_addr_high;
__le32 data;
};
struct ice_sbq_msg_cmpl {
u8 dest_dev;
u8 src_dev;
u8 opcode;
u8 flags;
__le32 data;
};
/* Internal struct */
struct ice_sbq_msg_input {
u8 dest_dev;
u8 opcode;
u16 msg_addr_low;
u32 msg_addr_high;
u32 data;
};
#endif /* _ICE_SBQ_CMD_H_ */
......@@ -2136,6 +2136,41 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
return count != ICE_MAX_BUF_TXD;
}
/**
* ice_tstamp - set up context descriptor for hardware timestamp
* @tx_ring: pointer to the Tx ring to send buffer on
* @skb: pointer to the SKB we're sending
* @first: Tx buffer
* @off: Tx offload parameters
*/
static void
ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
s8 idx;
/* only timestamp the outbound packet if the user has requested it */
if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
return;
if (!tx_ring->ptp_tx)
return;
/* Tx timestamps cannot be sampled when doing TSO */
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return;
/* Grab an open timestamp slot */
idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
if (idx < 0)
return;
off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
first->tx_flags |= ICE_TX_FLAGS_TSYN;
}
/**
* ice_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
......@@ -2205,6 +2240,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
u16 i = tx_ring->next_to_use;
......
......@@ -118,6 +118,7 @@ static inline int ice_skb_pad(void)
* freed instead of returned like skb packets.
*/
#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
#define ICE_TX_FLAGS_TSYN BIT(4)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
......@@ -311,6 +312,10 @@ struct ice_ring {
u32 txq_teid; /* Added Tx queue TEID */
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
struct ice_ptp_tx *tx_tstamps;
u64 cached_phctime;
u8 ptp_rx:1;
u8 ptp_tx:1;
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
......
......@@ -175,6 +175,9 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
if (rx_ring->ptp_rx)
ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}
/**
......
......@@ -14,6 +14,7 @@
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
......@@ -48,6 +49,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_RDMA BIT_ULL(15)
#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_PTP BIT_ULL(19)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
#define ICE_DBG_AQ_DESC BIT_ULL(25)
#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
......@@ -264,6 +266,7 @@ struct ice_hw_common_caps {
u8 rss_table_entry_width; /* RSS Entry width in bits */
u8 dcb;
u8 ieee_1588;
u8 rdma;
bool nvm_update_pending_nvm;
......@@ -276,6 +279,54 @@ struct ice_hw_common_caps {
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
};
/* IEEE 1588 TIME_SYNC specific info */
/* Function specific definitions */
#define ICE_TS_FUNC_ENA_M BIT(0)
#define ICE_TS_SRC_TMR_OWND_M BIT(1)
#define ICE_TS_TMR_ENA_M BIT(2)
#define ICE_TS_TMR_IDX_OWND_S 4
#define ICE_TS_TMR_IDX_OWND_M BIT(4)
#define ICE_TS_CLK_FREQ_S 16
#define ICE_TS_CLK_FREQ_M ICE_M(0x7, ICE_TS_CLK_FREQ_S)
#define ICE_TS_CLK_SRC_S 20
#define ICE_TS_CLK_SRC_M BIT(20)
#define ICE_TS_TMR_IDX_ASSOC_S 24
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
struct ice_ts_func_info {
/* Function specific info */
u32 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
u8 ena;
u8 tmr_index_owned;
u8 src_tmr_owned;
u8 tmr_ena;
};
/* Device specific definitions */
#define ICE_TS_TMR0_OWNR_M 0x7
#define ICE_TS_TMR0_OWND_M BIT(3)
#define ICE_TS_TMR1_OWNR_S 4
#define ICE_TS_TMR1_OWNR_M ICE_M(0x7, ICE_TS_TMR1_OWNR_S)
#define ICE_TS_TMR1_OWND_M BIT(7)
#define ICE_TS_DEV_ENA_M BIT(24)
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
struct ice_ts_dev_info {
/* Device specific info */
u32 ena_ports;
u32 tmr_own_map;
u32 tmr0_owner;
u32 tmr1_owner;
u8 tmr0_owned;
u8 tmr1_owned;
u8 ena;
u8 tmr0_ena;
u8 tmr1_ena;
};
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
......@@ -284,6 +335,7 @@ struct ice_hw_func_caps {
u32 guar_num_vsi;
u32 fd_fltr_guar; /* Number of filters guaranteed */
u32 fd_fltr_best_effort; /* Number of best effort filters */
struct ice_ts_func_info ts_func_info;
};
/* Device wide capabilities */
......@@ -292,6 +344,7 @@ struct ice_hw_dev_caps {
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
u32 num_flow_director_fltr; /* Number of FD filters available */
struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
};
......@@ -754,6 +807,7 @@ struct ice_hw {
/* Control Queue info */
struct ice_ctl_q_info adminq;
struct ice_ctl_q_info sbq;
struct ice_ctl_q_info mailboxq;
u8 api_branch; /* API branch version */
......@@ -789,6 +843,14 @@ struct ice_hw {
u8 ucast_shared; /* true if VSIs can share unicast addr */
#define ICE_PHY_PER_NAC 1
#define ICE_MAX_QUAD 2
#define ICE_NUM_QUAD_TYPE 2
#define ICE_PORTS_PER_QUAD 4
#define ICE_PHY_0_LAST_QUAD 1
#define ICE_PORTS_PER_PHY 8
#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
u32 active_track_id;
......
......@@ -71,6 +71,18 @@
*/
#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
/**
* upper_16_bits - return bits 16-31 of a number
* @n: the number we're accessing
*/
#define upper_16_bits(n) ((u16)((n) >> 16))
/**
* lower_16_bits - return bits 0-15 of a number
* @n: the number we're accessing
*/
#define lower_16_bits(n) ((u16)((n) & 0xffff))
struct completion;
struct pt_regs;
struct user;
......