Commit ec72001d authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2018-09-27

This series contains fixes to the ice driver only.

Jake fixes a potential crash caused by attempting to access a mutex that has
already been destroyed.  The fix uses rq.count and sq.count to determine
whether a queue was ever initialized.  He also fixes the logic for checking
the firmware version so it properly handles situations where the firmware
major/minor versions or the branch version differ from what the driver
expects.
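
The count-guard idea is simple: teardown decisions key off a counter that is
non-zero only while the queue is live, so a mutex is never touched after
mutex_destroy().  A minimal user-space sketch of that pattern (ctl_q,
ctl_q_init and ctl_q_shutdown are illustrative names, not the driver's API,
and pthreads stand in for kernel mutexes):

#include <pthread.h>
#include <stdio.h>

struct ctl_q {
        pthread_mutex_t lock;
        unsigned int count;     /* non-zero only between init and shutdown */
};

static void ctl_q_init(struct ctl_q *q, unsigned int ndesc)
{
        pthread_mutex_init(&q->lock, NULL);
        q->count = ndesc;
}

static void ctl_q_shutdown(struct ctl_q *q)
{
        if (!q->count)          /* never initialized, or already shut down */
                return;
        q->count = 0;
        pthread_mutex_destroy(&q->lock);        /* safe: guarded by count */
}

int main(void)
{
        struct ctl_q q = { .count = 0 };

        ctl_q_shutdown(&q);     /* no-op instead of a crash */
        ctl_q_init(&q, 128);
        ctl_q_shutdown(&q);     /* real teardown */
        ctl_q_shutdown(&q);     /* a second call is harmless */
        printf("teardown keyed off count, not mutex state\n");
        return 0;
}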

Bruce replaces a memcpy() with a direct structure assignment, which is
preferred.  He also updates the branding strings and device IDs supported by
the driver, and fixes the "ethtool -G" command, which was always returning
EINVAL when changing the descriptor ring size.
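
On the memcpy() point: when source and destination have the same type, a
plain structure assignment is clearer and lets the compiler type-check the
copy while producing the same result.  A stand-alone sketch (struct
cmd_details here is a stand-in, not the driver's real layout):

#include <stdio.h>
#include <string.h>

struct cmd_details {
        void (*callback)(void);
        unsigned long long cookie;
};

static void fill_details(struct cmd_details *details,
                         const struct cmd_details *cd)
{
        if (cd)
                *details = *cd; /* type-checked copy, same effect as memcpy */
        else
                memset(details, 0, sizeof(*details));
}

int main(void)
{
        struct cmd_details src = { .cookie = 42 }, dst;

        fill_details(&dst, &src);
        printf("cookie copied: %llu\n", dst.cookie);
        return 0;
}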

Brett updates and clarifies code comments.

Anirudh updates the driver to query the firmware for transmit scheduler node
information before adding a node to the driver database, so the driver always
works with current information.  He also updates the "get capabilities"
command to retrieve both device and function capabilities.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5362700c f934bb9b
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -39,9 +39,9 @@
 extern const char ice_drv_ver[];
 #define ICE_BAR0 0
 #define ICE_DFLT_NUM_DESC 128
-#define ICE_MIN_NUM_DESC 8
-#define ICE_MAX_NUM_DESC 8160
 #define ICE_REQ_DESC_MULTIPLE 32
+#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
+#define ICE_MAX_NUM_DESC 8160
 #define ICE_DFLT_TRAFFIC_CLASS BIT(0)
 #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
 #define ICE_ETHTOOL_FWVER_LEN 32
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -736,6 +736,10 @@ struct ice_aqc_add_elem {
        struct ice_aqc_txsched_elem_data generic[1];
 };
 
+struct ice_aqc_get_elem {
+       struct ice_aqc_txsched_elem_data generic[1];
+};
+
 struct ice_aqc_get_topo_elem {
        struct ice_aqc_txsched_topo_grp_info_hdr hdr;
        struct ice_aqc_txsched_elem_data
@@ -1409,6 +1413,7 @@ enum ice_adminq_opc {
        /* transmit scheduler commands */
        ice_aqc_opc_get_dflt_topo = 0x0400,
        ice_aqc_opc_add_sched_elems = 0x0401,
+       ice_aqc_opc_get_sched_elems = 0x0404,
        ice_aqc_opc_suspend_sched_elems = 0x0409,
        ice_aqc_opc_resume_sched_elems = 0x040A,
        ice_aqc_opc_delete_sched_elems = 0x040F,
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1451,7 +1451,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
  * @hw: pointer to the hw struct
  * @buf: a virtual buffer to hold the capabilities
  * @buf_size: Size of the virtual buffer
- * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
+ * @cap_count: cap count needed if AQ err==ENOMEM
  * @opc: capabilities type to discover - pass in the command opcode
  * @cd: pointer to command details structure or NULL
  *
@@ -1459,7 +1459,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
  * the firmware.
  */
 static enum ice_status
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
                     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
 {
        struct ice_aqc_list_caps *cmd;
@@ -1477,58 +1477,76 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
        status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
        if (!status)
                ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
-       *data_size = le16_to_cpu(desc.datalen);
+       else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+               *cap_count =
+                       DIV_ROUND_UP(le16_to_cpu(desc.datalen),
+                                    sizeof(struct ice_aqc_list_caps_elem));
 
        return status;
 }
 
 /**
- * ice_get_caps - get info about the HW
+ * ice_discover_caps - get info about the HW
  * @hw: pointer to the hardware structure
+ * @opc: capabilities type to discover - pass in the command opcode
  */
-enum ice_status ice_get_caps(struct ice_hw *hw)
+static enum ice_status ice_discover_caps(struct ice_hw *hw,
+                                        enum ice_adminq_opc opc)
 {
        enum ice_status status;
-       u16 data_size = 0;
+       u32 cap_count;
        u16 cbuf_len;
        u8 retries;
 
        /* The driver doesn't know how many capabilities the device will return
         * so the buffer size required isn't known ahead of time. The driver
         * starts with cbuf_len and if this turns out to be insufficient, the
-        * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
-        * The driver then allocates the buffer of this size and retries the
-        * operation. So it follows that the retry count is 2.
+        * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
+        * The driver then allocates the buffer based on the count and retries
+        * the operation. So it follows that the retry count is 2.
         */
 #define ICE_GET_CAP_BUF_COUNT 40
 #define ICE_GET_CAP_RETRY_COUNT 2
 
-       cbuf_len = ICE_GET_CAP_BUF_COUNT *
-               sizeof(struct ice_aqc_list_caps_elem);
-
+       cap_count = ICE_GET_CAP_BUF_COUNT;
        retries = ICE_GET_CAP_RETRY_COUNT;
 
        do {
                void *cbuf;
 
+               cbuf_len = (u16)(cap_count *
+                                sizeof(struct ice_aqc_list_caps_elem));
                cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
                if (!cbuf)
                        return ICE_ERR_NO_MEMORY;
 
-               status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
-                                             ice_aqc_opc_list_func_caps, NULL);
+               status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
+                                             opc, NULL);
                devm_kfree(ice_hw_to_dev(hw), cbuf);
 
                if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
                        break;
-
-               /* If ENOMEM is returned, try again with bigger buffer */
-               cbuf_len = data_size;
        } while (--retries);
 
        return status;
 }
 
+/**
+ * ice_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_caps(struct ice_hw *hw)
+{
+       enum ice_status status;
+
+       status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
+       if (!status)
+               status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+
+       return status;
+}
+
 /**
  * ice_aq_manage_mac_write - manage MAC address write command
  * @hw: pointer to the hw struct
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -518,22 +518,31 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 
 /**
  * ice_aq_ver_check - Check the reported AQ API version.
- * @fw_branch: The "branch" of FW, typically describes the device type
- * @fw_major: The major version of the FW API
- * @fw_minor: The minor version increment of the FW API
+ * @hw: pointer to the hardware structure
  *
  * Checks if the driver should load on a given AQ API version.
  *
  * Return: 'true' if the driver should attempt to load. 'false' otherwise.
  */
-static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
+static bool ice_aq_ver_check(struct ice_hw *hw)
 {
-       if (fw_branch != EXP_FW_API_VER_BRANCH)
-               return false;
-       if (fw_major != EXP_FW_API_VER_MAJOR)
-               return false;
-       if (fw_minor != EXP_FW_API_VER_MINOR)
+       if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+               /* Major API version is newer than expected, don't load */
+               dev_warn(ice_hw_to_dev(hw),
+                        "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
                return false;
+       } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
+               if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+                       dev_info(ice_hw_to_dev(hw),
+                                "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+               else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+                       dev_info(ice_hw_to_dev(hw),
+                                "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+       } else {
+               /* Major API version is older than expected, log a warning */
+               dev_info(ice_hw_to_dev(hw),
+                        "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+       }
        return true;
 }
@@ -588,8 +597,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
        if (status)
                goto init_ctrlq_free_rq;
 
-       if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
-                             hw->api_min_ver)) {
+       if (!ice_aq_ver_check(hw)) {
                status = ICE_ERR_FW_API_VER;
                goto init_ctrlq_free_rq;
        }
@@ -597,11 +605,11 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
        return 0;
 
 init_ctrlq_free_rq:
-       if (cq->rq.head) {
+       if (cq->rq.count) {
                ice_shutdown_rq(hw, cq);
                mutex_destroy(&cq->rq_lock);
        }
-       if (cq->sq.head) {
+       if (cq->sq.count) {
                ice_shutdown_sq(hw, cq);
                mutex_destroy(&cq->sq_lock);
        }
@@ -710,11 +718,11 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
                return;
        }
 
-       if (cq->sq.head) {
+       if (cq->sq.count) {
                ice_shutdown_sq(hw, cq);
                mutex_destroy(&cq->sq_lock);
        }
-       if (cq->rq.head) {
+       if (cq->rq.count) {
                ice_shutdown_rq(hw, cq);
                mutex_destroy(&cq->rq_lock);
        }
@@ -850,7 +858,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
        details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
        if (cd)
-               memcpy(details, cd, sizeof(*details));
+               *details = *cd;
        else
                memset(details, 0, sizeof(*details));
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,15 +5,11 @@
 #define _ICE_DEVIDS_H_
 
 /* Device IDs */
-/* Intel(R) Ethernet Controller C810 for backplane */
+/* Intel(R) Ethernet Controller E810-C for backplane */
 #define ICE_DEV_ID_C810_BACKPLANE 0x1591
-/* Intel(R) Ethernet Controller C810 for QSFP */
+/* Intel(R) Ethernet Controller E810-C for QSFP */
 #define ICE_DEV_ID_C810_QSFP 0x1592
-/* Intel(R) Ethernet Controller C810 for SFP */
+/* Intel(R) Ethernet Controller E810-C for SFP */
 #define ICE_DEV_ID_C810_SFP 0x1593
-/* Intel(R) Ethernet Controller C810/X557-AT 10GBASE-T */
-#define ICE_DEV_ID_C810_10G_BASE_T 0x1594
-/* Intel(R) Ethernet Controller C810 1GbE */
-#define ICE_DEV_ID_C810_SGMII 0x1595
 
 #endif /* _ICE_DEVIDS_H_ */
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1198,9 +1198,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
        ring->rx_max_pending = ICE_MAX_NUM_DESC;
        ring->tx_max_pending = ICE_MAX_NUM_DESC;
        ring->rx_pending = vsi->rx_rings[0]->count;
        ring->tx_pending = vsi->tx_rings[0]->count;
-       ring->rx_mini_pending = ICE_MIN_NUM_DESC;
+
+       /* Rx mini and jumbo rings are not supported */
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
+       ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
 }
@@ -1218,14 +1220,23 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
            ring->tx_pending < ICE_MIN_NUM_DESC ||
            ring->rx_pending > ICE_MAX_NUM_DESC ||
            ring->rx_pending < ICE_MIN_NUM_DESC) {
-               netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+               netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
                           ring->tx_pending, ring->rx_pending,
-                          ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
+                          ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
+                          ICE_REQ_DESC_MULTIPLE);
                return -EINVAL;
        }
 
        new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
+       if (new_tx_cnt != ring->tx_pending)
+               netdev_info(netdev,
+                           "Requested Tx descriptor count rounded up to %d\n",
+                           new_tx_cnt);
        new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
+       if (new_rx_cnt != ring->rx_pending)
+               netdev_info(netdev,
+                           "Requested Rx descriptor count rounded up to %d\n",
+                           new_rx_cnt);
 
        /* if nothing to do return success */
        if (new_tx_cnt == vsi->tx_rings[0]->count &&
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3777,8 +3777,6 @@ static const struct pci_device_id ice_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
-       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
-       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
        /* required last entry */
        { 0, }
 };
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -84,6 +84,62 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
        return NULL;
 }
 
+/**
+ * ice_aq_query_sched_elems - query scheduler elements
+ * @hw: pointer to the hw struct
+ * @elems_req: number of elements to query
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements returned
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduling elements (0x0404)
+ */
+static enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+                        struct ice_aqc_get_elem *buf, u16 buf_size,
+                        u16 *elems_ret, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_get_cfg_elem *cmd;
+       struct ice_aq_desc desc;
+       enum ice_status status;
+
+       cmd = &desc.params.get_update_elem;
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems);
+       cmd->num_elem_req = cpu_to_le16(elems_req);
+       desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+       status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+       if (!status && elems_ret)
+               *elems_ret = le16_to_cpu(cmd->num_elem_resp);
+
+       return status;
+}
+
+/**
+ * ice_sched_query_elem - query element information from hw
+ * @hw: pointer to the hw struct
+ * @node_teid: node teid to be queried
+ * @buf: buffer to element information
+ *
+ * This function queries HW element information
+ */
+static enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+                    struct ice_aqc_get_elem *buf)
+{
+       u16 buf_size, num_elem_ret = 0;
+       enum ice_status status;
+
+       buf_size = sizeof(*buf);
+       memset(buf, 0, buf_size);
+       buf->generic[0].node_teid = cpu_to_le32(node_teid);
+       status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
+                                         NULL);
+       if (status || num_elem_ret != 1)
+               ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
+       return status;
+}
+
 /**
  * ice_sched_add_node - Insert the Tx scheduler node in SW DB
  * @pi: port information structure
@@ -97,7 +153,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
                   struct ice_aqc_txsched_elem_data *info)
 {
        struct ice_sched_node *parent;
+       struct ice_aqc_get_elem elem;
        struct ice_sched_node *node;
+       enum ice_status status;
        struct ice_hw *hw;
 
        if (!pi)
@@ -115,6 +173,13 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
                return ICE_ERR_PARAM;
        }
 
+       /* query the current node information from FW before adding it
+        * to the SW DB
+        */
+       status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
+       if (status)
+               return status;
+
        node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
        if (!node)
                return ICE_ERR_NO_MEMORY;
@@ -133,7 +198,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
        node->parent = parent;
        node->tx_sched_layer = layer;
        parent->children[parent->num_children++] = node;
-       memcpy(&node->info, info, sizeof(*info));
+       memcpy(&node->info, &elem.generic[0], sizeof(node->info));
        return 0;
 }
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -140,7 +140,8 @@ struct ice_fltr_list_entry {
 /* This defines an entry in the list that maintains MAC or VLAN membership
  * to HW list mapping, since multiple VSIs can subscribe to the same MAC or
  * VLAN. As an optimization the VSI list should be created only when a
- * second VSI becomes a subscriber to the VLAN address.
+ * second VSI becomes a subscriber to the same MAC address. VSI lists are always
+ * used for VLAN membership.
  */
 struct ice_fltr_mgmt_list_entry {
        /* back pointer to VSI list id to VSI list mapping */