Commit 4f3ebb04 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Fixes 2018-10-24

This series contains fixes for the ice driver.

Anirudh fixes a namespace issue introduced by a previous patch that
removed ice_netpoll. He also fixes the device ID define names to align
with the branding string names, uses the capability count returned by
the firmware instead of calculating it, and introduces driver
workarounds for current firmware limitations. He fixes the queue
mapping for a VF, which needs to be set in both the contiguous and
scatter queue modes. Finally, the driver is set up to handle link
status events (LSE), but the firmware does not support this feature
yet, so he adds the ability to poll for link status changes while we
wait for updated firmware.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4ed591c8 4f4be03b
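The final fix replaces ARQ-driven link event handling with polling from the
service task. Below is a minimal, standalone C sketch of that pattern
(rate-limit the poll, query link state, act only on transitions); every name
in it is illustrative, not taken from the ice driver:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for an admin queue Get Link Status (0x607) query. */
static bool query_link_up(void)
{
	static int tick;

	return ((++tick) / 3) % 2;	/* flips every third poll */
}

int main(void)
{
	const time_t period = 1;	/* like pf->serv_tmr_period */
	time_t prev = 0;		/* like pf->serv_tmr_prev */
	bool last_up = false;

	for (int i = 0; i < 10; i++) {
		sleep(1);		/* service task wakes up */

		/* make sure we don't poll too often */
		if (time(NULL) < prev + period)
			continue;
		prev = time(NULL);

		bool up = query_link_up();
		if (up != last_up) {	/* report transitions only */
			printf("NIC Link is %s\n", up ? "Up" : "Down");
			last_up = up;
		}
	}
	return 0;
}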
@@ -42,6 +42,23 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
return 0;
}
/**
* ice_dev_onetime_setup - Temporary HW/FW workarounds
* @hw: pointer to the HW structure
*
* This function provides temporary workarounds for certain issues
* that are expected to be fixed in the HW/FW.
*/
void ice_dev_onetime_setup(struct ice_hw *hw)
{
/* configure Rx - set non pxe mode */
wr32(hw, GLLAN_RCTL_0, 0x1);
#define MBX_PF_VT_PFALLOC 0x00231E80
/* set VFs per PF */
wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
}
/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
@@ -218,7 +235,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
enum ice_status
static enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -740,6 +757,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
ice_dev_onetime_setup(hw);
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
@@ -1531,9 +1550,7 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
if (!status)
ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
*cap_count =
DIV_ROUND_UP(le16_to_cpu(desc.datalen),
sizeof(struct ice_aqc_list_caps_elem));
*cap_count = le32_to_cpu(cmd->count);
return status;
}
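The hunk above stops deriving the capability count from desc.datalen and
instead uses the count the firmware reports in its reply. A standalone sketch
of why the reported count is the safer retry-buffer size (the datalen-derived
value can be zero when the buffer was too small); every name below is
illustrative, not the driver's actual discover-caps path:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct caps_elem { uint64_t data[4]; };	/* stand-in capability element */

struct caps_reply {
	int err;		/* 0 = ok, -1 = ENOMEM-style failure */
	uint32_t count;		/* element count reported by "firmware" */
	uint16_t datalen;	/* bytes written; zero when buf was too small */
};

/* Pretend firmware that needs room for 7 elements. */
static struct caps_reply fw_discover_caps(void *buf, size_t buf_size)
{
	const uint32_t need = 7;
	struct caps_reply r = { .err = 0, .count = need, .datalen = 0 };

	if (!buf || buf_size < need * sizeof(struct caps_elem))
		r.err = -1;
	return r;
}

int main(void)
{
	struct caps_reply r = fw_discover_caps(NULL, 0);

	if (r.err) {
		/* Old sizing: DIV_ROUND_UP(datalen, elem size), 0 here. */
		uint32_t derived = (r.datalen + sizeof(struct caps_elem) - 1) /
				   sizeof(struct caps_elem);
		/* New sizing: trust the firmware-reported count. */
		uint32_t reported = r.count;

		printf("derived=%u reported=%u\n", derived, reported);

		void *buf = calloc(reported, sizeof(struct caps_elem));
		r = fw_discover_caps(buf, reported * sizeof(struct caps_elem));
		printf("retry with reported count %s\n",
		       r.err ? "failed" : "succeeded");
		free(buf);
	}
	return 0;
}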
@@ -1987,33 +2004,6 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
/**
* ice_aq_set_event_mask
* @hw: pointer to the hw struct
* @port_num: port number of the physical function
* @mask: event mask to be set
* @cd: pointer to command details structure or NULL
*
* Set event mask (0x0613)
*/
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
struct ice_aqc_set_event_mask *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.set_event_mask;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
cmd->lport_num = port_num;
cmd->event_mask = cpu_to_le16(mask);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
@@ -34,6 +34,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_dev_onetime_setup(struct ice_hw *hw);
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
@@ -83,12 +86,6 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cmd_details);
@@ -19,11 +19,10 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
*
*/
#define EXP_FW_API_VER_BRANCH 0x00
#define EXP_FW_API_VER_MAJOR 0x00
#define EXP_FW_API_VER_MINOR 0x01
#define EXP_FW_API_VER_MAJOR 0x01
#define EXP_FW_API_VER_MINOR 0x03
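The comment above defers to ice_aq_ver_check in ice_controlq.c for how these
expected-version defines are consumed. As a hedged illustration only (an
assumption about the usual shape of such a gate, not a copy of
ice_aq_ver_check), a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

#define EXP_FW_API_VER_MAJOR 0x01
#define EXP_FW_API_VER_MINOR 0x03

/* Sketch: reject a different major API version, tolerate (but warn
 * about) a newer minor version.
 */
static bool api_ver_ok(unsigned int fw_major, unsigned int fw_minor)
{
	if (fw_major != EXP_FW_API_VER_MAJOR)
		return false;			/* hard mismatch */
	if (fw_minor > EXP_FW_API_VER_MINOR)
		fprintf(stderr, "FW API minor newer than expected\n");
	return true;
}

int main(void)
{
	printf("1.3 -> %d\n", api_ver_ok(1, 3));	/* 1: exact match */
	printf("1.4 -> %d\n", api_ver_ok(1, 4));	/* 1, with warning */
	printf("2.0 -> %d\n", api_ver_ok(2, 0));	/* 0: major mismatch */
	return 0;
}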
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
@@ -6,10 +6,10 @@
/* Device IDs */
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_C810_BACKPLANE 0x1591
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_C810_QSFP 0x1592
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_C810_SFP 0x1593
#define ICE_DEV_ID_E810C_SFP 0x1593
#endif /* _ICE_DEVIDS_H_ */
@@ -157,6 +157,13 @@
#define VPINT_ALLOC_LAST_S 12
#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12)
#define VPINT_ALLOC_VALID_M BIT(31)
#define VPINT_ALLOC_PCI(_VF) (0x0009D000 + ((_VF) * 4))
#define VPINT_ALLOC_PCI_FIRST_S 0
#define VPINT_ALLOC_PCI_FIRST_M ICE_M(0x7FF, 0)
#define VPINT_ALLOC_PCI_LAST_S 12
#define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12)
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
#define QRX_CTRL_MAX_INDEX 2047
@@ -320,6 +327,7 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PF_VT_PFALLOC_HIF 0x0009DD80
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
@@ -433,7 +433,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
* @irq: interrupt number
* @data: pointer to a q_vector
*/
irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
@@ -2529,6 +2529,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->hw_base_vector = 0;
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi, false);
ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -73,5 +73,4 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data);
#endif /* !_ICE_LIB_H_ */
@@ -456,35 +456,6 @@ static void ice_reset_subtask(struct ice_pf *pf)
}
}
/**
* ice_watchdog_subtask - periodic tasks not using event driven scheduling
* @pf: board private structure
*/
static void ice_watchdog_subtask(struct ice_pf *pf)
{
int i;
/* if interface is down do nothing */
if (test_bit(__ICE_DOWN, pf->state) ||
test_bit(__ICE_CFG_BUSY, pf->state))
return;
/* make sure we don't do these things too often */
if (time_before(jiffies,
pf->serv_tmr_prev + pf->serv_tmr_period))
return;
pf->serv_tmr_prev = jiffies;
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
ice_update_pf_stats(pf);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->netdev)
ice_update_vsi_stats(pf->vsi[i]);
}
/**
* ice_print_link_msg - print link up or down message
* @vsi: the VSI whose link status is being queried
@@ -554,36 +525,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
speed, fc);
}
/**
* ice_init_link_events - enable/initialize link events
* @pi: pointer to the port_info instance
*
* Returns -EIO on failure, 0 on success
*/
static int ice_init_link_events(struct ice_port_info *pi)
{
u16 mask;
mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
dev_dbg(ice_hw_to_dev(pi->hw),
"Failed to set link event mask for port %d\n",
pi->lport);
return -EIO;
}
if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
dev_dbg(ice_hw_to_dev(pi->hw),
"Failed to enable link events for port %d\n",
pi->lport);
return -EIO;
}
return 0;
}
/**
* ice_vsi_link_event - update the vsi's netdev
* @vsi: the vsi on which the link event occurred
@@ -671,27 +612,35 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
* ice_handle_link_event - handle link event via ARQ
* @pf: pf that the link event is associated with
*
* Return -EINVAL if port_info is null
* Return status on success
* ice_watchdog_subtask - periodic tasks not using event driven scheduling
* @pf: board private structure
*/
static int ice_handle_link_event(struct ice_pf *pf)
static void ice_watchdog_subtask(struct ice_pf *pf)
{
struct ice_port_info *port_info;
int status;
int i;
port_info = pf->hw.port_info;
if (!port_info)
return -EINVAL;
/* if interface is down do nothing */
if (test_bit(__ICE_DOWN, pf->state) ||
test_bit(__ICE_CFG_BUSY, pf->state))
return;
status = ice_link_event(pf, port_info);
if (status)
dev_dbg(&pf->pdev->dev,
"Could not process link event, error %d\n", status);
/* make sure we don't do these things too often */
if (time_before(jiffies,
pf->serv_tmr_prev + pf->serv_tmr_period))
return;
return status;
pf->serv_tmr_prev = jiffies;
if (ice_link_event(pf, pf->hw.port_info))
dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
ice_update_pf_stats(pf);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->netdev)
ice_update_vsi_stats(pf->vsi[i]);
}
/**
@@ -797,11 +746,6 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
switch (opcode) {
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf))
dev_err(&pf->pdev->dev,
"Could not handle link event\n");
break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
@@ -2207,12 +2151,6 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
goto err_alloc_sw_unroll;
}
return 0;
err_alloc_sw_unroll:
@@ -2271,9 +2209,9 @@ static void ice_remove(struct pci_dev *pdev)
* Class, Class Mask, private data (not used) }
*/
static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
/* required last entry */
{ 0, }
};
@@ -173,6 +173,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
vsi = pf->vsi[vf->lan_vsi_idx];
wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
first = vf->first_vector_idx;
last = first + pf->num_vf_msix - 1;
@@ -519,6 +520,10 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
VPINT_ALLOC_VALID_M);
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
VPINT_ALLOC_PCI_VALID_M);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
/* map the interrupts to its functions */
for (v = first; v <= last; v++) {
reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
@@ -528,10 +533,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
/* set regardless of mapping mode */
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
/* VF Tx queues allocation */
if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id),
VPLAN_TXQ_MAPENA_TX_ENA_M);
/* set the VF PF Tx queue range
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
@@ -546,10 +552,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
"Scattered mode for VF Tx queues is not yet implemented\n");
}
/* set regardless of mapping mode */
wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
/* VF Rx queues allocation */
if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id),
VPLAN_RXQ_MAPENA_RX_ENA_M);
/* set the VF PF Rx queue range
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
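The new VPINT_ALLOC_PCI write in ice_ena_vf_mappings above packs a first/last
vector range and a valid bit into a single register. A standalone sketch of
that packing, assuming ICE_M(m, s) expands to ((m) << (s)) (consistent with
the mask definitions in the register hunk); the vector numbers are made up:

#include <stdint.h>
#include <stdio.h>

#define ICE_M(m, s)		((uint32_t)(m) << (s))
#define VPINT_ALLOC_PCI_FIRST_S	0
#define VPINT_ALLOC_PCI_FIRST_M	ICE_M(0x7FF, 0)
#define VPINT_ALLOC_PCI_LAST_S	12
#define VPINT_ALLOC_PCI_LAST_M	ICE_M(0x7FF, 12)
#define VPINT_ALLOC_PCI_VALID_M	(1u << 31)

int main(void)
{
	uint32_t first = 5, last = 8;	/* example MSI-X range for one VF */
	uint32_t reg;

	reg = ((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
	      ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
	      VPINT_ALLOC_PCI_VALID_M;

	printf("VPINT_ALLOC_PCI value: 0x%08x\n", reg);	/* 0x80008005 */
	return 0;
}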