Commit d75b4c7d authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-05-06

This series contains updates to ice driver only.

Ivan Vecera fixes a race with aux plug/unplug by delaying setting adev
until initialization is complete and adding locking.

Anatolii ensures VF queues are completely disabled before attempting to
reconfigure them.

Michal ensures stale Tx timestamps are cleared from hardware.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: fix PTP stale Tx timestamps cleanup
  ice: clear stale Tx queue settings before configuring
  ice: Fix race during aux device (un)plugging
====================

Link: https://lore.kernel.org/r/20220506174129.4976-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 91a7cda1 a11b6c1a
@@ -540,6 +540,7 @@ struct ice_pf {
 	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
 	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
 	struct mutex tc_mutex;		/* lock to protect TC changes */
+	struct mutex adev_mutex;	/* lock to protect aux device access */
 	u32 msg_enable;
 	struct ice_ptp ptp;
 	struct tty_driver *ice_gnss_tty_driver;
...
@@ -37,14 +37,17 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
 	if (WARN_ON_ONCE(!in_task()))
 		return;
 
+	mutex_lock(&pf->adev_mutex);
 	if (!pf->adev)
-		return;
+		goto finish;
 
 	device_lock(&pf->adev->dev);
 	iadrv = ice_get_auxiliary_drv(pf);
 	if (iadrv && iadrv->event_handler)
 		iadrv->event_handler(pf, event);
 	device_unlock(&pf->adev->dev);
+finish:
+	mutex_unlock(&pf->adev_mutex);
 }
 
 /**
@@ -290,7 +293,6 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 		return -ENOMEM;
 
 	adev = &iadev->adev;
-	pf->adev = adev;
 	iadev->pf = pf;
 	adev->id = pf->aux_idx;
@@ -300,18 +302,20 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 	ret = auxiliary_device_init(adev);
 	if (ret) {
-		pf->adev = NULL;
 		kfree(iadev);
 		return ret;
 	}
 
 	ret = auxiliary_device_add(adev);
 	if (ret) {
-		pf->adev = NULL;
 		auxiliary_device_uninit(adev);
 		return ret;
 	}
 
+	mutex_lock(&pf->adev_mutex);
+	pf->adev = adev;
+	mutex_unlock(&pf->adev_mutex);
+
 	return 0;
 }
@@ -320,12 +324,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
  */
 void ice_unplug_aux_dev(struct ice_pf *pf)
 {
-	if (!pf->adev)
-		return;
+	struct auxiliary_device *adev;
 
-	auxiliary_device_delete(pf->adev);
-	auxiliary_device_uninit(pf->adev);
-	pf->adev = NULL;
+	mutex_lock(&pf->adev_mutex);
+	adev = pf->adev;
+	pf->adev = NULL;
+	mutex_unlock(&pf->adev_mutex);
+
+	if (adev) {
+		auxiliary_device_delete(adev);
+		auxiliary_device_uninit(adev);
+	}
 }
 
 /**
...
@@ -3769,6 +3769,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
 static void ice_deinit_pf(struct ice_pf *pf)
 {
 	ice_service_task_stop(pf);
+	mutex_destroy(&pf->adev_mutex);
 	mutex_destroy(&pf->sw_mutex);
 	mutex_destroy(&pf->tc_mutex);
 	mutex_destroy(&pf->avail_q_mutex);
@@ -3847,6 +3848,7 @@ static int ice_init_pf(struct ice_pf *pf)
 	mutex_init(&pf->sw_mutex);
 	mutex_init(&pf->tc_mutex);
+	mutex_init(&pf->adev_mutex);
 
 	INIT_HLIST_HEAD(&pf->aq_wait_list);
 	spin_lock_init(&pf->aq_wait_lock);
...
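The aux-device race fix above (Ivan Vecera's patch) boils down to a common pattern: finish initializing an object before publishing its pointer, guard the published pointer with a mutex, and on teardown claim the pointer under the lock but destroy the object outside it. Below is a minimal userspace sketch of that pattern using pthreads; the names (struct dev, plug, unplug, notify) are hypothetical stand-ins, not driver API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev { int id; };

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct dev *published;	/* published pointer, guarded by dev_mutex */

static int plug(int id)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		return -1;
	d->id = id;			/* finish all initialization first */

	pthread_mutex_lock(&dev_mutex);
	published = d;			/* publish only a fully set-up object */
	pthread_mutex_unlock(&dev_mutex);
	return 0;
}

static void unplug(void)
{
	struct dev *d;

	pthread_mutex_lock(&dev_mutex);
	d = published;			/* claim the pointer under the lock... */
	published = NULL;
	pthread_mutex_unlock(&dev_mutex);

	free(d);			/* ...tear down outside it; free(NULL) is a no-op */
}

static void notify(void)
{
	pthread_mutex_lock(&dev_mutex);
	if (published)			/* cannot be unplugged while we hold the lock */
		printf("event for dev %d\n", published->id);
	pthread_mutex_unlock(&dev_mutex);
}

int main(void)
{
	plug(1);
	notify();	/* prints */
	unplug();
	notify();	/* no output: pointer already cleared */
	return 0;
}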
@@ -2287,6 +2287,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
 /**
  * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @hw: pointer to the hw struct
  * @tx: PTP Tx tracker to clean up
  *
  * Loop through the Tx timestamp requests and see if any of them have been
@@ -2295,7 +2296,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
  * timestamp will never be captured. This might happen if the packet gets
  * discarded before it reaches the PHY timestamping block.
  */
-static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
 {
 	u8 idx;
@@ -2304,11 +2305,16 @@ static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
 	for_each_set_bit(idx, tx->in_use, tx->len) {
 		struct sk_buff *skb;
+		u64 raw_tstamp;
 
 		/* Check if this SKB has been waiting for too long */
 		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
 			continue;
 
+		/* Read tstamp to be able to use this register again */
+		ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+				    &raw_tstamp);
+
 		spin_lock(&tx->lock);
 		skb = tx->tstamps[idx].skb;
 		tx->tstamps[idx].skb = NULL;
@@ -2330,7 +2336,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
 	ice_ptp_update_cached_phctime(pf);
 
-	ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+	ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
 
 	/* Run twice a second */
 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
...
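Michal's fix above relies on the hardware property spelled out in the new comment: a Tx timestamp register only becomes usable again after it has been read, so a stale request must have its register read back before the tracker slot is recycled; dropping only the skb would leave the slot dead. A toy userspace sketch of that cleanup idea, with hypothetical stand-ins (struct tracker, read_hw_tstamp) for the driver's structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define SLOTS 8

struct tracker {
	bool in_use[SLOTS];	/* which hardware slots have a pending request */
	time_t start[SLOTS];	/* when each timestamp request was issued */
};

/* stand-in for ice_read_phy_tstamp(): the read itself re-arms the slot */
static uint64_t read_hw_tstamp(int idx)
{
	printf("slot %d read back, register free for reuse\n", idx);
	return 0;
}

static void tstamp_cleanup(struct tracker *tx)
{
	time_t now = time(NULL);

	for (int idx = 0; idx < SLOTS; idx++) {
		if (!tx->in_use[idx])
			continue;
		/* keep waiting on requests younger than ~2 seconds */
		if (now - tx->start[idx] < 2)
			continue;
		/* consume the stale register, then release our bookkeeping */
		read_hw_tstamp(idx);
		tx->in_use[idx] = false;
	}
}

int main(void)
{
	struct tracker tx = { 0 };

	tx.in_use[3] = true;
	tx.start[3] = time(NULL) - 5;	/* pretend slot 3 went stale */
	tstamp_cleanup(&tx);
	return 0;
}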
@@ -1307,13 +1307,52 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
 			     NULL, 0);
 }
 
+/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+static int
+ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+	struct ice_txq_meta txq_meta = { 0 };
+	struct ice_tx_ring *ring;
+	int err;
+
+	if (!test_bit(q_id, vf->txq_ena))
+		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+			q_id, vsi->vsi_num);
+
+	ring = vsi->tx_rings[q_id];
+	if (!ring)
+		return -EINVAL;
+
+	ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+	if (err) {
+		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+			q_id, vsi->vsi_num);
+		return err;
+	}
+
+	/* Clear enabled queues flag */
+	clear_bit(q_id, vf->txq_ena);
+
+	return 0;
+}
+
 /**
  * ice_vc_dis_qs_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  *
- * called from the VF to disable all or specific
- * queue(s)
+ * called from the VF to disable all or specific queue(s)
  */
 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 {
@@ -1350,30 +1389,15 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 		q_map = vqs->tx_queues;
 
 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-			struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
-			struct ice_txq_meta txq_meta = { 0 };
-
 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 				goto error_param;
 			}
 
-			if (!test_bit(vf_q_id, vf->txq_ena))
-				dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
-					vf_q_id, vsi->vsi_num);
-
-			ice_fill_txq_meta(vsi, ring, &txq_meta);
-
-			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
-						 ring, &txq_meta)) {
-				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
-					vf_q_id, vsi->vsi_num);
+			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 				goto error_param;
 			}
-
-			/* Clear enabled queues flag */
-			clear_bit(vf_q_id, vf->txq_ena);
 		}
 	}
@@ -1622,6 +1646,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		if (qpi->txq.ring_len > 0) {
 			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
 			vsi->tx_rings[i]->count = qpi->txq.ring_len;
 
+			/* Disable any existing queue first */
+			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
+				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				goto error_param;
+			}
+
+			/* Configure a queue with the requested settings */
 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 				goto error_param;
...
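Anatolii's change above factors Tx queue teardown into ice_vf_vsi_dis_single_txq() so the configure path can reuse it: a queue is always stopped before its ring is reprogrammed, and stopping an already-disabled queue is tolerated with only a debug message. A toy sketch of that disable-before-configure ordering, with hypothetical names (struct txq, queue_stop, queue_cfg):

#include <stdbool.h>
#include <stdio.h>

struct txq {
	bool enabled;
	int ring_len;
};

/* single teardown helper, shared by the disable and configure paths */
static int queue_stop(struct txq *q)
{
	if (!q->enabled)
		printf("queue not enabled, stopping it anyway\n");
	/* in the driver this is where ice_vsi_stop_tx_ring() could fail */
	q->enabled = false;
	return 0;
}

static int queue_cfg(struct txq *q, int ring_len)
{
	/* never reprogram a live queue: clear stale settings first */
	if (queue_stop(q))
		return -1;
	q->ring_len = ring_len;
	q->enabled = true;
	return 0;
}

int main(void)
{
	struct txq q = { .enabled = true, .ring_len = 256 };

	queue_cfg(&q, 512);	/* reconfigure: stop, then program new length */
	return 0;
}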