Commit 899dc833 authored by David S. Miller

Merge branch 'qed-ptp-enhancements'

Sudarsana Reddy Kalluru says:

====================
qed*: PTP enhancements.

The patch series contains a set of enhancements for the qed/qede PTP
implementation.
Patches (1)-(3) add a resource locking implementation that allows PTP
functionality only on the first detected Ethernet PF of a port; the
locking flow is sketched after the commit header. This change is
required because the adapter currently supports only one instance of a
PTP client per port.
Patch (4) removes the unneeded header file.
Patch (5) moves the PTT get/release logic into the PTP-specific code.

Please consider applying this series to the "net-next" branch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 26d31ac1 d179bd16
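For orientation, a minimal sketch of the per-port locking flow introduced by patches (1)-(3). It condenses the qed_ptp_res_lock() logic added in qed_ptp.c below; the wrapper name ptp_try_claim_port() is hypothetical, while qed_ptcdev_to_resc(), qed_mcp_resc_lock_default_init() and qed_mcp_resc_lock() are the helpers this series adds or uses. The real code additionally treats -EINVAL (an MFW without resource-lock support) as "the first PF on the port owns PTP".

static int ptp_try_claim_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params params;
	enum qed_resc_lock resource;
	int rc;

	/* Map the port behind this PF to one of the per-port PTP lock
	 * resources (QED_RESC_LOCK_PTP_PORT0..3).
	 */
	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	/* Request a permanent lock: no aging and no retries, since only
	 * one PF per port is ever expected to own PTP.
	 */
	qed_mcp_resc_lock_default_init(&params, NULL, resource, true);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
	if (!rc && !params.b_granted)
		return -EBUSY;	/* another PF already owns PTP on this port */

	return rc;
}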
@@ -474,6 +474,11 @@ struct qed_hwfn {
struct qed_ptt *p_main_ptt;
struct qed_ptt *p_dpc_ptt;
/* PTP will be used only by the leading function.
* Usage of all PTP APIs should be synchronized as a result.
*/
struct qed_ptt *p_ptp_ptt;
struct qed_sb_sp_info *p_sp_sb;
struct qed_sb_attn_info *p_sb_attn;
@@ -532,8 +537,6 @@ struct qed_hwfn {
struct qed_ptt *p_arfs_ptt;
/* p_ptp_ptt is valid for leading HWFN only */
struct qed_ptt *p_ptp_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -767,6 +770,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_device_num_engines(struct qed_dev *cdev);
int qed_device_get_port_id(struct qed_dev *cdev);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
......
@@ -2347,9 +2347,6 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
return 0;
}
#define QED_RESC_ALLOC_LOCK_RETRY_CNT 10
#define QED_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_resc_unlock_params resc_unlock_params;
@@ -2366,13 +2363,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
* needed, and proceed to the queries. Other failures, including a
* failure to acquire the lock, will cause this function to fail.
*/
memset(&resc_lock_params, 0, sizeof(resc_lock_params));
resc_lock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
resc_lock_params.retry_num = QED_RESC_ALLOC_LOCK_RETRY_CNT;
resc_lock_params.retry_interval = QED_RESC_ALLOC_LOCK_RETRY_INTVL_US;
resc_lock_params.sleep_b4_retry = true;
memset(&resc_unlock_params, 0, sizeof(resc_unlock_params));
resc_unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
QED_RESC_LOCK_RESC_ALLOC, false);
rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
if (rc && rc != -EINVAL) {
@@ -4072,3 +4064,17 @@ int qed_device_num_engines(struct qed_dev *cdev)
{
return QED_IS_BB(cdev) ? 2 : 1;
}
static int qed_device_num_ports(struct qed_dev *cdev)
{
/* in CMT always only one port */
if (cdev->num_hwfns > 1)
return 1;
return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
}
int qed_device_get_port_id(struct qed_dev *cdev)
{
return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
}
@@ -956,13 +956,6 @@ static int qed_slowpath_start(struct qed_dev *cdev,
}
}
#endif
p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (p_ptt) {
QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
} else {
DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
goto err;
}
}
cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -1076,9 +1069,6 @@ static int qed_slowpath_start(struct qed_dev *cdev,
qed_ptt_release(QED_LEADING_HWFN(cdev),
QED_LEADING_HWFN(cdev)->p_arfs_ptt);
#endif
if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
qed_ptt_release(QED_LEADING_HWFN(cdev),
QED_LEADING_HWFN(cdev)->p_ptp_ptt);
qed_iov_wq_stop(cdev, false);
@@ -1098,8 +1088,6 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_ptt_release(QED_LEADING_HWFN(cdev),
QED_LEADING_HWFN(cdev)->p_arfs_ptt);
#endif
qed_ptt_release(QED_LEADING_HWFN(cdev),
QED_LEADING_HWFN(cdev)->p_ptp_ptt);
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
qed_sriov_disable(cdev, true);
......
@@ -2615,3 +2615,33 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
return 0;
}
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
struct qed_resc_unlock_params *p_unlock,
enum qed_resc_lock
resource, bool b_is_permanent)
{
if (p_lock) {
memset(p_lock, 0, sizeof(*p_lock));
/* Permanent resources don't require aging, and there's no
* point in trying to acquire them more than once since it's
* unexpected another entity would release them.
*/
if (b_is_permanent) {
p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
} else {
p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
p_lock->retry_interval =
QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
p_lock->sleep_b4_retry = true;
}
p_lock->resource = resource;
}
if (p_unlock) {
memset(p_unlock, 0, sizeof(*p_unlock));
p_unlock->resource = resource;
}
}
@@ -795,7 +795,12 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
enum qed_resc_lock {
QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL
QED_RESC_LOCK_PTP_PORT0,
QED_RESC_LOCK_PTP_PORT1,
QED_RESC_LOCK_PTP_PORT2,
QED_RESC_LOCK_PTP_PORT3,
QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
QED_RESC_LOCK_RESC_INVALID
};
/**
@@ -818,9 +823,11 @@ struct qed_resc_lock_params {
/* Number of times to retry locking */
u8 retry_num;
#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
/* The interval in usec between retries */
u16 retry_interval;
#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
/* Use sleep or delay between retries */
bool sleep_b4_retry;
@@ -872,4 +879,17 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_resc_unlock_params *p_params);
/**
* @brief - default initialization for lock/unlock resource structs
*
* @param p_lock - lock params struct to be initialized; Can be NULL
* @param p_unlock - unlock params struct to be initialized; Can be NULL
* @param resource - the requested resource
* @param b_is_permanent - disable retries & aging when set
*/
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
struct qed_resc_unlock_params *p_unlock,
enum qed_resc_lock
resource, bool b_is_permanent);
#endif
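As an aside, a hedged usage sketch of qed_mcp_resc_lock_default_init() for the non-permanent case; it mirrors the qed_dev.c call site earlier in this diff, and the surrounding variables (p_hwfn, p_ptt) are assumed to exist in the caller.

	struct qed_resc_unlock_params resc_unlock_params;
	struct qed_resc_lock_params resc_lock_params;
	int rc;

	/* Fill both structs with the defaults: 10 retries, 10000 usec
	 * between retries, sleeping before each retry.
	 */
	qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
				       QED_RESC_LOCK_RESC_ALLOC, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
	/* -EINVAL means the MFW does not support resource locking;
	 * callers in this series treat that as "proceed without it".
	 */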
@@ -34,7 +34,7 @@
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
#include "qed_ptp.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
/* 16 nano second time quantas to wait before making a Drift adjustment */
@@ -45,6 +45,82 @@
#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
#define QED_TIMESTAMP_MASK BIT(16)
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
switch (qed_device_get_port_id(p_hwfn->cdev)) {
case 0:
return QED_RESC_LOCK_PTP_PORT0;
case 1:
return QED_RESC_LOCK_PTP_PORT1;
case 2:
return QED_RESC_LOCK_PTP_PORT2;
case 3:
return QED_RESC_LOCK_PTP_PORT3;
default:
return QED_RESC_LOCK_RESC_INVALID;
}
}
static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_resc_lock_params params;
enum qed_resc_lock resource;
int rc;
resource = qed_ptcdev_to_resc(p_hwfn);
if (resource == QED_RESC_LOCK_RESC_INVALID)
return -EINVAL;
qed_mcp_resc_lock_default_init(&params, NULL, resource, true);
rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
if (rc && rc != -EINVAL) {
return rc;
} else if (rc == -EINVAL) {
/* MFW doesn't support resource locking, first PF on the port
* has lock ownership.
*/
if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines)
return 0;
DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
return -EBUSY;
} else if (!rc && !params.b_granted) {
DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
return -EBUSY;
}
return rc;
}
static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_resc_unlock_params params;
enum qed_resc_lock resource;
int rc;
resource = qed_ptcdev_to_resc(p_hwfn);
if (resource == QED_RESC_LOCK_RESC_INVALID)
return -EINVAL;
qed_mcp_resc_lock_default_init(NULL, &params, resource, true);
rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
if (rc == -EINVAL) {
/* MFW doesn't support locking, first PF has lock ownership */
if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) {
rc = 0;
} else {
DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
return -EINVAL;
}
} else if (rc) {
DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
}
return rc;
}
/* Read Rx timestamp */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
@@ -248,7 +324,25 @@ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
struct qed_ptt *p_ptt;
int rc;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
return -EBUSY;
}
p_hwfn->p_ptp_ptt = p_ptt;
rc = qed_ptp_res_lock(p_hwfn, p_ptt);
if (rc) {
DP_INFO(p_hwfn,
"Couldn't acquire the resource lock, skip ptp enable for this PF\n");
qed_ptt_release(p_hwfn, p_ptt);
p_hwfn->p_ptp_ptt = NULL;
return rc;
}
/* Reset PTP event detection rules - will be configured in the IOCTL */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
@@ -305,6 +399,8 @@ static int qed_ptp_hw_disable(struct qed_dev *cdev)
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
qed_ptp_res_unlock(p_hwfn, p_ptt);
/* Reset PTP event detection rules */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
@@ -316,6 +412,9 @@ static int qed_ptp_hw_disable(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
qed_ptt_release(p_hwfn, p_ptt);
p_hwfn->p_ptp_ptt = NULL;
return 0;
}
......
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _QED_PTP_H
#define _QED_PTP_H
#include <linux/types.h>
int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_ptp_filter_type type);
int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
int qed_ptp_read_cc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u64 *cycles);
int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb);
int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#endif
@@ -907,13 +907,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
/* PTP not supported on VFs */
if (!is_vf) {
rc = qede_ptp_register_phc(edev);
if (rc) {
DP_NOTICE(edev, "Cannot register PHC\n");
goto err5;
}
}
if (!is_vf)
qede_ptp_enable(edev, true);
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
@@ -928,8 +923,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
err5:
unregister_netdev(edev->ndev);
err4:
qede_roce_dev_remove(edev);
err3:
@@ -980,7 +973,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
qede_ptp_remove(edev);
qede_ptp_disable(edev);
qede_roce_dev_remove(edev);
@@ -1877,8 +1870,6 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_roce_dev_event_close(edev);
edev->state = QEDE_STATE_CLOSED;
qede_ptp_stop(edev);
/* Close OS Tx */
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
@@ -1987,13 +1978,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
qede_roce_dev_event_open(edev);
qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL));
edev->state = QEDE_STATE_OPEN;
DP_INFO(edev, "Ending successfully qede load\n");
goto out;
err4:
qede_sync_free_irqs(edev);
......
@@ -206,21 +206,6 @@ static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
return phc_cycles;
}
static void qede_ptp_init_cc(struct qede_dev *edev)
{
struct qede_ptp *ptp;
ptp = edev->ptp;
if (!ptp)
return;
memset(&ptp->cc, 0, sizeof(ptp->cc));
ptp->cc.read = qede_ptp_read_cc;
ptp->cc.mask = CYCLECOUNTER_MASK(64);
ptp->cc.shift = 0;
ptp->cc.mult = 1;
}
static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
struct qede_ptp *ptp = edev->ptp;
@@ -324,61 +309,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
/* Called during load, to initialize PTP-related stuff */
static void qede_ptp_init(struct qede_dev *edev, bool init_tc)
{
struct qede_ptp *ptp;
int rc;
ptp = edev->ptp;
if (!ptp)
return;
spin_lock_init(&ptp->lock);
/* Configure PTP in HW */
rc = ptp->ops->enable(edev->cdev);
if (rc) {
DP_ERR(edev, "Stopping PTP initialization\n");
return;
}
/* Init work queue for Tx timestamping */
INIT_WORK(&ptp->work, qede_ptp_task);
/* Init cyclecounter and timecounter. This is done only in the first
* load. If done in every load, PTP application will fail when doing
* unload / load (e.g. MTU change) while it is running.
*/
if (init_tc) {
qede_ptp_init_cc(edev);
timecounter_init(&ptp->tc, &ptp->cc,
ktime_to_ns(ktime_get_real()));
}
DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n");
}
void qede_ptp_start(struct qede_dev *edev, bool init_tc)
{
qede_ptp_init(edev, init_tc);
qede_ptp_cfg_filters(edev);
}
void qede_ptp_remove(struct qede_dev *edev)
{
struct qede_ptp *ptp;
ptp = edev->ptp;
if (ptp && ptp->clock) {
ptp_clock_unregister(ptp->clock);
ptp->clock = NULL;
}
kfree(ptp);
edev->ptp = NULL;
}
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
struct qede_ptp *ptp = edev->ptp;
@@ -417,8 +347,7 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
return 0;
}
/* Called during unload, to stop PTP-related stuff */
void qede_ptp_stop(struct qede_dev *edev)
void qede_ptp_disable(struct qede_dev *edev)
{
struct qede_ptp *ptp;
@@ -426,6 +355,11 @@ void qede_ptp_stop(struct qede_dev *edev)
if (!ptp)
return;
if (ptp->clock) {
ptp_clock_unregister(ptp->clock);
ptp->clock = NULL;
}
/* Cancel PTP work queue. Should be done after the Tx queues are
* drained to prevent additional scheduling.
*/
@@ -439,11 +373,54 @@ void qede_ptp_stop(struct qede_dev *edev)
spin_lock_bh(&ptp->lock);
ptp->ops->disable(edev->cdev);
spin_unlock_bh(&ptp->lock);
kfree(ptp);
edev->ptp = NULL;
}
int qede_ptp_register_phc(struct qede_dev *edev)
static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
{
struct qede_ptp *ptp;
int rc;
ptp = edev->ptp;
if (!ptp)
return -EINVAL;
spin_lock_init(&ptp->lock);
/* Configure PTP in HW */
rc = ptp->ops->enable(edev->cdev);
if (rc) {
DP_INFO(edev, "PTP HW enable failed\n");
return rc;
}
/* Init work queue for Tx timestamping */
INIT_WORK(&ptp->work, qede_ptp_task);
/* Init cyclecounter and timecounter. This is done only in the first
* load. If done in every load, PTP application will fail when doing
* unload / load (e.g. MTU change) while it is running.
*/
if (init_tc) {
memset(&ptp->cc, 0, sizeof(ptp->cc));
ptp->cc.read = qede_ptp_read_cc;
ptp->cc.mask = CYCLECOUNTER_MASK(64);
ptp->cc.shift = 0;
ptp->cc.mult = 1;
timecounter_init(&ptp->tc, &ptp->cc,
ktime_to_ns(ktime_get_real()));
}
return rc;
}
int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
{
struct qede_ptp *ptp;
int rc;
ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
if (!ptp) {
@@ -454,14 +431,19 @@ int qede_ptp_register_phc(struct qede_dev *edev)
ptp->edev = edev;
ptp->ops = edev->ops->ptp;
if (!ptp->ops) {
kfree(ptp);
edev->ptp = NULL;
DP_ERR(edev, "PTP clock registeration failed\n");
return -EIO;
DP_INFO(edev, "PTP enable failed\n");
rc = -EIO;
goto err1;
}
edev->ptp = ptp;
rc = qede_ptp_init(edev, init_tc);
if (rc)
goto err1;
qede_ptp_cfg_filters(edev);
/* Fill the ptp_clock_info struct and register PTP clock */
ptp->clock_info.owner = THIS_MODULE;
snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
@@ -478,13 +460,21 @@ int qede_ptp_register_phc(struct qede_dev *edev)
ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
if (IS_ERR(ptp->clock)) {
ptp->clock = NULL;
kfree(ptp);
edev->ptp = NULL;
rc = -EINVAL;
DP_ERR(edev, "PTP clock registeration failed\n");
goto err2;
}
return 0;
err2:
qede_ptp_disable(edev);
ptp->clock = NULL;
err1:
kfree(ptp);
edev->ptp = NULL;
return rc;
}
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
......
@@ -40,10 +40,8 @@
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
void qede_ptp_start(struct qede_dev *edev, bool init_tc);
void qede_ptp_stop(struct qede_dev *edev);
void qede_ptp_remove(struct qede_dev *edev);
int qede_ptp_register_phc(struct qede_dev *edev);
void qede_ptp_disable(struct qede_dev *edev);
int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
......