Commit 006e8964 authored by Jakub Kicinski's avatar Jakub Kicinski

Merge tag 'mlx5-updates-2024-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2024-02-01

1) IPSec global stats for xfrm and mlx5
2) XSK memory improvements for non-linear SKBs
3) Software steering debug dump to use seq_file ops
4) Various code clean-ups

* tag 'mlx5-updates-2024-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: XDP, Exclude headroom and tailroom from memory calculations
  net/mlx5e: XSK, Exclude tailroom from non-linear SKBs memory calculations
  net/mlx5: DR, Change SWS usage to debug fs seq_file interface
  net/mlx5: Change missing SyncE capability print to debug
  net/mlx5: Remove initial segmentation duplicate definitions
  net/mlx5: Return specific error code for timeout on wait_fw_init
  net/mlx5: SF, Stop waiting for FW as teardown was called
  net/mlx5: remove fw reporter dump option for non PF
  net/mlx5: remove fw_fatal reporter dump option for non PF
  net/mlx5: Rename mlx5_sf_dev_remove
  Documentation: Fix counter name of mlx5 vnic reporter
  net/mlx5e: Delete obsolete IPsec code
  net/mlx5e: Connect mlx5 IPsec statistics with XFRM core
  xfrm: get global statistics from the offloaded device
  xfrm: generalize xdo_dev_state_update_curlft to allow statistics update
====================

Link: https://lore.kernel.org/r/20240206005527.1353368-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 313fb184 a90f5591
......@@ -250,7 +250,7 @@ them in realtime.
Description of the vnic counters:
- total_q_under_processor_handle
- total_error_queues
number of queues in an error state due to
an async error or errored command.
- send_queue_priority_update_flow
......@@ -259,7 +259,8 @@ Description of the vnic counters:
number of times CQ entered an error state due to an overflow.
- async_eq_overrun
number of times an EQ mapped to async events was overrun.
comp_eq_overrun number of times an EQ mapped to completion events was
- comp_eq_overrun
number of times an EQ mapped to completion events was
overrun.
- quota_exceeded_command
number of commands issued and failed due to quota exceeded.
......
......@@ -71,9 +71,9 @@ Callbacks to implement
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
/* Solely packet offload callbacks */
void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
......@@ -191,6 +191,6 @@ xdo_dev_policy_free() on any remaining offloaded states.
Outcome of HW handling packets, the XFRM core can't count hard, soft limits.
The HW/driver are responsible to perform it and provide accurate data when
xdo_dev_state_update_curlft() is called. In case of one of these limits
xdo_dev_state_update_stats() is called. In case of one of these limits
occurred, the driver needs to call xfrm_state_check_expire() to make sure
that XFRM performs rekeying sequence.
......@@ -210,7 +210,7 @@ static bool is_dpll_supported(struct mlx5_core_dev *dev)
return false;
if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
mlx5_core_warn(dev, "Missing SyncE capability\n");
mlx5_core_dbg(dev, "Missing SyncE capability\n");
return false;
}
......
......@@ -240,11 +240,14 @@ static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
return xsk->headroom + hw_mtu;
}
static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
/* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
u16 headroom;
if (no_head_tail_room)
return SKB_DATA_ALIGN(hw_mtu);
headroom = mlx5e_get_linear_rq_headroom(params, NULL);
return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}
......@@ -254,6 +257,7 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
bool mpwqe)
{
bool no_head_tail_room;
u32 sz;
/* XSK frames are mapped as individual pages, because frames may come in
......@@ -262,7 +266,13 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
if (xsk)
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
/* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
* no_head_tail_room should be set in the case of XDP with Striding RQ
* when SKB is not linear. This is because another page is allocated for the linear part.
*/
sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));
/* XDP in mlx5e doesn't support multiple packets per page.
* Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
......@@ -289,7 +299,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
return false;
/* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
/* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
* to exclude headroom and tailroom from calculations.
* no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs
* since packet data buffers don't have headroom and tailroom reserved for the SKB.
* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
* must fit into a CPU page.
*/
if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
......
......@@ -984,21 +984,41 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
queue_work(sa_entry->ipsec->wq, &work->work);
}
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct net *net = dev_net(x->xso.dev);
u64 packets, bytes, lastuse;
lockdep_assert(lockdep_is_held(&x->lock) ||
lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
x->stats.integrity_failed += packets;
XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
}
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
return;
mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
x->curlft.packets += packets;
x->curlft.bytes += bytes;
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
x->stats.replay += packets;
XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
}
}
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
......@@ -1156,7 +1176,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
......
......@@ -137,7 +137,6 @@ struct mlx5e_ipsec_hw_stats {
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
atomic64_t ipsec_rx_drop_syndrome;
atomic64_t ipsec_tx_drop_bundle;
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
......
......@@ -304,12 +304,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
return false;
}
enum {
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
u32 ipsec_meta_data)
......@@ -343,20 +337,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
break;
default:
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
xo->status = CRYPTO_SUCCESS;
}
int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
......@@ -374,8 +355,6 @@ int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metada
return err;
}
*metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
*metadata = ipsec_obj_id;
return 0;
}
......@@ -43,7 +43,6 @@
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
......
......@@ -51,7 +51,6 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
......
......@@ -366,18 +366,18 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
return -EIO;
}
mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED);
/* Loop until device state turns to disable */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
cond_resched();
} while (!time_after(jiffies, end));
if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
return -EIO;
......
......@@ -116,9 +116,9 @@ u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
return MLX5_SENSOR_PCI_COMM_ERR;
if (pci_channel_offline(dev->pdev))
return MLX5_SENSOR_PCI_ERR;
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
return MLX5_SENSOR_NIC_DISABLED;
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET)
return MLX5_SENSOR_NIC_SW_RESET;
if (sensor_fw_synd_rfr(dev))
return MLX5_SENSOR_FW_SYND_RFR;
......@@ -185,7 +185,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
/* Write the NIC interface field to initiate the reset, the command
* interface address also resides here, don't overwrite it.
*/
mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);
mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET);
return true;
}
......@@ -246,13 +246,13 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
/* Recover from SW reset */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
msleep(20);
} while (!time_after(jiffies, end));
if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
}
......@@ -272,26 +272,26 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
u8 nic_interface = mlx5_get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
case MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
break;
case MLX5_NIC_IFC_DISABLED:
case MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED:
mlx5_core_warn(dev, "starting teardown\n");
break;
case MLX5_NIC_IFC_NO_DRAM_NIC:
case MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
break;
case MLX5_NIC_IFC_SW_RESET:
case MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET:
/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
* 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
* and this is a VF), this is not recoverable by SW reset.
* Logging of this is handled elsewhere.
* 2. FW reset has been issued by another function, driver can
* be reloaded to recover after the mode switches to
* MLX5_NIC_IFC_DISABLED.
* MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED.
*/
if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
mlx5_core_warn(dev, "NIC SW reset in progress\n");
......@@ -555,12 +555,17 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
&fw_reporter_ctx);
}
static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
static const struct devlink_health_reporter_ops mlx5_fw_reporter_pf_ops = {
.name = "fw",
.diagnose = mlx5_fw_reporter_diagnose,
.dump = mlx5_fw_reporter_dump,
};
static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
.name = "fw",
.diagnose = mlx5_fw_reporter_diagnose,
};
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
......@@ -646,12 +651,17 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
};
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
};
#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
......@@ -659,10 +669,14 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
u64 grace_period;
fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
} else if (mlx5_core_is_pf(dev)) {
......@@ -670,18 +684,19 @@ void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
} else {
/* VF or SF */
grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
fw_ops = &mlx5_fw_reporter_ops;
}
health->fw_reporter =
devl_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
0, dev);
devl_health_reporter_create(devlink, fw_ops, 0, dev);
if (IS_ERR(health->fw_reporter))
mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_fatal_reporter =
devl_health_reporter_create(devlink,
&mlx5_fw_fatal_reporter_ops,
fw_fatal_ops,
grace_period,
dev);
if (IS_ERR(health->fw_fatal_reporter))
......
......@@ -187,31 +187,36 @@ static struct mlx5_profile profile[] = {
};
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
u32 warn_time_mili)
u32 warn_time_mili, const char *init_state)
{
unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
u32 fw_initializing;
int err = 0;
do {
fw_initializing = ioread32be(&dev->iseg->initializing);
if (!(fw_initializing >> 31))
break;
if (time_after(jiffies, end) ||
test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
err = -EBUSY;
break;
if (time_after(jiffies, end)) {
mlx5_core_err(dev, "Firmware over %u MS in %s state, aborting\n",
max_wait_mili, init_state);
return -ETIMEDOUT;
}
if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
mlx5_core_warn(dev, "device is being removed, stop waiting for FW %s\n",
init_state);
return -ENODEV;
}
if (warn_time_mili && time_after(jiffies, warn)) {
mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
jiffies_to_msecs(end - warn) / 1000, fw_initializing);
mlx5_core_warn(dev, "Waiting for FW %s, timeout abort in %ds (0x%x)\n",
init_state, jiffies_to_msecs(end - warn) / 1000,
fw_initializing);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
} while (true);
return err;
return 0;
}
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
......@@ -1151,12 +1156,10 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, timeout,
mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
if (err) {
mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
timeout);
mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL),
"pre-initializing");
if (err)
return err;
}
err = mlx5_cmd_enable(dev);
if (err) {
......@@ -1166,12 +1169,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
mlx5_tout_query_iseg(dev);
err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
if (err) {
mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
mlx5_tout_ms(dev, FW_INIT));
err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0, "initializing");
if (err)
goto err_cmd_cleanup;
}
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
......
......@@ -312,13 +312,6 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
return ret;
}
enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
MLX5_NIC_IFC_NO_DRAM_NIC = 2,
MLX5_NIC_IFC_SW_RESET = 7
};
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
......
......@@ -74,7 +74,8 @@ static void mlx5_sf_dev_release(struct device *device)
kfree(sf_dev);
}
static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
static void mlx5_sf_dev_remove_aux(struct mlx5_core_dev *dev,
struct mlx5_sf_dev *sf_dev)
{
int id;
......@@ -138,7 +139,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
return;
xa_err:
mlx5_sf_dev_remove(dev, sf_dev);
mlx5_sf_dev_remove_aux(dev, sf_dev);
add_err:
mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
sf_index, sfnum, err);
......@@ -149,7 +150,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
xa_erase(&table->devices, sf_index);
mlx5_sf_dev_remove(dev, sf_dev);
mlx5_sf_dev_remove_aux(dev, sf_dev);
}
static int
......@@ -367,7 +368,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
xa_for_each(&table->devices, index, sf_dev) {
xa_erase(&table->devices, index);
mlx5_sf_dev_remove(table->dev, sf_dev);
mlx5_sf_dev_remove_aux(table->dev, sf_dev);
}
}
......
......@@ -95,24 +95,29 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
struct mlx5_core_dev *mdev = sf_dev->mdev;
struct devlink *devlink;
mlx5_drain_health_wq(sf_dev->mdev);
devlink = priv_to_devlink(mdev);
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
mlx5_drain_health_wq(mdev);
devlink_unregister(devlink);
if (mlx5_dev_is_lightweight(sf_dev->mdev))
mlx5_uninit_one_light(sf_dev->mdev);
if (mlx5_dev_is_lightweight(mdev))
mlx5_uninit_one_light(mdev);
else
mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
mlx5_mdev_uninit(sf_dev->mdev);
mlx5_uninit_one(mdev);
iounmap(mdev->iseg);
mlx5_mdev_uninit(mdev);
mlx5_devlink_free(devlink);
}
static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
struct mlx5_core_dev *mdev = sf_dev->mdev;
mlx5_unload_one(sf_dev->mdev, false);
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
mlx5_unload_one(mdev, false);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
......
......@@ -54,6 +54,107 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
};
/* Allocate one dump buffer (descriptor + MLX5DR_DEBUG_DUMP_BUFF_SIZE of
 * zeroed storage) and append it to @dump_data's buffer list.
 *
 * Returns the new buffer on success, NULL on allocation failure (in which
 * case @dump_data is left unchanged).
 */
static struct mlx5dr_dbg_dump_buff *
mlx5dr_dbg_dump_data_init_new_buff(struct mlx5dr_dbg_dump_data *dump_data)
{
	struct mlx5dr_dbg_dump_buff *buff;

	buff = kzalloc(sizeof(*buff), GFP_KERNEL);
	if (!buff)
		goto err_desc;

	/* kvzalloc: the data area is large, allow vmalloc fallback. */
	buff->buff = kvzalloc(MLX5DR_DEBUG_DUMP_BUFF_SIZE, GFP_KERNEL);
	if (!buff->buff)
		goto err_data;

	INIT_LIST_HEAD(&buff->node);
	list_add_tail(&buff->node, &dump_data->buff_list);

	return buff;

err_data:
	kfree(buff);
err_desc:
	return NULL;
}
/* Create a dump-data container with one initial buffer in its list.
 *
 * Returns the container on success, NULL on allocation failure; on failure
 * nothing is leaked.
 */
static struct mlx5dr_dbg_dump_data *
mlx5dr_dbg_create_dump_data(void)
{
	struct mlx5dr_dbg_dump_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	INIT_LIST_HEAD(&data->buff_list);

	/* Seed the list with a first buffer so writers never see it empty. */
	if (!mlx5dr_dbg_dump_data_init_new_buff(data)) {
		kfree(data);
		return NULL;
	}

	return data;
}
/* Free a dump-data container and every buffer linked on its list.
 * NULL-safe: a NULL @dump_data is a no-op.
 */
static void
mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data)
{
	struct mlx5dr_dbg_dump_buff *buff, *next;

	if (!dump_data)
		return;

	/* _safe variant: each node is unlinked and freed while iterating. */
	list_for_each_entry_safe(buff, next, &dump_data->buff_list, node) {
		list_del(&buff->node);
		kvfree(buff->buff);
		kfree(buff);
	}

	kfree(dump_data);
}
/* Append @str (@size bytes, not counting the terminating NUL) to the
 * domain's dump data. Writes into the last buffer on the list; if @str does
 * not fit, the part that fits is written there and the remainder spills into
 * a freshly allocated buffer.
 *
 * Returns 0 on success, -EINVAL if @str alone exceeds a whole buffer,
 * -ENOMEM if a spill buffer cannot be allocated, or a negative snprintf
 * error.
 */
static int
mlx5dr_dbg_dump_data_print(struct seq_file *file, char *str, u32 size)
{
	struct mlx5dr_domain *dmn = file->private;
	struct mlx5dr_dbg_dump_data *dump_data;
	struct mlx5dr_dbg_dump_buff *buff;
	u32 buff_capacity, write_size;
	int remain_size, ret;

	/* A single record must fit within one buffer (minus the NUL byte). */
	if (size >= MLX5DR_DEBUG_DUMP_BUFF_SIZE)
		return -EINVAL;

	dump_data = dmn->dump_info.dump_data;
	/* Always append to the most recently added buffer. */
	buff = list_last_entry(&dump_data->buff_list,
			       struct mlx5dr_dbg_dump_buff, node);

	/* Reserve one byte for the NUL terminator snprintf always writes. */
	buff_capacity = (MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1) - buff->index;
	/* Negative remain_size means @str overflows the current buffer. */
	remain_size = buff_capacity - size;
	write_size = (remain_size > 0) ? size : buff_capacity;

	if (likely(write_size)) {
		/* write_size + 1: snprintf copies write_size chars plus NUL;
		 * the NUL is overwritten by the next append via buff->index.
		 */
		ret = snprintf(buff->buff + buff->index, write_size + 1, "%s", str);
		if (ret < 0)
			return ret;

		buff->index += write_size;
	}

	if (remain_size < 0) {
		/* Spill the leftover tail of @str into a new buffer. */
		remain_size *= -1;
		buff = mlx5dr_dbg_dump_data_init_new_buff(dump_data);
		if (!buff)
			return -ENOMEM;

		ret = snprintf(buff->buff, remain_size + 1, "%s", str + write_size);
		if (ret < 0)
			return ret;

		buff->index += remain_size;
	}

	return 0;
}
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
{
mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
......@@ -109,36 +210,68 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
int ret;
switch (action->action_type) {
case DR_ACTION_TYP_DROP:
seq_printf(file, "%d,0x%llx,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_DROP, action_id,
rule_id);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
rule_id, action->dest_tbl->fw_tbl.id,
-1);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
rule_id, action->dest_tbl->fw_tbl.id,
-1);
else
seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
rule_id, action->dest_tbl->tbl->table_id,
DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
rule_id, action->dest_tbl->tbl->table_id,
DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_CTR:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
action->ctr->ctr_id + action->ctr->offset);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
action->ctr->ctr_id + action->ctr->offset);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_TAG:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
action->flow_tag->flow_tag);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
action->flow_tag->flow_tag);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_MODIFY_HDR:
{
......@@ -150,83 +283,171 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
rule_id, action->rewrite->index,
action->rewrite->single_action_opt,
ptrn_arg ? action->rewrite->num_of_actions : 0,
ptrn_arg ? ptrn->index : 0,
ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
rule_id, action->rewrite->index,
action->rewrite->single_action_opt,
ptrn_arg ? action->rewrite->num_of_actions : 0,
ptrn_arg ? ptrn->index : 0,
ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
if (ptrn_arg) {
for (i = 0; i < action->rewrite->num_of_actions; i++) {
seq_printf(file, ",0x%016llx",
be64_to_cpu(((__be64 *)rewrite_data)[i]));
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
",0x%016llx",
be64_to_cpu(((__be64 *)rewrite_data)[i]));
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
}
}
seq_puts(file, "\n");
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "\n");
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
}
case DR_ACTION_TYP_VPORT:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
action->vport->caps->num);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
action->vport->caps->num);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
seq_printf(file, "%d,0x%llx,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
rule_id);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
rule_id);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
rule_id,
(action->rewrite->ptrn && action->rewrite->arg) ?
mlx5dr_arg_get_obj_id(action->rewrite->arg) :
action->rewrite->index);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
rule_id,
(action->rewrite->ptrn && action->rewrite->arg) ?
mlx5dr_arg_get_obj_id(action->rewrite->arg) :
action->rewrite->index);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
rule_id, action->reformat->id);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
rule_id, action->reformat->id);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L3:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
rule_id, action->reformat->id);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
rule_id, action->reformat->id);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_POP_VLAN:
seq_printf(file, "%d,0x%llx,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
rule_id);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
rule_id);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_PUSH_VLAN:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
rule_id, action->push_vlan->vlan_hdr);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
rule_id, action->push_vlan->vlan_hdr);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_INSERT_HDR:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
rule_id, action->reformat->id,
action->reformat->param_0,
action->reformat->param_1);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
rule_id, action->reformat->id,
action->reformat->param_0,
action->reformat->param_1);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_REMOVE_HDR:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
rule_id, action->reformat->id,
action->reformat->param_0,
action->reformat->param_1);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
rule_id, action->reformat->id,
action->reformat->param_0,
action->reformat->param_1);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_SAMPLER:
seq_printf(file,
"%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id,
0, 0, action->sampler->sampler_id,
action->sampler->rx_icm_addr,
action->sampler->tx_icm_addr);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id,
rule_id, 0, 0, action->sampler->sampler_id,
action->sampler->rx_icm_addr,
action->sampler->tx_icm_addr);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
case DR_ACTION_TYP_RANGE:
if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
......@@ -247,10 +468,17 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
}
seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id,
hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr,
action->range->definer_id);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id,
rule_id, hit_tbl_id, hit_tbl_ptr, miss_tbl_id,
miss_tbl_ptr, action->range->definer_id);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
break;
default:
return 0;
......@@ -263,8 +491,10 @@ static int
dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
int ret;
if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
......@@ -277,9 +507,16 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
DR_STE_SIZE_REDUCED);
seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
hw_ste_dump);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,%s\n", mem_rec_type,
dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)),
rule_id, hw_ste_dump);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
return 0;
}
......@@ -309,6 +546,7 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_rule_rx_tx *rx = &rule->rx;
struct mlx5dr_rule_rx_tx *tx = &rule->tx;
u8 format_ver;
......@@ -316,8 +554,15 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id,
DR_DBG_PTR_TO_ID(rule->matcher));
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE,
rule_id, DR_DBG_PTR_TO_ID(rule->matcher));
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
if (rx->nic_matcher) {
ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
......@@ -344,46 +589,94 @@ static int
static int
dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
		     u8 criteria, const u64 matcher_id)
{
	char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
	char dump[DR_HEX_SIZE];
	int ret;

	/* Emit one CSV record describing the matcher mask: a header
	 * (record type + owning matcher id) followed by one hex field per
	 * match-criteria section; absent sections emit an empty field so
	 * the column layout stays fixed.  All output goes through the
	 * buffered mlx5dr_dbg_dump_data_print() path only — the old
	 * direct seq_printf() calls duplicated every record.
	 *
	 * Returns 0 on success, a negative value on snprintf failure or a
	 * nonzero error from mlx5dr_dbg_dump_data_print().
	 *
	 * NOTE(review): on truncation snprintf returns the would-be length,
	 * which is then passed as the byte count to
	 * mlx5dr_dbg_dump_data_print() — confirm DR_HEX_SIZE records always
	 * fit in MLX5DR_DEBUG_DUMP_BUFF_LENGTH.
	 */
	ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "%d,0x%llx,",
		       DR_DUMP_REC_TYPE_MATCHER_MASK, matcher_id);
	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	if (criteria & DR_MATCHER_CRITERIA_OUTER) {
		dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
			       "%s,", dump);
	} else {
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
	}

	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	if (criteria & DR_MATCHER_CRITERIA_INNER) {
		dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
			       "%s,", dump);
	} else {
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
	}

	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	if (criteria & DR_MATCHER_CRITERIA_MISC) {
		dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
			       "%s,", dump);
	} else {
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
	}

	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	if (criteria & DR_MATCHER_CRITERIA_MISC2) {
		dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
			       "%s,", dump);
	} else {
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
	}

	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	/* Last field terminates the record with '\n' instead of ','. */
	if (criteria & DR_MATCHER_CRITERIA_MISC3) {
		dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
			       "%s\n", dump);
	} else {
		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",\n");
	}

	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	return 0;
}
......@@ -391,9 +684,19 @@ static int
static int
dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
			u32 index, bool is_rx, const u64 matcher_id)
{
	char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
	int ret;

	/* One CSV record per STE builder: record type, owning matcher id,
	 * builder index, RX/TX direction and lookup type.  The stale direct
	 * seq_printf() call is gone — it duplicated the record already
	 * written through the buffered dump path.
	 */
	ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
		       "%d,0x%llx,%d,%d,0x%x\n",
		       DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index,
		       is_rx, builder->lu_type);
	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	return 0;
}
......@@ -403,6 +706,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
......@@ -412,11 +716,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
matcher_id, matcher_rx_tx->num_of_builders,
dr_dump_icm_to_idx(s_icm_addr),
dr_dump_icm_to_idx(e_icm_addr));
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
matcher_id, matcher_rx_tx->num_of_builders,
dr_dump_icm_to_idx(s_icm_addr),
dr_dump_icm_to_idx(e_icm_addr));
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
ret = dr_dump_matcher_builder(file,
......@@ -434,13 +746,22 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 matcher_id;
int ret;
matcher_id = DR_DBG_PTR_TO_ID(matcher);
seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl),
matcher->prio);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
ret = dr_dump_matcher_mask(file, &matcher->mask,
matcher->match_criteria, matcher_id);
......@@ -486,15 +807,24 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
int ret;
rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
DR_DUMP_REC_TYPE_TABLE_TX;
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
dr_dump_icm_to_idx(s_icm_addr));
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx\n", rec_type, table_id,
dr_dump_icm_to_idx(s_icm_addr));
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
return 0;
}
......@@ -503,11 +833,19 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
table->table_type, table->level);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
table->table_type, table->level);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
if (rx->nic_dmn) {
ret = dr_dump_table_rx_tx(file, true, rx,
......@@ -546,46 +884,86 @@ static int
static int
dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
		  const u64 domain_id)
{
	char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
	int ret;

	/* One CSV record for the domain's send ring: record type, ring id
	 * (pointer-derived), owning domain id, CQN and QPN.  The leftover
	 * seq_printf() that preceded the declarations (duplicating the
	 * record and violating declaration-before-statement) is removed.
	 */
	ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
		       "%d,0x%llx,0x%llx,0x%x,0x%x\n",
		       DR_DUMP_REC_TYPE_DOMAIN_SEND_RING,
		       DR_DBG_PTR_TO_ID(ring), domain_id,
		       ring->cq->mcq.cqn, ring->qp->qpn);
	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	return 0;
}
static noinline_for_stack int
dr_dump_domain_info_flex_parser(struct seq_file *file,
				const char *flex_parser_name,
				const u8 flex_parser_value,
				const u64 domain_id)
{
	char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
	int ret;

	/* One CSV record per flex parser: record type, owning domain id,
	 * parser name and its value.  noinline_for_stack keeps the 512-byte
	 * on-stack buffer from being merged into the caller's frame.  The
	 * duplicated plain "static int" signature line and the stale direct
	 * seq_printf() call are removed.
	 */
	ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
		       "%d,0x%llx,%s,0x%x\n",
		       DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
		       flex_parser_name, flex_parser_value);
	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	return 0;
}
static noinline_for_stack int
dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
			 const u64 domain_id)
{
	char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
	struct mlx5dr_cmd_vport_cap *vport_caps;
	unsigned long i, vports_num;
	int ret;

	xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
		; /* count the number of vports in xarray */

	/* Caps summary record: gvmi, RX/TX drop addresses, flex-parser
	 * capability bits, vport count and eswitch-manager flag.  The
	 * interleaved duplicate signature line and the two stale direct
	 * seq_printf() calls (which double-emitted every record) are gone;
	 * all output flows through the buffered dump path.
	 */
	ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
		       "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
		       DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
		       caps->nic_rx_drop_address, caps->nic_tx_drop_address,
		       caps->flex_protocols, vports_num, caps->eswitch_manager);
	if (ret < 0)
		return ret;

	ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
	if (ret)
		return ret;

	/* One record per vport with its gvmi and RX/TX ICM addresses. */
	xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
		vport_caps = xa_load(&caps->vports.vports_caps_xa, i);

		ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
			       "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
			       DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT,
			       domain_id, i, vport_caps->vport_gvmi,
			       vport_caps->icm_address_rx,
			       vport_caps->icm_address_tx);
		if (ret < 0)
			return ret;

		ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
		if (ret)
			return ret;
	}

	return 0;
}
......@@ -627,24 +1005,32 @@ dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
return 0;
}
static int
static noinline_for_stack int
dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
{
char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
int ret;
seq_printf(file, "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
DR_DUMP_REC_TYPE_DOMAIN,
domain_id, dmn->type, dmn->info.caps.gvmi,
dmn->info.supp_sw_steering,
/* package version */
LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
LINUX_VERSION_SUBLEVEL,
pci_name(dmn->mdev->pdev),
0, /* domain flags */
dmn->num_buddies[DR_ICM_TYPE_STE],
dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
"%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
DR_DUMP_REC_TYPE_DOMAIN,
domain_id, dmn->type, dmn->info.caps.gvmi,
dmn->info.supp_sw_steering,
/* package version */
LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
LINUX_VERSION_SUBLEVEL,
pci_name(dmn->mdev->pdev),
0, /* domain flags */
dmn->num_buddies[DR_ICM_TYPE_STE],
dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
if (ret < 0)
return ret;
ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
if (ret)
return ret;
ret = dr_dump_domain_info(file, &dmn->info, domain_id);
if (ret < 0)
......@@ -683,11 +1069,91 @@ static int dr_dump_domain_all(struct seq_file *file, struct mlx5dr_domain *dmn)
return ret;
}
/* seq_file ->start(): begin (or resume) iteration over the dump.
 *
 * On the first pass (*pos == 0, no cached data) the whole dump is
 * generated into dmn->dump_info.dump_data and iteration starts at the
 * head of its buffer list; later read() passes resume from the cached
 * list.  Returns ERR_PTR(-EBUSY) if another dump is in progress, NULL
 * on allocation/generation failure (ends the iteration).
 *
 * The leftover body line of the removed single-shot dr_dump_show()
 * ("return dr_dump_domain_all(...)") and the stale
 * DEFINE_SHOW_ATTRIBUTE(dr_dump) — which would redefine dr_dump_fops
 * alongside DEFINE_SEQ_ATTRIBUTE(dr_dump) below — are removed.
 *
 * NOTE(review): the atomic_read()/atomic_set() pair is not a single
 * atomic transition; two concurrent opens could both observe
 * STATE_FREE — consider atomic_cmpxchg() if that matters.
 */
static void *
dr_dump_start(struct seq_file *file, loff_t *pos)
{
	struct mlx5dr_domain *dmn = file->private;
	struct mlx5dr_dbg_dump_data *dump_data;

	if (atomic_read(&dmn->dump_info.state) != MLX5DR_DEBUG_DUMP_STATE_FREE) {
		mlx5_core_warn(dmn->mdev, "Dump already in progress\n");
		return ERR_PTR(-EBUSY);
	}

	atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS);
	dump_data = dmn->dump_info.dump_data;

	if (dump_data) {
		return seq_list_start(&dump_data->buff_list, *pos);
	} else if (*pos == 0) {
		dump_data = mlx5dr_dbg_create_dump_data();
		if (!dump_data)
			goto exit;

		dmn->dump_info.dump_data = dump_data;
		if (dr_dump_domain_all(file, dmn)) {
			mlx5dr_dbg_destroy_dump_data(dump_data);
			dmn->dump_info.dump_data = NULL;
			goto exit;
		}

		return seq_list_start(&dump_data->buff_list, *pos);
	}

exit:
	atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
	return NULL;
}
/* seq_file ->next(): advance to the next pre-rendered dump buffer in the
 * cached list, bumping *pos; returns NULL at the end of the list.
 */
static void *
dr_dump_next(struct seq_file *file, void *v, loff_t *pos)
{
	struct mlx5dr_domain *dmn = file->private;

	return seq_list_next(v, &dmn->dump_info.dump_data->buff_list, pos);
}
/* seq_file ->stop(): called at the end of every read() pass.
 *
 * v is ERR_PTR(): ->start() refused with -EBUSY, the state flag belongs
 * to the dump already in progress — leave everything untouched.
 * v is NULL: the iteration is finished (or ->start() failed), so free the
 * cached dump so the next open regenerates it.
 * v is non-NULL: reader stopped mid-dump; keep the buffers for the next
 * read() pass and just release the in-progress flag.
 */
static void
dr_dump_stop(struct seq_file *file, void *v)
{
	struct mlx5dr_domain *dmn = file->private;
	struct mlx5dr_dbg_dump_data *dump_data;
	if (v && IS_ERR(v))
		return;
	if (!v) {
		dump_data = dmn->dump_info.dump_data;
		if (dump_data) {
			mlx5dr_dbg_destroy_dump_data(dump_data);
			dmn->dump_info.dump_data = NULL;
		}
	}
	/* Dump is no longer being read; allow a new one to start. */
	atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
}
/* seq_file ->show(): emit one pre-rendered dump buffer verbatim. */
static int
dr_dump_show(struct seq_file *file, void *v)
{
	struct mlx5dr_dbg_dump_buff *entry;

	entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node);
	/* seq_puts() instead of seq_printf(file, "%s", ...): the buffer is
	 * plain pre-formatted text, so skip the format-string machinery
	 * (also the checkpatch-preferred form).
	 */
	seq_puts(file, entry->buff);

	return 0;
}
/* Iterator over the list of pre-rendered dump buffers: dr_dump_start()
 * builds the dump once, dr_dump_show() emits one buffer per step.
 */
static const struct seq_operations dr_dump_sops = {
	.start	= dr_dump_start,
	.next	= dr_dump_next,
	.stop	= dr_dump_stop,
	.show	= dr_dump_show,
};
/* Generates dr_dump_fops (open/read/llseek/release) around dr_dump_sops. */
DEFINE_SEQ_ATTRIBUTE(dr_dump);
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
{
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
/* Size of one dump data buffer (64 MiB). */
#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024)
/* Upper bound for one formatted dump record (snprintf target). */
#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512
/* Dump lifecycle state: only one dump may be generated/read at a time. */
enum {
	MLX5DR_DEBUG_DUMP_STATE_FREE,
	MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS,
};
/* One chunk of rendered dump text, linked into a dump_data buff_list. */
struct mlx5dr_dbg_dump_buff {
	char *buff;	/* rendered text */
	u32 index;	/* NOTE(review): presumably write offset into buff — confirm at fill site */
	struct list_head node;	/* link in mlx5dr_dbg_dump_data::buff_list */
};
/* A complete generated dump: ordered list of mlx5dr_dbg_dump_buff. */
struct mlx5dr_dbg_dump_data {
	struct list_head buff_list;
};
/* Per-domain debug-dump bookkeeping exposed through debugfs. */
struct mlx5dr_dbg_dump_info {
	struct mutex dbg_mutex; /* protect dbg lists */
	struct dentry *steering_debugfs;	/* "steering" debugfs dir */
	struct dentry *fdb_debugfs;	/* per-domain debugfs entry */
	struct mlx5dr_dbg_dump_data *dump_data;	/* cached dump between read() passes */
	atomic_t state;	/* MLX5DR_DEBUG_DUMP_STATE_* */
};
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
......
......@@ -10661,6 +10661,7 @@ enum {
MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET = 0x7,
};
enum {
......
......@@ -1062,7 +1062,7 @@ struct xfrmdev_ops {
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
......
......@@ -51,8 +51,10 @@
/* XFRM MIB accounting helpers; when CONFIG_XFRM_STATISTICS is off they
 * collapse to ((void)(net)) so call sites compile unchanged (note: field
 * and val arguments are then not evaluated).
 */
#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
#define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
#else
#define XFRM_INC_STATS(net, field) ((void)(net))
#define XFRM_ADD_STATS(net, field, val) ((void)(net))
#endif
......@@ -1577,22 +1579,20 @@ struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
void xfrm_state_update_stats(struct net *net);
#ifdef CONFIG_XFRM_OFFLOAD
/* Ask the offloading driver to refresh the state's statistics (lifetime
 * counters and, with the generalized xdo_dev_state_update_stats callback,
 * packet/byte/error stats).  Only packet-offloaded states keep their
 * counters in the device; crypto offload returns early.
 *
 * The stale pre-rename lines (xfrm_dev_state_update_curlft signature and
 * xdo_dev_state_update_curlft callback) that were interleaved with the
 * renamed versions are removed — they duplicated the definition and the
 * callback invocation.
 */
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xdo = &x->xso;
	struct net_device *dev = xdo->dev;

	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	if (dev && dev->xfrmdev_ops &&
	    dev->xfrmdev_ops->xdo_dev_state_update_stats)
		dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
}
#else
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
#endif
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);
......
......@@ -52,6 +52,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
xfrm_state_update_stats(net);
snmp_get_cpu_field_batch(buff, xfrm_mib_list,
net->mib.xfrm_statistics);
for (i = 0; xfrm_mib_list[i].name; i++)
......
......@@ -570,7 +570,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
int err = 0;
spin_lock(&x->lock);
xfrm_dev_state_update_curlft(x);
xfrm_dev_state_update_stats(x);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
......@@ -1935,7 +1935,7 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
xfrm_dev_state_update_curlft(x);
xfrm_dev_state_update_stats(x);
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
......@@ -1957,6 +1957,19 @@ int xfrm_state_check_expire(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_check_expire);
/* Pull per-state statistics from offloading devices for every state in
 * this netns, walking the bydst hash table under xfrm_state_lock.
 * xfrm_dev_state_update_stats() is a no-op for states that are not
 * packet-offloaded, so SW-only states are unaffected.  Called before
 * snapshotting the xfrm MIB counters (see xfrm_statistics_seq_show).
 */
void xfrm_state_update_stats(struct net *net)
{
	struct xfrm_state *x;
	int i;
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
			xfrm_dev_state_update_stats(x);
	}
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
......
......@@ -902,7 +902,7 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
if (x->xso.dev)
xfrm_dev_state_update_curlft(x);
xfrm_dev_state_update_stats(x);
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
put_unaligned(x->stats.replay_window, &p->stats.replay_window);
put_unaligned(x->stats.replay, &p->stats.replay);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment