Commit 9b1aa3ef authored by Michal Kubiak's avatar Michal Kubiak Committed by Jakub Kicinski

idpf: add get/set for Ethtool's header split ringparam

idpf supports the header split feature and that feature is always
enabled by default.
However, for flexibility reasons and to simplify some scenarios, it
would be useful to have the support for switching the header split
off (and on) from the userspace.

Address that need by adding the user config parameter, the functions
for disabling (or enabling) the header split feature, and calls to
them from the Ethtool ringparam callbacks.
The feature is still enabled by default if supported by the hardware.
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Co-developed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20231212142752.935000-3-aleksander.lobakin@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 50d73710
...@@ -15,7 +15,7 @@ struct idpf_vport_max_q; ...@@ -15,7 +15,7 @@ struct idpf_vport_max_q;
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/bitfield.h> #include <linux/bitfield.h>
#include <linux/sctp.h> #include <linux/sctp.h>
#include <linux/ethtool.h> #include <linux/ethtool_netlink.h>
#include <net/gro.h> #include <net/gro.h>
#include <linux/dim.h> #include <linux/dim.h>
...@@ -418,11 +418,13 @@ struct idpf_vport { ...@@ -418,11 +418,13 @@ struct idpf_vport {
/** /**
* enum idpf_user_flags * enum idpf_user_flags
* @__IDPF_USER_FLAG_HSPLIT: header split state
* @__IDPF_PROMISC_UC: Unicast promiscuous mode * @__IDPF_PROMISC_UC: Unicast promiscuous mode
* @__IDPF_PROMISC_MC: Multicast promiscuous mode * @__IDPF_PROMISC_MC: Multicast promiscuous mode
* @__IDPF_USER_FLAGS_NBITS: Must be last * @__IDPF_USER_FLAGS_NBITS: Must be last
*/ */
enum idpf_user_flags { enum idpf_user_flags {
__IDPF_USER_FLAG_HSPLIT = 0U,
__IDPF_PROMISC_UC = 32, __IDPF_PROMISC_UC = 32,
__IDPF_PROMISC_MC, __IDPF_PROMISC_MC,
...@@ -965,4 +967,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map); ...@@ -965,4 +967,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs); int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
#endif /* !_IDPF_H_ */ #endif /* !_IDPF_H_ */
...@@ -320,6 +320,8 @@ static void idpf_get_ringparam(struct net_device *netdev, ...@@ -320,6 +320,8 @@ static void idpf_get_ringparam(struct net_device *netdev,
ring->rx_pending = vport->rxq_desc_count; ring->rx_pending = vport->rxq_desc_count;
ring->tx_pending = vport->txq_desc_count; ring->tx_pending = vport->txq_desc_count;
kring->tcp_data_split = idpf_vport_get_hsplit(vport);
idpf_vport_ctrl_unlock(netdev); idpf_vport_ctrl_unlock(netdev);
} }
...@@ -379,6 +381,14 @@ static int idpf_set_ringparam(struct net_device *netdev, ...@@ -379,6 +381,14 @@ static int idpf_set_ringparam(struct net_device *netdev,
new_rx_count == vport->rxq_desc_count) new_rx_count == vport->rxq_desc_count)
goto unlock_mutex; goto unlock_mutex;
if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
NL_SET_ERR_MSG_MOD(ext_ack,
"setting TCP data split is not supported");
err = -EOPNOTSUPP;
goto unlock_mutex;
}
config_data = &vport->adapter->vport_config[idx]->user_config; config_data = &vport->adapter->vport_config[idx]->user_config;
config_data->num_req_txq_desc = new_tx_count; config_data->num_req_txq_desc = new_tx_count;
config_data->num_req_rxq_desc = new_rx_count; config_data->num_req_rxq_desc = new_rx_count;
...@@ -1334,6 +1344,7 @@ static int idpf_get_link_ksettings(struct net_device *netdev, ...@@ -1334,6 +1344,7 @@ static int idpf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops idpf_ethtool_ops = { static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS | .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE, ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_msglevel = idpf_get_msglevel, .get_msglevel = idpf_get_msglevel,
.set_msglevel = idpf_set_msglevel, .set_msglevel = idpf_set_msglevel,
.get_link = ethtool_op_get_link, .get_link = ethtool_op_get_link,
......
...@@ -1057,6 +1057,71 @@ static void idpf_vport_dealloc(struct idpf_vport *vport) ...@@ -1057,6 +1057,71 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
adapter->next_vport = idpf_get_free_slot(adapter); adapter->next_vport = idpf_get_free_slot(adapter);
} }
/**
* idpf_is_hsplit_supported - check whether the header split is supported
* @vport: virtual port to check the capability for
*
* Return: true if it's supported by the HW/FW, false if not.
*/
static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
{
return idpf_is_queue_model_split(vport->rxq_model) &&
idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
IDPF_CAP_HSPLIT);
}
/**
 * idpf_vport_get_hsplit - get the current header split feature state
 * @vport: virtual port to query the state for
 *
 * Reads the user-config flag tracking the header split state for @vport.
 *
 * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported,
 *         ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled,
 *         ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active.
 */
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
{
	const struct idpf_vport_user_config_data *cfg;

	if (!idpf_is_hsplit_supported(vport))
		return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;

	cfg = &vport->adapter->vport_config[vport->idx]->user_config;

	if (test_bit(__IDPF_USER_FLAG_HSPLIT, cfg->user_flags))
		return ETHTOOL_TCP_DATA_SPLIT_ENABLED;

	return ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
/**
 * idpf_vport_set_hsplit - enable or disable header split on a given vport
 * @vport: virtual port to configure
 * @val: Ethtool flag controlling the header split state
 *
 * When header split is not supported by the HW, only the "unknown" value is
 * accepted (meaning "leave as is"); any explicit request fails.
 *
 * Return: true on success, false if not supported by the HW.
 */
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
{
	struct idpf_vport_user_config_data *cfg;

	if (!idpf_is_hsplit_supported(vport))
		return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;

	cfg = &vport->adapter->vport_config[vport->idx]->user_config;

	/* "Unknown" defaults to enabling the feature */
	if (val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN ||
	    val == ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		__set_bit(__IDPF_USER_FLAG_HSPLIT, cfg->user_flags);
		return true;
	}

	if (val == ETHTOOL_TCP_DATA_SPLIT_DISABLED) {
		__clear_bit(__IDPF_USER_FLAG_HSPLIT, cfg->user_flags);
		return true;
	}

	return false;
}
/** /**
* idpf_vport_alloc - Allocates the next available struct vport in the adapter * idpf_vport_alloc - Allocates the next available struct vport in the adapter
* @adapter: board private structure * @adapter: board private structure
......
...@@ -1240,12 +1240,15 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) ...@@ -1240,12 +1240,15 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_adapter *adapter = vport->adapter; struct idpf_adapter *adapter = vport->adapter;
struct idpf_queue *q; struct idpf_queue *q;
int i, k, err = 0; int i, k, err = 0;
bool hs;
vport->rxq_grps = kcalloc(vport->num_rxq_grp, vport->rxq_grps = kcalloc(vport->num_rxq_grp,
sizeof(struct idpf_rxq_group), GFP_KERNEL); sizeof(struct idpf_rxq_group), GFP_KERNEL);
if (!vport->rxq_grps) if (!vport->rxq_grps)
return -ENOMEM; return -ENOMEM;
hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
for (i = 0; i < vport->num_rxq_grp; i++) { for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
int j; int j;
...@@ -1298,9 +1301,8 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) ...@@ -1298,9 +1301,8 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
q->rx_buf_size = vport->bufq_size[j]; q->rx_buf_size = vport->bufq_size[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_buf_stride = IDPF_RX_BUF_STRIDE; q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
IDPF_CAP_HSPLIT) && if (hs) {
idpf_is_queue_model_split(vport->rxq_model)) {
q->rx_hsplit_en = true; q->rx_hsplit_en = true;
q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
} }
...@@ -1344,9 +1346,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) ...@@ -1344,9 +1346,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
rx_qgrp->splitq.rxq_sets[j]->refillq1 = rx_qgrp->splitq.rxq_sets[j]->refillq1 =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j]; &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, if (hs) {
IDPF_CAP_HSPLIT) &&
idpf_is_queue_model_split(vport->rxq_model)) {
q->rx_hsplit_en = true; q->rx_hsplit_en = true;
q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
} }
......
...@@ -3285,6 +3285,8 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) ...@@ -3285,6 +3285,8 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
idpf_vport_init_num_qs(vport, vport_msg); idpf_vport_init_num_qs(vport, vport_msg);
idpf_vport_calc_num_q_desc(vport); idpf_vport_calc_num_q_desc(vport);
idpf_vport_calc_num_q_groups(vport); idpf_vport_calc_num_q_groups(vport);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment