Commit 6bd39bc3 authored by David S. Miller

Merge branch 'hns3-add-some-new-features-and-fix-some-bugs'

Peng Li says:

====================
hns3: add some new features and fix some bugs

This patchset adds 3 ethtool features: get_channels,
get_coalesce and set_coalesce, and fixes some bugs.

[patch 1/11] adds ethtool_ops.get_channels (ethtool -l) support
for VF.

[patch 2/11] removes the TSO config command from the VF driver,
as only the main PF can configure the TSO MSS length according to
the hardware.

[patch 3/11 - 4/11] add ethtool_ops {get|set}_coalesce
(ethtool -c/-C) support to PF.
[patch 5/11 - 9/11] fix some bugs related to {get|set}_coalesce.

[patch 10/11 - 11/11] fix the features handling in
hns3_nic_set_features(). Local variable "changed" was defined
to indicate which features changed, but was used only for feature
NETIF_F_HW_VLAN_CTAG_RX. Add checks to improve the reliability.

---
Change log:
V1 -> V2:
1, Rewrite the cover letter as requested by David Miller.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 19d28fbd bd368416
@@ -133,11 +133,16 @@ struct hnae3_vector_info {
#define HNAE3_RING_TYPE_B 0
#define HNAE3_RING_TYPE_TX 0
#define HNAE3_RING_TYPE_RX 1
#define HNAE3_RING_GL_IDX_S 0
#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
#define HNAE3_RING_GL_RX 0
#define HNAE3_RING_GL_TX 1
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
u32 flag;
u32 int_gl_idx;
};
#define HNAE3_IS_TX_RING(node) \
@@ -448,6 +453,8 @@ struct hnae3_knic_private_info {
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
const struct hnae3_dcb_ops *dcb_ops;
u16 int_rl_setting;
};
struct hnae3_roce_private_info {
......
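The hunks above add an int_gl_idx field to the ring chain node, tagged via the HNAE3_RING_GL_IDX_M/S mask and shift so the PF can later tell which GL register a ring is bound to (RX uses GL0, TX uses GL1, per the writel offsets in the next file's hunks). A minimal, self-contained sketch of that encode/decode round trip; the field helpers are re-declared locally to stand in for the driver's hnae_set_field/hnae_get_field macros, so it compiles outside the kernel tree:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the driver's hnae_set_field/hnae_get_field. */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
#define hnae_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= ~(mask); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

#define HNAE3_RING_GL_IDX_S 0
#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
#define HNAE3_RING_GL_RX 0
#define HNAE3_RING_GL_TX 1

int main(void)
{
	uint32_t int_gl_idx = 0;

	/* Tag a ring-chain node as using the TX GL register (GL1). */
	hnae_set_field(int_gl_idx, HNAE3_RING_GL_IDX_M,
		       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

	/* The PF unpacks the same field when binding ring to vector. */
	printf("gl idx = %u\n",
	       (unsigned int)hnae_get_field(int_gl_idx, HNAE3_RING_GL_IDX_M,
					    HNAE3_RING_GL_IDX_S)); /* prints 1 */
	return 0;
}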
@@ -158,43 +158,68 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
napi_disable(&tqp_vector->napi);
}
static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value)
{
/* this defines the configuration for GL (Interrupt Gap Limiter)
* GL defines the inter-interrupt gap.
* GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing
*/
writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
}
static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value)
{
u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
/* this defines the configuration for RL (Interrupt Rate Limiter).
* RL defines the rate of interrupts, i.e. the number of interrupts per second.
* GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing
*/
writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
!tqp_vector->rx_group.gl_adapt_enable)
/* According to the hardware, the range of rl_reg is
* 0-59 and the unit is 4.
*/
rl_reg |= HNS3_INT_RL_ENABLE_MASK;
writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value)
{
u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value)
{
u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
*/
/* Default: enable interrupt coalescing */
tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
/* Default: enable interrupt coalescing self-adaptive and GL */
tqp_vector->tx_group.gl_adapt_enable = 1;
tqp_vector->rx_group.gl_adapt_enable = 1;
tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
/* for now we are disabling Interrupt RL - we
* will re-enable later
*/
hns3_set_vector_coalesc_rl(tqp_vector, 0);
tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
hns3_set_vector_coalesce_tx_gl(tqp_vector,
tqp_vector->tx_group.int_gl);
hns3_set_vector_coalesce_rx_gl(tqp_vector,
tqp_vector->rx_group.int_gl);
/* Default: disable RL */
h->kinfo.int_rl_setting = 0;
hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}
@@ -1093,11 +1118,12 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = netdev->features ^ features;
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
netdev_features_t changed;
int ret;
if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
priv->ops.fill_desc = hns3_fill_desc_tso;
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
@@ -1105,14 +1131,18 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.fill_desc = hns3_fill_desc;
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
}
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
h->ae_algo->ops->enable_vlan_filter(h, true);
else
h->ae_algo->ops->enable_vlan_filter(h, false);
}
changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
h->ae_algo->ops->enable_hw_strip_rxvtag) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
else
@@ -2422,25 +2452,22 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
u16 rx_int_gl, tx_int_gl;
bool rx, tx;
rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
rx_int_gl = tqp_vector->rx_group.int_gl;
tx_int_gl = tqp_vector->tx_group.int_gl;
if (rx && tx) {
if (rx_int_gl > tx_int_gl) {
tqp_vector->tx_group.int_gl = rx_int_gl;
tqp_vector->tx_group.flow_level =
tqp_vector->rx_group.flow_level;
hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
} else {
tqp_vector->rx_group.int_gl = tx_int_gl;
tqp_vector->rx_group.flow_level =
tqp_vector->tx_group.flow_level;
hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
bool rx_update, tx_update;
if (rx_group->gl_adapt_enable) {
rx_update = hns3_get_new_int_gl(rx_group);
if (rx_update)
hns3_set_vector_coalesce_rx_gl(tqp_vector,
rx_group->int_gl);
}
if (tx_group->gl_adapt_enable) {
tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
if (tx_update)
hns3_set_vector_coalesce_tx_gl(tqp_vector,
tx_group->int_gl);
}
}
@@ -2501,6 +2528,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2516,6 +2545,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
hnae_set_field(chain->int_gl_idx,
HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S,
HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2527,6 +2560,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2540,6 +2575,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
cur_chain = chain;
rx_ring = rx_ring->next;
@@ -2628,7 +2666,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->rx_group.total_packets = 0;
tqp_vector->tx_group.total_bytes = 0;
tqp_vector->tx_group.total_packets = 0;
hns3_vector_gl_rl_init(tqp_vector);
hns3_vector_gl_rl_init(tqp_vector, priv);
tqp_vector->handle = h;
ret = hns3_get_vector_ring_chain(tqp_vector,
......
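One subtlety in the hns3_set_vector_coalesce_rl() hunk above: the RL enable bit is OR-ed in only when a non-zero limit was requested and neither the TX nor the RX group is running self-adaptive GL. A standalone sketch of that decision, an assumed model in which the writel() is replaced by a returned register value:

#include <stdint.h>
#include <stdbool.h>

#define HNS3_INT_RL_ENABLE_MASK 0x40

/* usecs -> register units; the RL register counts in 4 usec steps. */
static uint32_t hns3_rl_usec_to_reg(uint32_t int_rl)
{
	return int_rl >> 2;
}

/*
 * Value the driver would write to HNS3_VECTOR_RL_OFFSET: the enable
 * bit is set only for a non-zero limit with both TX and RX
 * self-adaptive GL disabled, mirroring hns3_set_vector_coalesce_rl().
 */
static uint32_t rl_reg_value(uint32_t rl_usecs, bool tx_adapt, bool rx_adapt)
{
	uint32_t rl_reg = hns3_rl_usec_to_reg(rl_usecs);

	if (rl_reg > 0 && !tx_adapt && !rx_adapt)
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	return rl_reg;
}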
@@ -451,10 +451,14 @@ enum hns3_link_mode_bits {
HNS3_LM_COUNT = 15
};
#define HNS3_INT_GL_50K 0x000A
#define HNS3_INT_GL_20K 0x0019
#define HNS3_INT_GL_18K 0x001B
#define HNS3_INT_GL_8K 0x003E
#define HNS3_INT_GL_MAX 0x1FE0
#define HNS3_INT_GL_50K 0x0014
#define HNS3_INT_GL_20K 0x0032
#define HNS3_INT_GL_18K 0x0036
#define HNS3_INT_GL_8K 0x007C
#define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40
struct hns3_enet_ring_group {
/* array of pointers to rings */
@@ -464,6 +468,7 @@ struct hns3_enet_ring_group {
u16 count;
enum hns3_flow_level_range flow_level;
u16 int_gl;
u8 gl_adapt_enable;
};
struct hns3_enet_tqp_vector {
@@ -594,6 +599,12 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch);
@@ -606,6 +617,13 @@ int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value);
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
......
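Note how the HNS3_INT_GL_* defines in this hunk switch from raw register units to microseconds (50K interrupts/s is a 20 usec gap, hence 0x000A becoming 0x0014), with the conversion now done at write time by the macros above: GL registers count in 2 usec steps, the RL register in 4 usec steps. A small worked example, with round_down() open-coded so it compiles outside the kernel tree:

#include <stdio.h>

/* Same arithmetic as the driver macros; round_down() is open-coded
 * here (power-of-2 alignment only), matching the kernel helper.
 */
#define round_down(x, y)        ((x) & ~((y) - 1))
#define hns3_gl_usec_to_reg(gl) ((gl) >> 1)   /* GL unit: 2 usecs */
#define hns3_gl_round_down(gl)  round_down(gl, 2)
#define hns3_rl_usec_to_reg(rl) ((rl) >> 2)   /* RL unit: 4 usecs */
#define hns3_rl_round_down(rl)  round_down(rl, 4)

int main(void)
{
	/* 20 usecs of GL -> register value 10; odd values round down. */
	printf("%u %u\n", hns3_gl_usec_to_reg(20u), hns3_gl_round_down(21u));
	/* 100 usecs of RL -> register value 25; 103 rounds down to 100. */
	printf("%u %u\n", hns3_rl_usec_to_reg(100u), hns3_rl_round_down(103u));
	return 0;
}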
@@ -887,6 +887,182 @@ static void hns3_get_channels(struct net_device *netdev,
h->ae_algo->ops->get_channels(h, ch);
}
static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *cmd)
{
struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u16 queue_num = h->kinfo.num_tqps;
if (queue >= queue_num) {
netdev_err(netdev,
"Invalid queue value %d! Queue max id=%d\n",
queue, queue_num - 1);
return -EINVAL;
}
tx_vector = priv->ring_data[queue].ring->tqp_vector;
rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable;
cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable;
cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl;
cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl;
cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
return 0;
}
static int hns3_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
return hns3_get_coalesce_per_queue(netdev, 0, cmd);
}
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
u32 rx_gl, tx_gl;
if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
netdev_err(netdev,
"Invalid rx-usecs value, rx-usecs range is 0-%d\n",
HNS3_INT_GL_MAX);
return -EINVAL;
}
if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
netdev_err(netdev,
"Invalid tx-usecs value, tx-usecs range is 0-%d\n",
HNS3_INT_GL_MAX);
return -EINVAL;
}
rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
if (rx_gl != cmd->rx_coalesce_usecs) {
netdev_info(netdev,
"rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
cmd->rx_coalesce_usecs, rx_gl);
}
tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
if (tx_gl != cmd->tx_coalesce_usecs) {
netdev_info(netdev,
"tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
cmd->tx_coalesce_usecs, tx_gl);
}
return 0;
}
static int hns3_check_rl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
u32 rl;
if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
netdev_err(netdev,
"tx_usecs_high must be same as rx_usecs_high.\n");
return -EINVAL;
}
if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
netdev_err(netdev,
"Invalid usecs_high value, usecs_high range is 0-%d\n",
HNS3_INT_RL_MAX);
return -EINVAL;
}
rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
if (rl != cmd->rx_coalesce_usecs_high) {
netdev_info(netdev,
"usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
cmd->rx_coalesce_usecs_high, rl);
}
return 0;
}
static int hns3_check_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
int ret;
ret = hns3_check_gl_coalesce_para(netdev, cmd);
if (ret) {
netdev_err(netdev,
"Check gl coalesce param fail. ret = %d\n", ret);
return ret;
}
ret = hns3_check_rl_coalesce_para(netdev, cmd);
if (ret) {
netdev_err(netdev,
"Check rl coalesce param fail. ret = %d\n", ret);
return ret;
}
if (cmd->use_adaptive_tx_coalesce == 1 ||
cmd->use_adaptive_rx_coalesce == 1) {
netdev_info(netdev,
"adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
cmd->use_adaptive_tx_coalesce,
cmd->use_adaptive_rx_coalesce);
}
return 0;
}
static void hns3_set_coalesce_per_queue(struct net_device *netdev,
struct ethtool_coalesce *cmd,
u32 queue)
{
struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
tx_vector = priv->ring_data[queue].ring->tqp_vector;
rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
}
static int hns3_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
u16 queue_num = h->kinfo.num_tqps;
int ret;
int i;
ret = hns3_check_coalesce_para(netdev, cmd);
if (ret)
return ret;
h->kinfo.int_rl_setting =
hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
for (i = 0; i < queue_num; i++)
hns3_set_coalesce_per_queue(netdev, cmd, i);
return 0;
}
static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
@@ -900,6 +1076,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
.get_link_ksettings = hns3_get_link_ksettings,
.get_channels = hns3_get_channels,
};
static const struct ethtool_ops hns3_ethtool_ops = {
@@ -924,6 +1101,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.nway_reset = hns3_nway_reset,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
.get_coalesce = hns3_get_coalesce,
.set_coalesce = hns3_set_coalesce,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
......
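Two invariants run through the new ethtool handlers above: the TX and RX rings of queue q sit at ring_data[q] and ring_data[num_tqps + q] respectively, and each vector has a single rate limiter shared by both directions, which is why tx_usecs_high must equal rx_usecs_high and both land in the one kinfo.int_rl_setting. A condensed, standalone model of the parameter checks (a hypothetical helper, not the driver's exact code):

#include <stdint.h>
#include <errno.h>

#define HNS3_INT_GL_MAX 0x1FE0
#define HNS3_INT_RL_MAX 0x00EC
#define round_down(x, y) ((x) & ~((y) - 1))

static int check_coalesce(uint32_t *tx_gl, uint32_t *rx_gl,
			  uint32_t *tx_rl, uint32_t *rx_rl)
{
	/* GL values (usecs) must fit the register range. */
	if (*tx_gl > HNS3_INT_GL_MAX || *rx_gl > HNS3_INT_GL_MAX)
		return -EINVAL;
	/* One rate limiter per vector: the two _high values must agree. */
	if (*tx_rl != *rx_rl || *rx_rl > HNS3_INT_RL_MAX)
		return -EINVAL;

	*tx_gl = round_down(*tx_gl, 2); /* GL granularity: 2 usecs */
	*rx_gl = round_down(*rx_gl, 2);
	*rx_rl = round_down(*rx_rl, 4); /* RL granularity: 4 usecs */
	*tx_rl = *rx_rl;
	return 0;
}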
@@ -3409,6 +3409,11 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
HCLGE_TQP_ID_S, node->tqp_index);
hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S,
hnae_get_field(node->int_gl_idx,
HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S));
req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
......
@@ -86,8 +86,6 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
/* TSO cmd */
HCLGEVF_OPC_TSO_GENERIC_CONFIG = 0x0C01,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
@@ -202,12 +200,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
u8 rsv[14];
};
#define HCLGEVF_TSO_ENABLE_B 0
struct hclgevf_cfg_tso_status_cmd {
u8 tso_enable;
u8 rsv[23];
};
#define HCLGEVF_TYPE_CRQ 0
#define HCLGEVF_TYPE_CSQ 1
#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
......
@@ -201,20 +201,6 @@ static int hclge_get_queue_info(struct hclgevf_dev *hdev)
return 0;
}
static int hclgevf_enable_tso(struct hclgevf_dev *hdev, int enable)
{
struct hclgevf_cfg_tso_status_cmd *req;
struct hclgevf_desc desc;
req = (struct hclgevf_cfg_tso_status_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_TSO_GENERIC_CONFIG,
false);
hnae_set_bit(req->tso_enable, HCLGEVF_TSO_ENABLE_B, enable);
return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
struct hclgevf_tqp *tqp;
@@ -1375,12 +1361,6 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_config;
}
ret = hclgevf_enable_tso(hdev, true);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to enable tso\n", ret);
goto err_config;
}
/* Initialize VF's MTA */
hdev->accept_mta_mc = true;
ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
@@ -1433,6 +1413,35 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
ae_dev->priv = NULL;
}
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
struct hnae3_handle *nic = &hdev->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}
/**
* hclgevf_get_channels - Get the current channels enabled and max supported.
* @handle: hardware information for network interface
* @ch: ethtool channels structure
*
* We don't support separate tx and rx queues as channels. The other count
* represents how many queues are being used for control. max_combined counts
* how many queue pairs we can support. They may not be mapped 1 to 1 with
* q_vectors since we support a lot more queue pairs than q_vectors.
**/
static void hclgevf_get_channels(struct hnae3_handle *handle,
struct ethtool_channels *ch)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
ch->max_combined = hclgevf_get_max_channels(hdev);
ch->other_count = 0;
ch->max_other = 0;
ch->combined_count = hdev->num_tqps;
}
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
@@ -1462,6 +1471,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
.get_channels = hclgevf_get_channels,
};
static struct hnae3_ae_algo ae_algovf = {
......
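The max_combined bound in hclgevf_get_max_channels() above is simply the smaller of what RSS can spread across all TCs and the TQPs actually assigned to this VF. A standalone rendering of that min_t(), with illustrative numbers (not from the source) in the comment:

#include <stdint.h>

/*
 * Mirror of hclgevf_get_max_channels(): the reported maximum is capped
 * both by RSS capacity per TC and by the function's TQP allocation.
 * E.g. rss_size_max = 16, num_tc = 4, num_tqps = 32
 *  -> min(16 * 4, 32) = 32 combined channels.
 */
static uint32_t get_max_channels(uint32_t rss_size_max, uint32_t num_tc,
				 uint32_t num_tqps)
{
	uint32_t by_rss = rss_size_max * num_tc;

	return by_rss < num_tqps ? by_rss : num_tqps;
}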