Commit f6779e4e authored by David S. Miller

Merge branch 'hns3-a-few-code-improvements'

Peng Li says:

====================
net: hns3: a few code improvements

This patchset removes some redundant code and fixes a few coding-style
issues reported by an internal concentrated review; no functional
changes are introduced.

---
Change log:
V1 -> V2:
1. Removed a patch according to a comment from David Miller.
---
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 50d4feb5 e4e87715
......@@ -85,10 +85,12 @@ config HNS3
drivers (like ODP) to register with HNAE devices and their associated
operations.
+ if HNS3
config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
default m
depends on PCI_MSI
depends on HNS3
---help---
This selects the HNS3_HCLGE network acceleration engine & its hardware
compatibility layer. The engine would be used in Hisilicon hip08 family of
......@@ -97,16 +99,15 @@ config HNS3_HCLGE
config HNS3_DCB
bool "Hisilicon HNS3 Data Center Bridge Support"
default n
- depends on HNS3 && HNS3_HCLGE && DCB
+ depends on HNS3_HCLGE && DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
If unsure, say N.
config HNS3_HCLGEVF
tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
depends on PCI_MSI
depends on HNS3
tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
depends on PCI_MSI
depends on HNS3_HCLGE
---help---
This selects the HNS3 VF drivers network acceleration engine & its hardware
......@@ -115,11 +116,13 @@ config HNS3_HCLGEVF
config HNS3_ENET
tristate "Hisilicon HNS3 Ethernet Device Support"
default m
depends on 64BIT && PCI
depends on HNS3
---help---
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
devices and their associated operations.
+ endif #HNS3
endif # NET_VENDOR_HISILICON
......@@ -40,13 +40,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
{
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_UNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_ROCE:
- hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break;
default:
break;
......@@ -60,16 +60,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_UNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_UNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_ROCE:
- inited = hnae_get_bit(ae_dev->flag,
- HNAE3_ROCE_CLIENT_INITED_B);
+ inited = hnae3_get_bit(ae_dev->flag,
+ HNAE3_ROCE_CLIENT_INITED_B);
break;
default:
break;
......@@ -85,7 +85,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
- hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
......@@ -190,7 +190,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
continue;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
/* check the client list for the match with this ae_dev type and
* initialize the figure out client instance
......@@ -220,7 +220,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
......@@ -234,7 +234,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_algo->node);
......@@ -278,7 +278,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
goto out_err;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
break;
}
......@@ -310,7 +310,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
......@@ -321,7 +321,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_dev->node);
......
......@@ -62,10 +62,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
......@@ -167,7 +167,6 @@ struct hnae3_client_ops {
#define HNAE3_CLIENT_NAME_LENGTH 16
struct hnae3_client {
char name[HNAE3_CLIENT_NAME_LENGTH];
- u16 version;
unsigned long state;
enum hnae3_client_type type;
const struct hnae3_client_ops *ops;
......@@ -436,7 +435,6 @@ struct hnae3_dcb_ops {
struct hnae3_ae_algo {
const struct hnae3_ae_ops *ops;
struct list_head node;
- char name[HNAE3_CLASS_NAME_SIZE];
const struct pci_device_id *pdev_id_table;
};
......@@ -509,17 +507,17 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
};
- #define hnae_set_field(origin, mask, shift, val) \
+ #define hnae3_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
(origin) |= ((val) << (shift)) & (mask); \
} while (0)
- #define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+ #define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
- #define hnae_set_bit(origin, shift, val) \
- hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
- #define hnae_get_bit(origin, shift) \
- hnae_get_field((origin), (0x1 << (shift)), (shift))
+ #define hnae3_set_bit(origin, shift, val) \
+ hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+ #define hnae3_get_bit(origin, shift) \
+ hnae3_get_field((origin), (0x1 << (shift)), (shift))
void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
......
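Note: the renamed helpers above are plain mask-and-shift macros. A minimal standalone sketch of their behavior (the macro bodies are copied from the hunk above; the demo main() and its values are illustrative only):

#include <stdio.h>

#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_set_bit(origin, shift, val) \
	hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
#define hnae3_get_bit(origin, shift) \
	hnae3_get_field((origin), (0x1 << (shift)), (shift))

int main(void)
{
	unsigned long flag = 0;

	hnae3_set_bit(flag, 3, 1);                     /* set bit 3 */
	printf("bit3=%lu\n", hnae3_get_bit(flag, 3));  /* bit3=1 */
	hnae3_set_bit(flag, 3, 0);                     /* clear bit 3 */
	printf("bit3=%lu\n", hnae3_get_bit(flag, 3));  /* bit3=0 */
	return 0;
}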
......@@ -499,7 +499,6 @@ struct hns3_enet_tqp_vector {
u16 num_tqps; /* total number of tqps in TQP vector */
cpumask_t affinity_mask;
char name[HNAE3_INT_NAME_LEN];
/* when 0 should adjust interrupt coalesce parameter */
......@@ -591,7 +590,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
- #define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+ #define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
......@@ -601,9 +600,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
- #define hnae_buf_size(_ring) ((_ring)->buf_size)
- #define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
- #define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+ #define hnae3_buf_size(_ring) ((_ring)->buf_size)
+ #define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
+ #define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
......
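Note: hnae3_page_order()/hnae3_page_size() just round the ring's buffer size up to a whole number of pages. A small userspace sketch (PAGE_SIZE and get_order() are stubbed here for illustration; in the kernel both come from the arch page headers):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages for the demo */

/* Stub of the kernel's get_order(): smallest k with (PAGE_SIZE << k) >= size */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long buf_size = 2048;	/* hypothetical ring->buf_size */

	/* prints order=0 page_size=4096; buf_size=8192 would give order=1 */
	printf("order=%d page_size=%lu\n", get_order(buf_size),
	       PAGE_SIZE << get_order(buf_size));
	return 0;
}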
......@@ -18,8 +18,7 @@
#include "hclge_main.h"
#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
- #define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
- DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
static int hclge_ring_space(struct hclge_cmq_ring *ring)
......@@ -46,31 +45,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
return 0;
}
static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
+ int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
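Note: the hunk above replaces kzalloc() + dma_map_single() with a single dma_zalloc_coherent() call, which returns zeroed memory and fills in the matching device-visible DMA address, so the separate mapping step and its dma_mapping_error() check disappear. A minimal sketch of the resulting pattern, assuming a hypothetical ring structure (demo_cmq_ring and the demo_* functions are stand-ins, not driver code):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hypothetical stand-in for the command-queue ring used above. */
struct demo_cmq_ring {
	void *desc;			/* CPU virtual address of descriptors */
	dma_addr_t desc_dma_addr;	/* device-visible address */
	int desc_num;
	size_t desc_size;
};

static int demo_alloc_desc(struct device *dev, struct demo_cmq_ring *ring)
{
	int size = ring->desc_num * ring->desc_size;

	/* One call: zeroed, coherent memory plus its DMA address. */
	ring->desc = dma_zalloc_coherent(dev, size, &ring->desc_dma_addr,
					 GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;
	return 0;
}

static void demo_free_desc(struct device *dev, struct demo_cmq_ring *ring)
{
	int size = ring->desc_num * ring->desc_size;

	if (ring->desc) {
		dma_free_coherent(dev, size, ring->desc,
				  ring->desc_dma_addr);
		ring->desc = NULL;
	}
}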
......@@ -111,8 +103,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
if (is_read)
desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
......@@ -154,31 +144,20 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_cmq_ring *csq = &hw->cmq.csq;
- u16 ntc = csq->next_to_clean;
- struct hclge_desc *desc;
- int clean = 0;
u32 head;
+ int clean;
- desc = &csq->desc[ntc];
head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
rmb(); /* Make sure head is ready before touch any data */
if (!is_valid_csq_clean_head(csq, head)) {
dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
return 0;
}
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
- }
- csq->next_to_clean = ntc;
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
return clean;
}
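Note: the rewritten hclge_cmd_csq_clean() computes the number of cleaned descriptors in O(1) with ring-wrap arithmetic instead of walking and zeroing each slot. A tiny standalone check with illustrative values:

#include <stdio.h>

/* Ring distance from next_to_clean up to (but not including) head. */
static int csq_clean_count(int head, int next_to_clean, int desc_num)
{
	return (head - next_to_clean + desc_num) % desc_num;
}

int main(void)
{
	/* 1024-entry ring wrapping past the end: slots 1020..1023 and
	 * 0..3 are cleaned, i.e. 8 descriptors. */
	printf("%d\n", csq_clean_count(4, 1020, 1024));	/* prints 8 */
	/* no wrap: head is ahead of next_to_clean by 5 */
	printf("%d\n", csq_clean_count(15, 10, 1024));	/* prints 5 */
	return 0;
}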
......@@ -256,33 +235,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
*/
if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
do {
- if (hclge_cmd_csq_done(hw))
+ if (hclge_cmd_csq_done(hw)) {
+ complete = true;
break;
+ }
udelay(1);
timeout++;
} while (timeout < hw->cmq.tx_timeout);
}
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
+ if (!complete) {
+ retval = -EAGAIN;
+ } else {
handle = 0;
while (handle < num) {
/* Get the result of hardware write back */
desc_to_use = &hw->cmq.csq.desc[ntc];
desc[handle] = *desc_to_use;
pr_debug("Get cmd desc:\n");
if (likely(!hclge_is_special_opcode(opcode)))
desc_ret = le16_to_cpu(desc[handle].retval);
else
desc_ret = le16_to_cpu(desc[0].retval);
- if ((enum hclge_cmd_return_status)desc_ret ==
- HCLGE_CMD_EXEC_SUCCESS)
+ if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
retval = 0;
else
retval = -EIO;
- hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
+ hw->cmq.last_status = desc_ret;
ntc++;
handle++;
if (ntc == hw->cmq.csq.desc_num)
......@@ -290,9 +270,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
}
}
- if (!complete)
- retval = -EAGAIN;
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
if (handle != num) {
......
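Note: moving the hclge_cmd_csq_done() check inside the do/while lets the send path record success the moment the hardware catches up, so the trailing re-check is gone and !complete maps directly to -EAGAIN. A sketch of the pattern (names are hypothetical stand-ins; hw_done() plays the role of hclge_cmd_csq_done()):

#include <stdbool.h>
#include <errno.h>

extern bool hw_done(void);		/* stand-in for hclge_cmd_csq_done(hw) */
extern void udelay(unsigned long usecs);

static int poll_until_done(unsigned int tx_timeout)
{
	bool complete = false;
	unsigned int timeout = 0;

	do {
		if (hw_done()) {	/* test before every delay */
			complete = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < tx_timeout);

	return complete ? 0 : -EAGAIN;	/* caller retries on -EAGAIN */
}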
......@@ -128,12 +128,12 @@ static int hclge_get_ring_chain_from_mbx(
HCLGE_MBX_RING_NODE_VARIABLE_NUM))
return -ENOMEM;
- hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
- hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[5]);
+ hnae3_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[5]);
cur_chain = ring_chain;
......@@ -142,19 +142,19 @@ static int hclge_get_ring_chain_from_mbx(
if (!new_chain)
goto err;
- hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
+ hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
new_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
- hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
+ hnae3_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
cur_chain->next = new_chain;
cur_chain = new_chain;
......@@ -460,7 +460,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
......
......@@ -67,16 +67,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
mdio_cmd->data_wr = cpu_to_le16(data);
......@@ -105,16 +105,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
/* Read out phy data */
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
......@@ -125,7 +125,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
return ret;
}
- if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
+ if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
dev_err(&hdev->pdev->dev, "mdio read data error\n");
return -EIO;
}
......
......@@ -1184,10 +1184,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
- grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
- HCLGE_BP_GRP_ID_S);
- sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
- HCLGE_BP_SUB_GRP_ID_S);
+ grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+ HCLGE_BP_GRP_ID_S);
+ sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
qs_bitmap |= (1 << sub_grp);
......
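Note: hclge_bp_setup_hw() splits each queue-set id into a group index and a sub-group bit via the field helpers. A standalone sketch, assuming (for illustration only; these are not the driver's real masks) that the sub-group occupies bits 4:0 and the group bits 9:5:

#include <stdio.h>

#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

/* Assumed demo layout, not the driver's real mask values. */
#define BP_SUB_GRP_ID_S	0
#define BP_SUB_GRP_ID_M	(0x1f << BP_SUB_GRP_ID_S)	/* bits 4:0 */
#define BP_GRP_ID_S	5
#define BP_GRP_ID_M	(0x1f << BP_GRP_ID_S)		/* bits 9:5 */

int main(void)
{
	unsigned int qs_id = 37;	/* binary 1:00101 */
	unsigned int grp = hnae3_get_field(qs_id, BP_GRP_ID_M, BP_GRP_ID_S);
	unsigned int sub = hnae3_get_field(qs_id, BP_SUB_GRP_ID_M,
					   BP_SUB_GRP_ID_S);

	/* grp=1 sub_grp=5: bit 5 of group 1's qs_bitmap gets set */
	printf("grp=%u sub_grp=%u\n", grp, sub);
	return 0;
}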
......@@ -123,10 +123,11 @@ struct hclge_port_shapping_cmd {
};
#define hclge_tm_set_field(dest, string, val) \
- hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
- (HCLGE_TM_SHAP_##string##_LSH), val)
+ hnae3_set_field((dest), \
+ (HCLGE_TM_SHAP_##string##_MSK), \
+ (HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
- hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
+ hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH))
int hclge_tm_schd_init(struct hclge_dev *hdev);
......
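Note: the ##string## token pasting lets one macro cover every shaper field; hclge_tm_set_field(x, IR_B, v) expands to hnae3_set_field(x, HCLGE_TM_SHAP_IR_B_MSK, HCLGE_TM_SHAP_IR_B_LSH, v). A compilable sketch with made-up mask values (IR_B stands in for a field name; the layout here is assumed, the real _MSK/_LSH values live in hclge_tm.h):

#include <stdio.h>

#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)

/* Demo-only field definition. */
#define HCLGE_TM_SHAP_IR_B_MSK	(0xff << 0)
#define HCLGE_TM_SHAP_IR_B_LSH	0

#define hclge_tm_set_field(dest, string, val) \
	hnae3_set_field((dest), \
			(HCLGE_TM_SHAP_##string##_MSK), \
			(HCLGE_TM_SHAP_##string##_LSH), val)

int main(void)
{
	unsigned int shap = 0;

	hclge_tm_set_field(shap, IR_B, 0x7f);	/* IR_B pasted into both names */
	printf("0x%x\n", shap);			/* prints 0x7f */
	return 0;
}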
......@@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
return 0;
}
static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- hclgevf_ring_to_dma_dir(ring));
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
......
......@@ -450,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
- HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
- HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
}
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
......@@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
}
req->msg[idx_offset] =
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
req->msg[idx_offset + 1] = node->tqp_index;
- req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S);
+ req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
i++;
if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
......@@ -1000,8 +1000,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
/* wait to check the hardware reset completion status */
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
+ while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
+ (cnt < HCLGEVF_RESET_WAIT_CNT)) {
msleep(HCLGEVF_RESET_WAIT_MS);
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
cnt++;
......@@ -1959,7 +1959,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
static struct hnae3_ae_algo ae_algovf = {
.ops = &hclgevf_ops,
- .name = HCLGEVF_NAME,
.pdev_id_table = ae_algovf_pci_tbl,
};
......
......@@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
......