Commit 47b62cd8 authored by David S. Miller

Merge branch 'net-hns-bugfixes-for-HNS-Driver'

Yonglong Liu says:

====================
net: hns: bugfixes for HNS Driver

This patchset fixes some bugs that were found in testing of
various scenarios, or were identified by KASAN/sparse.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ef0efcd3 15400663
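
Much of the series below converts register-base pointers from void * (or void __iomem *) to u8 __iomem *, which is exactly what sparse checks for. As a minimal sketch (not from this series) of the kind of address-space mismatch sparse reports, assuming a kernel build context; the helper names are made up for illustration, and sparse can be run over a directory with make C=2 M=<driver dir>:

/* Hypothetical example, not from this patchset. */
#include <linux/io.h>
#include <linux/types.h>

static u32 sample_read_ok(u8 __iomem *base, u32 reg)
{
        return readl(base + reg);   /* byte-granular offset, matching address space */
}

static u32 sample_read_warn(void *base, u32 reg)
{
        /* sparse: incorrect type in argument 1 (different address spaces) */
        return readl(base + reg);
}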
@@ -150,7 +150,6 @@ static int hnae_alloc_buffers(struct hnae_ring *ring)
 /* free desc along with its attached buffer */
 static void hnae_free_desc(struct hnae_ring *ring)
 {
-        hnae_free_buffers(ring);
         dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                          ring->desc_num * sizeof(ring->desc[0]),
                          ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
 /* fini ring, also free the buffer for the ring */
 static void hnae_fini_ring(struct hnae_ring *ring)
 {
+        if (is_rx_ring(ring))
+                hnae_free_buffers(ring);
+
         hnae_free_desc(ring);
         kfree(ring->desc_cb);
         ring->desc_cb = NULL;
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
 };

 struct hnae_queue {
-        void __iomem *io_base;
+        u8 __iomem *io_base;
         phys_addr_t phy_base;
         struct hnae_ae_dev *dev;        /* the device who use this queue */
         struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
 static void hns_mac_param_get(struct mac_params *param,
                               struct hns_mac_cb *mac_cb)
 {
-        param->vaddr = (void *)mac_cb->vaddr;
+        param->vaddr = mac_cb->vaddr;
         param->mac_mode = hns_get_enet_interface(mac_cb);
         ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
         param->mac_id = mac_cb->mac_id;
@@ -187,7 +187,7 @@ struct mac_statistics {
 /*mac para struct ,mac get param from nic or dsaf when initialize*/
 struct mac_params {
         char addr[ETH_ALEN];
-        void *vaddr; /*virtual address*/
+        u8 __iomem *vaddr; /*virtual address*/
         struct device *dev;
         u8 mac_id;
         /**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
         enum mac_mode mac_mode;
         u8 mac_id;
         struct hns_mac_cb *mac_cb;
-        void __iomem *io_base;
+        u8 __iomem *io_base;
         unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
         unsigned int virt_dev_num;
         struct device *dev;
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
                        DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
         dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
                        DSAF_TBL_TCAM_KEY_PORT_S, port);
-
-        mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
 }

 /**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
         /* default config dvc to 0 */
         mac_data.tbl_ucast_dvc = 0;
         mac_data.tbl_ucast_out_port = mac_entry->port_num;
-        tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-        tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+        tcam_data.tbl_tcam_data_high = mac_key.high.val;
+        tcam_data.tbl_tcam_data_low = mac_key.low.val;

         hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                                      0xff,
                                      mc_mask);
-                mask_key.high.val = le32_to_cpu(mask_key.high.val);
-                mask_key.low.val = le32_to_cpu(mask_key.low.val);

                 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
         }
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                 dsaf_dev->ae_dev.name, mac_key.high.val,
                 mac_key.low.val, entry_index);

-        tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-        tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+        tcam_data.tbl_tcam_data_high = mac_key.high.val;
+        tcam_data.tbl_tcam_data_low = mac_key.low.val;

         /* config mc entry with mask */
         hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                 /* config key mask */
                 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
-                mask_key.high.val = le32_to_cpu(mask_key.high.val);
-                mask_key.low.val = le32_to_cpu(mask_key.low.val);

                 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
         }
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                 soft_mac_entry += entry_index;
                 soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
         } else { /* not zero, just del port, update */
-                tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-                tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+                tcam_data.tbl_tcam_data_high = mac_key.high.val;
+                tcam_data.tbl_tcam_data_low = mac_key.low.val;

                 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
                                      &tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
         return DSAF_DUMP_REGS_NUM;
 }

+static int hns_dsaf_get_port_id(u8 port)
+{
+        if (port < DSAF_SERVICE_NW_NUM)
+                return port;
+
+        if (port >= DSAF_BASE_INNER_PORT_NUM)
+                return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+        return -EINVAL;
+}
+
 static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 {
         struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
         memset(&temp_key, 0x0, sizeof(temp_key));
         mask_entry.addr[0] = 0x01;
         hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
-                             port, mask_entry.addr);
+                             0xf, mask_entry.addr);
         tbl_tcam_mcast.tbl_mcast_item_vld = 1;
         tbl_tcam_mcast.tbl_mcast_old_en = 0;

-        if (port < DSAF_SERVICE_NW_NUM) {
-                mskid = port;
-        } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
-                mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
-        } else {
+        /* set MAC port to handle multicast */
+        mskid = hns_dsaf_get_port_id(port);
+        if (mskid == -EINVAL) {
                 dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
                         dsaf_dev->ae_dev.name, port,
                         mask_key.high.val, mask_key.low.val);
                 return;
         }
+        dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+                     mskid % 32, 1);
+
+        /* set pool bit map to handle multicast */
+        mskid = hns_dsaf_get_port_id(port_num);
+        if (mskid == -EINVAL) {
+                dev_err(dsaf_dev->dev,
+                        "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+                        dsaf_dev->ae_dev.name, port_num,
+                        mask_key.high.val, mask_key.low.val);
+                return;
+        }
         dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
                      mskid % 32, 1);
+
         memcpy(&temp_key, &mask_key, sizeof(mask_key));
         hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
                                    (struct dsaf_tbl_tcam_data *)(&mask_key),
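
The hns_dsaf_get_port_id() helper added above centralizes the port-to-bitmap-index mapping used by both call sites (mskid / 32 picks the u32 word of the port mask, mskid % 32 the bit). A stand-alone sketch of that mapping, using assumed values for the two constants purely for illustration:

/* Userspace sketch; the constant values are assumptions for the example,
 * not taken from the driver headers. */
#include <stdio.h>
#include <errno.h>

#define DSAF_SERVICE_NW_NUM          6   /* assumed for illustration */
#define DSAF_BASE_INNER_PORT_NUM     127 /* assumed for illustration */

static int get_port_id(unsigned char port)
{
        if (port < DSAF_SERVICE_NW_NUM)         /* service network ports map 1:1 */
                return port;
        if (port >= DSAF_BASE_INNER_PORT_NUM)   /* inner ports follow the service ports */
                return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
        return -EINVAL;                         /* anything in between is invalid */
}

int main(void)
{
        /* With the assumed constants: 3 -> 3, 127 -> 6, 50 -> -EINVAL (-22) */
        printf("%d %d %d\n", get_port_id(3), get_port_id(127), get_port_id(50));
        return 0;
}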
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                              u8 mac_id, u8 port_num);
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);

+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
 #endif /* __HNS_DSAF_MAIN_H__ */
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
                 dsaf_set_field(origin, 1ull << 10, 10, en);
                 dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
         } else {
-                u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+                u8 __iomem *base_addr = mac_cb->serdes_vaddr +
                         (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
                 dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
         }
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
         }
 }

-static void __iomem *
+static u8 __iomem *
 hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
 {
         return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
         dsaf_dev->ppe_common[comm_index] = NULL;
 }

-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
-                                        int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+                                      int ppe_idx)
 {
         return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
 }
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
         struct hns_ppe_hw_stats hw_stats;

         u8 index;       /* index in a ppe common device */
-        void __iomem *io_base;
+        u8 __iomem *io_base;
         int virq;
         u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
         u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
 struct ppe_common_cb {
         struct device *dev;
         struct dsaf_device *dsaf_dev;
-        void __iomem *io_base;
+        u8 __iomem *io_base;

         enum ppe_common_mode ppe_mode;
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
                 mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
         } else {
                 ring = &q->tx_ring;
-                ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+                ring->io_base = ring_pair_cb->q.io_base +
                         HNS_RCB_TX_REG_OFFSET;
                 irq_idx = HNS_RCB_IRQ_IDX_TX;
                 mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
         }
 }

-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
 {
         struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
@@ -1018,7 +1018,7 @@
 #define XGMAC_PAUSE_CTL_RSP_MODE_B      2
 #define XGMAC_PAUSE_CTL_TX_XOFF_B       3

-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
         writel(value, base + reg);
 }
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
 #define dsaf_set_bit(origin, shift, val) \
         dsaf_set_field((origin), (1ull << (shift)), (shift), (val))

-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                       u32 shift, u32 val)
 {
         u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
 #define dsaf_get_bit(origin, shift) \
         dsaf_get_field((origin), (1ull << (shift)), (shift))

-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                      u32 shift)
 {
         u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
         dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))

 #define dsaf_write_b(addr, data)\
-        writeb((data), (__iomem unsigned char *)(addr))
+        writeb((data), (__iomem u8 *)(addr))
 #define dsaf_read_b(addr)\
-        readb((__iomem unsigned char *)(addr))
+        readb((__iomem u8 *)(addr))

 #define hns_mac_reg_read64(drv, offset) \
-        readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+        readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))

 #endif /* _DSAF_REG_H */
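
The dsaf_set_reg_field()/dsaf_get_reg_field() helpers above are the usual read-modify-write pattern built on mask/shift field macros. A small userspace sketch of that pattern (the macro bodies below are re-implemented from the semantics implied here, and renamed so they are not mistaken for the driver's definitions):

/* Illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define field_set(origin, mask, shift, val) \
        ((origin) = (((origin) & ~(mask)) | (((uint32_t)(val) << (shift)) & (mask))))
#define field_get(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
        uint32_t reg = 0xffff0000;

        field_set(reg, 0x7 << 4, 4, 5);         /* write 5 into bits [6:4] */
        printf("reg=0x%08x field=%u\n", reg, field_get(reg, 0x7 << 4, 4));
        /* prints: reg=0xffff0050 field=5 */
        return 0;
}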
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
         dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
         dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
         dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
-        dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+        dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
 }

 /**
@@ -29,9 +29,6 @@
 #define SERVICE_TIMER_HZ (1 * HZ)

-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
 #define RCB_IRQ_NOT_INITED 0
 #define RCB_IRQ_INITED 1
 #define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
         wmb(); /* commit all data before submit */
         assert(skb->queue_mapping < priv->ae_handle->q_num);
         hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
-        ring->stats.tx_pkts++;
-        ring->stats.tx_bytes += skb->len;

         return NETDEV_TX_OK;
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
                 /* issue prefetch for next Tx descriptor */
                 prefetch(&ring->desc_cb[ring->next_to_clean]);
         }
+        /* update tx ring statistics. */
+        ring->stats.tx_pkts += pkts;
+        ring->stats.tx_bytes += bytes;

         NETIF_TX_UNLOCK(ring);
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                         hns_nic_tx_fini_pro_v2;
                 netif_napi_add(priv->netdev, &rd->napi,
-                               hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+                               hns_nic_common_poll, NAPI_POLL_WEIGHT);
                 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
         }

         for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                         hns_nic_rx_fini_pro_v2;
                 netif_napi_add(priv->netdev, &rd->napi,
-                               hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+                               hns_nic_common_poll, NAPI_POLL_WEIGHT);
                 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
         }
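
For reference on the two hunks above: NAPI_POLL_WEIGHT is the kernel's standard default NAPI budget (64), so both ring types now use the common weight instead of the driver-private NIC_TX_CLEAN_MAX_NUM/NIC_RX_CLEAN_MAX_NUM constants removed earlier. The netif_napi_add() signature of this kernel generation still takes the weight explicitly:

/* include/linux/netdevice.h (kernels of this era; the weight argument was
 * dropped from netif_napi_add() in much later kernels):
 *   #define NAPI_POLL_WEIGHT 64
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
                    int (*poll)(struct napi_struct *napi, int budget),
                    int weight);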
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
 };

 struct hns_mdio_device {
-        void *vbase;            /* mdio reg base address */
+        u8 __iomem *vbase;      /* mdio reg base address */
         struct regmap *subctrl_vbase;
         struct hns_mdio_sc_reg sc_reg;
 };
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
 #define MDIO_SC_CLK_ST 0x531C
 #define MDIO_SC_RESET_ST 0x5A1C

-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
-        u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-        writel_relaxed(value, reg_addr + reg);
+        writel_relaxed(value, base + reg);
 }

 #define MDIO_WRITE_REG(a, reg, value) \
         mdio_write_reg((a)->vbase, (reg), (value))

-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
 {
-        u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-        return readl_relaxed(reg_addr + reg);
+        return readl_relaxed(base + reg);
 }

 #define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
 #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))

-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
                                u32 val)
 {
         u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
 #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
         mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))

-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
 {
         u32 origin;