Commit 7dbc63f0 authored by Tony Nguyen

ice: Misc minor fixes

This is a collection of minor fixes including typos, white space, and
style. No functional changes.
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
parent 6a2c2b2c
...@@ -1718,8 +1718,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) ...@@ -1718,8 +1718,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
* @num: number of resources * @num: number of resources
* @res: pointer to array that contains the resources to free * @res: pointer to array that contains the resources to free
*/ */
enum ice_status enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{ {
struct ice_aqc_alloc_free_res_elem *buf; struct ice_aqc_alloc_free_res_elem *buf;
enum ice_status status; enum ice_status status;
...@@ -2121,7 +2120,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, ...@@ -2121,7 +2120,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
* @cap_count: the number of capabilities * @cap_count: the number of capabilities
* *
* Helper device to parse device (0x000B) capabilities list. For * Helper device to parse device (0x000B) capabilities list. For
* capabilities shared between device and device, this relies on * capabilities shared between device and function, this relies on
* ice_parse_common_caps. * ice_parse_common_caps.
* *
* Loop through the list of provided capabilities and extract the relevant * Loop through the list of provided capabilities and extract the relevant
......
...@@ -357,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf) ...@@ -357,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
* *
* Create and register a devlink_port for this PF. Note that although each * Create and register a devlink_port for this PF. Note that although each
* physical function is connected to a separate devlink instance, the port * physical function is connected to a separate devlink instance, the port
* will still be numbered according to the physical function id. * will still be numbered according to the physical function ID.
* *
* Return: zero on success or an error code on failure. * Return: zero on success or an error code on failure.
*/ */
......
...@@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) ...@@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* This function generates a key from a value, a don't care mask and a never * This function generates a key from a value, a don't care mask and a never
* match mask. * match mask.
* upd, dc, and nm are optional parameters, and can be NULL: * upd, dc, and nm are optional parameters, and can be NULL:
* upd == NULL --> udp mask is all 1's (update all bits) * upd == NULL --> upd mask is all 1's (update all bits)
* dc == NULL --> dc mask is all 0's (no don't care bits) * dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits) * nm == NULL --> nm mask is all 0's (no never match bits)
*/ */
......
...@@ -57,7 +57,7 @@ ...@@ -57,7 +57,7 @@
#define PRTDCB_GENS 0x00083020 #define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */ #define PRTDCB_TUP2TC 0x001D26C0
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) #define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) #define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
...@@ -362,6 +362,7 @@ ...@@ -362,6 +362,7 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PRTRPB_RDPC 0x000AC260
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0 #define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
...@@ -378,6 +379,5 @@ ...@@ -378,6 +379,5 @@
#define PFPM_WUS_FW_RST_WK_M BIT(31) #define PFPM_WUS_FW_RST_WK_M BIT(31)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#define PRTRPB_RDPC 0x000AC260
#endif /* _ICE_HW_AUTOGEN_H_ */ #endif /* _ICE_HW_AUTOGEN_H_ */
...@@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, ...@@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
return ICE_ERR_PARAM; return ICE_ERR_PARAM;
} }
/* query the current node information from FW before additing it /* query the current node information from FW before adding it
* to the SW DB * to the SW DB
*/ */
status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
...@@ -578,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) ...@@ -578,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
/** /**
* ice_aq_rl_profile - performs a rate limiting task * ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
* @opcode:opcode for add, query, or remove profile(s) * @opcode: opcode for add, query, or remove profile(s)
* @num_profiles: the number of profiles * @num_profiles: the number of profiles
* @buf: pointer to buffer * @buf: pointer to buffer
* @buf_size: buffer size in bytes * @buf_size: buffer size in bytes
......
...@@ -631,9 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) ...@@ -631,9 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
dma_addr_t dma; dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */ /* since we are recycling buffers we should seldom need to alloc */
if (likely(page)) { if (likely(page))
return true; return true;
}
/* alloc new page for storage */ /* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
...@@ -1252,12 +1251,12 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1252,12 +1251,12 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
* @itr: ITR value to update * @itr: ITR value to update
* *
* Calculate how big of an increment should be applied to the ITR value passed * Calculate how big of an increment should be applied to the ITR value passed
* in based on wmem_default, SKB overhead, Ethernet overhead, and the current * in based on wmem_default, SKB overhead, ethernet overhead, and the current
* link speed. * link speed.
* *
* The following is a calculation derived from: * The following is a calculation derived from:
* wmem_default / (size + overhead) = desired_pkts_per_int * wmem_default / (size + overhead) = desired_pkts_per_int
* rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
* (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
* *
* Assuming wmem_default is 212992 and overhead is 640 bytes per * Assuming wmem_default is 212992 and overhead is 640 bytes per
......
...@@ -2974,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) ...@@ -2974,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->max_frame = qpi->rxq.max_pkt_size; vsi->max_frame = qpi->rxq.max_pkt_size;
} }
/* VF can request to configure less than allocated queues /* VF can request to configure less than allocated queues or default
* or default allocated queues. So update the VSI with new number * allocated queues. So update the VSI with new number
*/ */
vsi->num_txq = num_txq; vsi->num_txq = num_txq;
vsi->num_rxq = num_rxq; vsi->num_rxq = num_rxq;
......
...@@ -298,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid) ...@@ -298,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
} }
} }
/** /**
* ice_xsk_umem_disable - disable a UMEM region * ice_xsk_umem_disable - disable a UMEM region
* @vsi: Current VSI * @vsi: Current VSI
...@@ -594,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) ...@@ -594,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size) if (!size)
break; break;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size; rx_buf->xdp->data_end = rx_buf->xdp->data + size;
xsk_buff_dma_sync_for_cpu(rx_buf->xdp); xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment