Commit d337f2af authored by Anirudh Venkataramanan, committed by Jeff Kirsher

ice: Use Tx|Rx in comments

In code comments, use Tx|Rx instead of tx|rx
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent df17b7e0
@@ -96,14 +96,14 @@ extern const char ice_drv_ver[];
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
/* Macros for each tx/rx ring in a VSI */
/* Macros for each Tx/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
/* Macros for each allocated tx/rx ring whether used or not in a VSI */
/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
@@ -183,8 +183,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_ring **rx_rings; /* rx ring array */
struct ice_ring **tx_rings; /* tx ring array */
struct ice_ring **rx_rings; /* Rx ring array */
struct ice_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -255,8 +255,8 @@ struct ice_q_vector {
struct ice_ring_container tx;
struct irq_affinity_notify affinity_notify;
u16 v_idx; /* index in the vsi->q_vector array. */
u8 num_ring_tx; /* total number of tx rings in vector */
u8 num_ring_rx; /* total number of rx rings in vector */
u8 num_ring_tx; /* total number of Tx rings in vector */
u8 num_ring_rx; /* total number of Rx rings in vector */
char name[ICE_INT_NAME_STR_LEN];
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
@@ -308,10 +308,10 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num lan tx queues setup */
u16 num_lan_rx; /* num lan rx queues setup */
u16 q_left_tx; /* remaining num tx queues left unclaimed */
u16 q_left_rx; /* remaining num rx queues left unclaimed */
u16 num_lan_tx; /* num lan Tx queues setup */
u16 num_lan_rx; /* num lan Rx queues setup */
u16 q_left_tx; /* remaining num Tx queues left unclaimed */
u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
u16 num_alloc_vsi;
u16 corer_count; /* Core reset count */
......
@@ -713,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
hw->evb_veb = true;
/* Query the allocated resources for tx scheduler */
/* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw);
if (status) {
ice_debug(hw, ICE_DBG_SCHED,
@@ -956,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
* @rxq_index: the index of the rx queue
* @rxq_index: the index of the Rx queue
*
* Copies rxq context from dense structure to hw register space
*/
@@ -1012,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* ice_write_rxq_ctx
* @hw: pointer to the hardware structure
* @rlan_ctx: pointer to the rxq context
* @rxq_index: the index of the rx queue
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
* it to hw register space
......
@@ -591,7 +591,6 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
*
*/
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
......
@@ -260,10 +260,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* a private ethtool flag). This is due to the nature of the
* ethtool stats API.
*
* User space programs such as ethtool must make 3 separate
* Userspace programs such as ethtool must make 3 separate
* ioctl requests, one for size, one for the strings, and
* finally one for the stats. Since these cross into
* user space, changes to the number or size could result in
* userspace, changes to the number or size could result in
* undefined memory access or incorrect string<->value
* correlations for statistics.
*
@@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
/**
* ice_set_pauseparam - Set Flow Control parameter
* @netdev: network interface device structure
* @pause: return tx/rx flow control status
* @pause: return Tx/Rx flow control status
*/
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
}
/**
* ice_get_rxfh_indir_size - get the rx flow hash indirection table size
* ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
* @netdev: network interface device structure
*
* Returns the table size.
@@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
}
/**
* ice_get_rxfh - get the rx flow hash indirection table
* ice_get_rxfh - get the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
@@ -1603,7 +1603,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
}
/**
* ice_set_rxfh - set the rx flow hash indirection table
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
......
@@ -20,7 +20,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
u16 pf_q;
int err;
/* what is RX queue number in global space of 2K Rx queues */
/* what is Rx queue number in global space of 2K Rx queues */
pf_q = vsi->rxq_map[ring->q_index];
/* clear the context structure first */
@@ -1200,7 +1200,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int i;
/* Allocate tx_rings */
/* Allocate Tx rings */
for (i = 0; i < vsi->alloc_txq; i++) {
struct ice_ring *ring;
@@ -1219,7 +1219,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
vsi->tx_rings[i] = ring;
}
/* Allocate rx_rings */
/* Allocate Rx rings */
for (i = 0; i < vsi->alloc_rxq; i++) {
struct ice_ring *ring;
@@ -2333,7 +2333,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
int start = res->search_hint;
int end = start;
if ((start + needed) > res->num_entries)
if ((start + needed) > res->num_entries)
return -ENOMEM;
id |= ICE_RES_VALID_BIT;
......
@@ -1786,7 +1786,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
/* only 1 rx queue unless RSS is enabled */
/* only 1 Rx queue unless RSS is enabled */
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
pf->num_lan_rx = 1;
else
......
@@ -1337,7 +1337,7 @@ ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
* @num_nodes: pointer to num nodes array
*
* This function calculates the number of supported nodes needed to add this
* VSI into tx tree including the VSI, parent and intermediate nodes in below
* VSI into Tx tree including the VSI, parent and intermediate nodes in below
* layers
*/
static void
@@ -1374,13 +1374,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
}
/**
* ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
* ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
* This function adds the VSI supported nodes into tx tree including the
* This function adds the VSI supported nodes into Tx tree including the
* VSI, its parent and intermediate nodes in below layers
*/
static enum ice_status
......
@@ -827,7 +827,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer:
* 1. Large Action
* 2. Look up tx rx
* 2. Look up Tx Rx
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
@@ -871,7 +871,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
/* call the fill switch rule to fill the lookup tx rx structure */
/* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
@@ -1168,8 +1168,8 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Call AQ command to add or update previously created VSI list with new VSI.
*
* Helper function to do book keeping associated with adding filter information
* The algorithm to do the booking keeping is described below :
* When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
* The algorithm to do the book keeping is described below :
* When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
* if only one VSI has been added till now
* Allocate a new VSI list and add two VSIs
* to this list using switch rule command
......
@@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
/**
* ice_setup_tx_ring - Allocate the Tx descriptors
* @tx_ring: the tx ring to set up
* @tx_ring: the Tx ring to set up
*
* Return 0 on success, negative on error
*/
@@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
/**
* ice_setup_rx_ring - Allocate the Rx descriptors
* @rx_ring: the rx ring to set up
* @rx_ring: the Rx ring to set up
*
* Return 0 on success, negative on error
*/
@@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
/**
* ice_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @rx_ring: Rx descriptor ring to store buffers on
* @old_buf: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
@@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
/**
* ice_fetch_rx_buf - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_desc: descriptor containing info written by hardware
*
* This function allocates an skb on the fly, and populates it with the page
@@ -904,7 +904,7 @@ static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
/**
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_ring: Rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
* @ptype: the packet type decoded by hardware
@@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
/**
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
* @rx_ring: Rx ring in play
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
*
@@ -946,7 +946,7 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
*
* This function provides a "bounce buffer" approach to Rx interrupt
@@ -1123,7 +1123,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
* __ice_maybe_stop_tx - 2nd level check for tx stop conditions
* __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
@@ -1146,7 +1146,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
}
/**
* ice_maybe_stop_tx - 1st level check for tx stop conditions
* ice_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
@@ -1156,6 +1156,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
return __ice_maybe_stop_tx(tx_ring, size);
}
@@ -1569,7 +1570,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
}
/**
* ice_xmit_desc_count - calculate number of tx descriptors needed
* ice_xmit_desc_count - calculate number of Tx descriptors needed
* @skb: send buffer
*
* Returns number of data descriptors needed for this skb.
......