Commit df17b7e0 authored by Anirudh Venkataramanan, committed by Jeff Kirsher

ice: Cosmetic formatting changes

1. Fix several cases of double spacing
2. Fix typos
3. Capitalize abbreviations
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 2c5492de
@@ -150,10 +150,10 @@ enum ice_state {
__ICE_RESET_FAILED, /* set by reset/rebuild */
/* When checking for the PF to be in a nominal operating state, the
* bits that are grouped at the beginning of the list need to be
- * checked.  Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
- * be checked.  If you need to add a bit into consideration for nominal
+ * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+ * be checked. If you need to add a bit into consideration for nominal
* operating state, it must be added before
- * __ICE_STATE_NOMINAL_CHECK_BITS.  Do not move this entry's position
+ * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
* without appropriate consideration.
*/
__ICE_STATE_NOMINAL_CHECK_BITS,
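
[Editor's note] The grouping convention this comment documents lends itself to a single bitmap test. A minimal sketch (the helper name is hypothetical; pf->state and __ICE_STATE_NBITS are assumed from the surrounding driver code):

/* Hypothetical helper: the PF is "nominal" only when every state bit
 * declared before __ICE_STATE_NOMINAL_CHECK_BITS is clear.
 */
static bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	return !bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS);
}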
@@ -201,8 +201,8 @@ struct ice_vsi {
int sw_base_vector; /* Irq base for OS reserved vectors */
int hw_base_vector; /* HW (absolute) index of a vector */
enum ice_vsi_type type;
-u16 vsi_num;  /* HW (absolute) index of this VSI */
-u16 idx;  /* software index in pf->vsi[] */
+u16 vsi_num; /* HW (absolute) index of this VSI */
+u16 idx; /* software index in pf->vsi[] */
/* Interrupt thresholds */
u16 work_lmt;
......
@@ -5,7 +5,7 @@
#define _ICE_ADMINQ_CMD_H_
/* This header file defines the Admin Queue commands, error codes and
- * descriptor format.  It is shared between Firmware and Software.
+ * descriptor format. It is shared between Firmware and Software.
*/
#define ICE_MAX_VSI 768
@@ -463,7 +463,7 @@ struct ice_aqc_sw_rules {
};
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
- * This structures describes the lookup rules and associated actions.  "index"
+ * This structures describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
* used to identify the rule for Update/Get/Remove commands.
*/
@@ -1111,7 +1111,7 @@ struct ice_aqc_get_set_rss_keys {
};
/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
-struct ice_aqc_get_set_rss_lut  {
+struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
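
[Editor's note] These shift/mask values pack a VSI selection into one little-endian word. A usage sketch, assuming the command struct exposes a __le16 vsi_id field as in the upstream header:

/* Sketch: address VSI 'vsi_num' in a Get/Set RSS LUT command */
lut->vsi_id = cpu_to_le16(ICE_AQC_GSET_RSS_LUT_VSI_VALID |
			  ((vsi_num << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
			   ICE_AQC_GSET_RSS_LUT_VSI_ID_M));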
@@ -1315,10 +1315,10 @@ struct ice_aqc_get_clear_fw_log {
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ).  The firmware writes back onto the command descriptor and returns
- * the result of the command.  Asynchronous events that are not an immediate
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
* result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format.  Descriptors are in little-endian notation with
+ * the same descriptor format. Descriptors are in little-endian notation with
* 32-bit words.
*/
struct ice_aq_desc {
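
[Editor's note] The life cycle described above (driver posts, firmware writes back in place) is what ice_fill_dflt_direct_cmd_desc, whose hunk appears further down, sets up. A minimal sketch for a direct command, assuming the upstream field names and flag constants from this header:

/* Sketch: prepare a direct (bufferless) command descriptor; all
 * on-the-wire fields are little-endian.
 */
memset(&desc, 0, sizeof(desc));
desc.opcode = cpu_to_le16(ice_aqc_opc_get_ver);
desc.flags = cpu_to_le16(ICE_AQ_FLAG_SI);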
@@ -1380,10 +1380,10 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
-ICE_AQ_RC_OK = 0, /* success */
+ICE_AQ_RC_OK = 0, /* Success */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
-ICE_AQ_RC_EEXIST = 13, /* object already exists */
+ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
};
......
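
[Editor's note] Since these codes come back from firmware, drivers usually log them as-is. A hypothetical debug helper (not part of this commit) covering the subset shown above:

static const char *ice_aq_err_str(enum ice_aq_err err)
{
	switch (err) {
	case ICE_AQ_RC_OK:	return "OK";
	case ICE_AQ_RC_ENOMEM:	return "ENOMEM";
	case ICE_AQ_RC_EBUSY:	return "EBUSY";
	case ICE_AQ_RC_EEXIST:	return "EEXIST";
	case ICE_AQ_RC_ENOSPC:	return "ENOSPC";
	default:		return "UNKNOWN";
	}
}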
@@ -775,7 +775,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
- * This is the main send command routine for the ATQ. It runs the q,
+ * This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
*/
enum ice_status
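
[Editor's note] The "runs the queue" part of this routine polls a completion predicate like the ice_sq_done named in the hunk header. A sketch of that predicate, assuming the driver's rd32 register accessor and control-queue bookkeeping:

/* Sketch: the send queue is done once firmware has advanced the
 * hardware head register to the driver's next_to_use index.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}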
@@ -969,7 +969,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
- * the contents through e.  It can also return how many events are
+ * the contents through e. It can also return how many events are
* left to process through 'pending'.
*/
enum ice_status
......
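
[Editor's note] A typical caller drains the ARQ by looping on 'pending'. A usage sketch, assuming the upstream ice_rq_event_info layout and the ICE_AQ_MAX_BUF_LEN constant:

struct ice_rq_event_info event = { 0 };
u16 pending = 0;

event.buf_len = ICE_AQ_MAX_BUF_LEN;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
	return;

do {
	/* one element per call; 'pending' reports what is left */
	if (ice_clean_rq_elem(hw, &hw->adminq, &event, &pending))
		break;
	/* inspect event.desc and event.msg_buf here */
} while (pending);

kfree(event.msg_buf);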
@@ -6,11 +6,11 @@
union ice_32byte_rx_desc {
struct {
-__le64  pkt_addr; /* Packet buffer address */
-__le64  hdr_addr; /* Header buffer address */
+__le64 pkt_addr; /* Packet buffer address */
+__le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_addr is DD bit */
-__le64  rsvd1;
-__le64  rsvd2;
+__le64 rsvd1;
+__le64 rsvd2;
} read;
struct {
struct {
@@ -105,11 +105,11 @@ enum ice_rx_ptype_payload_layer {
*/
union ice_32b_rx_flex_desc {
struct {
-__le64  pkt_addr; /* Packet buffer address */
-__le64  hdr_addr; /* Header buffer address */
-/* bit 0 of hdr_addr is DD bit */
-__le64  rsvd1;
-__le64  rsvd2;
+__le64 pkt_addr; /* Packet buffer address */
+__le64 hdr_addr; /* Header buffer address */
+/* bit 0 of hdr_addr is DD bit */
+__le64 rsvd1;
+__le64 rsvd2;
} read;
struct {
/* Qword 0 */
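
[Editor's note] Both descriptor layouts reserve bit 0 of the first writeback word as the DD ("descriptor done") flag. A sketch of the test for the flex format, assuming the status-bit enum this file defines (see the hunk below):

/* Sketch: has hardware written this flex descriptor back yet? */
static bool ice_rx_desc_done(union ice_32b_rx_flex_desc *rx_desc)
{
	return !!(le16_to_cpu(rx_desc->wb.status_error0) &
		  BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S));
}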
@@ -256,6 +256,9 @@ enum ice_rx_flex_desc_status_error_0_bits {
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
+#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
+#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
/* RLAN Rx queue context data
*
@@ -274,18 +277,18 @@ struct ice_rlan_ctx {
u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
u16 hbuf; /* bigger than needed, see above for reason */
-u8  dtype;
-u8  dsize;
-u8  crcstrip;
-u8  l2tsel;
-u8  hsplit_0;
-u8  hsplit_1;
-u8  showiv;
+u8 dtype;
+u8 dsize;
+u8 crcstrip;
+u8 l2tsel;
+u8 hsplit_0;
+u8 hsplit_1;
+u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
-u8  tphrdesc_ena;
-u8  tphwdesc_ena;
-u8  tphdata_ena;
-u8  tphhead_ena;
+u8 tphrdesc_ena;
+u8 tphwdesc_ena;
+u8 tphdata_ena;
+u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
};
@@ -413,35 +416,35 @@ enum ice_tx_ctx_desc_cmd_bits {
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
-u8  port_num;
+u8 port_num;
u16 cgd_num; /* bigger than needed, see above for reason */
-u8  pf_num;
+u8 pf_num;
u16 vmvf_num;
-u8  vmvf_type;
+u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
-u8  tsyn_ena;
-u8  alt_vlan;
+u8 tsyn_ena;
+u8 alt_vlan;
u16 cpuid; /* bigger than needed, see above for reason */
-u8  wb_mode;
-u8  tphrd_desc;
-u8  tphrd;
-u8  tphwr_desc;
+u8 wb_mode;
+u8 tphrd_desc;
+u8 tphrd;
+u8 tphwr_desc;
u16 cmpq_id;
u16 qnum_in_func;
-u8  itr_notification_mode;
-u8  adjust_prof_id;
+u8 itr_notification_mode;
+u8 adjust_prof_id;
u32 qlen; /* bigger than needed, see above for reason */
-u8  quanta_prof_idx;
-u8  tso_ena;
+u8 quanta_prof_idx;
+u8 tso_ena;
u16 tso_qnum;
-u8  legacy_int;
-u8  drop_ena;
-u8  cache_prof_idx;
-u8  pkt_shaper_prof_idx;
-u8  int_q_state; /* width not needed - internal do not write */
+u8 legacy_int;
+u8 drop_ena;
+u8 cache_prof_idx;
+u8 pkt_shaper_prof_idx;
+u8 int_q_state; /* width not needed - internal do not write */
};
/* macro to make the table lines short */
......
@@ -1012,7 +1012,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @vsi: the VSI being configured
* @v_idx: index of the vector in the VSI struct
*
- * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
@@ -1051,7 +1051,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
* ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
- * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
*/
static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
@@ -2136,9 +2136,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
pf->q_left_rx -= vsi->alloc_rxq;
break;
default:
-/* if VSI type is not recognized, clean up the resources and
- * exit
- */
+/* clean up the resources and exit */
goto unroll_vsi_init;
}
......
@@ -408,7 +408,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
* OICR interrupt. The OICR handler (ice_misc_intr) determines what type
* of reset is pending and sets bits in pf->state indicating the reset
- * type and __ICE_RESET_OICR_RECV.  So, if the latter bit is set
+ * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
* prepare for pending reset if not already (for PF software-initiated
* global resets the software should already be prepared for it as
* indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
@@ -1382,7 +1382,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
* @pf: board private structure
*
* This sets up the handler for MSIX 0, which is used to manage the
- * non-queue interrupts, e.g. AdminQ and errors.  This is not used
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
* when in MSI or Legacy interrupt mode.
*/
static int ice_req_irq_msix_misc(struct ice_pf *pf)
@@ -3674,7 +3674,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
-netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n",
+netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
mode, status, hw->adminq.sq_last_status);
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
@@ -3785,7 +3785,7 @@ static void ice_tx_timeout(struct net_device *netdev)
* @netdev: network interface device structure
*
* The open entry point is called when a network interface is made
- * active by the system (IFF_UP).  At this point all resources needed
+ * active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the netdev watchdog is enabled,
* and the stack is notified that the interface is ready.
@@ -3818,7 +3818,7 @@ static int ice_open(struct net_device *netdev)
* @netdev: network interface device structure
*
* The stop entry point is called when an interface is de-activated by the OS,
- * and the netdevice enters the DOWN state.  The hardware is still under the
+ * and the netdevice enters the DOWN state. The hardware is still under the
* driver's control, but the netdev interface is disabled.
*
* Returns success only - not allowed to fail
@@ -3847,14 +3847,14 @@ ice_features_check(struct sk_buff *skb,
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
- * being requested for this frame.  We can rule out both by just
+ * being requested for this frame. We can rule out both by just
* checking for CHECKSUM_PARTIAL
*/
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
/* We cannot support GSO if the MSS is going to be less than
- * 64 bytes.  If it is then we need to drop support for GSO.
+ * 64 bytes. If it is then we need to drop support for GSO.
*/
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
features &= ~NETIF_F_GSO_MASK;
......
@@ -46,7 +46,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
* @link_speed: variable containing the link_speed to be converted
*
* Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps.  Else return
+ * If adv_link_support is true, then return link speed in Mbps. Else return
* link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
* needs to cast back to an enum virtchnl_link_speed in the case where
* adv_link_support is false, but when adv_link_support is true the caller can
......
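
[Editor's note] A condensed sketch of the two-mode conversion this comment describes (illustrative subset of speeds; the real mapping covers every ICE_AQ_LINK_SPEED_* value):

static u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
	if (adv_link_support) {
		/* advanced support: return the raw speed in Mbps */
		switch (link_speed) {
		case ICE_AQ_LINK_SPEED_1000MB:
			return 1000;
		case ICE_AQ_LINK_SPEED_25GB:
			return 25000;
		default:
			return 0;
		}
	}
	/* legacy: an enum virtchnl_link_speed value cast to u32 */
	switch (link_speed) {
	case ICE_AQ_LINK_SPEED_1000MB:
		return (u32)VIRTCHNL_LINK_SPEED_1GB;
	case ICE_AQ_LINK_SPEED_25GB:
		return (u32)VIRTCHNL_LINK_SPEED_25GB;
	default:
		return (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
	}
}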
@@ -129,7 +129,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
*
* NOTE: *req_desc is both an input/output parameter.
* The caller of this function first calls this function with *request_desc set
- * to 0.  If the response from f/w has *req_desc set to 0, all the switch
+ * to 0. If the response from f/w has *req_desc set to 0, all the switch
* configuration information has been returned; if non-zero (meaning not all
* the information was returned), the caller should call this function again
* with *req_desc set to the previous value returned by f/w to get the
@@ -1863,7 +1863,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
/* Update the previous switch rule to a new VSI list which
- * includes current VSI thats requested
+ * includes current VSI that is requested
*/
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
if (status)
......
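
[Editor's note] Returning to the *req_desc resume cookie documented in the first ice_switch.c hunk above: a caller drives it in a loop until firmware reports nothing left. A sketch (buffer allocation elided; helper and argument names follow the upstream code):

u16 req_desc = 0;
u16 num_elems;

do {
	if (ice_aq_get_sw_cfg(hw, buf, buf_len, &req_desc,
			      &num_elems, NULL))
		break;
	/* walk num_elems response entries in buf here */
} while (req_desc);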
@@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
rx_ring->next_to_alloc = val;
/* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.  (Only
+ * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
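
[Editor's note] What follows this comment in ice_release_rx_desc() is the classic barrier-then-doorbell pair, sketched here:

/* ensure the descriptor writes are globally visible ... */
wmb();
/* ... then bump the hardware tail register */
writel(val, rx_ring->tail);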
@@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
* ice_pull_tail - ice specific version of skb_pull_tail
* @skb: pointer to current skb being adjusted
*
- * This function is an ice specific version of __pskb_pull_tail.  The
+ * This function is an ice specific version of __pskb_pull_tail. The
* main difference between this version and the original function is that
* this function can make several assumptions about the state of things
* that allow for significant optimizations versus the standard function.
@@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean.  If the buffer is an EOP buffer
+ * This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
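
[Editor's note] A sketch of the EOP test at the heart of that helper, using the ice_test_staterr named in the hunk header and the status-bit enum from ice_lan_tx_rx.h:

/* Sketch: end-of-frame set means this buffer completes the packet,
 * so no chaining is needed.
 */
if (likely(ice_test_staterr(rx_desc, BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))))
	return false;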
@@ -950,7 +950,7 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
* @budget: Total limit on number of packets to process
*
* This function provides a "bounce buffer" approach to Rx interrupt
- * processing.  The advantage to this is that on systems that have
+ * processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
* it by maintaining the mapping of the page to the system.
*
@@ -1553,7 +1553,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* Finally, we add one to round up. Because 256 isn't an exact multiple of
* 3, we'll underestimate near each multiple of 12K. This is actually more
* accurate as we have 4K - 1 of wiggle room that we can fit into the last
- * segment.  For our purposes this is accurate out to 1M which is orders of
+ * segment. For our purposes this is accurate out to 1M which is orders of
* magnitude greater than our largest possible GSO size.
*
* This would then be implemented as:
@@ -1621,7 +1621,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
nr_frags -= ICE_MAX_BUF_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
-/* Initialize size to the negative value of gso_size minus 1.  We
+/* Initialize size to the negative value of gso_size minus 1. We
* use this as the worst case scenerio in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
......
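
[Editor's note] The rounding argument in the @@ -1553 hunk above collapses to one expression; upstream it backs a descriptor-count helper along these lines (a sketch, with the derivation recapped in a comment):

/* Approximate DIV_ROUND_UP(size, 12K): 85 / 2^20 is just under
 * 1 / 12K, so the multiply-and-shift slightly underestimates and
 * the +1 rounds up.
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}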
@@ -156,8 +156,6 @@ static void ice_free_vf_res(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
-/***********************enable_vf routines*****************************/
-
/**
* ice_dis_vf_mappings
* @vf: pointer to the VF structure
@@ -454,7 +452,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
- * We dont want to reconfigure interrupts since AVF driver doesn't
+ * We don't want to reconfigure interrupts since AVF driver doesn't
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
@@ -1105,7 +1103,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
* ice_process_vflr_event - Free VF resources via IRQ calls
* @pf: pointer to the PF structure
*
- * called from the VLFR IRQ handler to
+ * called from the VFLR IRQ handler to
* free up VF resources and state variables
*/
void ice_process_vflr_event(struct ice_pf *pf)
@@ -1764,7 +1762,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Tx queue info from VF into VSI */
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
-/* copy Rx queue info from VF into vsi */
+/* copy Rx queue info from VF into VSI */
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
@@ -1830,7 +1828,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
* @msg: pointer to the msg buffer
* @set: true if mac filters are being set, false otherwise
*
- * add guest mac address filter
+ * add guest MAC address filter
*/
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
@@ -1968,9 +1966,9 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
* @msg: pointer to the msg buffer
*
* VFs get a default number of queues but can use this message to request a
- * different number.  If the request is successful, PF will reset the VF and
+ * different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to vf.
+ * available queue pairs via virtchnl message response to VF.
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
@@ -1991,7 +1989,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
-"VF %d tried to request %d queues.  Ignoring.\n",
+"VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
} else if (req_queues > ICE_MAX_QS_PER_VF) {
dev_err(&pf->pdev->dev,
......
@@ -70,7 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
-u8 num_req_qs;  /* num of queue pairs requested by VF */
+u8 num_req_qs; /* num of queue pairs requested by VF */
};
#ifdef CONFIG_PCI_IOV
......