Commit 6cc9c6fb authored by Simon Horman, committed by Jakub Kicinski

mlx4: Address spelling errors

Address spelling errors flagged by codespell.

This patch follows up on an earlier patch by Colin Ian King,
which addressed a spelling error in a user-visible log message [1].
This patch includes that change.

[1] https://lore.kernel.org/netdev/20231209225135.4055334-1-colin.i.king@gmail.com/

This patch is intended to cover all files under
drivers/net/ethernet/mellanox/mlx4
Signed-off-by: Simon Horman <horms@kernel.org>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
Link: https://lore.kernel.org/r/20240205-mlx5-codespell-v1-1-63b86dffbb61@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 60b4dfcd
@@ -2199,8 +2199,9 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (cmd != MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
slave, cmd);
-/* Turn on internal error letting slave reset itself immeditaly,
- * otherwise it might take till timeout on command is passed
+/* Turn on internal error letting slave reset itself
+ * immediately, otherwise it might take till timeout on
+ * command is passed
*/
reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
}
@@ -2954,7 +2955,7 @@ static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
dummy_admin.default_vlan = vlan;
/* VF wants to move to other VST state which is valid with current
- * rate limit. Either differnt default vlan in VST or other
+ * rate limit. Either different default vlan in VST or other
* supported QoS priority. Otherwise we don't allow this change when
* the TX rate is still configured.
*/
@@ -115,7 +115,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
return;
}
-/* Acessing the CQ outside of rcu_read_lock is safe, because
+/* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
++cq->arm_sn;
@@ -137,7 +137,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
return;
}
-/* Acessing the CQ outside of rcu_read_lock is safe, because
+/* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
cq->event(cq, event_type);
@@ -96,8 +96,8 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
#define MLX4_EN_WRAP_AROUND_SEC 10UL
/* By scheduling the overflow check every 5 seconds, we have a reasonably
- * good chance we wont miss a wrap around.
- * TOTO: Use a timer instead of a work queue to increase the guarantee.
+ * good chance we won't miss a wrap around.
+ * TODO: Use a timer instead of a work queue to increase the guarantee.
*/
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
@@ -1072,7 +1072,8 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
1, MLX4_MCAST_CONFIG);
/* Update multicast list - we cache all addresses so they won't
- * change while HW is updated holding the command semaphor */
+ * change while HW is updated holding the command semaphore
+ */
netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
@@ -1817,7 +1818,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_set_rss_steer_rules(priv))
mlx4_warn(mdev, "Failed setting steering rules\n");
-/* Attach rx QP to bradcast address */
+/* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
@@ -762,7 +762,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
-en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
@@ -992,7 +992,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->ts_requested = 1;
}
-/* Prepare ctrl segement apart opcode+ownership, which depends on
+/* Prepare ctrl segment apart opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
@@ -210,7 +210,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
-/* ensure all information is written before setting the ownersip bit */
+/* ensure all information is written before setting the ownership bit */
dma_wmb();
s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
++slave_eq->prod;
@@ -44,7 +44,7 @@
/* Default supported priorities for VPP allocation */
#define MLX4_DEFAULT_QOS_PRIO (0)
-/* Derived from FW feature definition, 0 is the default vport fo all QPs */
+/* Derived from FW feature definition, 0 is the default vport for all QPs */
#define MLX4_VPP_DEFAULT_VPORT (0)
struct mlx4_vport_qos_param {
@@ -98,7 +98,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
u16 *available_vpp, u8 *vpp_p_up);
/**
- * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities.
+ * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities.
* The total number of VPPs assigned to all for a port must not exceed
* the value reported by available_vpp in mlx4_ALLOCATE_VPP_get.
* VPP allocation is allowed only after the port type has been set,
@@ -113,7 +113,7 @@ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
/**
- * mlx4_SET_VPORT_QOS_get - Query QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport.
* Each priority allowed for the Vport is assigned with a share of the BW,
* and a BW limitation. This commands query the current QoS values.
*
@@ -128,7 +128,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
struct mlx4_vport_qos_param *out_param);
/**
- * mlx4_SET_VPORT_QOS_set - Set QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport.
* QoS parameters can be modified at any time, but must be initialized
* before any QP is associated with the VPort.
*
@@ -129,7 +129,7 @@ static const struct mlx4_profile default_profile = {
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 19,
-.num_mtt = 1 << 20, /* It is really num mtt segements */
+.num_mtt = 1 << 20, /* It is really num mtt segments */
};
static const struct mlx4_profile low_mem_profile = {
@@ -1508,7 +1508,7 @@ static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
priv->v2p.port1 = port1;
priv->v2p.port2 = port2;
} else {
-mlx4_err(dev, "Failed to change port mape: %d\n", err);
+mlx4_err(dev, "Failed to change port map: %d\n", err);
}
}
@@ -112,7 +112,7 @@ struct mlx4_en_stat_out_flow_control_mbox {
__be64 tx_pause_duration;
/* Number of transmitter transitions from XOFF state to XON state */
__be64 tx_pause_transition;
-/* Reserverd */
+/* Reserved */
__be64 reserved[2];
};
@@ -2118,7 +2118,7 @@ static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
* @data: output buffer to put the requested data into.
*
* Reads cable module eeprom data, puts the outcome data into
- * data pointer paramer.
+ * data pointer parameter.
* Returns num of read bytes on success or a negative error
* code.
*/