Commit 480c9df5 authored by Colin Ian King, committed by Kalle Valo

wifi: ath12k: Fix spelling mistakes in warning messages and comments

There are quite a few spelling mistakes in warning messages and in a number of comments. Fix these.
Signed-off-by: Colin Ian King <colin.i.king@gmail.com>
Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
Link: https://lore.kernel.org/r/20230214092122.265336-1-colin.i.king@gmail.com
parent 778f83f8
@@ -946,7 +946,7 @@ int ath12k_ce_alloc_pipes(struct ath12k_base *ab)
                 ret = ath12k_ce_alloc_pipe(ab, i);
                 if (ret) {
-                        /* Free any parial successful allocation */
+                        /* Free any partial successful allocation */
                         ath12k_ce_free_pipes(ab);
                         return ret;
                 }
...
@@ -691,7 +691,7 @@ struct ath12k_base {
         /* Below regd's are protected by ab->data_lock */
         /* This is the regd set for every radio
-         * by the firmware during initializatin
+         * by the firmware during initialization
          */
         struct ieee80211_regdomain *default_regd[MAX_RADIOS];
         /* This regd is set during dynamic country setting
...
@@ -1429,7 +1429,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
                 }
                 if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
-                        ath12k_warn(ab, "SPT allocated memoty is not 4K aligned");
+                        ath12k_warn(ab, "SPT allocated memory is not 4K aligned");
                         ret = -EINVAL;
                         goto free;
                 }
...
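For context on the alignment test in the hunk above: a 4 KiB-aligned DMA address has its twelve least-significant bits clear, so AND-ing the address with a low-bits mask is non-zero exactly when the buffer is misaligned. A minimal standalone sketch of that check, assuming a 0xFFF-style mask (the real ATH12K_SPT_4K_ALIGN_CHECK value is defined elsewhere in the driver and is not shown in this hunk):

#include <stdbool.h>
#include <stdint.h>

/* Assumed stand-in for ATH12K_SPT_4K_ALIGN_CHECK: the low 12 bits. */
#define EXAMPLE_4K_ALIGN_MASK 0xFFFull

/* True when paddr sits on a 4 KiB boundary (low 12 bits all zero). */
static bool example_is_4k_aligned(uint64_t paddr)
{
        return (paddr & EXAMPLE_4K_ALIGN_MASK) == 0;
}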
@@ -371,7 +371,7 @@ struct ath12k_dp {
 #define HTT_TX_WBM_COMP_STATUS_OFFSET 8
-/* HTT tx completion is overlayed in wbm_release_ring */
+/* HTT tx completion is overlaid in wbm_release_ring */
 #define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
 #define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0)
 #define HTT_TX_WBM_COMP_INFO1_EXCEPTION_FRAME BIT(4)
@@ -545,7 +545,7 @@ enum htt_srng_ring_id {
  * 3'b010: 4 usec
  * 3'b011: 8 usec (default)
  * 3'b100: 16 usec
- * Others: Reserverd
+ * Others: Reserved
  * b'19 - response_required:
  * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
  * b'20:31 - reserved: reserved for future use
@@ -1126,7 +1126,7 @@ struct htt_tx_ring_selection_cfg_cmd {
         __le32 tlv_filter_mask_in1;
         __le32 tlv_filter_mask_in2;
         __le32 tlv_filter_mask_in3;
-        __le32 reserverd[3];
+        __le32 reserved[3];
 } __packed;
 #define HTT_TX_RING_TLV_FILTER_MGMT_DMA_LEN GENMASK(3, 0)
...
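The GENMASK()/BIT() definitions in the first hunk above describe where the HTT completion status lives inside the reused WBM release-ring words; such fields are normally pulled out with FIELD_GET() from <linux/bitfield.h>. A hedged sketch under that assumption (the helper name, the EXAMPLE_ copies of the masks and the bare u32 parameters are illustrative, not the driver's actual parsing code):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_HTT_TX_WBM_COMP_INFO0_STATUS            GENMASK(16, 13)
#define EXAMPLE_HTT_TX_WBM_COMP_INFO1_REINJECT_REASON   GENMASK(3, 0)

/* Extract the HTT tx completion status and reinject reason from the
 * raw info words of a WBM release entry.
 */
static void example_parse_htt_tx_comp(u32 info0, u32 info1,
                                      u32 *status, u32 *reinject)
{
        *status = FIELD_GET(EXAMPLE_HTT_TX_WBM_COMP_INFO0_STATUS, info0);
        *reinject = FIELD_GET(EXAMPLE_HTT_TX_WBM_COMP_INFO1_REINJECT_REASON, info1);
}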
@@ -813,7 +813,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
                 spin_unlock_bh(&buf_ring->idr_lock);
                 if (unlikely(!msdu)) {
-                        ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+                        ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
                                     buf_id);
                         return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
                 }
@@ -1124,7 +1124,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
         /* PN for multicast packets are not validate in HW,
          * so skip 802.3 rx path
-         * Also, fast_rx expectes the STA to be authorized, hence
+         * Also, fast_rx expects the STA to be authorized, hence
          * eapol packets are sent in slow path.
          */
         if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol_tkip &&
@@ -1917,7 +1917,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
                 spin_unlock_bh(&buf_ring->idr_lock);
                 if (unlikely(!msdu)) {
-                        ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+                        ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
                                     buf_id);
                         return DP_MON_TX_STATUS_PPDU_NOT_DONE;
                 }
@@ -2110,7 +2110,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
                 spin_unlock_bh(&buf_ring->idr_lock);
                 if (unlikely(!skb)) {
-                        ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+                        ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
                                     buf_id);
                         goto move_next;
                 }
@@ -2511,7 +2511,7 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
                 spin_unlock_bh(&buf_ring->idr_lock);
                 if (unlikely(!skb)) {
-                        ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+                        ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
                                     buf_id);
                         goto move_next;
                 }
...
@@ -2443,7 +2443,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
         /* PN for multicast packets are not validate in HW,
          * so skip 802.3 rx path
-         * Also, fast_rx expectes the STA to be authorized, hence
+         * Also, fast_rx expects the STA to be authorized, hence
          * eapol packets are sent in slow path.
          */
         if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
@@ -2611,7 +2611,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
                 if (!desc_info) {
                         desc_info = ath12k_dp_get_rx_desc(ab, cookie);
                         if (!desc_info) {
-                                ath12k_warn(ab, "Invalid cookie in manual desc retrival");
+                                ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
                                 continue;
                         }
                 }
@@ -3297,7 +3297,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
         if (!desc_info) {
                 desc_info = ath12k_dp_get_rx_desc(ab, cookie);
                 if (!desc_info) {
-                        ath12k_warn(ab, "Invalid cookie in manual desc retrival");
+                        ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
                         return -EINVAL;
                 }
         }
@@ -3718,7 +3718,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
                 if (!desc_info) {
                         desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
                         if (!desc_info) {
-                                ath12k_warn(ab, "Invalid cookie in manual desc retrival");
+                                ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
                                 continue;
                         }
                 }
...
@@ -609,7 +609,7 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
                 HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
         s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
-        /* Some LMAC rings are not accesed from the host:
+        /* Some LMAC rings are not accessed from the host:
          * RXDMA_BUG, RXDMA_DST, RXDMA_MONITOR_BUF, RXDMA_MONITOR_STATUS,
          * RXDMA_MONITOR_DST, RXDMA_MONITOR_DESC, RXDMA_DIR_BUF_SRC,
          * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA
...
@@ -270,7 +270,7 @@ struct ath12k_base;
 #define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN BIT(5)
 #define HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN BIT(8)
-/* TCL ring feild mask and offset */
+/* TCL ring field mask and offset */
 #define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
 #define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
 #define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
@@ -296,7 +296,7 @@ struct ath12k_base;
 #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
 #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
-/* REO ring feild mask and offset */
+/* REO ring field mask and offset */
 #define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
 #define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
 #define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
@@ -738,7 +738,7 @@ struct hal_srng {
         } u;
 };
-/* Interrupt mitigation - Batch threshold in terms of numer of frames */
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
 #define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
 #define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
 #define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
@@ -813,7 +813,7 @@ enum hal_rx_buf_return_buf_manager {
 #define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
 #define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
 #define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
 #define HAL_REO_CMD_UPD0_VLD BIT(9)
 #define HAL_REO_CMD_UPD0_ALDC BIT(10)
@@ -838,7 +838,7 @@ enum hal_rx_buf_return_buf_manager {
 #define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
 #define HAL_REO_CMD_UPD0_PN BIT(30)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
 #define HAL_REO_CMD_UPD1_VLD BIT(16)
 #define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
 #define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
@@ -854,7 +854,7 @@ enum hal_rx_buf_return_buf_manager {
 #define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
 #define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
 #define HAL_REO_CMD_UPD2_SVLD BIT(10)
 #define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
 #define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
...
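The three comments fixed above all make the same point: the HAL_REO_CMD_UPD0/1/2_* bits must stay in step with the HAL_REO_UPD_RX_QUEUE_INFO0/1/2_* fields, because the first word of a REO queue-update command selects which parameters to touch while the later words carry the new values. A rough sketch of that pattern, using a hypothetical command image and helper rather than the driver's real structures:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_REO_CMD_UPD0_VLD        BIT(9)
#define EXAMPLE_REO_CMD_UPD1_VLD        BIT(16)

/* Hypothetical command image: upd0 flags which fields to update,
 * upd1 carries the corresponding new values.
 */
struct example_reo_upd_cmd {
        u32 upd0;
        u32 upd1;
};

/* Request an update of the queue's "valid" flag and set its new value. */
static void example_reo_set_vld(struct example_reo_upd_cmd *cmd, bool valid)
{
        cmd->upd0 |= EXAMPLE_REO_CMD_UPD0_VLD;
        if (valid)
                cmd->upd1 |= EXAMPLE_REO_CMD_UPD1_VLD;
        else
                cmd->upd1 &= ~EXAMPLE_REO_CMD_UPD1_VLD;
}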
@@ -706,7 +706,7 @@ struct rx_msdu_desc {
  *
  * msdu_continuation
  * When set, this MSDU buffer was not able to hold the entire MSDU.
- * The next buffer will therefor contain additional information
+ * The next buffer will therefore contain additional information
  * related to this MSDU.
  *
  * msdu_length
@@ -1294,7 +1294,7 @@ struct hal_tcl_data_cmd {
  * link descriptor.
  *
  * tcl_cmd_type
- * used to select the type of TCL Command decriptor
+ * used to select the type of TCL Command descriptor
  *
  * desc_type
  * Indicates the type of address provided in the buf_addr_info.
@@ -1408,7 +1408,7 @@ struct hal_tcl_data_cmd {
  * index_loop_override
  * When set, address search and packet routing is forced to use
  * 'search_index' instead of following the register configuration
- * seleced by Bank_id.
+ * selected by Bank_id.
  *
  * ring_id
  * The buffer pointer ring ID.
@@ -1990,7 +1990,7 @@ struct hal_wbm_release_ring {
  * Producer: SW/TQM/RXDMA/REO/SWITCH
  * Consumer: WBM/SW/FW
  *
- * HTT tx status is overlayed on wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * HTT tx status is overlaid on wbm_release ring on 4-byte words 2, 3, 4 and 5
  * for software based completions.
  *
  * buf_addr_info
@@ -2552,7 +2552,7 @@ struct hal_reo_status_hdr {
  * commands.
  *
  * execution_time (in us)
- * The amount of time REO took to excecute the command. Note that
+ * The amount of time REO took to execute the command. Note that
  * this time does not include the duration of the command waiting
  * in the command ring, before the execution started.
  *
...
@@ -1072,7 +1072,7 @@ struct rx_msdu_end_qcn9274 {
  *
  * l4_offset
  * Depending upon mode bit, this field either indicates the
- * L4 offset nin bytes from the start of RX_HEADER (only valid
+ * L4 offset in bytes from the start of RX_HEADER (only valid
  * if either ipv4_proto or ipv6_proto is set to 1) or indicates
  * the offset in bytes to the start of TCP or UDP header from
  * the start of the IP header after decapsulation (Only valid if
...
@@ -494,7 +494,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
         /* tx/rx chainmask reported from fw depends on the actual hw chains used,
          * For example, for 4x4 capable macphys, first 4 chains can be used for first
-         * mac and the remaing 4 chains can be used for the second mac or vice-versa.
+         * mac and the remaining 4 chains can be used for the second mac or vice-versa.
          * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
          * will be advertised for second mac or vice-versa. Compute the shift value
          * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
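The chainmask comment in this hunk is the reason a shift value exists at all: if the second MAC owns chains 4-7 its mask is 0xf0, and the rate/NSS bookkeeping wants chain numbering to start at 0, so the shift is simply the position of the lowest set bit (0xf -> 0, 0xf0 -> 4). A minimal sketch of that computation, not the driver's exact helper:

#include <linux/bitops.h>
#include <linux/types.h>

/* How far a tx/rx chainmask must be shifted right so its lowest
 * chain lands at bit 0, e.g. 0xf -> 0, 0xf0 -> 4; 0 stays 0.
 */
static u32 example_chainmask_shift(u32 chainmask)
{
        if (!chainmask)
                return 0;

        return __ffs(chainmask);
}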
@@ -1743,7 +1743,7 @@ int ath12k_wmi_vdev_install_key(struct ath12k *ar,
         int ret, len, key_len_aligned;
         /* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
-         * length is specifed in cmd->key_len.
+         * length is specified in cmd->key_len.
          */
         key_len_aligned = roundup(arg->key_len, 4);
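The roundup() in this hunk pads the key material out to the 4-byte boundary that the WMI_TAG_ARRAY_BYTE TLV requires, while cmd->key_len keeps the unpadded length; for instance a 13-byte WEP-104 key occupies 16 bytes on the wire. A tiny illustration of the padding step only (the helper name is made up):

#include <linux/math.h>         /* roundup(); historically also via <linux/kernel.h> */
#include <linux/types.h>

/* Pad a key length to the 4-byte TLV boundary: 5 -> 8, 13 -> 16, 16 -> 16. */
static inline u32 example_key_len_aligned(u32 key_len)
{
        return roundup(key_len, 4);
}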
@@ -5995,7 +5995,7 @@ static void ath12k_service_available_event(struct ath12k_base *ab, struct sk_buf
         }
         /* TODO: Use wmi_service_segment_offset information to get the service
-         * especially when more services are advertised in multiple sevice
+         * especially when more services are advertised in multiple service
          * available events.
          */
         for (i = 0, j = WMI_MAX_SERVICE;
...
@@ -4002,7 +4002,7 @@ struct ath12k_wmi_pdev_radar_event {
 } __packed;
 struct wmi_pdev_temperature_event {
-        /* temperature value in Celcius degree */
+        /* temperature value in Celsius degree */
         a_sle32 temp;
         __le32 pdev_id;
 } __packed;
@@ -4192,7 +4192,7 @@ enum wmi_sta_ps_param_tx_wake_threshold {
  */
 enum wmi_sta_ps_param_pspoll_count {
         WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
-        /* Values greater than 0 indicate the maximum numer of PS-Poll frames
+        /* Values greater than 0 indicate the maximum number of PS-Poll frames
          * FW will send before waking up.
          */
 };
...