Commit 6bb986e9 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2020-05-29' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Third set of iwlwifi patches intended for v5.8

* Update range request API;
* Add ACPI DSM support;
* Support enabling 5.2GHz bands in Indonesia via ACPI;
* Bump FW API version to 56;
* TX queues refactoring started;
* Fix one memory leak;
* Some other small fixes and clean-ups;

# gpg: Signature made Fri 29 May 2020 10:38:28 AM EEST using RSA key ID 1A3CC5FA
# gpg: Good signature from "Luciano Roth Coelho (Luca) <luca@coelho.fi>"
# gpg:                 aka "Luciano Roth Coelho (Intel) <luciano.coelho@intel.com>"
parents 5cf2740f e6d4318c
@@ -57,7 +57,7 @@
 #include "iwl-prph.h"

 /* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 55
+#define IWL_22000_UCODE_API_MAX 56

 /* Lowest firmware API version supported */
 #define IWL_22000_UCODE_API_MIN 39
......
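The MAX/MIN pair above drives firmware file selection: the driver asks for the newest image it supports and steps down one API version at a time until a file is found or the minimum is passed. Roughly how that loop looks (a simplified sketch of the iwl-drv.c mechanism, not part of this commit; field names abbreviated and error handling elided):

        /* try iwlwifi-<device>-56.ucode first, then -55, ... down to -39 */
        static int iwl_request_next_ucode(struct iwl_drv *drv)
        {
                char name[64];

                if (drv->fw_index <= drv->trans->cfg->ucode_api_min)
                        return -ENOENT;         /* no supported image found */

                drv->fw_index--;
                snprintf(name, sizeof(name), "%s%d.ucode",
                         drv->firmware_name, drv->fw_index);

                return request_firmware_nowait(THIS_MODULE, 1, name,
                                               drv->trans->dev, GFP_KERNEL,
                                               drv, iwl_req_fw_callback);
        }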
@@ -58,44 +58,121 @@
  *
  *****************************************************************************/

+#include <linux/uuid.h>
 #include "iwl-drv.h"
 #include "iwl-debug.h"
 #include "acpi.h"
 #include "fw/runtime.h"

-void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+static const guid_t intel_wifi_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
+                                                0xA5, 0xB3, 0x1F, 0x73,
+                                                0x8E, 0x28, 0x5A, 0xDE);
+
+static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
+                               acpi_handle *ret_handle)
 {
         acpi_handle root_handle;
-        acpi_handle handle;
-        struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
         acpi_status status;

         root_handle = ACPI_HANDLE(dev);
         if (!root_handle) {
                 IWL_DEBUG_DEV_RADIO(dev,
-                                    "Could not retrieve root port ACPI handle\n");
-                return ERR_PTR(-ENOENT);
+                                    "ACPI: Could not retrieve root port handle\n");
+                return -ENOENT;
         }

-        /* Get the method's handle */
-        status = acpi_get_handle(root_handle, method, &handle);
+        status = acpi_get_handle(root_handle, method, ret_handle);
         if (ACPI_FAILURE(status)) {
-                IWL_DEBUG_DEV_RADIO(dev, "%s method not found\n", method);
-                return ERR_PTR(-ENOENT);
+                IWL_DEBUG_DEV_RADIO(dev,
+                                    "ACPI: %s method not found\n", method);
+                return -ENOENT;
         }
+
+        return 0;
+}
+
+void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+{
+        struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+        acpi_handle handle;
+        acpi_status status;
+        int ret;
+
+        ret = iwl_acpi_get_handle(dev, method, &handle);
+        if (ret)
+                return ERR_PTR(-ENOENT);

         /* Call the method with no arguments */
         status = acpi_evaluate_object(handle, NULL, NULL, &buf);
         if (ACPI_FAILURE(status)) {
-                IWL_DEBUG_DEV_RADIO(dev, "%s invocation failed (0x%x)\n",
+                IWL_DEBUG_DEV_RADIO(dev,
+                                    "ACPI: %s method invocation failed (status: 0x%x)\n",
                                     method, status);
                 return ERR_PTR(-ENOENT);
         }

         return buf.pointer;
 }
 IWL_EXPORT_SYMBOL(iwl_acpi_get_object);

+/**
+ * Generic function for evaluating a method defined in the device specific
+ * method (DSM) interface. The returned acpi object must be freed by calling
+ * function.
+ */
+void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+                              union acpi_object *args)
+{
+        union acpi_object *obj;
+
+        obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_wifi_guid, rev, func,
+                                args);
+        if (!obj) {
+                IWL_DEBUG_DEV_RADIO(dev,
+                                    "ACPI: DSM method invocation failed (rev: %d, func:%d)\n",
+                                    rev, func);
+                return ERR_PTR(-ENOENT);
+        }
+        return obj;
+}
+
+/**
+ * Evaluate a DSM with no arguments and a single u8 return value (inside a
+ * buffer object), verify and return that value.
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+{
+        union acpi_object *obj;
+        int ret;
+
+        obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
+        if (IS_ERR(obj))
+                return -ENOENT;
+
+        if (obj->type != ACPI_TYPE_BUFFER) {
+                IWL_DEBUG_DEV_RADIO(dev,
+                                    "ACPI: DSM method did not return a valid object, type=%d\n",
+                                    obj->type);
+                ret = -EINVAL;
+                goto out;
+        }
+
+        if (obj->buffer.length != sizeof(u8)) {
+                IWL_DEBUG_DEV_RADIO(dev,
+                                    "ACPI: DSM method returned invalid buffer, length=%d\n",
+                                    obj->buffer.length);
+                ret = -EINVAL;
+                goto out;
+        }
+
+        ret = obj->buffer.pointer[0];
+        IWL_DEBUG_DEV_RADIO(dev,
+                            "ACPI: DSM method evaluated: func=%d, ret=%d\n",
+                            func, ret);
+out:
+        ACPI_FREE(obj);
+        return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
+
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                          union acpi_object *data,
                                          int data_size, int *tbl_rev)
......
@@ -127,12 +127,23 @@ struct iwl_geo_profile {
         u8 values[ACPI_GEO_TABLE_SIZE];
 };

+enum iwl_dsm_funcs_rev_0 {
+        DSM_FUNC_QUERY = 0,
+        DSM_FUNC_DISABLE_SRD = 1,
+        DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+};
+
 #ifdef CONFIG_ACPI

 struct iwl_fw_runtime;

 void *iwl_acpi_get_object(struct device *dev, acpi_string method);
+
+void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+                              union acpi_object *args);
+
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
+
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                          union acpi_object *data,
                                          int data_size, int *tbl_rev);
@@ -192,6 +203,17 @@ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
         return ERR_PTR(-ENOENT);
 }

+static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
+                                            int func, union acpi_object *args)
+{
+        return ERR_PTR(-ENOENT);
+}
+
+static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+{
+        return -ENOENT;
+}
+
 static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                                        union acpi_object *data,
                                                        int data_size,
......
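Taken together, iwl_acpi_get_dsm_object and iwl_acpi_get_dsm_u8 give op-mode code a one-call probe for BIOS policy bits. An illustrative caller (the helper name here is hypothetical; only iwl_acpi_get_dsm_u8 and enum iwl_dsm_funcs_rev_0 come from this series — the real consumer appears in the fw.c hunk further down):

        /* hypothetical: ask revision-0 DSM function 1 whether SRD must stay
         * off; a negative return (-ENOENT/-EINVAL) means no or malformed
         * BIOS table, in which case the driver keeps its defaults.
         */
        static bool iwl_check_srd_disabled(struct device *dev)
        {
                int ret = iwl_acpi_get_dsm_u8(dev, 0, DSM_FUNC_DISABLE_SRD);

                return ret == 1;
        }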
@@ -550,13 +550,11 @@ struct iwl_tof_range_req_ap_entry_v4 {

 /**
  * enum iwl_location_cipher - location cipher selection
  * @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128
- * @IWL_LOCATION_CIPHER_CCMP_256: CCMP 256
  * @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128
  * @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256
  */
 enum iwl_location_cipher {
         IWL_LOCATION_CIPHER_CCMP_128,
-        IWL_LOCATION_CIPHER_CCMP_256,
         IWL_LOCATION_CIPHER_GCMP_128,
         IWL_LOCATION_CIPHER_GCMP_256,
 };
@@ -577,7 +575,8 @@ enum iwl_location_cipher {
  * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
  * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
  *      the number of measurement iterations (min 2^0 = 1, max 2^14)
- * @reserved: For alignment and future use
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ *      otherwise should be set to &IWL_MVM_INVALID_STA.
  * @cipher: pairwise cipher suite for secured measurement.
  *      &enum iwl_location_cipher.
  * @hltk: HLTK to be used for secured 11az measurement
@@ -586,7 +585,8 @@ enum iwl_location_cipher {
  *      If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
  *      calibration value that corresponds to the rx bandwidth of the FTM
  *      frame.
- * @reserved2: For alignment and future use.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ *      &IWL_INITIATOR_AP_FLAGS_TB is set.
  */
 struct iwl_tof_range_req_ap_entry {
         __le32 initiator_ap_flags;
@@ -598,13 +598,13 @@ struct iwl_tof_range_req_ap_entry {
         __le16 burst_period;
         u8 samples_per_burst;
         u8 num_of_bursts;
-        u8 reserved;
+        u8 sta_id;
         u8 cipher;
         u8 hltk[HLTK_11AZ_LEN];
         u8 tk[TK_11AZ_LEN];
         __le16 calib[IWL_TOF_BW_NUM];
-        __le16 reserved2;
-} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_5 */
+        __le16 beacon_interval;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_6 */

 /**
  * enum iwl_tof_response_mode
......
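Dropping IWL_LOCATION_CIPHER_CCMP_256 renumbers the GCMP values, so anything mapping mac80211 cipher suites onto this enum has to be kept in step. A sketch of such a mapping (hypothetical helper, not from this commit; the WLAN_CIPHER_SUITE_* constants are mac80211's, and CCMP-256 now has to be rejected):

        static int iwl_location_cipher_from_suite(u32 suite, u8 *out)
        {
                switch (suite) {
                case WLAN_CIPHER_SUITE_CCMP:
                        *out = IWL_LOCATION_CIPHER_CCMP_128;
                        return 0;
                case WLAN_CIPHER_SUITE_GCMP:
                        *out = IWL_LOCATION_CIPHER_GCMP_128;
                        return 0;
                case WLAN_CIPHER_SUITE_GCMP_256:
                        *out = IWL_LOCATION_CIPHER_GCMP_256;
                        return 0;
                default:
                        return -EOPNOTSUPP;     /* includes CCMP-256 */
                }
        }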
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 - 2019 Intel Corporation
+ * Copyright(C) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 - 2019 Intel Corporation
+ * Copyright(C) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -74,6 +74,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
          */
         NVM_ACCESS_COMPLETE = 0x0,

+        /**
+         * @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd
+         */
+        LARI_CONFIG_CHANGE = 0x1,
+
         /**
          * @NVM_GET_INFO:
          * Command is &struct iwl_nvm_get_info,
@@ -446,4 +451,29 @@ struct iwl_tas_config_cmd {
         __le32 black_list_size;
         __le32 black_list_array[IWL_TAS_BLACK_LIST_MAX];
 } __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */

+/**
+ * enum iwl_lari_config_masks - bit masks for the various LARI config operations
+ * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in Ukraine
+ * @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
+ * @LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK: ETSI 5.8GHz SRD disabled
+ * @LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK: enable 5.15/5.35GHz bands in
+ *      Indonesia
+ */
+enum iwl_lari_config_masks {
+        LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = BIT(0),
+        LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = BIT(1),
+        LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = BIT(2),
+        LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3),
+};
+
+/**
+ * struct iwl_lari_config_change_cmd - change LARI configuration
+ * @config_bitmap: bit map of the config commands. each bit will trigger a
+ *      different predefined FW config operation
+ */
+struct iwl_lari_config_change_cmd {
+        __le32 config_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */
+
 #endif /* __iwl_fw_api_nvm_reg_h__ */
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018, 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018, 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,11 @@ enum iwl_prph_scratch_mtr_format {
  * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
  *      There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
  *      3: 256 bit.
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored
+ *      by older firmware versions, so set IWL_PRPH_SCRATCH_RB_SIZE_4K
+ *      appropriately; use the below values for this.
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
  */
 enum iwl_prph_scratch_flags {
         IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
@@ -103,6 +108,9 @@ enum iwl_prph_scratch_flags {
         IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
         IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
         IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
+        IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 0xf << 20,
+        IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20,
+        IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
 };

 /*
......
@@ -240,6 +240,7 @@ enum iwl_nvm_channel_flags {
  * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden
  *      for this regulatory domain (valid only in 5Ghz).
  * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ * @REG_CAPA_11AX_DISABLED: 11ax is forbidden for this regulatory domain.
  */
 enum iwl_reg_capa_flags {
         REG_CAPA_BF_CCD_LOW_BAND = BIT(0),
@@ -250,6 +251,7 @@ enum iwl_reg_capa_flags {
         REG_CAPA_MCS_9_ALLOWED = BIT(5),
         REG_CAPA_40MHZ_FORBIDDEN = BIT(7),
         REG_CAPA_DC_HIGH_ENABLED = BIT(9),
+        REG_CAPA_11AX_DISABLED = BIT(10),
 };

 static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
@@ -1115,6 +1117,9 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
                 flags |= NL80211_RRF_NO_160MHZ;
         }

+        if (cap_flags & REG_CAPA_11AX_DISABLED)
+                flags |= NL80211_RRF_NO_HE;
+
         return flags;
 }
......
@@ -795,6 +795,132 @@ struct iwl_trans_debug {
         u32 domains_bitmap;
 };

+struct iwl_dma_ptr {
+        dma_addr_t dma;
+        void *addr;
+        size_t size;
+};
+
+struct iwl_cmd_meta {
+        /* only for SYNC commands, iff the reply skb is wanted */
+        struct iwl_host_cmd *source;
+        u32 flags;
+        u32 tbs;
+};
+
+/*
+ * The FH will write back to the first TB only, so we need to copy some data
+ * into the buffer regardless of whether it should be mapped or not.
+ * This indicates how big the first TB must be to include the scratch buffer
+ * and the assigned PN.
+ * Since PN location is 8 bytes at offset 12, it's 20 now.
+ * If we make it bigger then allocations will be bigger and copy slower, so
+ * that's probably not useful.
+ */
+#define IWL_FIRST_TB_SIZE       20
+#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
+
+struct iwl_pcie_txq_entry {
+        void *cmd;
+        struct sk_buff *skb;
+        /* buffer to free after command completes */
+        const void *free_buf;
+        struct iwl_cmd_meta meta;
+};
+
+struct iwl_pcie_first_tb_buf {
+        u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
+};
+
+/**
+ * struct iwl_txq - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @tfds: transmit frame descriptors (DMA memory)
+ * @first_tb_bufs: start of command headers, including scratch buffers, for
+ *      the writeback -- this is DMA memory and an array holding one buffer
+ *      for each command on the queue
+ * @first_tb_dma: DMA address for the first_tb_bufs start
+ * @entries: transmit entries (driver state)
+ * @lock: queue lock
+ * @stuck_timer: timer that fires if queue gets stuck
+ * @trans: pointer back to transport (for timer)
+ * @need_update: indicates need to update read/write index
+ * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
+ * @wd_timeout: queue watchdog timeout (jiffies) - per queue
+ * @frozen: tx stuck queue timer is frozen
+ * @frozen_expiry_remainder: remember how long until the timer fires
+ * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
+ * @write_ptr: 1-st empty entry (index) host_w
+ * @read_ptr: last used entry (index) host_r
+ * @dma_addr: physical addr for BD's
+ * @n_window: safe queue window
+ * @id: queue id
+ * @low_mark: low watermark, resume queue if free space more than this
+ * @high_mark: high watermark, stop queue if free space less than this
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
+ * there might be HW changes in the future). For the normal TX
+ * queues, n_window, which is the size of the software queue data
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlayed over the HW queue.
+ */
+struct iwl_txq {
+        void *tfds;
+        struct iwl_pcie_first_tb_buf *first_tb_bufs;
+        dma_addr_t first_tb_dma;
+        struct iwl_pcie_txq_entry *entries;
+        /* lock for syncing changes on the queue */
+        spinlock_t lock;
+        unsigned long frozen_expiry_remainder;
+        struct timer_list stuck_timer;
+        struct iwl_trans *trans;
+        bool need_update;
+        bool frozen;
+        bool ampdu;
+        int block;
+        unsigned long wd_timeout;
+        struct sk_buff_head overflow_q;
+        struct iwl_dma_ptr bc_tbl;
+        int write_ptr;
+        int read_ptr;
+        dma_addr_t dma_addr;
+        int n_window;
+        u32 id;
+        int low_mark;
+        int high_mark;
+        bool overflow_tx;
+};
+
+/**
+ * struct iwl_trans_txqs - transport tx queues data
+ *
+ * @queue_used - bit mask of used queues
+ * @queue_stopped - bit mask of stopped queues
+ */
+struct iwl_trans_txqs {
+        unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+        unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+        struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+        struct {
+                u8 fifo;
+                u8 q_id;
+                unsigned int wdg_timeout;
+        } cmd;
+};
+
 /**
  * struct iwl_trans - transport common data
  *
@@ -828,6 +954,7 @@ struct iwl_trans_debug {
  * @system_pm_mode: the system-wide power management mode in use.
  *      This mode is set dynamically, depending on the WoWLAN values
  *      configured from the userspace at runtime.
+ * @iwl_trans_txqs: transport tx queues data.
  */
 struct iwl_trans {
         const struct iwl_trans_ops *ops;
@@ -875,6 +1002,7 @@ struct iwl_trans {
         enum iwl_plat_pm_mode system_pm_mode;

         const char *name;
+        struct iwl_trans_txqs txqs;

         /* pointer to trans specific struct */
         /*Ensure that this pointer will always be aligned to sizeof pointer */
......
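The HW/SW window comment above becomes clearer with the index math spelled out: since n_window is a power of two (32 for the command queue), a hardware index collapses into the software window with a simple mask, which is what the driver's iwl_pcie_get_cmd_index (used in the tx-gen2.c hunk below) does. A minimal sketch, assuming a power-of-two n_window:

        /* e.g. n_window = 32: HW indices 0, 32, 64, ..., 224 all land on
         * SW entry 0; 1, 33, 65, ... on entry 1; and so on.
         */
        static inline int iwl_sw_index(const struct iwl_txq *q, u32 hw_index)
        {
                return hw_index & (q->n_window - 1);    /* n_window == 2^k */
        }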
@@ -391,9 +391,27 @@ iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
 }

 static int
-iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
+iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
+                          struct cfg80211_pmsr_request_peer *peer,
+                          struct iwl_tof_range_req_ap_entry_v4 *target)
+{
+        int ret;
+
+        ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+                                            &target->format_bw,
+                                            &target->ctrl_ch_position);
+        if (ret)
+                return ret;
+
+        iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
+
+        return 0;
+}
+
+static int
+iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                        struct cfg80211_pmsr_request_peer *peer,
-                       struct iwl_tof_range_req_ap_entry_v4 *target)
+                       struct iwl_tof_range_req_ap_entry *target)
 {
         int ret;

@@ -405,6 +423,20 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,

         iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

+        if (vif->bss_conf.assoc &&
+            !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
+                struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+                target->sta_id = mvmvif->ap_sta_id;
+        } else {
+                target->sta_id = IWL_MVM_INVALID_STA;
+        }
+
+        /*
+         * TODO: Beacon interval is currently unknown, so use the common value
+         * of 100 TUs.
+         */
+        target->beacon_interval = cpu_to_le16(100);
+
         return 0;
 }

@@ -496,7 +528,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         for (i = 0; i < cmd.num_of_ap; i++) {
                 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

-                err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]);
+                err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
                 if (err)
                         return err;
         }
@@ -521,8 +553,9 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         for (i = 0; i < cmd.num_of_ap; i++) {
                 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+                struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];

-                err = iwl_mvm_ftm_put_target(mvm, peer, (void *)&cmd.ap[i]);
+                err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
                 if (err)
                         return err;
         }
@@ -548,6 +581,7 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         switch (cmd_ver) {
         case 9:
+        case 10:
                 err = iwl_mvm_ftm_start_v9(mvm, vif, req);
                 break;
         case 8:
......
@@ -988,6 +988,44 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
         if (ret < 0)
                 IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
 }

+static bool iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
+{
+        int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
+                                      DSM_FUNC_ENABLE_INDONESIA_5G2);
+
+        IWL_DEBUG_RADIO(mvm,
+                        "Evaluated DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
+                        ret);
+
+        return ret == 1;
+}
+
+static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
+{
+        int ret;
+        struct iwl_lari_config_change_cmd cmd = {};
+
+        if (iwl_mvm_eval_dsm_indonesia_5g2(mvm))
+                cmd.config_bitmap |=
+                        cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+
+        /* apply more config masks here */
+
+        if (cmd.config_bitmap) {
+                IWL_DEBUG_RADIO(mvm,
+                                "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
+                                le32_to_cpu(cmd.config_bitmap));
+                ret = iwl_mvm_send_cmd_pdu(mvm,
+                                           WIDE_ID(REGULATORY_AND_NVM_GROUP,
+                                                   LARI_CONFIG_CHANGE),
+                                           0, sizeof(cmd), &cmd);
+                if (ret < 0)
+                        IWL_DEBUG_RADIO(mvm,
+                                        "Failed to send LARI_CONFIG_CHANGE (%d)\n",
+                                        ret);
+        }
+}
+
 #else /* CONFIG_ACPI */

 inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
@@ -1019,6 +1057,10 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
 static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 {
 }
+
+static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
+{
+}
 #endif /* CONFIG_ACPI */

 void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
@@ -1293,6 +1335,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
         if (ret)
                 goto error;

+        iwl_mvm_lari_cfg(mvm);
         /*
          * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
          * anyway, so don't init MCC.
......
@@ -1208,14 +1208,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
          */
         flush_work(&mvm->roc_done_wk);

+        iwl_mvm_rm_aux_sta(mvm);
         iwl_mvm_stop_device(mvm);

         iwl_mvm_async_handlers_purge(mvm);
         /* async_handlers_list is empty and will stay empty: HW is stopped */

-        /* the fw is stopped, the aux sta is dead: clean up driver state */
-        iwl_mvm_del_aux_sta(mvm);
-
         /*
          * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
          * hw (as restart_complete() won't be called in this case) and mac80211
......
@@ -2093,16 +2093,24 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
         return ret;
 }

-void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
+int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
 {
-        iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
-}
+        int ret;

-void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
-{
         lockdep_assert_held(&mvm->mutex);

+        iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
+        ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
+        if (ret)
+                IWL_WARN(mvm, "Failed sending remove station\n");
         iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+
+        return ret;
+}
+
+void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
+{
+        iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
 }

 /*
......
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -541,7 +541,7 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                        int tid, u8 queue, bool start);

 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
-void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
+int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm);

 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
......
@@ -138,9 +138,17 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
         case IWL_AMSDU_2K:
                 break;
         case IWL_AMSDU_4K:
+                control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+                break;
         case IWL_AMSDU_8K:
+                control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+                /* if firmware supports the ext size, tell it */
+                control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
+                break;
         case IWL_AMSDU_12K:
                 control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+                /* if firmware supports the ext size, tell it */
+                control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K;
                 break;
         }

@@ -213,7 +221,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
         ctxt_info_gen3->tr_idx_arr_size =
                 cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
         ctxt_info_gen3->mtr_base_addr =
-                cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+                cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
         ctxt_info_gen3->mcr_base_addr =
                 cpu_to_le64(trans_pcie->rxq->used_bd_dma);
         ctxt_info_gen3->mtr_size =
......
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -263,7 +263,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
         /* initialize TX command queue */
         ctxt_info->hcmd_cfg.cmd_queue_addr =
-                cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+                cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
         ctxt_info->hcmd_cfg.cmd_queue_size =
                 TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
......
@@ -246,12 +246,6 @@ struct iwl_rb_allocator {
         struct work_struct rx_alloc;
 };

-struct iwl_dma_ptr {
-        dma_addr_t dma;
-        void *addr;
-        size_t size;
-};
-
 /**
  * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
  * @index -- current index
@@ -290,107 +284,6 @@ static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
                 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
 }

-struct iwl_cmd_meta {
-        /* only for SYNC commands, iff the reply skb is wanted */
-        struct iwl_host_cmd *source;
-        u32 flags;
-        u32 tbs;
-};
-
-/*
- * The FH will write back to the first TB only, so we need to copy some data
- * into the buffer regardless of whether it should be mapped or not.
- * This indicates how big the first TB must be to include the scratch buffer
- * and the assigned PN.
- * Since PN location is 8 bytes at offset 12, it's 20 now.
- * If we make it bigger then allocations will be bigger and copy slower, so
- * that's probably not useful.
- */
-#define IWL_FIRST_TB_SIZE       20
-#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
-
-struct iwl_pcie_txq_entry {
-        void *cmd;
-        struct sk_buff *skb;
-        /* buffer to free after command completes */
-        const void *free_buf;
-        struct iwl_cmd_meta meta;
-};
-
-struct iwl_pcie_first_tb_buf {
-        u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
-};
-
-/**
- * struct iwl_txq - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @tfds: transmit frame descriptors (DMA memory)
- * @first_tb_bufs: start of command headers, including scratch buffers, for
- *      the writeback -- this is DMA memory and an array holding one buffer
- *      for each command on the queue
- * @first_tb_dma: DMA address for the first_tb_bufs start
- * @entries: transmit entries (driver state)
- * @lock: queue lock
- * @stuck_timer: timer that fires if queue gets stuck
- * @trans_pcie: pointer back to transport (for timer)
- * @need_update: indicates need to update read/write index
- * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
- * @wd_timeout: queue watchdog timeout (jiffies) - per queue
- * @frozen: tx stuck queue timer is frozen
- * @frozen_expiry_remainder: remember how long until the timer fires
- * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
- * @write_ptr: 1-st empty entry (index) host_w
- * @read_ptr: last used entry (index) host_r
- * @dma_addr: physical addr for BD's
- * @n_window: safe queue window
- * @id: queue id
- * @low_mark: low watermark, resume queue if free space more than this
- * @high_mark: high watermark, stop queue if free space less than this
- *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_txq {
-        void *tfds;
-        struct iwl_pcie_first_tb_buf *first_tb_bufs;
-        dma_addr_t first_tb_dma;
-        struct iwl_pcie_txq_entry *entries;
-        spinlock_t lock;
-        unsigned long frozen_expiry_remainder;
-        struct timer_list stuck_timer;
-        struct iwl_trans_pcie *trans_pcie;
-        bool need_update;
-        bool frozen;
-        bool ampdu;
-        int block;
-        unsigned long wd_timeout;
-        struct sk_buff_head overflow_q;
-        struct iwl_dma_ptr bc_tbl;
-        int write_ptr;
-        int read_ptr;
-        dma_addr_t dma_addr;
-        int n_window;
-        u32 id;
-        int low_mark;
-        int high_mark;
-        bool overflow_tx;
-};
-
 static inline dma_addr_t
 iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
 {
@@ -561,9 +454,6 @@ struct iwl_trans_pcie {
         struct dma_pool *bc_pool;

         struct iwl_txq *txq_memory;
-        struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
-        unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
-        unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

         /* PCI bus related data */
         struct pci_dev *pci_dev;
@@ -577,10 +467,7 @@ struct iwl_trans_pcie {
         u8 page_offs, dev_cmd_offs;

-        u8 cmd_queue;
         u8 def_rx_queue;
-        u8 cmd_fifo;
-        unsigned int cmd_q_wdg_timeout;
         u8 n_no_reclaim_cmds;
         u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
         u8 max_tbs;
@@ -983,9 +870,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
 static inline void iwl_wake_queue(struct iwl_trans *trans,
                                   struct iwl_txq *txq)
 {
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-        if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+        if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
                 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
                 iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
         }
@@ -994,9 +879,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
 static inline void iwl_stop_queue(struct iwl_trans *trans,
                                   struct iwl_txq *txq)
 {
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-        if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+        if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
                 iwl_op_mode_queue_full(trans->op_mode, txq->id);
                 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
         } else
......
@@ -1284,7 +1284,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                   int i)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+        struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
         bool page_stolen = false;
         int max_len = trans_pcie->rx_buf_bytes;
         u32 offset = 0;
@@ -1671,9 +1671,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
         }

         for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
-                if (!trans_pcie->txq[i])
+                if (!trans->txqs.txq[i])
                         continue;
-                del_timer(&trans_pcie->txq[i]->stuck_timer);
+                del_timer(&trans->txqs.txq[i]->stuck_timer);
         }

         /* The STATUS_FW_ERROR bit is set in this function. This must happen
......
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
                 return -ENOMEM;

         /* Allocate or reset and init all Tx and Command queues */
-        if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size))
+        if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size))
                 return -ENOMEM;

         /* enable shadow regs in HW */
@@ -262,8 +262,9 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
         iwl_pcie_reset_ict(trans);

         /* make sure all queue are not stopped/used */
-        memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
-        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+        memset(trans->txqs.queue_stopped, 0,
+               sizeof(trans->txqs.queue_stopped));
+        memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

         /* now that we got alive we can free the fw image & the context info.
          * paging memory cannot be freed included since FW will still use it
......
@@ -5,10 +5,9 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1495,14 +1493,10 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
         int ret;
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

-        /*
-         * Family IWL_DEVICE_FAMILY_AX210 and above persist mode is set by FW.
-         */
-        if (!reset && trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+        if (!reset)
                 /* Enable persistence mode to avoid reset */
                 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                             CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
-        }

         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                 iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
@@ -1910,9 +1904,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

-        trans_pcie->cmd_queue = trans_cfg->cmd_queue;
-        trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
-        trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+        trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
+        trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
+        trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
         if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
                 trans_pcie->n_no_reclaim_cmds = 0;
         else
@@ -2205,11 +2199,10 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
                                             unsigned long txqs,
                                             bool freeze)
 {
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         int queue;

         for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
-                struct iwl_txq *txq = trans_pcie->txq[queue];
+                struct iwl_txq *txq = trans->txqs.txq[queue];
                 unsigned long now;

                 spin_lock_bh(&txq->lock);
@@ -2257,13 +2250,12 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
 {
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         int i;

         for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
-                struct iwl_txq *txq = trans_pcie->txq[i];
+                struct iwl_txq *txq = trans->txqs.txq[i];

-                if (i == trans_pcie->cmd_queue)
+                if (i == trans->txqs.cmd.q_id)
                         continue;

                 spin_lock_bh(&txq->lock);
@@ -2332,7 +2324,6 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
 {
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_txq *txq;
         unsigned long now = jiffies;
         bool overflow_tx;
@@ -2342,11 +2333,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
         if (test_bit(STATUS_TRANS_DEAD, &trans->status))
                 return -ENODEV;

-        if (!test_bit(txq_idx, trans_pcie->queue_used))
+        if (!test_bit(txq_idx, trans->txqs.queue_used))
                 return -EINVAL;

         IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
-        txq = trans_pcie->txq[txq_idx];
+        txq = trans->txqs.txq[txq_idx];

         spin_lock_bh(&txq->lock);
         overflow_tx = txq->overflow_tx ||
@@ -2394,7 +2385,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
 static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
 {
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         int cnt;
         int ret = 0;

@@ -2403,9 +2393,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
              cnt < trans->trans_cfg->base_params->num_of_queues;
              cnt++) {

-                if (cnt == trans_pcie->cmd_queue)
+                if (cnt == trans->txqs.cmd.q_id)
                         continue;
-                if (!test_bit(cnt, trans_pcie->queue_used))
+                if (!test_bit(cnt, trans->txqs.queue_used))
                         continue;
                 if (!(BIT(cnt) & txq_bm))
                         continue;
@@ -2579,13 +2569,12 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
         struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
         struct iwl_dbgfs_tx_queue_state *state = v;
         struct iwl_trans *trans = priv->trans;
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_txq *txq = trans_pcie->txq[state->pos];
+        struct iwl_txq *txq = trans->txqs.txq[state->pos];

         seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
                    (unsigned int)state->pos,
-                   !!test_bit(state->pos, trans_pcie->queue_used),
-                   !!test_bit(state->pos, trans_pcie->queue_stopped));
+                   !!test_bit(state->pos, trans->txqs.queue_used),
+                   !!test_bit(state->pos, trans->txqs.queue_stopped));
         if (txq)
                 seq_printf(seq,
                            "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
@@ -2595,7 +2584,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
         else
                 seq_puts(seq, "(unallocated)");

-        if (state->pos == trans_pcie->cmd_queue)
+        if (state->pos == trans->txqs.cmd.q_id)
                 seq_puts(seq, " (HCMD)");
         seq_puts(seq, "\n");
@@ -3271,7 +3260,7 @@ static struct iwl_trans_dump_data
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_fw_error_dump_data *data;
-        struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
+        struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
         struct iwl_fw_error_dump_txcmd *txcmd;
         struct iwl_trans_dump_data *dump_data;
         u32 len, num_rbs = 0, monitor_len = 0;
......
...@@ -64,7 +64,6 @@ ...@@ -64,7 +64,6 @@
*/ */
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id; int txq_id;
/* /*
...@@ -72,12 +71,13 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) ...@@ -72,12 +71,13 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt. * queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped. * Since we stop Tx altogether - mark the queues as stopped.
*/ */
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans->txqs.queue_stopped, 0,
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); sizeof(trans->txqs.queue_stopped));
memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Unmap DMA from host system and free skb's */ /* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) { for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
if (!trans_pcie->txq[txq_id]) if (!trans->txqs.txq[txq_id])
continue; continue;
iwl_pcie_gen2_txq_unmap(trans, txq_id); iwl_pcie_gen2_txq_unmap(trans, txq_id);
} }
...@@ -716,7 +716,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -716,7 +716,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta; struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq = trans_pcie->txq[txq_id]; struct iwl_txq *txq = trans->txqs.txq[txq_id];
u16 cmd_len; u16 cmd_len;
int idx; int idx;
void *tfd; void *tfd;
...@@ -725,7 +725,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -725,7 +725,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
"queue %d out of range", txq_id)) "queue %d out of range", txq_id))
return -EINVAL; return -EINVAL;
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
"TX on unused queue %d\n", txq_id)) "TX on unused queue %d\n", txq_id))
return -EINVAL; return -EINVAL;
...@@ -819,7 +819,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, ...@@ -819,7 +819,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd) struct iwl_host_cmd *cmd)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd; struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta; struct iwl_cmd_meta *out_meta;
unsigned long flags; unsigned long flags;
...@@ -931,7 +931,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, ...@@ -931,7 +931,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence = out_cmd->hdr_wide.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr)); INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide); cmd_pos = sizeof(struct iwl_cmd_header_wide);
...@@ -979,7 +979,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, ...@@ -979,7 +979,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id, iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */ /* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
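The sequence field in the wide command header packs the issuing queue id and the write index into one 16-bit value; this hunk only changes where the queue id is read from. For reference, the packing macros in the PCIe internal header look roughly like this (a sketch from the driver's internals, not part of this diff):

	#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
	#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
	#define SEQ_TO_INDEX(s)	((s) & 0xff)
	#define INDEX_TO_SEQ(i)	((i) & 0xff)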
...@@ -1056,7 +1056,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, ...@@ -1056,7 +1056,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx; int cmd_idx;
int ret; int ret;
...@@ -1175,14 +1175,14 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, ...@@ -1175,14 +1175,14 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id]; struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock); spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) { while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr); txq_id, txq->read_ptr);
if (txq_id != trans_pcie->cmd_queue) { if (txq_id != trans->txqs.cmd.q_id) {
int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb; struct sk_buff *skb = txq->entries[idx].skb;
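The unmap loop drains the ring by walking read_ptr toward write_ptr, freeing the skb at each slot on data queues. The ring can be larger than the entries window, so the hardware pointer is masked down to a window slot first; a sketch of the helper used above (the in-tree inline may differ in detail):

	static inline int iwl_pcie_get_cmd_index(const struct iwl_txq *q,
						 u32 index)
	{
		return index & (q->n_window - 1);	/* n_window is a power of two */
	}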
...@@ -1240,7 +1240,6 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, ...@@ -1240,7 +1240,6 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
*/ */
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq; struct iwl_txq *txq;
int i; int i;
...@@ -1248,7 +1247,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) ...@@ -1248,7 +1247,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
"queue %d out of range", txq_id)) "queue %d out of range", txq_id))
return; return;
txq = trans_pcie->txq[txq_id]; txq = trans->txqs.txq[txq_id];
if (WARN_ON(!txq)) if (WARN_ON(!txq))
return; return;
...@@ -1256,7 +1255,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) ...@@ -1256,7 +1255,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_gen2_txq_unmap(trans, txq_id); iwl_pcie_gen2_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */ /* De-alloc array of command/tx buffers */
if (txq_id == trans_pcie->cmd_queue) if (txq_id == trans->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) { for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd); kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf); kzfree(txq->entries[i].free_buf);
...@@ -1265,9 +1264,9 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) ...@@ -1265,9 +1264,9 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_gen2_txq_free_memory(trans, txq); iwl_pcie_gen2_txq_free_memory(trans, txq);
trans_pcie->txq[txq_id] = NULL; trans->txqs.txq[txq_id] = NULL;
clear_bit(txq_id, trans_pcie->queue_used); clear_bit(txq_id, trans->txqs.queue_used);
} }
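Note that the free path scrubs command-queue entries with kzfree() rather than kfree(): host commands can carry material worth clearing before the memory returns to the allocator. A sketch of what kzfree() does (the kernel helper of this era, shown for context only):

	void kzfree(const void *p)
	{
		size_t ks;
		void *mem = (void *)p;

		if (unlikely(ZERO_OR_NULL_PTR(mem)))
			return;
		ks = ksize(mem);
		memset(mem, 0, ks);	/* zero before freeing */
		kfree(mem);
	}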
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
...@@ -1327,7 +1326,6 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, ...@@ -1327,7 +1326,6 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
struct iwl_txq *txq, struct iwl_txq *txq,
struct iwl_host_cmd *hcmd) struct iwl_host_cmd *hcmd)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue_cfg_rsp *rsp; struct iwl_tx_queue_cfg_rsp *rsp;
int ret, qid; int ret, qid;
u32 wr_ptr; u32 wr_ptr;
...@@ -1342,20 +1340,20 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, ...@@ -1342,20 +1340,20 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
qid = le16_to_cpu(rsp->queue_number); qid = le16_to_cpu(rsp->queue_number);
wr_ptr = le16_to_cpu(rsp->write_pointer); wr_ptr = le16_to_cpu(rsp->write_pointer);
if (qid >= ARRAY_SIZE(trans_pcie->txq)) { if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
WARN_ONCE(1, "queue index %d unsupported", qid); WARN_ONCE(1, "queue index %d unsupported", qid);
ret = -EIO; ret = -EIO;
goto error_free_resp; goto error_free_resp;
} }
if (test_and_set_bit(qid, trans_pcie->queue_used)) { if (test_and_set_bit(qid, trans->txqs.queue_used)) {
WARN_ONCE(1, "queue %d already used", qid); WARN_ONCE(1, "queue %d already used", qid);
ret = -EIO; ret = -EIO;
goto error_free_resp; goto error_free_resp;
} }
txq->id = qid; txq->id = qid;
trans_pcie->txq[qid] = txq; trans->txqs.txq[qid] = txq;
wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */ /* Place first TFD at index corresponding to start sequence number */
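For dynamically allocated (TVQM) queues the firmware, not the driver, picks the queue id, so the response handler validates it against the txq array and the used bitmap before publishing the queue. Both ring pointers are then seeded from the firmware's write pointer; a sketch of the seeding implied by the comment above (assumed from context, using the already-masked wr_ptr):

	/* Sketch: start both pointers at the firmware-supplied index so the
	 * first TFD lands at the slot matching the start sequence number. */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;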
...@@ -1413,8 +1411,6 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, ...@@ -1413,8 +1411,6 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (WARN(queue >= IWL_MAX_TVQM_QUEUES, if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
"queue %d out of range", queue)) "queue %d out of range", queue))
return; return;
...@@ -1425,7 +1421,7 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) ...@@ -1425,7 +1421,7 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
* allow the op_mode to call txq_disable after it already called * allow the op_mode to call txq_disable after it already called
* stop_device. * stop_device.
*/ */
if (!test_and_clear_bit(queue, trans_pcie->queue_used)) { if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", queue); "queue %d not used", queue);
return; return;
...@@ -1433,22 +1429,21 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) ...@@ -1433,22 +1429,21 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
iwl_pcie_gen2_txq_unmap(trans, queue); iwl_pcie_gen2_txq_unmap(trans, queue);
iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]); iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]);
trans_pcie->txq[queue] = NULL; trans->txqs.txq[queue] = NULL;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
} }
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i; int i;
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Free all TX queues */ /* Free all TX queues */
for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) { for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
if (!trans_pcie->txq[i]) if (!trans->txqs.txq[i])
continue; continue;
iwl_pcie_gen2_txq_free(trans, i); iwl_pcie_gen2_txq_free(trans, i);
...@@ -1457,35 +1452,34 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) ...@@ -1457,35 +1452,34 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size) int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *queue; struct iwl_txq *queue;
int ret; int ret;
/* alloc and init the tx queue */ /* alloc and init the tx queue */
if (!trans_pcie->txq[txq_id]) { if (!trans->txqs.txq[txq_id]) {
queue = kzalloc(sizeof(*queue), GFP_KERNEL); queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue) { if (!queue) {
IWL_ERR(trans, "Not enough memory for tx queue\n"); IWL_ERR(trans, "Not enough memory for tx queue\n");
return -ENOMEM; return -ENOMEM;
} }
trans_pcie->txq[txq_id] = queue; trans->txqs.txq[txq_id] = queue;
ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true); ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
if (ret) { if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error; goto error;
} }
} else { } else {
queue = trans_pcie->txq[txq_id]; queue = trans->txqs.txq[txq_id];
} }
ret = iwl_pcie_txq_init(trans, queue, queue_size, ret = iwl_pcie_txq_init(trans, queue, queue_size,
(txq_id == trans_pcie->cmd_queue)); (txq_id == trans->txqs.cmd.q_id));
if (ret) { if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error; goto error;
} }
trans_pcie->txq[txq_id]->id = txq_id; trans->txqs.txq[txq_id]->id = txq_id;
set_bit(txq_id, trans_pcie->queue_used); set_bit(txq_id, trans->txqs.queue_used);
return 0; return 0;
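A hedged usage sketch of this init path: on gen2 device start, the command queue is brought up through iwl_pcie_gen2_tx_init() with the queue id now read from the shared state (the queue_size computation here is illustrative):

	/* Illustrative caller; error handling omitted. */
	int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
			       trans->cfg->min_txq_size);
	ret = iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size);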
......