Commit 8f0bb5ae authored by John W. Linville
parents b9d90578 6fe7dd0d
......@@ -84,13 +84,13 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
static void iwl1000_nic_config(struct iwl_priv *priv)
{
/* set CSR_HW_CONFIG_REG for uCode use */
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
/* Setting digital SVR for 1000 card to 1.32V */
/* locking is acquired in iwl_set_bits_mask_prph() function */
iwl_set_bits_mask_prph(bus(priv), APMG_DIGITAL_SVR_REG,
iwl_set_bits_mask_prph(trans(priv), APMG_DIGITAL_SVR_REG,
APMG_SVR_DIGITAL_VOLTAGE_1_32,
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
......@@ -128,8 +128,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
......
......@@ -87,7 +87,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);
if (cfg(priv)->iq_invert)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
......@@ -124,8 +124,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
......
......@@ -73,7 +73,7 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
* (PCIe power is lost before PERST# is asserted),
* causing ME FW to lose ownership and not being able to obtain it back.
*/
iwl_set_bits_mask_prph(bus(priv), APMG_PS_CTRL_REG,
iwl_set_bits_mask_prph(trans(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
......@@ -170,8 +170,6 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
......@@ -199,8 +197,6 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
......
......@@ -82,7 +82,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
......@@ -90,9 +90,9 @@ static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_6050_1x2);
}
......@@ -104,7 +104,7 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
/* no locking required for register write */
if (cfg(priv)->pa_type == IWL_PA_INTERNAL) {
/* 2x2 IPA phy type */
iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
iwl_write32(trans(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* do additional nic configuration if needed */
......@@ -145,8 +145,6 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
......
......@@ -628,16 +628,16 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
CT_CARD_DISABLED)) {
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
if (!(flags & RXON_CARD_DISABLED)) {
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
if (flags & CT_CARD_DISABLED)
......
......@@ -178,19 +178,19 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
if (tt->state == IWL_TI_CT_KILL) {
if (priv->thermal_throttle.ct_kill_toggle) {
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = false;
} else {
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = true;
}
iwl_read32(bus(priv), CSR_UCODE_DRV_GP1);
spin_lock_irqsave(&bus(priv)->reg_lock, flags);
if (!iwl_grab_nic_access(bus(priv)))
iwl_release_nic_access(bus(priv));
spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
iwl_read32(trans(priv), CSR_UCODE_DRV_GP1);
spin_lock_irqsave(&trans(priv)->reg_lock, flags);
if (!iwl_grab_nic_access(trans(priv)))
iwl_release_nic_access(trans(priv));
spin_unlock_irqrestore(&trans(priv)->reg_lock, flags);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
......
......@@ -73,8 +73,6 @@ struct iwlagn_ucode_capabilities {
extern struct ieee80211_ops iwlagn_hw_ops;
int iwl_reset_ict(struct iwl_trans *trans);
static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
{
hdr->op_code = cmd;
......@@ -109,6 +107,7 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf,
int iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
......
......@@ -118,88 +118,24 @@
struct iwl_shared;
struct iwl_bus;
/**
* struct iwl_bus_ops - bus specific operations
* @get_pm_support: must return true if the bus can go to sleep
* @apm_config: will be called during the config of the APM
* @get_hw_id_string: prints the hw_id in the provided buffer
* @get_hw_id: get hw_id in u32
* @write8: write a byte to register at offset ofs
* @write32: write a dword to register at offset ofs
* @read32: read a dword from register at offset ofs
*/
struct iwl_bus_ops {
bool (*get_pm_support)(struct iwl_bus *bus);
void (*apm_config)(struct iwl_bus *bus);
void (*get_hw_id_string)(struct iwl_bus *bus, char buf[], int buf_len);
u32 (*get_hw_id)(struct iwl_bus *bus);
void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
u32 (*read32)(struct iwl_bus *bus, u32 ofs);
};
/**
* struct iwl_bus - bus common data
*
* This data is common to all bus layer implementations.
*
* @dev - pointer to struct device * that represents the device
* @ops - pointer to iwl_bus_ops
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
* NB: for the time being this needs to be set by the upper layer since
* it allocates the shared data
* @irq - the irq number for the device
* @reg_lock - protect hw register access
*/
struct iwl_bus {
struct device *dev;
const struct iwl_bus_ops *ops;
struct iwl_shared *shrd;
unsigned int irq;
spinlock_t reg_lock;
/* pointer to bus specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char bus_specific[0] __attribute__((__aligned__(sizeof(void *))));
};
static inline bool bus_get_pm_support(struct iwl_bus *bus)
{
return bus->ops->get_pm_support(bus);
}
static inline void bus_apm_config(struct iwl_bus *bus)
{
bus->ops->apm_config(bus);
}
static inline void bus_get_hw_id_string(struct iwl_bus *bus, char buf[],
int buf_len)
{
bus->ops->get_hw_id_string(bus, buf, buf_len);
}
static inline u32 bus_get_hw_id(struct iwl_bus *bus)
{
return bus->ops->get_hw_id(bus);
}
static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
bus->ops->write8(bus, ofs, val);
}
static inline void bus_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
bus->ops->write32(bus, ofs, val);
}
static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
{
return bus->ops->read32(bus, ofs);
}
/*****************************************************
* Bus layer registration functions
******************************************************/
......
......@@ -203,10 +203,9 @@ int iwl_init_geos(struct iwl_priv *priv)
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
char buf[32];
bus_get_hw_id_string(bus(priv), buf, sizeof(buf));
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
"Please send your %s to maintainer.\n", buf);
"Please send your %s to maintainer.\n",
trans(priv)->hw_id_str);
cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
}
......@@ -883,129 +882,6 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
}
}
static int iwl_apm_stop_master(struct iwl_priv *priv)
{
int ret = 0;
/* stop device's busmaster DMA activity */
iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
ret = iwl_poll_bit(bus(priv), CSR_RESET,
CSR_RESET_REG_FLAG_MASTER_DISABLED,
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret)
IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
IWL_DEBUG_INFO(priv, "stop master\n");
return ret;
}
void iwl_apm_stop(struct iwl_priv *priv)
{
IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
clear_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
/* Stop device's DMA activity */
iwl_apm_stop_master(priv);
/* Reset the entire device */
iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
/*
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
iwl_clear_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
/*
* Start up NIC's basic functionality after it has been reset
* (e.g. after platform boot, or shutdown via iwl_apm_stop())
* NOTE: This does not load uCode nor start the embedded processor
*/
int iwl_apm_init(struct iwl_priv *priv)
{
int ret = 0;
IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
/*
* Use "set_bit" below rather than "write", to preserve any hardware
* bits already set by default after reset.
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
* don't wait for ICH L0s (ICH bug W/A)
*/
iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
/* Set FH wait threshold to maximum (HW error during stress W/A) */
iwl_set_bit(bus(priv), CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
/*
* Enable HAP INTA (interrupt from management bus) to
* wake device's PCI Express link L1a -> L0s
*/
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
bus_apm_config(bus(priv));
/* Configure analog phase-lock-loop before activating to D0A */
if (cfg(priv)->base_params->pll_cfg_val)
iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
cfg(priv)->base_params->pll_cfg_val);
/*
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
iwl_set_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/*
* Wait for clock stabilization; once stabilized, access to
* device-internal resources is supported, e.g. iwl_write_prph()
* and accesses to uCode SRAM.
*/
ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
if (ret < 0) {
IWL_DEBUG_INFO(priv, "Failed to init the card\n");
goto out;
}
/*
* Enable DMA clock and wait for it to stabilize.
*
* Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
* do not disable clocks. This preserves any hardware bits already
* set by default in "CLK_CTRL_REG" after reset.
*/
iwl_write_prph(bus(priv), APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
udelay(20);
/* Disable L1-Active */
iwl_set_bits_prph(bus(priv), APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
set_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
out:
return ret;
}
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
int ret;
......
......@@ -297,12 +297,6 @@ static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
cfg(priv)->bt_params->advanced_bt_coexist;
}
static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
{
IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
iwl_write32(bus(priv), CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
extern bool bt_siso_mode;
#endif /* __iwl_core_h__ */
......@@ -35,10 +35,10 @@
struct iwl_priv;
/* No matter what m is (priv, bus, trans), this will work */
#define IWL_ERR(m, f, a...) dev_err(bus(m)->dev, f, ## a)
#define IWL_WARN(m, f, a...) dev_warn(bus(m)->dev, f, ## a)
#define IWL_INFO(m, f, a...) dev_info(bus(m)->dev, f, ## a)
#define IWL_CRIT(m, f, a...) dev_crit(bus(m)->dev, f, ## a)
#define IWL_ERR(m, f, a...) dev_err(trans(m)->dev, f, ## a)
#define IWL_WARN(m, f, a...) dev_warn(trans(m)->dev, f, ## a)
#define IWL_INFO(m, f, a...) dev_info(trans(m)->dev, f, ## a)
#define IWL_CRIT(m, f, a...) dev_crit(trans(m)->dev, f, ## a)
#define iwl_print_hex_error(m, p, len) \
do { \
......@@ -50,7 +50,7 @@ do { \
#define IWL_DEBUG(m, level, fmt, ...) \
do { \
if (iwl_get_debug_level((m)->shrd) & (level)) \
dev_err(bus(m)->dev, "%c %s " fmt, \
dev_err(trans(m)->dev, "%c %s " fmt, \
in_interrupt() ? 'I' : 'U', __func__, \
##__VA_ARGS__); \
} while (0)
......@@ -59,7 +59,7 @@ do { \
do { \
if (iwl_get_debug_level((m)->shrd) & (level) && \
net_ratelimit()) \
dev_err(bus(m)->dev, "%c %s " fmt, \
dev_err(trans(m)->dev, "%c %s " fmt, \
in_interrupt() ? 'I' : 'U', __func__, \
##__VA_ARGS__); \
} while (0)
......@@ -74,12 +74,12 @@ do { \
#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \
do { \
if (!iwl_is_rfkill(p->shrd)) \
dev_err(bus(p)->dev, "%s%c %s " fmt, \
dev_err(trans(p)->dev, "%s%c %s " fmt, \
"", \
in_interrupt() ? 'I' : 'U', __func__, \
##__VA_ARGS__); \
else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
dev_err(bus(p)->dev, "%s%c %s " fmt, \
dev_err(trans(p)->dev, "%s%c %s " fmt, \
"(RFKILL) ", \
in_interrupt() ? 'I' : 'U', __func__, \
##__VA_ARGS__); \
......
......@@ -263,7 +263,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
sram = priv->dbgfs_sram_offset & ~0x3;
/* read the first u32 from sram */
val = iwl_read_targ_mem(bus(priv), sram);
val = iwl_read_targ_mem(trans(priv), sram);
for (; len; len--) {
/* put the address at the start of every line */
......@@ -282,7 +282,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
if (++offset == 4) {
sram += 4;
offset = 0;
val = iwl_read_targ_mem(bus(priv), sram);
val = iwl_read_targ_mem(trans(priv), sram);
}
/* put in extra spaces and split lines for human readability */
......@@ -2055,7 +2055,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
const size_t bufsz = sizeof(buf);
u32 pwrsave_status;
pwrsave_status = iwl_read32(bus(priv), CSR_GP_CNTRL) &
pwrsave_status = iwl_read32(trans(priv), CSR_GP_CNTRL) &
CSR_GP_REG_POWER_SAVE_STATUS_MSK;
pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
......
......@@ -292,114 +292,6 @@ struct iwl_vif_priv {
u8 ibss_bssid_sta_id;
};
/* v1/v2 uCode file layout */
struct iwl_ucode_header {
__le32 ver; /* major/minor/API/serial */
union {
struct {
__le32 inst_size; /* bytes of runtime code */
__le32 data_size; /* bytes of runtime data */
__le32 init_size; /* bytes of init code */
__le32 init_data_size; /* bytes of init data */
__le32 boot_size; /* bytes of bootstrap code */
u8 data[0]; /* in same order as sizes */
} v1;
struct {
__le32 build; /* build number */
__le32 inst_size; /* bytes of runtime code */
__le32 data_size; /* bytes of runtime data */
__le32 init_size; /* bytes of init code */
__le32 init_data_size; /* bytes of init data */
__le32 boot_size; /* bytes of bootstrap code */
u8 data[0]; /* in same order as sizes */
} v2;
} u;
};
/*
* new TLV uCode file layout
*
* The new TLV file format contains TLVs, that each specify
* some piece of data. To facilitate "groups", for example
* different instruction image with different capabilities,
* bundled with the same init image, an alternative mechanism
* is provided:
* When the alternative field is 0, that means that the item
* is always valid. When it is non-zero, then it is only
* valid in conjunction with items of the same alternative,
* in which case the driver (user) selects one alternative
* to use.
*/
enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_INVALID = 0, /* unused */
IWL_UCODE_TLV_INST = 1,
IWL_UCODE_TLV_DATA = 2,
IWL_UCODE_TLV_INIT = 3,
IWL_UCODE_TLV_INIT_DATA = 4,
IWL_UCODE_TLV_BOOT = 5,
IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
IWL_UCODE_TLV_PAN = 7,
IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
IWL_UCODE_TLV_WOWLAN_INST = 16,
IWL_UCODE_TLV_WOWLAN_DATA = 17,
IWL_UCODE_TLV_FLAGS = 18,
};
/**
* enum iwl_ucode_tlv_flag - ucode API flags
* @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
* was a separate TLV but moved here to save space.
* @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
};
struct iwl_ucode_tlv {
__le16 type; /* see above */
__le16 alternative; /* see comment */
__le32 length; /* not including type/length fields */
u8 data[0];
} __packed;
#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
struct iwl_tlv_ucode_header {
/*
* The TLV style ucode header is distinguished from
* the v1/v2 style header by first four bytes being
* zero, as such is an invalid combination of
* major/minor/API/serial versions.
*/
__le32 zero;
__le32 magic;
u8 human_readable[64];
__le32 ver; /* major/minor/API/serial */
__le32 build;
__le64 alternatives; /* bitmask of valid alternatives */
/*
* The data contained herein has a TLV layout,
* see above for the TLV header and types.
* Note that each TLV is padded to a length
* that is a multiple of 4 for alignment.
*/
u8 data[0];
};
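To make the TLV layout above concrete, here is a minimal, hypothetical walker over the data[] area (an illustrative sketch only, not the driver's actual firmware parser): it checks the zero/magic words, pads each TLV length up to a multiple of 4, and skips entries whose non-zero alternative does not match the selected one.
static int example_walk_ucode_tlvs(const struct iwl_tlv_ucode_header *hdr,
				   size_t fw_len, u16 wanted_alternative)
{
	const u8 *pos = hdr->data;
	const u8 *end = (const u8 *)hdr + fw_len;
	if (hdr->zero != 0 || le32_to_cpu(hdr->magic) != IWL_TLV_UCODE_MAGIC)
		return -EINVAL;		/* not a TLV-style image */
	while (pos + sizeof(struct iwl_ucode_tlv) <= end) {
		const struct iwl_ucode_tlv *tlv = (const void *)pos;
		u32 len = le32_to_cpu(tlv->length);
		u16 alt = le16_to_cpu(tlv->alternative);
		if (pos + sizeof(*tlv) + len > end)
			return -EINVAL;	/* truncated TLV */
		/* alternative == 0 is always valid; otherwise it must match */
		if (alt == 0 || alt == wanted_alternative)
			pr_debug("TLV type %u, %u bytes\n",
				 le16_to_cpu(tlv->type), len);
		/* each TLV is padded to a multiple of 4 for alignment */
		pos += sizeof(*tlv) + ALIGN(len, 4);
	}
	return 0;
}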
struct iwl_sensitivity_ranges {
u16 min_nrg_cck;
u16 max_nrg_cck;
......@@ -821,7 +713,6 @@ struct iwl_wipan_noa_data {
struct iwl_priv {
/*data shared among all the driver's layers */
struct iwl_shared _shrd;
struct iwl_shared *shrd;
/* ieee device used by generic ieee processing code */
......
......@@ -156,16 +156,16 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG,
iwl_set_bit(trans(bus), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
/* See if we got it */
ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG,
ret = iwl_poll_bit(trans(bus), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_EEPROM(bus,
IWL_DEBUG_EEPROM(trans(bus),
"Acquired semaphore after %d tries.\n",
count+1);
return ret;
......@@ -177,14 +177,15 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
static void iwl_eeprom_release_semaphore(struct iwl_bus *bus)
{
iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG,
iwl_clear_bit(trans(bus), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
{
u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
u32 gp = iwl_read32(trans, CSR_EEPROM_GP) &
CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
......@@ -305,13 +306,13 @@ void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode)
{
iwl_read32(bus, CSR_OTP_GP_REG);
iwl_read32(trans(bus), CSR_OTP_GP_REG);
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
iwl_clear_bit(bus, CSR_OTP_GP_REG,
iwl_clear_bit(trans(bus), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
else
iwl_set_bit(bus, CSR_OTP_GP_REG,
iwl_set_bit(trans(bus), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
......@@ -332,7 +333,7 @@ static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
nvm_type = NVM_DEVICE_TYPE_EEPROM;
break;
default:
otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
otpgp = iwl_read32(trans(bus), CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
nvm_type = NVM_DEVICE_TYPE_OTP;
else
......@@ -347,22 +348,22 @@ static int iwl_init_otp_access(struct iwl_bus *bus)
int ret;
/* Enable 40MHz radio clock */
iwl_write32(bus, CSR_GP_CNTRL,
iwl_read32(bus, CSR_GP_CNTRL) |
iwl_write32(trans(bus), CSR_GP_CNTRL,
iwl_read32(trans(bus), CSR_GP_CNTRL) |
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* wait for clock to be ready */
ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
ret = iwl_poll_bit(trans(bus), CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (ret < 0)
IWL_ERR(bus, "Time out access OTP\n");
else {
iwl_set_bits_prph(bus, APMG_PS_CTRL_REG,
iwl_set_bits_prph(trans(bus), APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG,
iwl_clear_bits_prph(trans(bus), APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
/*
......@@ -370,7 +371,7 @@ static int iwl_init_otp_access(struct iwl_bus *bus)
* this is only applicable for HW with OTP shadow RAM
*/
if (cfg(bus)->base_params->shadow_ram_support)
iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG,
iwl_set_bit(trans(bus), CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
}
return ret;
......@@ -382,9 +383,9 @@ static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
u32 r;
u32 otpgp;
iwl_write32(bus, CSR_EEPROM_REG,
iwl_write32(trans(bus), CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
ret = iwl_poll_bit(bus, CSR_EEPROM_REG,
ret = iwl_poll_bit(trans(bus), CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
......@@ -392,13 +393,13 @@ static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
IWL_ERR(bus, "Time out reading OTP[%d]\n", addr);
return ret;
}
r = iwl_read32(bus, CSR_EEPROM_REG);
r = iwl_read32(trans(bus), CSR_EEPROM_REG);
/* check for ECC errors: */
otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
otpgp = iwl_read32(trans(bus), CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
/* stop in this case */
/* set the uncorrectable OTP ECC bit for acknowledgement */
iwl_set_bit(bus, CSR_OTP_GP_REG,
iwl_set_bit(trans(bus), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n");
return -EINVAL;
......@@ -406,7 +407,7 @@ static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
/* continue in this case */
/* set the correctable OTP ECC bit for acknowledgement */
iwl_set_bit(bus, CSR_OTP_GP_REG,
iwl_set_bit(trans(bus), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
IWL_ERR(bus, "Correctable OTP ECC error, continue read\n");
}
......@@ -656,7 +657,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
{
struct iwl_shared *shrd = priv->shrd;
__le16 *e;
u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
u32 gp = iwl_read32(trans(priv), CSR_EEPROM_GP);
int sz;
int ret;
u16 addr;
......@@ -676,8 +677,6 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
e = (__le16 *)shrd->eeprom;
iwl_apm_init(priv);
ret = iwl_eeprom_verify_signature(trans(priv));
if (ret < 0) {
IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
......@@ -701,11 +700,11 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
ret = -ENOENT;
goto done;
}
iwl_write32(bus(priv), CSR_EEPROM_GP,
iwl_read32(bus(priv), CSR_EEPROM_GP) &
iwl_write32(trans(priv), CSR_EEPROM_GP,
iwl_read32(trans(priv), CSR_EEPROM_GP) &
~CSR_EEPROM_GP_IF_OWNER_MSK);
iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
iwl_set_bit(trans(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
......@@ -730,10 +729,10 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
for (addr = 0; addr < sz; addr += sizeof(u16)) {
u32 r;
iwl_write32(bus(priv), CSR_EEPROM_REG,
iwl_write32(trans(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
ret = iwl_poll_bit(trans(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
......@@ -741,7 +740,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
goto done;
}
r = iwl_read32(bus(priv), CSR_EEPROM_REG);
r = iwl_read32(trans(priv), CSR_EEPROM_REG);
e[addr / 2] = cpu_to_le16(r >> 16);
}
}
......@@ -758,8 +757,6 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
err:
if (ret)
iwl_eeprom_free(priv->shrd);
/* Reset chip to save power until we load uCode during "up". */
iwl_apm_stop(priv);
alloc_err:
return ret;
}
......@@ -1072,7 +1069,7 @@ void iwl_rf_config(struct iwl_priv *priv)
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
EEPROM_RF_CFG_DASH_MSK(radio_cfg));
......@@ -1084,7 +1081,7 @@ void iwl_rf_config(struct iwl_priv *priv)
WARN_ON(1);
/* set CSR_HW_CONFIG_REG for uCode use */
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
}
......@@ -31,63 +31,63 @@
#include "iwl-devtrace.h"
#include "iwl-shared.h"
#include "iwl-bus.h"
#include "iwl-trans.h"
static inline void iwl_write8(struct iwl_bus *bus, u32 ofs, u8 val)
static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trace_iwlwifi_dev_iowrite8(priv(bus), ofs, val);
bus_write8(bus, ofs, val);
trace_iwlwifi_dev_iowrite8(priv(trans), ofs, val);
iwl_trans_write8(trans, ofs, val);
}
static inline void iwl_write32(struct iwl_bus *bus, u32 ofs, u32 val)
static inline void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite32(priv(bus), ofs, val);
bus_write32(bus, ofs, val);
trace_iwlwifi_dev_iowrite32(priv(trans), ofs, val);
iwl_trans_write32(trans, ofs, val);
}
static inline u32 iwl_read32(struct iwl_bus *bus, u32 ofs)
static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
{
u32 val = bus_read32(bus, ofs);
trace_iwlwifi_dev_ioread32(priv(bus), ofs, val);
u32 val = iwl_trans_read32(trans, ofs);
trace_iwlwifi_dev_ioread32(priv(trans), ofs, val);
return val;
}
void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask);
void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask);
void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout);
int iwl_grab_nic_access_silent(struct iwl_bus *bus);
int iwl_grab_nic_access(struct iwl_bus *bus);
void iwl_release_nic_access(struct iwl_bus *bus);
int iwl_grab_nic_access_silent(struct iwl_trans *trans);
int iwl_grab_nic_access(struct iwl_trans *trans);
void iwl_release_nic_access(struct iwl_trans *trans);
u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg);
void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
u32 iwl_read_prph(struct iwl_bus *bus, u32 reg);
void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val);
void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
u32 iwl_read_prph(struct iwl_trans *trans, u32 reg);
void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words);
#define iwl_read_targ_mem_words(bus, addr, buf, bufsize) \
#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \
do { \
BUILD_BUG_ON((bufsize) % sizeof(u32)); \
_iwl_read_targ_mem_words(bus, addr, buf, \
_iwl_read_targ_mem_words(trans, addr, buf, \
(bufsize) / sizeof(u32));\
} while (0)
int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words);
u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
#endif
......@@ -71,7 +71,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
/* Set led register off */
void iwlagn_led_enable(struct iwl_priv *priv)
{
iwl_write32(bus(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
iwl_write32(trans(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
/*
......@@ -107,9 +107,10 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
};
u32 reg;
reg = iwl_read32(bus(priv), CSR_LED_REG);
reg = iwl_read32(trans(priv), CSR_LED_REG);
if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
iwl_write32(bus(priv), CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
iwl_write32(trans(priv), CSR_LED_REG,
reg & CSR_LED_BSM_CTRL_MSK);
return iwl_trans_send_cmd(trans(priv), &cmd);
}
......@@ -206,7 +207,7 @@ void iwl_leds_init(struct iwl_priv *priv)
break;
}
ret = led_classdev_register(bus(priv)->dev, &priv->led);
ret = led_classdev_register(trans(priv)->dev, &priv->led);
if (ret) {
kfree(priv->led.name);
return;
......
......@@ -35,7 +35,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
......@@ -43,6 +42,7 @@
#include <asm/div64.h>
#include "iwl-ucode.h"
#include "iwl-eeprom.h"
#include "iwl-wifi.h"
#include "iwl-dev.h"
......@@ -196,7 +196,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
WIPHY_FLAG_IBSS_RSN;
if (trans(priv)->ucode_wowlan.code.len &&
device_can_wakeup(bus(priv)->dev)) {
device_can_wakeup(trans(priv)->dev)) {
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
......@@ -234,7 +234,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&priv->bands[IEEE80211_BAND_5GHZ];
hw->wiphy->hw_version = bus_get_hw_id(bus(priv));
hw->wiphy->hw_version = trans(priv)->hw_id;
iwl_leds_init(priv);
......@@ -346,9 +346,10 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
flush_workqueue(priv->shrd->workqueue);
/* User space software may expect getting rfkill changes
* even if interface is down */
iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
iwl_enable_rfkill_int(priv);
* even if interface is down, trans->down will leave the RF
* kill interrupt enabled
*/
iwl_trans_stop_hw(trans(priv));
IWL_DEBUG_MAC80211(priv, "leave\n");
}
......@@ -405,10 +406,10 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
if (ret)
goto error;
device_set_wakeup_enable(bus(priv)->dev, true);
device_set_wakeup_enable(trans(priv)->dev, true);
/* Now let the ucode operate on its own */
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
goto out;
......@@ -436,19 +437,19 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->shrd->mutex);
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
base = priv->shrd->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
spin_lock_irqsave(&bus(priv)->reg_lock, flags);
ret = iwl_grab_nic_access_silent(bus(priv));
spin_lock_irqsave(&trans(priv)->reg_lock, flags);
ret = iwl_grab_nic_access_silent(trans(priv));
if (ret == 0) {
iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
iwl_release_nic_access(bus(priv));
iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, base);
status = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
iwl_release_nic_access(trans(priv));
}
spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
spin_unlock_irqrestore(&trans(priv)->reg_lock, flags);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (ret == 0) {
......@@ -460,7 +461,8 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
if (priv->wowlan_sram)
_iwl_read_targ_mem_words(
bus(priv), 0x800000, priv->wowlan_sram,
trans(priv), 0x800000,
priv->wowlan_sram,
trans->ucode_wowlan.data.len / 4);
}
#endif
......@@ -471,7 +473,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
priv->shrd->wowlan = false;
device_set_wakeup_enable(bus(priv)->dev, false);
device_set_wakeup_enable(trans(priv)->dev, false);
iwlagn_prepare_restart(priv);
......
......@@ -71,112 +71,6 @@
#include "iwl-csr.h"
#include "iwl-cfg.h"
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
struct iwl_pci_bus {
/* basic pci-network driver stuff */
struct pci_dev *pci_dev;
/* pci hardware address support */
void __iomem *hw_base;
};
#define IWL_BUS_GET_PCI_BUS(_iwl_bus) \
((struct iwl_pci_bus *) ((_iwl_bus)->bus_specific))
#define IWL_BUS_GET_PCI_DEV(_iwl_bus) \
((IWL_BUS_GET_PCI_BUS(_iwl_bus))->pci_dev)
static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus)
{
int pos;
u16 pci_lnk_ctl;
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
pos = pci_pcie_cap(pci_dev);
pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
return pci_lnk_ctl;
}
static bool iwl_pci_is_pm_supported(struct iwl_bus *bus)
{
u16 lctl = iwl_pciexp_link_ctrl(bus);
return !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}
static void iwl_pci_apm_config(struct iwl_bus *bus)
{
/*
* HW bug W/A for instability in PCIe bus L0S->L1 transition.
* Check if BIOS (or OS) enabled L1-ASPM on this device.
* If so (likely), disable L0S, so device moves directly L0->L1;
* costs negligible amount of power savings.
* If not (unlikely), enable L0S, so there is at least some
* power savings, even without L1.
*/
u16 lctl = iwl_pciexp_link_ctrl(bus);
if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
PCI_CFG_LINK_CTRL_VAL_L1_EN) {
/* L1-ASPM enabled; disable(!) L0S */
iwl_set_bit(bus, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
} else {
/* L1-ASPM disabled; enable(!) L0S */
iwl_clear_bit(bus, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
}
}
static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[],
int buf_len)
{
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
snprintf(buf, buf_len, "PCI ID: 0x%04X:0x%04X", pci_dev->device,
pci_dev->subsystem_device);
}
static u32 iwl_pci_get_hw_id(struct iwl_bus *bus)
{
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
return (pci_dev->device << 16) + pci_dev->subsystem_device;
}
static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
}
static void iwl_pci_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
iowrite32(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
}
static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
{
u32 val = ioread32(IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
return val;
}
static const struct iwl_bus_ops bus_ops_pci = {
.get_pm_support = iwl_pci_is_pm_supported,
.apm_config = iwl_pci_apm_config,
.get_hw_id_string = iwl_pci_get_hw_id_string,
.get_hw_id = iwl_pci_get_hw_id,
.write8 = iwl_pci_write8,
.write32 = iwl_pci_write32,
.read32 = iwl_pci_read32,
};
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
......@@ -362,112 +256,61 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
struct iwl_bus *bus;
struct iwl_pci_bus *pci_bus;
u16 pci_cmd;
int err;
bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
bus = kzalloc(sizeof(*bus), GFP_KERNEL);
if (!bus) {
dev_printk(KERN_ERR, &pdev->dev,
"Couldn't allocate iwl_pci_bus");
err = -ENOMEM;
goto out_no_pci;
return -ENOMEM;
}
pci_bus = IWL_BUS_GET_PCI_BUS(bus);
pci_bus->pci_dev = pdev;
pci_set_drvdata(pdev, bus);
/* W/A - seems to solve weird behavior. We need to remove this if we
* don't want to stay in L1 all the time. This wastes a lot of power */
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
PCIE_LINK_STATE_CLKPM);
if (pci_enable_device(pdev)) {
err = -ENODEV;
goto out_no_pci;
bus->shrd = kzalloc(sizeof(*bus->shrd), GFP_KERNEL);
if (!bus->shrd) {
dev_printk(KERN_ERR, &pdev->dev,
"Couldn't allocate iwl_shared");
err = -ENOMEM;
goto out_free_bus;
}
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
/* both attempts failed: */
if (err) {
dev_printk(KERN_ERR, bus->dev,
"No suitable DMA available.\n");
goto out_pci_disable_device;
}
}
bus->shrd->bus = bus;
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed");
goto out_pci_disable_device;
}
pci_set_drvdata(pdev, bus);
pci_bus->hw_base = pci_iomap(pdev, 0, 0);
if (!pci_bus->hw_base) {
dev_printk(KERN_ERR, bus->dev, "pci_iomap failed");
err = -ENODEV;
goto out_pci_release_regions;
#ifdef CONFIG_IWLWIFI_IDI
trans(bus) = iwl_trans_idi_alloc(bus->shrd, pdev, ent);
if (trans(bus) == NULL) {
err = -ENOMEM;
goto out_free_bus;
}
dev_printk(KERN_INFO, &pdev->dev,
"pci_resource_len = 0x%08llx\n",
(unsigned long long) pci_resource_len(pdev, 0));
dev_printk(KERN_INFO, &pdev->dev,
"pci_resource_base = %p\n", pci_bus->hw_base);
dev_printk(KERN_INFO, &pdev->dev,
"HW Revision ID = 0x%X\n", pdev->revision);
/* We disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
err = pci_enable_msi(pdev);
if (err)
dev_printk(KERN_ERR, &pdev->dev,
"pci_enable_msi failed(0X%x)", err);
/* TODO: Move this away, not needed if not MSI */
/* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
err = iwl_probe(bus, &trans_ops_idi, cfg);
#else
trans(bus) = iwl_trans_pcie_alloc(bus->shrd, pdev, ent);
if (trans(bus) == NULL) {
err = -ENOMEM;
goto out_free_bus;
}
bus->dev = &pdev->dev;
bus->irq = pdev->irq;
bus->ops = &bus_ops_pci;
err = iwl_probe(bus, &trans_ops_pcie, cfg);
#endif
if (err)
goto out_disable_msi;
goto out_free_trans;
return 0;
out_disable_msi:
pci_disable_msi(pdev);
pci_iounmap(pdev, pci_bus->hw_base);
out_pci_release_regions:
out_free_trans:
iwl_trans_free(trans(bus));
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
out_free_bus:
kfree(bus->shrd);
kfree(bus);
return err;
}
......@@ -475,18 +318,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void __devexit iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_bus *bus = pci_get_drvdata(pdev);
struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
struct iwl_shared *shrd = bus->shrd;
iwl_remove(shrd->priv);
iwl_trans_free(shrd->trans);
pci_disable_msi(pci_dev);
pci_iounmap(pci_dev, pci_bus->hw_base);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
pci_set_drvdata(pci_dev, NULL);
pci_set_drvdata(pdev, NULL);
kfree(bus->shrd);
kfree(bus);
}
......
......@@ -436,7 +436,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
priv->power_data.bus_pm = bus_get_pm_support(bus(priv));
priv->power_data.bus_pm = trans(priv)->pm_support;
priv->power_data.debug_sleep_level_override = -1;
......
......@@ -543,8 +543,6 @@ int iwlagn_hw_valid_rtc_data_addr(u32 addr);
void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state);
void iwl_nic_config(struct iwl_priv *priv);
void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb);
void iwl_apm_stop(struct iwl_priv *priv);
int iwl_apm_init(struct iwl_priv *priv);
void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
const char *get_cmd_string(u8 cmd);
bool iwl_check_for_ct_kill(struct iwl_priv *priv);
......
......@@ -79,6 +79,7 @@
#include "iwl-testmode.h"
#include "iwl-trans.h"
#include "iwl-bus.h"
#include "iwl-fh.h"
/* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
......@@ -208,7 +209,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
if (priv->testmode_trace.trace_enabled) {
if (priv->testmode_trace.cpu_addr &&
priv->testmode_trace.dma_addr)
dma_free_coherent(bus(priv)->dev,
dma_free_coherent(trans(priv)->dev,
priv->testmode_trace.total_size,
priv->testmode_trace.cpu_addr,
priv->testmode_trace.dma_addr);
......@@ -288,7 +289,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = hw->priv;
u32 ofs, val32;
u32 ofs, val32, cmd;
u8 val8;
struct sk_buff *skb;
int status = 0;
......@@ -300,9 +301,22 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
/* Allow access only to FH/CSR/HBUS in direct mode.
Since we don't have the upper bounds for the CSR and HBUS segments,
we will use only the upper bound of FH for sanity check. */
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
(ofs >= FH_MEM_UPPER_BOUND)) {
IWL_DEBUG_INFO(priv, "offset out of segment (0x0 - 0x%x)\n",
FH_MEM_UPPER_BOUND);
return -EINVAL;
}
switch (cmd) {
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
val32 = iwl_read_direct32(bus(priv), ofs);
val32 = iwl_read_direct32(trans(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
......@@ -324,7 +338,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
iwl_write_direct32(bus(priv), ofs, val32);
iwl_write_direct32(trans(priv), ofs, val32);
}
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
......@@ -334,11 +348,11 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
iwl_write8(bus(priv), ofs, val8);
iwl_write8(trans(priv), ofs, val8);
}
break;
case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
val32 = iwl_read_prph(bus(priv), ofs);
val32 = iwl_read_prph(trans(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
......@@ -360,7 +374,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
iwl_write_prph(bus(priv), ofs, val32);
iwl_write_prph(trans(priv), ofs, val32);
}
break;
default:
......@@ -536,7 +550,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
break;
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
devid = bus_get_hw_id(bus(priv));
devid = trans(priv)->hw_id;
IWL_INFO(priv, "hw version: 0x%x\n", devid);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
......@@ -615,7 +629,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
struct iwl_priv *priv = hw->priv;
struct sk_buff *skb;
int status = 0;
struct device *dev = bus(priv)->dev;
struct device *dev = trans(priv)->dev;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
......@@ -814,7 +828,7 @@ static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_ERR(priv, "Error allocating memory\n");
return -ENOMEM;
}
_iwl_read_targ_mem_words(bus(priv), ofs,
_iwl_read_targ_mem_words(trans(priv), ofs,
priv->testmode_sram.buff_addr,
priv->testmode_sram.buff_size / 4);
priv->testmode_sram.num_chunks =
......
......@@ -201,6 +201,7 @@ struct iwl_tx_queue {
* @rxq: all the RX queue data
* @rx_replenish: work that will be called when buffers need to be allocated
* @trans: pointer to the generic transport area
* @irq_requested: true when the irq has been requested
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
* @kw: keep warm address
......@@ -211,6 +212,8 @@ struct iwl_tx_queue {
* @txq_ctx_active_msk: what queue is active
* queue_stopped: tracks what queue is stopped
* queue_stop_count: tracks what SW queue is stopped
* @pci_dev: basic pci-network driver stuff
* @hw_base: pci hardware address support
*/
struct iwl_trans_pcie {
struct iwl_rx_queue rxq;
......@@ -223,6 +226,7 @@ struct iwl_trans_pcie {
int ict_index;
u32 inta;
bool use_ict;
bool irq_requested;
struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;
......@@ -241,6 +245,10 @@ struct iwl_trans_pcie {
#define IWL_MAX_HW_QUEUES 32
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
atomic_t queue_stop_count[4];
/* PCI bus related data */
struct pci_dev *pci_dev;
void __iomem *hw_base;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
......@@ -258,7 +266,7 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
/*****************************************************
* ICT
******************************************************/
int iwl_reset_ict(struct iwl_trans *trans);
void iwl_reset_ict(struct iwl_trans *trans);
void iwl_disable_ict(struct iwl_trans *trans);
int iwl_alloc_isr_ict(struct iwl_trans *trans);
void iwl_free_isr_ict(struct iwl_trans *trans);
......@@ -311,12 +319,12 @@ static inline void iwl_disable_interrupts(struct iwl_trans *trans)
clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
/* disable interrupts from uCode/NIC to host */
iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
/* acknowledge/clear/reset any interrupts still pending
* from uCode or flow handler (Rx/Tx DMA) */
iwl_write32(bus(trans), CSR_INT, 0xffffffff);
iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
iwl_write32(trans, CSR_INT, 0xffffffff);
iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
......@@ -327,7 +335,7 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
/*
......
......@@ -100,7 +100,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
if (hw_params(trans).shadow_reg_enable) {
/* shadow register enabled */
iwl_write32(bus(trans), HBUS_TARG_WRPTR,
iwl_write32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
} else {
/* if we're trying to save power */
......@@ -108,18 +108,18 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
/* wake up nic if it's powered down ...
* uCode will wake up, and interrupt us again, so next
* time we'll skip this part. */
reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
IWL_DEBUG_INFO(trans,
"Tx queue %d requesting wakeup,"
" GP1 = 0x%x\n", txq_id, reg);
iwl_set_bit(bus(trans), CSR_GP_CNTRL,
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
return;
}
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
/*
......@@ -128,7 +128,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
* trying to tx (during RFKILL, we're not trying to tx).
*/
} else
iwl_write32(bus(trans), HBUS_TARG_WRPTR,
iwl_write32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
}
txq->need_update = 0;
......@@ -190,14 +190,14 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
/* Unmap tx_cmd */
if (num_tbs)
dma_unmap_single(bus(trans)->dev,
dma_unmap_single(trans->dev,
dma_unmap_addr(meta, mapping),
dma_unmap_len(meta, len),
DMA_BIDIRECTIONAL);
/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
......@@ -383,14 +383,14 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
tbl_dw_addr = trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);
tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
if (txq_id & 0x1)
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
else
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);
iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
return 0;
}
......@@ -399,7 +399,7 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
iwl_write_prph(bus(trans),
iwl_write_prph(trans,
SCD_QUEUE_STATUS_BITS(txq_id),
(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
......@@ -409,9 +409,9 @@ void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
int txq_id, u32 index)
{
IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(index & 0xff) | (txq_id << 8));
iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
......@@ -423,7 +423,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
int active =
test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
......@@ -431,9 +431,12 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
txq->sched_retry = scd_retry;
IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
active ? "Activate" : "Deactivate",
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
if (active)
IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
else
IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
scd_retry ? "BA" : "AC/CMD", txq_id);
}
static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
......@@ -498,10 +501,10 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
/* Set this queue as a chain-building queue */
iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));
iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));
/* enable aggregations for the queue */
iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));
iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
......@@ -510,7 +513,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
/* Set up Tx window size and frame limit for this queue */
iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
sizeof(u32),
((frame_limit <<
......@@ -520,7 +523,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
......@@ -584,7 +587,7 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
iwlagn_tx_queue_stop_scheduler(trans, txq_id);
iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));
trans_pcie->agg_txq[sta_id][tid] = 0;
trans_pcie->txq[txq_id].q.read_ptr = 0;
......@@ -592,7 +595,7 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
/* supposes that ssn_idx is valid (!= 0xFFF) */
iwl_trans_set_wr_ptrs(trans, txq_id, 0);
iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
iwl_txq_ctx_deactivate(trans_pcie, txq_id);
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
return 0;
......@@ -725,9 +728,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
q->write_ptr, idx, trans->shrd->cmd_queue);
phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
idx = -ENOMEM;
goto out;
}
......@@ -748,10 +751,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
continue;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
phys_addr = dma_map_single(bus(trans)->dev,
phys_addr = dma_map_single(trans->dev,
(void *)cmd->data[i],
cmd->len[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
if (dma_mapping_error(trans->dev, phys_addr)) {
iwlagn_unmap_tfd(trans, out_meta,
&txq->tfds[q->write_ptr],
DMA_BIDIRECTIONAL);
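The hunk above only swaps bus(trans)->dev for trans->dev; the mapping pattern itself is unchanged: map the command buffer, test the handle with dma_mapping_error(), and bail out before it is ever used. A minimal sketch of that pattern, with 'dev', 'buf' and 'len' standing in for trans->dev, &out_cmd->hdr and copy_size (the real code then unwinds via iwlagn_unmap_tfd()):

#include <linux/dma-mapping.h>

/* Hedged sketch: map a host-command buffer for bidirectional DMA and
 * verify the mapping before handing it to the hardware. */
static int map_cmd_buffer(struct device *dev, void *buf, size_t len,
			  dma_addr_t *phys)
{
	*phys = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, *phys)))
		return -ENOMEM;	/* caller must not use *phys on error */
	return 0;
}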
......
......@@ -131,16 +131,26 @@ struct iwl_host_cmd {
u8 id;
};
/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
dma_addr_t p_addr; /* hardware address */
void *v_addr; /* software address */
u32 len; /* size in bytes */
};
struct fw_img {
struct fw_desc code; /* firmware code image */
struct fw_desc data; /* firmware data image */
};
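struct fw_img now lives up here so the transport can be handed a complete image through start_fw(). A rough sketch of how one fw_desc might be filled from a DMA-coherent buffer; the helper name fill_fw_desc is illustrative, not the driver's actual loader (which goes through a helper like iwl_alloc_fw_desc()):

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Hedged sketch: copy one firmware section into a DMA-coherent buffer
 * and record address/length in a struct fw_desc. */
static int fill_fw_desc(struct device *dev, struct fw_desc *desc,
			const void *data, u32 len)
{
	desc->v_addr = dma_alloc_coherent(dev, len, &desc->p_addr, GFP_KERNEL);
	if (!desc->v_addr)
		return -ENOMEM;
	desc->len = len;
	memcpy(desc->v_addr, data, len);
	return 0;
}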
/**
* struct iwl_trans_ops - transport specific operations
* @alloc: allocates the meta data (not the queues themselves)
* @request_irq: requests IRQ - will be called before the FW load in probe flow
* @start_device: allocates and inits all the resources for the transport
* layer.
* @prepare_card_hw: claim the ownership on the HW. Will be called during
* probe.
* @tx_start: starts and configures all the Tx fifo - usually done once the fw
* is alive.
 * @start_hw: starts the HW - from that point on, the HW can send interrupts
 * @stop_hw: stops the HW - from that point on, the HW will be in low power but
 *	will still issue interrupts if the HW RF kill is triggered.
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kicks a fw image. This handler may sleep.
 * @fw_alive: called when the fw sends the alive notification
 * @wake_any_queue: wake all the queues of a specific context IWL_RXON_CTX_*
 * @stop_device: stops the whole device (embedded CPU put to reset)
 * @send_cmd: send a host command
......@@ -150,7 +160,6 @@ struct iwl_host_cmd {
* @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
* ready and a successful ADDBA response has been received.
* @tx_agg_disable: de-configure a Tx queue to send AMPDUs
* @kick_nic: remove the RESET from the embedded CPU and let it run
 * @free: release all the resources for the transport layer itself, such as
 *	the irq, tasklet etc...
* @stop_queue: stop a specific queue
......@@ -160,15 +169,17 @@ struct iwl_host_cmd {
* automatically deleted.
* @suspend: stop the device unless WoWLAN is configured
* @resume: resume activity of the device
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
*/
struct iwl_trans_ops {
struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
int (*request_irq)(struct iwl_trans *iwl_trans);
int (*start_device)(struct iwl_trans *trans);
int (*prepare_card_hw)(struct iwl_trans *trans);
int (*start_hw)(struct iwl_trans *iwl_trans);
void (*stop_hw)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, struct fw_img *fw);
void (*fw_alive)(struct iwl_trans *trans);
void (*stop_device)(struct iwl_trans *trans);
void (*tx_start)(struct iwl_trans *trans);
void (*wake_any_queue)(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
......@@ -191,8 +202,6 @@ struct iwl_trans_ops {
enum iwl_rxon_context_id ctx, int sta_id, int tid,
int frame_limit, u16 ssn);
void (*kick_nic)(struct iwl_trans *trans);
void (*free)(struct iwl_trans *trans);
void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
......@@ -204,18 +213,9 @@ struct iwl_trans_ops {
int (*suspend)(struct iwl_trans *trans);
int (*resume)(struct iwl_trans *trans);
#endif
};
/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
dma_addr_t p_addr; /* hardware address */
void *v_addr; /* software address */
u32 len; /* size in bytes */
};
struct fw_img {
struct fw_desc code; /* firmware code image */
struct fw_desc data; /* firmware data image */
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
};
/* Opaque calibration results */
......@@ -231,17 +231,31 @@ struct iwl_calib_result {
* @ops - pointer to iwl_trans_ops
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
* @hcmd_lock: protects HCMD
 * @reg_lock - protects hw register access
* @dev - pointer to struct device * that represents the device
* @irq - the irq number for the device
* @hw_id: a u32 with the ID of the device / subdevice.
 *	Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_rt: run time ucode image
* @ucode_init: init ucode image
* @ucode_wowlan: wake on wireless ucode image (optional)
* @nvm_device_type: indicates OTP or eeprom
* @pm_support: set to true in start_hw if link pm is supported
* @calib_results: list head for init calibration results
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_shared *shrd;
spinlock_t hcmd_lock;
spinlock_t reg_lock;
struct device *dev;
unsigned int irq;
u32 hw_rev;
u32 hw_id;
char hw_id_str[52];
u8 ucode_write_complete; /* the image write is complete */
struct fw_img ucode_rt;
......@@ -250,6 +264,7 @@ struct iwl_trans {
/* eeprom related variables */
int nvm_device_type;
bool pm_support;
/* init calibration results */
struct list_head calib_results;
......@@ -259,29 +274,31 @@ struct iwl_trans {
char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
};
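The zero-length trans_specific[] array at the end of struct iwl_trans is where each transport keeps its private state; the pointer-size alignment attribute lets a transport-specific struct start immediately after the generic one. A hedged sketch of how a transport might recover that private area - the names my_trans_priv and hw_base are illustrative, not the real PCIe transport layout:

/* Hedged sketch: a transport-private struct living in trans_specific[]. */
struct my_trans_priv {
	void __iomem *hw_base;		/* mapped BAR, illustrative only */
};

#define MY_TRANS_GET_PRIV(_trans) \
	((struct my_trans_priv *)((_trans)->trans_specific))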
static inline int iwl_trans_request_irq(struct iwl_trans *trans)
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
return trans->ops->request_irq(trans);
return trans->ops->start_hw(trans);
}
static inline int iwl_trans_start_device(struct iwl_trans *trans)
static inline void iwl_trans_stop_hw(struct iwl_trans *trans)
{
return trans->ops->start_device(trans);
trans->ops->stop_hw(trans);
}
static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
{
return trans->ops->prepare_card_hw(trans);
trans->ops->fw_alive(trans);
}
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
static inline int iwl_trans_start_fw(struct iwl_trans *trans, struct fw_img *fw)
{
trans->ops->stop_device(trans);
might_sleep();
return trans->ops->start_fw(trans, fw);
}
static inline void iwl_trans_tx_start(struct iwl_trans *trans)
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
trans->ops->tx_start(trans);
trans->ops->stop_device(trans);
}
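The old request_irq/start_device/prepare_card_hw/tx_start/kick_nic entry points collapse into start_hw, start_fw and fw_alive. A hedged sketch of the bring-up order the new wrappers imply, using only the helpers defined above (error paths trimmed, firmware selection and the wait for the ALIVE notification left as placeholders):

/* Hedged sketch of the new bring-up flow: enable the HW, push a
 * firmware image, and finish configuration once ALIVE is processed. */
static int example_bring_up(struct iwl_trans *trans, struct fw_img *fw)
{
	int ret;

	ret = iwl_trans_start_hw(trans);	/* interrupts can fire from here */
	if (ret)
		return ret;

	ret = iwl_trans_start_fw(trans, fw);	/* may sleep, kicks the image */
	if (ret) {
		iwl_trans_stop_hw(trans);
		return ret;
	}

	/* ... wait for the uCode ALIVE notification here ... */

	iwl_trans_fw_alive(trans);		/* configure Tx FIFOs etc. */
	return 0;
}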
static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
......@@ -337,11 +354,6 @@ static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
}
static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
{
trans->ops->kick_nic(trans);
}
static inline void iwl_trans_free(struct iwl_trans *trans)
{
trans->ops->free(trans);
......@@ -380,13 +392,24 @@ static inline int iwl_trans_resume(struct iwl_trans *trans)
}
#endif
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trans->ops->write8(trans, ofs, val);
}
static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
trans->ops->write32(trans, ofs, val);
}
static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
return trans->ops->read32(trans, ofs);
}
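With write8/write32/read32 routed through the ops table, the generic I/O helpers no longer need a bus handle, which is what drives the bus(...) to trans(...) conversions elsewhere in this commit. A hedged sketch of a set-bit helper built only on these wrappers (the real iwl-io code also takes reg_lock and grabs NIC access, which is omitted here):

/* Hedged sketch: read-modify-write of a CSR register through the
 * transport ops, ignoring the locking done by the real iwl-io helpers. */
static void example_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
	u32 val = iwl_trans_read32(trans, reg);

	iwl_trans_write32(trans, reg, val | mask);
}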
/*****************************************************
* Transport layers implementations
* Utils functions
******************************************************/
extern const struct iwl_trans_ops trans_ops_pcie;
int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
const void *data, size_t len);
void iwl_dealloc_ucode(struct iwl_trans *trans);
int iwl_send_calib_results(struct iwl_trans *trans);
......@@ -394,4 +417,18 @@ int iwl_calib_set(struct iwl_trans *trans,
const struct iwl_calib_hdr *cmd, int len);
void iwl_calib_free_results(struct iwl_trans *trans);
/*****************************************************
* Transport layers implementations + their allocation function
******************************************************/
struct pci_dev;
struct pci_device_id;
extern const struct iwl_trans_ops trans_ops_pcie;
struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
struct pci_dev *pdev,
const struct pci_device_id *ent);
extern const struct iwl_trans_ops trans_ops_idi;
struct iwl_trans *iwl_trans_idi_alloc(struct iwl_shared *shrd,
void *pdev_void,
const void *ent_void);
#endif /* __iwl_trans_h__ */
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __iwl_ucode_h__
#define __iwl_ucode_h__
/* v1/v2 uCode file layout */
struct iwl_ucode_header {
__le32 ver; /* major/minor/API/serial */
union {
struct {
__le32 inst_size; /* bytes of runtime code */
__le32 data_size; /* bytes of runtime data */
__le32 init_size; /* bytes of init code */
__le32 init_data_size; /* bytes of init data */
__le32 boot_size; /* bytes of bootstrap code */
u8 data[0]; /* in same order as sizes */
} v1;
struct {
__le32 build; /* build number */
__le32 inst_size; /* bytes of runtime code */
__le32 data_size; /* bytes of runtime data */
__le32 init_size; /* bytes of init code */
__le32 init_data_size; /* bytes of init data */
__le32 boot_size; /* bytes of bootstrap code */
u8 data[0]; /* in same order as sizes */
} v2;
} u;
};
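The v1 and v2 layouts differ only by the leading build field, so the section sizes sit at different offsets depending on which union member applies. A hedged sketch of pulling the runtime sizes out of either layout; deciding is_v2 is left to the caller (in practice it comes from the API version bits in ver), which is an assumption, not something this header states:

/* Hedged sketch: read the runtime code/data sizes from a v1 or v2
 * header.  'is_v2' must be decided by the caller. */
static void example_get_rt_sizes(const struct iwl_ucode_header *hdr,
				 bool is_v2, u32 *inst, u32 *data)
{
	if (is_v2) {
		*inst = le32_to_cpu(hdr->u.v2.inst_size);
		*data = le32_to_cpu(hdr->u.v2.data_size);
	} else {
		*inst = le32_to_cpu(hdr->u.v1.inst_size);
		*data = le32_to_cpu(hdr->u.v1.data_size);
	}
}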
/*
* new TLV uCode file layout
*
 * The new TLV file format contains TLVs that each specify
 * some piece of data. To facilitate "groups", for example
 * different instruction images with different capabilities,
* bundled with the same init image, an alternative mechanism
* is provided:
* When the alternative field is 0, that means that the item
* is always valid. When it is non-zero, then it is only
* valid in conjunction with items of the same alternative,
* in which case the driver (user) selects one alternative
* to use.
*/
enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_INVALID = 0, /* unused */
IWL_UCODE_TLV_INST = 1,
IWL_UCODE_TLV_DATA = 2,
IWL_UCODE_TLV_INIT = 3,
IWL_UCODE_TLV_INIT_DATA = 4,
IWL_UCODE_TLV_BOOT = 5,
IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
IWL_UCODE_TLV_PAN = 7,
IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
IWL_UCODE_TLV_WOWLAN_INST = 16,
IWL_UCODE_TLV_WOWLAN_DATA = 17,
IWL_UCODE_TLV_FLAGS = 18,
};
/**
* enum iwl_ucode_tlv_flag - ucode API flags
* @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
* was a separate TLV but moved here to save space.
* @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
};
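The FLAGS TLV carries a single __le32 bitmap, so testing a capability is just a bit test. A hedged sketch; flags_le is assumed to point at the data of an IWL_UCODE_TLV_FLAGS item already located by the parser:

/* Hedged sketch: check the PAN capability bit in a parsed FLAGS TLV. */
static bool example_fw_has_pan(const __le32 *flags_le)
{
	return le32_to_cpu(*flags_le) & IWL_UCODE_TLV_FLAGS_PAN;
}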
struct iwl_ucode_tlv {
__le16 type; /* see above */
__le16 alternative; /* see comment */
__le32 length; /* not including type/length fields */
u8 data[0];
};
#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
struct iwl_tlv_ucode_header {
/*
* The TLV style ucode header is distinguished from
	 * the v1/v2 style header by the first four bytes being
	 * zero, since that is an invalid combination of
* major/minor/API/serial versions.
*/
__le32 zero;
__le32 magic;
u8 human_readable[64];
__le32 ver; /* major/minor/API/serial */
__le32 build;
__le64 alternatives; /* bitmask of valid alternatives */
/*
* The data contained herein has a TLV layout,
* see above for the TLV header and types.
* Note that each TLV is padded to a length
* that is a multiple of 4 for alignment.
*/
u8 data[0];
};
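Since every TLV is padded to a multiple of four bytes, walking the data[] area is a simple offset loop. A hedged sketch of such a walk; bounds checking is reduced to the minimum and the real parser in the driver does considerably more validation (alternatives, duplicate items, etc.):

#include <linux/kernel.h>	/* ALIGN() */

/* Hedged sketch: iterate the TLVs that follow a struct
 * iwl_tlv_ucode_header, honouring the 4-byte padding rule. */
static void example_walk_tlvs(const struct iwl_tlv_ucode_header *hdr,
			      size_t data_len)
{
	const u8 *pos = hdr->data;
	const u8 *end = hdr->data + data_len;

	while (pos + sizeof(struct iwl_ucode_tlv) <= end) {
		const struct iwl_ucode_tlv *tlv = (const void *)pos;
		u32 len = le32_to_cpu(tlv->length);

		if (pos + sizeof(*tlv) + len > end)
			break;	/* truncated file */

		/* dispatch on le16_to_cpu(tlv->type) here */

		pos += sizeof(*tlv) + ALIGN(len, 4);
	}
}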
struct iwl_priv;
int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first);
#endif /* __iwl_ucode_h__ */