Commit 12f7a186 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2018-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Fourth set of iwlwifi patches intended for 4.20

* Support for a new scan type;
* Clean-up in the queue handling code;
* A few bug fixes;
parents d864991b 724fe771
......@@ -1154,14 +1154,14 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
void iwl_fw_error_dump_wk(struct work_struct *work)
/* this function assumes dump_start was called beforehand and dump_end will be
* called afterwards
*/
void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
{
struct iwl_fw_runtime *fwrt =
container_of(work, struct iwl_fw_runtime, dump.wk.work);
struct iwl_fw_dbg_params params = {0};
if (fwrt->ops && fwrt->ops->dump_start &&
fwrt->ops->dump_start(fwrt->ops_ctx))
if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return;
if (fwrt->ops && fwrt->ops->fw_running &&
......@@ -1169,7 +1169,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
iwl_fw_free_dump_desc(fwrt);
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
goto out;
return;
}
iwl_fw_dbg_stop_recording(fwrt, &params);
......@@ -1183,7 +1183,20 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
udelay(500);
iwl_fw_dbg_restart_recording(fwrt, &params);
}
out:
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
void iwl_fw_error_dump_wk(struct work_struct *work)
{
struct iwl_fw_runtime *fwrt =
container_of(work, struct iwl_fw_runtime, dump.wk.work);
if (fwrt->ops && fwrt->ops->dump_start &&
fwrt->ops->dump_start(fwrt->ops_ctx))
return;
iwl_fw_dbg_collect_sync(fwrt);
if (fwrt->ops && fwrt->ops->dump_end)
fwrt->ops->dump_end(fwrt->ops_ctx);
}
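
The comment above iwl_fw_dbg_collect_sync() spells out its contract: the caller is responsible for bracketing it with the dump_start/dump_end ops, which is exactly what the reworked worker now does. A minimal sketch of that calling pattern (the wrapper name is illustrative; the ops calls mirror the worker above):

/* illustrative caller: bracket the synchronous collect with dump_start/dump_end */
static void example_fw_dump(struct iwl_fw_runtime *fwrt)
{
	if (fwrt->ops && fwrt->ops->dump_start &&
	    fwrt->ops->dump_start(fwrt->ops_ctx))
		return;

	iwl_fw_dbg_collect_sync(fwrt);

	if (fwrt->ops && fwrt->ops->dump_end)
		fwrt->ops->dump_end(fwrt->ops_ctx);
}

The one caller in this series that intentionally skips the bracketing is iwl_mvm_stop_device() further down, which already holds the op-mode mutex.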
......
......@@ -367,4 +367,5 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */
......@@ -30,38 +30,20 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_data
TRACE_EVENT(iwlwifi_dev_tx_data,
TP_PROTO(const struct device *dev,
struct sk_buff *skb, u8 hdr_len),
TP_ARGS(dev, skb, hdr_len),
TRACE_EVENT(iwlwifi_dev_tx_tb,
TP_PROTO(const struct device *dev, struct sk_buff *skb,
u8 *data_src, size_t data_len),
TP_ARGS(dev, skb, data_src, data_len),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, data,
iwl_trace_data(skb) ? skb->len - hdr_len : 0)
iwl_trace_data(skb) ? data_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
if (iwl_trace_data(skb))
skb_copy_bits(skb, hdr_len,
__get_dynamic_array(data),
skb->len - hdr_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
TRACE_EVENT(iwlwifi_dev_tx_tso_chunk,
TP_PROTO(const struct device *dev,
u8 *data_src, size_t data_len),
TP_ARGS(dev, data_src, data_len),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, data, data_len)
),
TP_fast_assign(
DEV_ASSIGN;
memcpy(__get_dynamic_array(data), data_src, data_len);
memcpy(__get_dynamic_array(data), data_src, data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
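
The two older events, iwlwifi_dev_tx_data and iwlwifi_dev_tx_tso_chunk, are folded into this single iwlwifi_dev_tx_tb tracepoint, which takes the skb plus an explicit buffer pointer and length and only copies payload when iwl_trace_data(skb) allows it. The PCIe TX paths later in this patch emit it once per transfer buffer, for example:

	trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);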
......
......@@ -722,8 +722,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
{
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
struct wowlan_key_data key_data = {
.configure_keys = !d0i3,
.configure_keys = !d0i3 && !unified,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
......@@ -1636,32 +1638,10 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
}
static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
{
u32 base = mvm->error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
u32 error_id;
} err_info;
int ret;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid) {
IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
err_info.valid, err_info.error_id);
if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
};
ieee80211_report_wowlan_wakeup(vif, &wakeup,
GFP_KERNEL);
}
return ERR_PTR(-EIO);
}
/* only for tracing for now */
ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
if (ret)
......@@ -1680,7 +1660,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
fw_status = iwl_mvm_get_wakeup_status(mvm);
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
......@@ -1805,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
u32 reasons = 0;
int i, j, n_matches, ret;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
fw_status = iwl_mvm_get_wakeup_status(mvm);
if (!IS_ERR_OR_NULL(fw_status)) {
reasons = le32_to_cpu(fw_status->wakeup_reasons);
kfree(fw_status);
......@@ -1918,6 +1898,29 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
u32 error_id;
} err_info;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid &&
err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
};
ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
}
return err_info.valid;
}
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct ieee80211_vif *vif = NULL;
......@@ -1949,6 +1952,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
if (iwl_mvm_check_rt_status(mvm, vif)) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
NULL, 0);
ret = 1;
goto err;
}
if (d0i3_first) {
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
if (ret < 0) {
......
......@@ -364,7 +364,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
/*
* Set a 'fake' TID for the command queue, since we use the
* hweight() of the tid_bitmap as a refcount now. Not that
* we ever even consider the command queue as one we might
* want to reuse, but be safe nevertheless.
*/
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
BIT(IWL_MAX_TID_COUNT + 2);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
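
The comment explains the new bookkeeping: the per-queue reference count is now derived from hweight() of the queue's TID bitmap rather than a separate counter, so even the command queue gets one fake TID bit set. A hypothetical helper illustrating the idea, not part of the patch (hweight16() matches the u16 tid_bitmap declared in struct iwl_mvm_dqa_txq_info below):

/* hypothetical sketch: a queue counts as in use iff at least one TID maps to it */
static bool iwl_mvm_queue_in_use(struct iwl_mvm *mvm, int queue)
{
	return hweight16(mvm->queue_info[queue].tid_bitmap) != 0;
}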
......
......@@ -512,6 +512,7 @@ enum iwl_mvm_scan_type {
IWL_SCAN_TYPE_WILD,
IWL_SCAN_TYPE_MILD,
IWL_SCAN_TYPE_FRAGMENTED,
IWL_SCAN_TYPE_FAST_BALANCE,
};
enum iwl_mvm_sched_scan_pass_all_states {
......@@ -753,24 +754,12 @@ iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
* This is a state in which a single queue serves more than one TID, all of
* which are not aggregated. Note that the queue is only associated to one
* RA.
* @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
* This is a state of a queue that has had traffic on it, but during the
* last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
* it. In this state, when a new queue is needed to be allocated but no
* such free queue exists, an inactive queue might be freed and given to
* the new RA/TID.
* @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
* This is the state of a queue that has had traffic pass through it, but
* needs to be reconfigured for some reason, e.g. the queue needs to
* become unshared and aggregations re-enabled on.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
IWL_MVM_QUEUE_RESERVED,
IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_SHARED,
IWL_MVM_QUEUE_INACTIVE,
IWL_MVM_QUEUE_RECONFIGURING,
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
......@@ -787,6 +776,17 @@ struct iwl_mvm_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};
struct iwl_mvm_dqa_txq_info {
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
u8 txq_tid; /* The TID "owner" of this queue*/
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
/* Timestamp for inactivation per TID of this queue */
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
enum iwl_mvm_queue_status status;
};
struct iwl_mvm {
/* for logger access */
struct device *dev;
......@@ -843,17 +843,7 @@ struct iwl_mvm {
u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
struct {
u8 hw_queue_refcount;
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
u8 txq_tid; /* The TID "owner" of this queue*/
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
/* Timestamp for inactivation per TID of this queue */
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
enum iwl_mvm_queue_status status;
} queue_info[IWL_MAX_HW_QUEUES];
struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
......@@ -1883,17 +1873,6 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
mvmvif->low_latency &= ~cause;
}
/* hw scheduler queue config */
bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
u8 sta_id, u8 tid, unsigned int timeout);
int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 tid, u8 flags);
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
/* Return a bitmask with all the hw supported queues, except for the
* command queue, which can't be flushed.
*/
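
For context, this comment describes iwl_mvm_flushable_queues(), named in the next hunk header: a bitmask of every supported HW queue with the command queue masked out. A hypothetical one-liner in that spirit (the queue-count variable is made up; only IWL_MVM_DQA_CMD_QUEUE comes from this driver):

	/* hypothetical: all supported HW queues except the (non-flushable) command queue */
	u32 flushable = GENMASK(num_hw_queues - 1, 0) & ~BIT(IWL_MVM_DQA_CMD_QUEUE);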
......@@ -1905,6 +1884,11 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
/* calling this function without using dump_start/end since at this
* point we already hold the op mode mutex
*/
iwl_fw_dbg_collect_sync(&mvm->fwrt);
iwl_fw_cancel_timestamp(&mvm->fwrt);
iwl_free_fw_paging(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
......@@ -1990,8 +1974,6 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
#define MVM_TCM_PERIOD_MSEC 500
#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
#define MVM_LL_PERIOD (10 * HZ)
......
......@@ -110,6 +110,10 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
.suspend_time = 95,
.max_out_time = 44,
},
[IWL_SCAN_TYPE_FAST_BALANCE] = {
.suspend_time = 30,
.max_out_time = 37,
},
};
struct iwl_mvm_scan_params {
......@@ -235,8 +239,32 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}
struct iwl_is_dcm_with_go_iterator_data {
struct ieee80211_vif *current_vif;
bool is_dcm_with_p2p_go;
};
static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_is_dcm_with_go_iterator_data *data = _data;
struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_vif *curr_mvmvif =
iwl_mvm_vif_from_mac80211(data->current_vif);
/* exclude the given vif */
if (vif == data->current_vif)
return;
if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
}
static enum
iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
......@@ -249,9 +277,30 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
if (!global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
return IWL_SCAN_TYPE_FRAGMENTED;
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
(!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
return IWL_SCAN_TYPE_FRAGMENTED;
/* in case of DCM with GO where BSS DTIM interval < 220msec
* set all scan requests as fast-balance scan
* */
if (vif && vif->type == NL80211_IFTYPE_STATION &&
vif->bss_conf.dtim_period < 220) {
struct iwl_is_dcm_with_go_iterator_data data = {
.current_vif = vif,
.is_dcm_with_p2p_go = false,
};
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_is_dcm_with_go_iterator,
&data);
if (data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
}
if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
return IWL_SCAN_TYPE_MILD;
......@@ -260,7 +309,8 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
enum iwl_mvm_traffic_load load;
bool low_latency;
......@@ -268,12 +318,12 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
load = iwl_mvm_get_traffic_load(mvm);
low_latency = iwl_mvm_low_latency(mvm);
return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
bool p2p_device,
struct ieee80211_vif *vif,
enum nl80211_band band)
{
enum iwl_mvm_traffic_load load;
......@@ -282,7 +332,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
load = iwl_mvm_get_traffic_load_band(mvm, band);
low_latency = iwl_mvm_low_latency_band(mvm, band);
return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static int
......@@ -860,6 +910,12 @@ static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
params->scan_plans[0].iterations == 1;
}
static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
{
return (type == IWL_SCAN_TYPE_FRAGMENTED ||
type == IWL_SCAN_TYPE_FAST_BALANCE);
}
static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
......@@ -872,7 +928,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
......@@ -895,7 +951,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
params->type != IWL_SCAN_TYPE_FRAGMENTED)
!iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
return flags;
......@@ -1044,7 +1100,7 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
struct iwl_scan_config_v1 *cfg = config;
cfg->flags = cpu_to_le32(flags);
......@@ -1077,9 +1133,9 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
if (iwl_mvm_is_cdb_supported(mvm)) {
enum iwl_mvm_scan_type lb_type, hb_type;
lb_type = iwl_mvm_get_scan_type_band(mvm, false,
lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
hb_type = iwl_mvm_get_scan_type_band(mvm, false,
hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
......@@ -1093,7 +1149,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
cpu_to_le32(scan_timing[hb_type].suspend_time);
} else {
enum iwl_mvm_scan_type type =
iwl_mvm_get_scan_type(mvm, false);
iwl_mvm_get_scan_type(mvm, NULL);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[type].max_out_time);
......@@ -1130,14 +1186,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return -ENOBUFS;
if (iwl_mvm_is_cdb_supported(mvm)) {
type = iwl_mvm_get_scan_type_band(mvm, false,
type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
hb_type = iwl_mvm_get_scan_type_band(mvm, false,
hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
return 0;
} else {
type = iwl_mvm_get_scan_type(mvm, false);
type = iwl_mvm_get_scan_type(mvm, NULL);
if (type == mvm->scan_type)
return 0;
}
......@@ -1162,7 +1218,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
SCAN_CONFIG_N_CHANNELS(num_channels) |
(type == IWL_SCAN_TYPE_FRAGMENTED ?
(iwl_mvm_is_scan_fragmented(type) ?
SCAN_CONFIG_FLAG_SET_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
......@@ -1177,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
*/
if (iwl_mvm_cdb_scan_api(mvm)) {
if (iwl_mvm_is_cdb_supported(mvm))
flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
......@@ -1338,11 +1394,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_is_cdb_supported(mvm) &&
params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
iwl_mvm_is_scan_fragmented(params->hb_type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
......@@ -1380,7 +1436,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
*/
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
params->type != IWL_SCAN_TYPE_FRAGMENTED &&
!iwl_mvm_is_scan_fragmented(params->type) &&
!iwl_mvm_is_adaptive_dwell_supported(mvm) &&
!iwl_mvm_is_oce_supported(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
......@@ -1589,19 +1645,20 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
bool p2p)
struct ieee80211_vif *vif)
{
if (iwl_mvm_is_cdb_supported(mvm)) {
params->type =
iwl_mvm_get_scan_type_band(mvm, p2p,
iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_2GHZ);
params->hb_type =
iwl_mvm_get_scan_type_band(mvm, p2p,
iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_5GHZ);
} else {
params->type = iwl_mvm_get_scan_type(mvm, p2p);
params->type = iwl_mvm_get_scan_type(mvm, vif);
}
}
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
......@@ -1649,8 +1706,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_plans = &scan_plan;
params.n_scan_plans = 1;
iwl_mvm_fill_scan_type(mvm, &params,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
iwl_mvm_fill_scan_type(mvm, &params, vif);
ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
if (ret < 0)
......@@ -1745,8 +1801,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_scan_plans = req->n_scan_plans;
params.scan_plans = req->scan_plans;
iwl_mvm_fill_scan_type(mvm, &params,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
iwl_mvm_fill_scan_type(mvm, &params, vif);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
......
......@@ -312,9 +312,6 @@ enum iwl_mvm_agg_state {
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU
* @is_tid_active: has this TID sent traffic in the last
* %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
* field should be ignored.
* @tpt_meas_start: time of the throughput measurements start, is reset every HZ
* @tx_count_last: number of frames transmitted during the last second
* @tx_count: counts the number of frames transmitted since the last reset of
......@@ -332,7 +329,6 @@ struct iwl_mvm_tid_data {
u16 txq_id;
u16 ssn;
u16 tx_time;
bool is_tid_active;
unsigned long tpt_meas_start;
u32 tx_count_last;
u32 tx_count;
......@@ -572,8 +568,4 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
int ac, int ssn, unsigned int wdg_timeout,
bool force);
#endif /* __sta_h__ */
......@@ -1140,32 +1140,16 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
!mvmsta->tid_data[tid].is_tid_active)) {
/* If TXQ needs to be allocated... */
if (txq_id == IWL_MVM_INVALID_QUEUE) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
/*
* The frame is now deferred, and the worker scheduled
* will re-allocate it, so we can free it for now.
*/
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
}
/* queue should always be active in new TX path */
WARN_ON(iwl_mvm_has_new_tx_api(mvm));
/* If we are here - TXQ exists and needs to be re-activated */
spin_lock(&mvm->queue_info_lock);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
mvmsta->tid_data[tid].is_tid_active = true;
spin_unlock(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
txq_id);
/*
* The frame is now deferred, and the worker scheduled
* will re-allocate it, so we can free it for now.
*/
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
}
if (!iwl_mvm_has_new_tx_api(mvm)) {
......
......@@ -330,7 +330,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
......@@ -347,8 +347,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
......@@ -438,6 +438,9 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
return -ENOMEM;
tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
skb_frag_size(frag));
trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb_frag_address(frag),
skb_frag_size(frag));
if (tb_idx < 0)
return tb_idx;
......@@ -454,7 +457,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
int tx_cmd_len)
int tx_cmd_len,
bool pad)
{
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
......@@ -478,7 +482,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
IWL_FIRST_TB_SIZE;
tb1_len = ALIGN(len, 4);
if (pad)
tb1_len = ALIGN(len, 4);
else
tb1_len = len;
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
......@@ -486,6 +493,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
......@@ -496,15 +505,14 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb->data + hdr_len,
tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
return tfd;
out_err:
......@@ -551,7 +559,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
out_meta, hdr_len, len);
return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
hdr_len, len);
hdr_len, len, !amsdu);
}
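
The new pad argument controls whether the TB1 length is rounded up to a 4-byte boundary; iwl_pcie_gen2_build_tfd() passes !amsdu, so A-MSDU frames keep the exact length. A quick worked example of the two branches (the byte count is made up):

	/* illustrative values only: ALIGN(x, 4) rounds x up to the next multiple of 4 */
	len = 26;
	tb1_len = ALIGN(len, 4);	/* pad == true  -> tb1_len = 28 */
	tb1_len = len;			/* pad == false -> tb1_len = 26 (A-MSDU) */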
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
......
......@@ -1994,6 +1994,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
head_tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb->data + hdr_len,
head_tb_len);
iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
}
......@@ -2011,6 +2014,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb_frag_address(frag),
skb_frag_size(frag));
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
if (tb_idx < 0)
......@@ -2190,8 +2196,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
}
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
hdr_tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
......@@ -2216,8 +2222,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
size, false);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
size);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
size);
data_left -= size;
tso_build_data(skb, &tso, size);
......@@ -2398,6 +2404,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_pcie_get_tfd(trans, txq,
txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
/*
* If gso_size wasn't set, don't give the frame "amsdu treatment"
* (adding subframes, etc.).
......@@ -2421,14 +2434,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
out_meta)))
goto out_err;
}
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_pcie_get_tfd(trans, txq,
txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
}
/* building the A-MSDU might have changed this data, so memcpy it now */
......