Commit 843dc664 authored by John W. Linville

Merge branch 'for-linville' of git://github.com/kvalo/ath6kl

parents 5d6a1b06 9df2a0b7
#------------------------------------------------------------------------------
-# Copyright (c) 2004-2010 Atheros Communications Inc.
+# Copyright (c) 2004-2011 Atheros Communications Inc.
+# Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
# All rights reserved.
#
#
...
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
...
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -222,6 +223,29 @@ struct ath6kl_bmi_target_info {
__le32 type; /* target type */
} __packed;
#define ath6kl_bmi_write_hi32(ar, item, val) \
({ \
u32 addr; \
__le32 v; \
\
addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
v = cpu_to_le32(val); \
ath6kl_bmi_write(ar, addr, (u8 *) &v, sizeof(v)); \
})
#define ath6kl_bmi_read_hi32(ar, item, val) \
({ \
u32 addr, *check_type = val; \
__le32 tmp; \
int ret; \
\
(void) (check_type == val); \
addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
ret = ath6kl_bmi_read(ar, addr, (u8 *) &tmp, 4); \
*val = le32_to_cpu(tmp); \
ret; \
})
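The two helpers above wrap a BMI read or write of a single 32-bit host-interest item, handling the address lookup and endianness conversion. A minimal usage sketch (hi_board_ext_data is assumed here to be a valid host_interest field, as it is elsewhere in the driver; error handling abbreviated):

static int example_update_board_ext_addr(struct ath6kl *ar, u32 new_addr)
{
	u32 cur;
	int ret;

	/* read the current value of the host-interest item */
	ret = ath6kl_bmi_read_hi32(ar, hi_board_ext_data, &cur);
	if (ret)
		return ret;

	/* only write it back if the target has not set it already */
	if (cur == 0)
		ret = ath6kl_bmi_write_hi32(ar, hi_board_ext_data, new_addr);

	return ret;
}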
int ath6kl_bmi_init(struct ath6kl *ar);
void ath6kl_bmi_cleanup(struct ath6kl *ar);
void ath6kl_bmi_reset(struct ath6kl *ar);
...
This diff is collapsed.
/*
* Copyright (c) 2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
...
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
...
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -26,12 +27,14 @@
unsigned int debug_mask;
static unsigned int suspend_mode;
static unsigned int wow_mode;
static unsigned int uart_debug;
static unsigned int ath6kl_p2p;
static unsigned int testmode;
module_param(debug_mask, uint, 0644);
module_param(suspend_mode, uint, 0644);
module_param(wow_mode, uint, 0644);
module_param(uart_debug, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);
@@ -97,48 +100,12 @@ int ath6kl_core_init(struct ath6kl *ar)
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
ret = ath6kl_cfg80211_init(ar);
if (ret)
goto err_node_cleanup;
ret = ath6kl_debug_init(ar);
if (ret) {
wiphy_unregister(ar->wiphy);
goto err_node_cleanup;
}
for (i = 0; i < ar->vif_max; i++)
ar->avail_idx_map |= BIT(i);
rtnl_lock();
/* Add an initial station interface */
ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
INFRA_NETWORK);
rtnl_unlock();
if (!ndev) {
ath6kl_err("Failed to instantiate a network device\n");
ret = -ENOMEM;
wiphy_unregister(ar->wiphy);
goto err_debug_init;
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
__func__, ndev->name, ndev, ar);
/* setup access class priority mappings */
ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
ar->ac_stream_pri_map[WMM_AC_BE] = 1;
ar->ac_stream_pri_map[WMM_AC_VI] = 2;
ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
/* give our connected endpoints some buffers */
ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
/* allocate some buffers that handle larger AMSDU frames */
ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
@@ -154,26 +121,19 @@ int ath6kl_core_init(struct ath6kl *ar)
else
ar->suspend_mode = 0;
if (suspend_mode == WLAN_POWER_STATE_WOW &&
(wow_mode == WLAN_POWER_STATE_CUT_PWR ||
wow_mode == WLAN_POWER_STATE_DEEP_SLEEP))
ar->wow_suspend_mode = wow_mode;
else
ar->wow_suspend_mode = 0;
if (uart_debug)
ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
ar->wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
set_bit(FIRST_BOOT, &ar->flag);
-ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ath6kl_debug_init(ar);
ret = ath6kl_init_hw_start(ar);
if (ret) {
@@ -181,24 +141,47 @@ int ath6kl_core_init(struct ath6kl *ar)
goto err_rxbuf_cleanup;
}
-/*
-* Set mac address which is received in ready event
-* FIXME: Move to ath6kl_interface_add()
-*/
-memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
-return ret;
+/* give our connected endpoints some buffers */
+ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
+ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
+ret = ath6kl_cfg80211_init(ar);
+if (ret)
+goto err_rxbuf_cleanup;
+ret = ath6kl_debug_init_fs(ar);
+if (ret) {
+wiphy_unregister(ar->wiphy);
+goto err_rxbuf_cleanup;
+}
+for (i = 0; i < ar->vif_max; i++)
+ar->avail_idx_map |= BIT(i);
-err_rxbuf_cleanup:
-ath6kl_htc_flush_rx_buf(ar->htc_target);
-ath6kl_cleanup_amsdu_rxbufs(ar);
rtnl_lock();
-ath6kl_cfg80211_vif_cleanup(netdev_priv(ndev));
+/* Add an initial station interface */
+ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
+INFRA_NETWORK);
rtnl_unlock();
+if (!ndev) {
+ath6kl_err("Failed to instantiate a network device\n");
+ret = -ENOMEM;
wiphy_unregister(ar->wiphy);
-err_debug_init:
+goto err_rxbuf_cleanup;
+}
+ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
+__func__, ndev->name, ndev, ar);
+return ret;
+err_rxbuf_cleanup:
ath6kl_debug_cleanup(ar);
-err_node_cleanup:
+ath6kl_htc_flush_rx_buf(ar->htc_target);
+ath6kl_cleanup_amsdu_rxbufs(ar);
ath6kl_wmi_shutdown(ar->wmi);
clear_bit(WMI_ENABLED, &ar->flag);
ar->wmi = NULL;
@@ -245,9 +228,7 @@ struct ath6kl *ath6kl_core_create(struct device *dev)
clear_bit(SKIP_SCAN, &ar->flag);
clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
ar->listen_intvl_b = A_DEFAULT_LISTEN_INTERVAL;
ar->tx_pwr = 0;
ar->intra_bss = 1;
ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
@@ -261,6 +242,8 @@ struct ath6kl *ath6kl_core_create(struct device *dev)
spin_lock_init(&ar->sta_list[ctr].psq_lock);
skb_queue_head_init(&ar->sta_list[ctr].psq);
skb_queue_head_init(&ar->sta_list[ctr].apsdq);
ar->sta_list[ctr].mgmt_psq_len = 0;
INIT_LIST_HEAD(&ar->sta_list[ctr].mgmt_psq);
ar->sta_list[ctr].aggr_conn =
kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
if (!ar->sta_list[ctr].aggr_conn) {
...
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -59,8 +60,9 @@
#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
#define DISCON_TIMER_INTVAL 10000 /* in msec */
-#define A_DEFAULT_LISTEN_INTERVAL 1 /* beacon intervals */
-#define A_MAX_WOW_LISTEN_INTERVAL 1000
+/* Channel dwell time in fg scan */
+#define ATH6KL_FG_SCAN_INTERVAL 50 /* in ms */
/* includes also the null byte */
#define ATH6KL_FIRMWARE_MAGIC "QCA-ATH6KL"
@@ -183,6 +185,11 @@ struct ath6kl_fw_ie {
#define MBOX_YIELD_LIMIT 99
#define ATH6KL_DEFAULT_LISTEN_INTVAL 100 /* in TUs */
#define ATH6KL_DEFAULT_BMISS_TIME 1500
#define ATH6KL_MAX_WOW_LISTEN_INTL 300 /* in TUs */
#define ATH6KL_MAX_BMISS_TIME 5000
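For scale, assuming the usual 802.11 time unit of 1 TU = 1024 microseconds applies to these values:

/* 1 TU = 1024 us (802.11 time unit, assumed)         */
/* ATH6KL_DEFAULT_LISTEN_INTVAL: 100 TU ~= 102.4 ms   */
/* ATH6KL_MAX_WOW_LISTEN_INTL:   300 TU ~= 307.2 ms   */
/* the bmiss times above are read as TUs on the same assumption */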
/* configuration lags */
/*
* ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the barker premable in
@@ -226,6 +233,12 @@ struct rxtid {
u32 hold_q_sz;
struct skb_hold_q *hold_q;
struct sk_buff_head q;
/*
* FIXME: No clue what this should protect. Apparently it should
* protect some of the fields above but they are also accessed
* without taking the lock.
*/
spinlock_t lock;
};
@@ -285,6 +298,16 @@ struct ath6kl_cookie {
struct ath6kl_cookie *arc_list_next;
};
struct ath6kl_mgmt_buff {
struct list_head list;
u32 freq;
u32 wait;
u32 id;
bool no_cck;
size_t len;
u8 buf[0];
};
struct ath6kl_sta {
u16 sta_flags;
u8 mac[ETH_ALEN];
@@ -294,7 +317,12 @@ struct ath6kl_sta {
u8 auth;
u8 wpa_ie[ATH6KL_MAX_IE];
struct sk_buff_head psq;
/* protects psq, mgmt_psq, apsdq, and mgmt_psq_len fields */
spinlock_t psq_lock;
struct list_head mgmt_psq;
size_t mgmt_psq_len;
u8 apsd_info;
struct sk_buff_head apsdq;
struct aggr_info_conn *aggr_conn;
@@ -494,6 +522,8 @@ struct ath6kl_vif {
bool probe_req_report;
u16 next_chan;
u16 assoc_bss_beacon_int;
u16 listen_intvl_t;
u16 bmiss_time_t;
u8 assoc_bss_dtim_period;
struct net_device_stats net_stats;
struct target_stats target_stats;
@@ -521,6 +551,8 @@ enum ath6kl_dev_state {
enum ath6kl_state {
ATH6KL_STATE_OFF,
ATH6KL_STATE_ON,
ATH6KL_STATE_SUSPENDING,
ATH6KL_STATE_RESUMING,
ATH6KL_STATE_DEEPSLEEP,
ATH6KL_STATE_CUTPOWER,
ATH6KL_STATE_WOW,
@@ -549,9 +581,14 @@ struct ath6kl {
unsigned int vif_max;
u8 max_norm_iface;
u8 avail_idx_map;
/*
* Protects at least amsdu_rx_buffer_queue, ath6kl_alloc_cookie()
* calls, tx_pending and total_tx_data_pend.
*/
spinlock_t lock;
struct semaphore sem;
u16 listen_intvl_b;
u8 lrssi_roam_threshold;
struct ath6kl_version version;
u32 target_type;
@@ -577,7 +614,13 @@ struct ath6kl {
u8 sta_list_index;
struct ath6kl_req_key ap_mode_bkey;
struct sk_buff_head mcastpsq;
/*
* FIXME: protects access to mcastpsq but is actually useless as
* all skbe_queue_*() functions provide serialisation themselves
*/
spinlock_t mcastpsq_lock;
u8 intra_bss;
struct wmi_ap_mode_stat ap_stats;
u8 ap_country_code[3];
@@ -620,6 +663,7 @@ struct ath6kl {
u16 conf_flags;
u16 suspend_mode;
u16 wow_suspend_mode;
wait_queue_head_t event_wq;
struct ath6kl_mbox_info mbox_info;
@@ -650,12 +694,16 @@ struct ath6kl {
bool p2p;
bool wiphy_registered;
#ifdef CONFIG_ATH6KL_DEBUG
struct {
-struct circ_buf fwlog_buf;
-spinlock_t fwlog_lock;
-void *fwlog_tmp;
+struct sk_buff_head fwlog_queue;
+struct completion fwlog_completion;
+bool fwlog_open;
u32 fwlog_mask;
unsigned int dbgfs_diag_reg;
u32 diag_reg_addr_wr;
u32 diag_reg_val_wr;
@@ -727,10 +775,10 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
void aggr_module_destroy(struct aggr_info *aggr_info);
void aggr_reset_state(struct aggr_info_conn *aggr_conn);
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
-void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
enum htc_endpoint_id eid);
void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
...
This diff is collapsed.
/*
* Copyright (c) 2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -77,7 +78,8 @@ int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
size_t len);
void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
-int ath6kl_debug_init(struct ath6kl *ar);
+void ath6kl_debug_init(struct ath6kl *ar);
int ath6kl_debug_init_fs(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
#else
@@ -127,7 +129,11 @@ static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
{
}
-static inline int ath6kl_debug_init(struct ath6kl *ar)
+static inline void ath6kl_debug_init(struct ath6kl *ar)
{
}
static inline int ath6kl_debug_init_fs(struct ath6kl *ar)
{
return 0;
}
...
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
...
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -106,9 +107,9 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
-for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) {
+for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
-4 * i,
+i,
le32_to_cpu(regdump_val[i]),
le32_to_cpu(regdump_val[i + 1]),
le32_to_cpu(regdump_val[i + 2]),
@@ -134,6 +135,7 @@ static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
ath6kl_hif_dump_fw_crash(dev->ar);
ath6kl_read_fwlogs(dev->ar);
return ret;
}
...
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -197,6 +198,8 @@ struct hif_scatter_req {
u8 *virt_dma_buf;
struct hif_scatter_item scat_list[1];
u32 scat_q_depth;
};
struct ath6kl_irq_proc_registers {
@@ -220,6 +223,7 @@ struct ath6kl_irq_enable_reg {
} __packed;
struct ath6kl_device {
/* protects irq_proc_reg and irq_en_reg below */
spinlock_t lock;
struct ath6kl_irq_proc_registers irq_proc_reg;
struct ath6kl_irq_enable_reg irq_en_reg;
...
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +23,9 @@
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
/* threshold to re-enable Tx bundling for an AC*/
#define TX_RESUME_BUNDLE_THRESHOLD 1500
/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
struct htc_endpoint_credit_dist *ep_dist,
@@ -168,31 +172,29 @@ static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
struct list_head *epdist_list)
{
-struct htc_endpoint_credit_dist *cur_dist_list;
+struct htc_endpoint_credit_dist *cur_list;
-list_for_each_entry(cur_dist_list, epdist_list, list) {
-if (cur_dist_list->endpoint == ENDPOINT_0)
+list_for_each_entry(cur_list, epdist_list, list) {
+if (cur_list->endpoint == ENDPOINT_0)
continue;
-if (cur_dist_list->cred_to_dist > 0) {
-cur_dist_list->credits +=
-cur_dist_list->cred_to_dist;
-cur_dist_list->cred_to_dist = 0;
-if (cur_dist_list->credits >
-cur_dist_list->cred_assngd)
+if (cur_list->cred_to_dist > 0) {
+cur_list->credits += cur_list->cred_to_dist;
+cur_list->cred_to_dist = 0;
+if (cur_list->credits > cur_list->cred_assngd)
ath6kl_credit_reduce(cred_info,
-cur_dist_list,
-cur_dist_list->cred_assngd);
-if (cur_dist_list->credits >
-cur_dist_list->cred_norm)
-ath6kl_credit_reduce(cred_info, cur_dist_list,
-cur_dist_list->cred_norm);
+cur_list,
+cur_list->cred_assngd);
+if (cur_list->credits > cur_list->cred_norm)
+ath6kl_credit_reduce(cred_info, cur_list,
+cur_list->cred_norm);
-if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
-if (cur_dist_list->txq_depth == 0)
+if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
+if (cur_list->txq_depth == 0)
ath6kl_credit_reduce(cred_info,
-cur_dist_list, 0);
+cur_list, 0);
}
}
}
@@ -670,6 +672,7 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
struct htc_packet *packet;
int i, len, rem_scat, cred_pad;
int status = 0;
u8 flags;
rem_scat = target->max_tx_bndl_sz;
@@ -696,8 +699,8 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
scat_req->scat_list[i].packet = packet;
/* prepare packet and flag message as part of a send bundle */
-ath6kl_htc_tx_prep_pkt(packet,
-packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
+flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
+ath6kl_htc_tx_prep_pkt(packet, flags,
cred_pad, packet->info.tx.seqno);
/* Make sure the buffer is 4-byte aligned */
ath6kl_htc_tx_buf_align(&packet->buf,
@@ -744,6 +747,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
struct hif_scatter_req *scat_req = NULL;
int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
int status;
u32 txb_mask;
u8 ac = WMM_NUM_AC;
if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
status = 0;
@@ -763,6 +772,31 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
break;
}
if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
if (WMM_AC_BE == ac)
/*
* BE, BK have priorities and bit
* positions reversed
*/
txb_mask = (1 << WMM_AC_BK);
else
/*
* any AC with priority lower than
* itself
*/
txb_mask = ((1 << ac) - 1);
/*
* when the scatter request resources drop below a
* certain threshold, disable Tx bundling for all
* AC's with priority lower than the current requesting
* AC. Otherwise re-enable Tx bundling for them
*/
if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
target->tx_bndl_mask &= ~txb_mask;
else
target->tx_bndl_mask |= txb_mask;
}
ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
n_scat);
@@ -806,6 +840,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
struct htc_packet *packet;
int bundle_sent;
int n_pkts_bundle;
u8 ac = WMM_NUM_AC;
spin_lock_bh(&target->tx_lock);
@@ -823,6 +858,10 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
*/
INIT_LIST_HEAD(&txq);
if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
if (list_empty(&endpoint->txq))
@@ -840,16 +879,19 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
while (true) {
/* try to send a bundle on each pass */
-if ((target->tx_bndl_enable) &&
+if ((target->tx_bndl_mask) &&
(get_queue_depth(&txq) >=
HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
int temp1 = 0, temp2 = 0;
/* check if bundling is enabled for an AC */
if (target->tx_bndl_mask & (1 << ac)) {
ath6kl_htc_tx_bundle(endpoint, &txq,
&temp1, &temp2);
bundle_sent += temp1;
n_pkts_bundle += temp2;
}
}
if (list_empty(&txq))
break;
@@ -867,6 +909,26 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
endpoint->ep_st.tx_bundles += bundle_sent;
endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
/*
* if an AC has bundling disabled and no tx bundling
* has occured continously for a certain number of TX,
* enable tx bundling for this AC
*/
if (!bundle_sent) {
if (!(target->tx_bndl_mask & (1 << ac)) &&
(ac < WMM_NUM_AC)) {
if (++target->ac_tx_count[ac] >=
TX_RESUME_BUNDLE_THRESHOLD) {
target->ac_tx_count[ac] = 0;
target->tx_bndl_mask |= (1 << ac);
}
}
} else {
/* tx bundling will reset the counter */
if (ac < WMM_NUM_AC)
target->ac_tx_count[ac] = 0;
}
}
endpoint->tx_proc_cnt = 0;
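To make the per-AC bundle mask arithmetic in the two hunks above concrete, here is a small standalone sketch. The numeric AC values follow the driver's usual WMM ordering (BE=0, BK=1, VI=2, VO=3), which is an assumption here; the point is which bits of tx_bndl_mask a requesting AC toggles for the lower-priority ACs.

#include <stdio.h>

#define WMM_AC_BE 0
#define WMM_AC_BK 1
#define WMM_AC_VI 2
#define WMM_AC_VO 3
#define WMM_NUM_AC 4

int main(void)
{
	unsigned int ac, txb_mask;

	for (ac = 0; ac < WMM_NUM_AC; ac++) {
		if (ac == WMM_AC_BK)
			continue; /* BK never throttles a lower-priority AC */
		if (ac == WMM_AC_BE)
			txb_mask = 1 << WMM_AC_BK; /* BE/BK priority vs. bit order is reversed */
		else
			txb_mask = (1 << ac) - 1;  /* every AC with lower priority */
		printf("requesting ac=%u -> mask 0x%x\n", ac, txb_mask);
	}
	/* prints: ac=0 -> 0x2, ac=2 -> 0x3, ac=3 -> 0x7 */
	return 0;
}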
@@ -1609,8 +1671,8 @@ static int htc_parse_trailer(struct htc_target *target,
}
lk_ahd = (struct htc_lookahead_report *) record_buf;
-if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
-&& next_lk_ahds) {
+if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
+next_lk_ahds) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
@@ -2309,7 +2371,21 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
"htc rx flush pkt 0x%p len %d ep %d\n",
packet, packet->buf_len,
packet->endpoint);
/*
* packets in rx_bufq of endpoint 0 have originally
* been queued from target->free_ctrl_rxbuf where
* packet and packet->buf_start are allocated
* separately using kmalloc(). For other endpoint
* rx_bufq, it is allocated as skb where packet is
* skb->head. Take care of this difference while freeing
* the memory.
*/
if (packet->endpoint == ENDPOINT_0) {
kfree(packet->buf_start);
kfree(packet);
} else {
dev_kfree_skb(packet->pkt_cntxt);
}
spin_lock_bh(&target->rx_lock);
}
spin_unlock_bh(&target->rx_lock);
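For context, a sketch of the allocation side that the comment in the flush path refers to: control-endpoint rx packets are two separate kmalloc() allocations, which is why they are released with kfree() rather than dev_kfree_skb(). The helper name and size parameter below are illustrative assumptions, not the exact driver code.

static struct htc_packet *example_alloc_ctrl_rxbuf(size_t buf_size)
{
	struct htc_packet *pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	u8 *buf = kmalloc(buf_size, GFP_KERNEL);

	if (!pkt || !buf) {
		kfree(buf);
		kfree(pkt);
		return NULL;
	}
	pkt->buf_start = buf;		/* later freed with kfree(packet->buf_start) */
	pkt->endpoint = ENDPOINT_0;	/* the struct itself with kfree(packet) */
	return pkt;
}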
@@ -2328,6 +2404,7 @@ int ath6kl_htc_conn_service(struct htc_target *target,
enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
unsigned int max_msg_sz = 0;
int status = 0;
u16 msg_id;
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc connect service target 0x%p service id 0x%x\n",
@@ -2371,9 +2448,10 @@ int ath6kl_htc_conn_service(struct htc_target *target,
}
resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
msg_id = le16_to_cpu(resp_msg->msg_id);
-if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
-|| (rx_pkt->act_len < sizeof(*resp_msg))) {
+if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
+(rx_pkt->act_len < sizeof(*resp_msg))) {
status = -ENOMEM;
goto fail_tx;
}
@@ -2420,6 +2498,15 @@ int ath6kl_htc_conn_service(struct htc_target *target,
endpoint->cred_dist.endpoint = assigned_ep;
endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
switch (endpoint->svc_id) {
case WMI_DATA_BK_SVC:
endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
break;
default:
endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
break;
}
if (conn_req->max_rxmsg_sz) {
/*
* Override cred_per_msg calculation, this optimizes
@@ -2517,7 +2604,8 @@ static void htc_setup_msg_bndl(struct htc_target *target)
target->max_rx_bndl_sz, target->max_tx_bndl_sz);
if (target->max_tx_bndl_sz)
-target->tx_bndl_enable = true;
+/* tx_bndl_mask is enabled per AC, each has 1 bit */
+target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
if (target->max_rx_bndl_sz)
target->rx_bndl_enable = true;
@@ -2532,7 +2620,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
* padding will spill into the next credit buffer
* which is fatal.
*/
-target->tx_bndl_enable = false;
+target->tx_bndl_mask = 0;
}
}
...
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -87,6 +88,8 @@
#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
#define WMI_MAX_SERVICES 5
#define WMM_NUM_AC 4
/* reserved and used to flush ALL packets */
#define HTC_TX_PACKET_TAG_ALL 0
#define HTC_SERVICE_TX_PACKET_TAG 1
@@ -498,6 +501,7 @@ struct htc_endpoint {
u8 seqno;
u32 conn_flags;
struct htc_endpoint_stats ep_st;
u16 tx_drop_packet_threshold;
};
struct htc_control_buffer {
@@ -519,9 +523,16 @@ struct htc_target {
struct ath6kl_htc_credit_info *credit_info;
int tgt_creds;
unsigned int tgt_cred_sz;
/* protects free_ctrl_txbuf and free_ctrl_rxbuf */
spinlock_t htc_lock;
/* FIXME: does this protext rx_bufq and endpoint structures or what? */
spinlock_t rx_lock;
/* protects endpoint->txq */
spinlock_t tx_lock;
struct ath6kl_device *dev;
u32 htc_flags;
u32 rx_st_flags;
@@ -531,7 +542,7 @@ struct htc_target {
/* max messages per bundle for HTC */
int msg_per_bndl_max;
-bool tx_bndl_enable;
+u32 tx_bndl_mask;
int rx_bndl_enable;
int max_rx_bndl_sz;
int max_tx_bndl_sz;
@@ -543,6 +554,9 @@ struct htc_target {
int max_xfer_szper_scatreq;
int chk_irq_status_cnt;
/* counts the number of Tx without bundling continously per AC */
u32 ac_tx_count[WMM_NUM_AC];
};
void *ath6kl_htc_create(struct ath6kl *ar);
...
This diff is collapsed.
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -80,11 +81,21 @@ static void ath6kl_add_new_sta(struct ath6kl_vif *vif, u8 *mac, u16 aid,
static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
{
struct ath6kl_sta *sta = &ar->sta_list[i];
struct ath6kl_mgmt_buff *entry, *tmp;
/* empty the queued pkts in the PS queue if any */
spin_lock_bh(&sta->psq_lock);
skb_queue_purge(&sta->psq);
skb_queue_purge(&sta->apsdq);
if (sta->mgmt_psq_len != 0) {
list_for_each_entry_safe(entry, tmp, &sta->mgmt_psq, list) {
kfree(entry);
}
INIT_LIST_HEAD(&sta->mgmt_psq);
sta->mgmt_psq_len = 0;
}
spin_unlock_bh(&sta->psq_lock);
memset(&ar->ap_stats.sta[sta->aid - 1], 0,
@@ -588,11 +599,9 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
memcpy(vif->bssid, bssid, sizeof(vif->bssid));
vif->bss_ch = channel;
-if ((vif->nw_type == INFRA_NETWORK)) {
-ar->listen_intvl_b = listen_int;
+if ((vif->nw_type == INFRA_NETWORK))
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
-0, ar->listen_intvl_b);
-}
+vif->listen_intvl_t, 0);
netif_wake_queue(vif->ndev);
@@ -810,6 +819,7 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
struct sk_buff *skb;
bool psq_empty = false;
struct ath6kl *ar = vif->ar;
struct ath6kl_mgmt_buff *mgmt_buf;
conn = ath6kl_find_sta_by_aid(ar, aid);
@@ -820,7 +830,7 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
* becomes empty update the PVB for this station.
*/
spin_lock_bh(&conn->psq_lock);
-psq_empty = skb_queue_empty(&conn->psq);
+psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
spin_unlock_bh(&conn->psq_lock);
if (psq_empty)
@@ -828,15 +838,31 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
return;
spin_lock_bh(&conn->psq_lock);
if (conn->mgmt_psq_len > 0) {
mgmt_buf = list_first_entry(&conn->mgmt_psq,
struct ath6kl_mgmt_buff, list);
list_del(&mgmt_buf->list);
conn->mgmt_psq_len--;
spin_unlock_bh(&conn->psq_lock);
conn->sta_flags |= STA_PS_POLLED;
ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx,
mgmt_buf->id, mgmt_buf->freq,
mgmt_buf->wait, mgmt_buf->buf,
mgmt_buf->len, mgmt_buf->no_cck);
conn->sta_flags &= ~STA_PS_POLLED;
kfree(mgmt_buf);
} else {
skb = skb_dequeue(&conn->psq);
spin_unlock_bh(&conn->psq_lock);
conn->sta_flags |= STA_PS_POLLED;
ath6kl_data_tx(skb, vif->ndev);
conn->sta_flags &= ~STA_PS_POLLED;
}
spin_lock_bh(&conn->psq_lock);
-psq_empty = skb_queue_empty(&conn->psq);
+psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
spin_unlock_bh(&conn->psq_lock);
if (psq_empty)
@@ -944,8 +970,8 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
set_bit(CONNECT_PEND, &vif->flags);
if (((reason == ASSOC_FAILED) &&
(prot_reason_status == 0x11)) ||
-((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
-&& (vif->reconnect_flag == 1))) {
+((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) &&
+(vif->reconnect_flag == 1))) {
set_bit(CONNECTED, &vif->flags);
return;
}
@@ -1184,5 +1210,7 @@ void init_netdev(struct net_device *dev)
sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
+ WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;
dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
return;
}
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -31,6 +32,7 @@
struct ath6kl_sdio {
struct sdio_func *func;
/* protects access to bus_req_freeq */
spinlock_t lock;
/* free list */
@@ -49,16 +51,20 @@ struct ath6kl_sdio {
/* scatter request list head */
struct list_head scat_req;
-/* Avoids disabling irq while the interrupts being handled */
-struct mutex mtx_irq;
+atomic_t irq_handling;
+wait_queue_head_t irq_wq;
/* protects access to scat_req */
spinlock_t scat_lock;
bool scatter_enabled;
bool is_disabled;
const struct sdio_device_id *id;
struct work_struct wr_async_work;
struct list_head wr_asyncq;
/* protects access to wr_asyncq */
spinlock_t wr_async_lock;
};
@@ -404,7 +410,10 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
return -ENOMEM;
mutex_lock(&ar_sdio->dma_buffer_mutex);
tbuf = ar_sdio->dma_buffer;
if (request & HIF_WRITE)
memcpy(tbuf, buf, len);
bounced = true;
} else
tbuf = buf;
@@ -462,7 +471,7 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
ar_sdio = sdio_get_drvdata(func);
-mutex_lock(&ar_sdio->mtx_irq);
+atomic_set(&ar_sdio->irq_handling, 1);
/*
* Release the host during interrups so we can pick it back up when
* we process commands.
@@ -471,7 +480,10 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
sdio_claim_host(ar_sdio->func);
mutex_unlock(&ar_sdio->mtx_irq);
atomic_set(&ar_sdio->irq_handling, 0);
wake_up(&ar_sdio->irq_wq);
WARN_ON(status && status != -ECANCELED);
}
@@ -572,6 +584,13 @@ static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
sdio_release_host(ar_sdio->func);
}
static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
return !atomic_read(&ar_sdio->irq_handling);
}
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
@@ -579,14 +598,21 @@ static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
sdio_claim_host(ar_sdio->func);
-mutex_lock(&ar_sdio->mtx_irq);
+if (atomic_read(&ar_sdio->irq_handling)) {
sdio_release_host(ar_sdio->func);
ret = wait_event_interruptible(ar_sdio->irq_wq,
ath6kl_sdio_is_on_irq(ar));
if (ret)
return;
sdio_claim_host(ar_sdio->func);
}
ret = sdio_release_irq(ar_sdio->func);
if (ret)
ath6kl_err("Failed to release sdio irq: %d\n", ret);
mutex_unlock(&ar_sdio->mtx_irq);
sdio_release_host(ar_sdio->func);
}
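The reworked irq_disable path above replaces the old mtx_irq mutex with an atomic flag plus a waitqueue, so the caller simply waits until any in-flight interrupt handler has finished. The general pattern, reduced to a sketch (identifiers here are illustrative, not driver symbols):

static atomic_t handler_busy;
static wait_queue_head_t handler_wq;

static void example_handler(void)
{
	atomic_set(&handler_busy, 1);
	/* ... service the interrupt, possibly sleeping ... */
	atomic_set(&handler_busy, 0);
	wake_up(&handler_wq);
}

static int example_wait_until_idle(void)
{
	/* returns -ERESTARTSYS if interrupted, 0 once the handler is idle */
	return wait_event_interruptible(handler_wq, !atomic_read(&handler_busy));
}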
@@ -601,6 +627,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
node = list_first_entry(&ar_sdio->scat_req,
struct hif_scatter_req, list);
list_del(&node->list);
node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
}
spin_unlock_bh(&ar_sdio->scat_lock);
@@ -813,6 +841,7 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
mmc_pm_flag_t flags;
bool try_deepsleep = false;
int ret;
if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
@@ -839,14 +868,22 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
goto cut_pwr;
ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
-if (ret)
+if (ret && ret != -ENOTCONN)
ath6kl_err("wow suspend failed: %d\n", ret);
if (ret &&
(!ar->wow_suspend_mode ||
ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
try_deepsleep = true;
else if (ret &&
ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
goto cut_pwr;
if (!ret)
return 0;
}
if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
-!ar->suspend_mode) {
+!ar->suspend_mode || try_deepsleep) {
flags = sdio_get_host_pm_caps(func);
if (!(flags & MMC_PM_KEEP_POWER))
@@ -901,8 +938,15 @@ static int ath6kl_sdio_resume(struct ath6kl *ar)
case ATH6KL_STATE_WOW:
break;
case ATH6KL_STATE_SCHED_SCAN:
break;
case ATH6KL_STATE_SUSPENDING:
break;
case ATH6KL_STATE_RESUMING:
break;
}
ath6kl_cfg80211_resume(ar);
@@ -1285,7 +1329,6 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
spin_lock_init(&ar_sdio->scat_lock);
spin_lock_init(&ar_sdio->wr_async_lock);
mutex_init(&ar_sdio->dma_buffer_mutex);
mutex_init(&ar_sdio->mtx_irq);
INIT_LIST_HEAD(&ar_sdio->scat_req);
INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
@@ -1293,6 +1336,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
init_waitqueue_head(&ar_sdio->irq_wq);
for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
...
/*
* Copyright (c) 2004-2010 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,6 +20,7 @@
#define AR6003_BOARD_DATA_SZ 1024
#define AR6003_BOARD_EXT_DATA_SZ 768
#define AR6003_BOARD_EXT_DATA_SZ_V2 1024
#define AR6004_BOARD_DATA_SZ 6144
#define AR6004_BOARD_EXT_DATA_SZ 0
...
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
...
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
...
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -284,6 +285,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
int status = 0;
struct ath6kl_cookie *cookie = NULL;
if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW))
return -EACCES;
spin_lock_bh(&ar->lock);
ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
@@ -359,6 +363,11 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
return 0;
}
if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) {
dev_kfree_skb(skb);
return 0;
}
if (!test_bit(WMI_READY, &ar->flag))
goto fail_tx;
@@ -593,7 +602,8 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
*/
if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
ar->hiac_stream_active_pri &&
-ar->cookie_count <= MAX_HI_COOKIE_NUM)
+ar->cookie_count <=
+target->endpoint[endpoint].tx_drop_packet_threshold)
/*
* Give preference to the highest priority stream by
* dropping the packets which overflowed.
@@ -1296,7 +1306,15 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
skb_pull(skb, HTC_HDR_LENGTH);
ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
skb->data, skb->len);
if (ept == ar->ctrl_ep) {
if (test_bit(WMI_ENABLED, &ar->flag)) {
ath6kl_check_wow_status(ar);
ath6kl_wmi_control_rx(ar->wmi, skb);
return;
}
if_idx =
wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
} else {
@@ -1321,10 +1339,6 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
spin_unlock_bh(&vif->if_lock);
ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
skb->data, skb->len);
skb->dev = vif->ndev;
if (!test_bit(WMI_ENABLED, &ar->flag)) {
@@ -1336,11 +1350,6 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
ath6kl_check_wow_status(ar);
if (ept == ar->ctrl_ep) {
ath6kl_wmi_control_rx(ar->wmi, skb);
return;
}
min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
sizeof(struct ath6kl_llc_snap_hdr);
@@ -1416,8 +1425,33 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
if (!(conn->sta_flags & STA_PS_SLEEP)) {
struct sk_buff *skbuff = NULL;
bool is_apsdq_empty;
struct ath6kl_mgmt_buff *mgmt;
u8 idx;
spin_lock_bh(&conn->psq_lock);
while (conn->mgmt_psq_len > 0) {
mgmt = list_first_entry(
&conn->mgmt_psq,
struct ath6kl_mgmt_buff,
list);
list_del(&mgmt->list);
conn->mgmt_psq_len--;
spin_unlock_bh(&conn->psq_lock);
idx = vif->fw_vif_idx;
ath6kl_wmi_send_mgmt_cmd(ar->wmi,
idx,
mgmt->id,
mgmt->freq,
mgmt->wait,
mgmt->buf,
mgmt->len,
mgmt->no_cck);
kfree(mgmt);
spin_lock_bh(&conn->psq_lock);
}
conn->mgmt_psq_len = 0;
while ((skbuff = skb_dequeue(&conn->psq))) {
spin_unlock_bh(&conn->psq_lock);
ath6kl_data_tx(skbuff, vif->ndev);
...
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
...
This diff is collapsed.
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -110,6 +111,8 @@ struct wmi {
u8 fat_pipe_exist;
struct ath6kl *parent_dev;
u8 pwr_mode;
/* protects fat_pipe_exist and stream_exist_for_ac */
spinlock_t lock;
enum htc_endpoint_id ep_id;
struct sq_threshold_params
@@ -997,6 +1000,12 @@ struct wmi_listen_int_cmd {
__le16 num_beacons;
} __packed;
/* WMI_SET_BMISS_TIME_CMDID */
struct wmi_bmiss_time_cmd {
__le16 bmiss_time;
__le16 num_beacons;
};
/* WMI_SET_POWER_MODE_CMDID */
enum wmi_power_mode {
REC_POWER = 0x01,
@@ -1014,7 +1023,7 @@ struct wmi_power_mode_cmd {
*/
enum power_save_fail_event_policy {
SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
-IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN = 2,
+IGNORE_PS_FAIL_DURING_SCAN = 2,
};
struct wmi_power_params_cmd {
@@ -1212,7 +1221,7 @@ struct wmi_snr_threshold_params_cmd {
enum wmi_preamble_policy {
WMI_IGNORE_BARKER_IN_ERP = 0,
-WMI_DONOT_IGNORE_BARKER_IN_ERP
+WMI_FOLLOW_BARKER_IN_ERP,
};
struct wmi_set_lpreamble_cmd {
@@ -2128,6 +2137,10 @@ struct wmi_rx_frame_format_cmd {
u8 reserved[1];
} __packed;
struct wmi_ap_hidden_ssid_cmd {
u8 hidden_ssid;
} __packed;
/* AP mode events */
struct wmi_ap_set_apsd_cmd {
u8 enable;
@@ -2413,6 +2426,8 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
u16 listen_interval,
u16 listen_beacons);
int ath6kl_wmi_bmisstime_cmd(struct wmi *wmi, u8 if_idx,
u16 bmiss_time, u16 num_beacons);
int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy,
@@ -2484,6 +2499,7 @@ u8 ath6kl_wmi_get_traffic_class(u8 user_priority);
u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri);
/* AP mode */
int ath6kl_wmi_ap_hidden_ssid(struct wmi *wmi, u8 if_idx, bool enable);
int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
struct wmi_connect_cmd *p);
@@ -2505,9 +2521,6 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
u32 dur);
int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
u32 wait, const u8 *data, u16 data_len);
int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
u32 wait, const u8 *data, u16 data_len,
u32 no_cck);
...