Commit 4f9b2a7d authored by John W. Linville
parents f3f66b69 7a4a77b7
......@@ -97,7 +97,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
int ret;
u16 val;
u32 cksum, offset;
u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
/*
* Read values from EEPROM and store them in the capability structure
......@@ -116,12 +116,38 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
* Validate the checksum of the EEPROM data. There are some
* devices with invalid EEPROMs.
*/
for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
if (val) {
eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
/*
* Fail safe check to prevent stupid loops due
* to busted EEPROMs. XXX: This value is likely too
* big still, waiting on a better value.
*/
if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
"%d (0x%04x) max expected: %d (0x%04x)\n",
eep_max, eep_max,
3 * AR5K_EEPROM_INFO_MAX,
3 * AR5K_EEPROM_INFO_MAX);
return -EIO;
}
}
for (cksum = 0, offset = 0; offset < eep_max; offset++) {
AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
cksum ^= val;
}
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
"checksum: 0x%04x eep_max: 0x%04x (%s)\n",
cksum, eep_max,
eep_max == AR5K_EEPROM_INFO_MAX ?
"default size" : "custom size");
return -EIO;
}
......
......@@ -37,6 +37,14 @@
#define AR5K_EEPROM_RFKILL_POLARITY_S 1
#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
/* FLASH(EEPROM) Defines for AR531X chips */
#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
......
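The two hunks above let ath5k honor a per-device EEPROM size instead of always checksumming AR5K_EEPROM_INFO_MAX words: the upper size word supplies the high bits, the lower word the low 16 bits, and the header base is subtracted. Below is a minimal, standalone sketch of that decoding, purely for illustration; decode_eep_max() is a hypothetical helper, not a function from the driver.

#include <stdint.h>

#define AR5K_EEPROM_INFO_BASE		0x00c0
#define AR5K_EEPROM_INFO_MAX		(0x400 - AR5K_EEPROM_INFO_BASE)
#define AR5K_EEPROM_SIZE_UPPER_MASK	0xfff0
#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT	12

/* Mirrors the arithmetic in ath5k_eeprom_init_header(); returns the
 * number of 16-bit words to checksum, or 0 for sizes the patch would
 * reject as bogus. A zero upper word means no custom size is stored. */
static uint32_t decode_eep_max(uint16_t upper, uint16_t lower)
{
	uint32_t eep_max;

	if (!upper)
		return AR5K_EEPROM_INFO_MAX;

	eep_max = (upper & AR5K_EEPROM_SIZE_UPPER_MASK) <<
		  AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
	eep_max = (eep_max | lower) - AR5K_EEPROM_INFO_BASE;

	if (eep_max > 3 * AR5K_EEPROM_INFO_MAX)	/* fail-safe cap */
		return 0;

	return eep_max;
}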
......@@ -25,7 +25,7 @@ config ATH9K
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
depends on ATH9K
depends on ATH9K && DEBUG_FS
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
......
......@@ -33,11 +33,11 @@ struct ath_node;
/* Macro to expand scalars to 64-bit objects */
#define ito64(x) (sizeof(x) == 8) ? \
#define ito64(x) (sizeof(x) == 1) ? \
(((unsigned long long int)(x)) & (0xff)) : \
(sizeof(x) == 16) ? \
(sizeof(x) == 2) ? \
(((unsigned long long int)(x)) & 0xffff) : \
((sizeof(x) == 32) ? \
((sizeof(x) == 4) ? \
(((unsigned long long int)(x)) & 0xffffffff) : \
(unsigned long long int)(x))
......
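The ito64() change above is needed because sizeof() reports bytes, not bits: with the old 8/16/32 comparisons a 64-bit value (sizeof == 8) matched the first branch and was masked down to a single byte. A small user-space sketch of the corrected macro, for illustration only:

#include <stdio.h>

#define ito64(x) (sizeof(x) == 1) ? \
	(((unsigned long long int)(x)) & (0xff)) : \
	(sizeof(x) == 2) ? \
	(((unsigned long long int)(x)) & 0xffff) : \
	((sizeof(x) == 4) ? \
	(((unsigned long long int)(x)) & 0xffffffff) : \
	(unsigned long long int)(x))

int main(void)
{
	unsigned long long v = 0x1122334455667788ULL;
	unsigned short s = 0xabcd;

	/* Prints 1122334455667788 and abcd; with the old byte counts the
	 * 64-bit value would have been masked down to 88. */
	printf("%llx %llx\n", ito64(v), ito64(s));
	return 0;
}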
......@@ -1961,7 +1961,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
struct ieee80211_tx_info *info;
struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le32_to_cpu(tx_resp->u.status);
int tid = MAX_TID_COUNT;
int tid = MAX_TID_COUNT - 1;
int sta_id;
int freed;
u8 *qc = NULL;
......
......@@ -3196,8 +3196,10 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
/* Clear unsupported feature flags */
*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
if (mwl8k_fw_lock(hw))
if (mwl8k_fw_lock(hw)) {
kfree(cmd);
return;
}
if (priv->sniffer_enabled) {
mwl8k_cmd_enable_sniffer(hw, 0);
......
......@@ -339,7 +339,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
......
......@@ -118,6 +118,12 @@
#define ALIGN_SIZE(__skb, __header) \
( ((unsigned long)((__skb)->data + (__header))) & 3 )
/*
* Constants for extra TX headroom for alignment purposes.
*/
#define RT2X00_ALIGN_SIZE 4 /* Only whole frame needs alignment */
#define RT2X00_L2PAD_SIZE 8 /* Both header & payload need alignment */
/*
* Standard timing and size defines.
* These values should follow the ieee80211 specifications.
......
......@@ -688,7 +688,17 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Initialize extra TX headroom required.
*/
rt2x00dev->hw->extra_tx_headroom = rt2x00dev->ops->extra_tx_headroom;
rt2x00dev->hw->extra_tx_headroom =
max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
rt2x00dev->ops->extra_tx_headroom);
/*
* Take TX headroom required for alignment into account.
*/
if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
else if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
/*
* Register HW.
......
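A condensed illustration of the headroom arithmetic introduced above: mac80211 is asked to reserve at least its own TX-status headroom, or the driver's descriptor size if that is larger, plus whatever alignment slack the device needs. The driver's own descriptor size stays in ops->extra_tx_headroom, which is why the queue-mapping hunk that follows pushes that value, not hw->extra_tx_headroom, before DMA mapping. The STATUS_HEADROOM value below is a placeholder standing in for mac80211's IEEE80211_TX_STATUS_HEADROOM; the other constants are copied from the hunks above.

#define STATUS_HEADROOM		13	/* placeholder for IEEE80211_TX_STATUS_HEADROOM */
#define RT2X00_ALIGN_SIZE	4	/* only whole frame needs alignment */
#define RT2X00_L2PAD_SIZE	8	/* both header & payload need alignment */

/* Illustrative only: what hw->extra_tx_headroom ends up being for a
 * driver with the given descriptor size and alignment requirements. */
static unsigned int example_headroom(unsigned int ops_headroom,
				     int l2pad, int dma)
{
	unsigned int headroom =
		ops_headroom > STATUS_HEADROOM ? ops_headroom : STATUS_HEADROOM;

	if (l2pad)
		headroom += RT2X00_L2PAD_SIZE;
	else if (dma)
		headroom += RT2X00_ALIGN_SIZE;

	return headroom;
}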
......@@ -104,7 +104,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* is also mapped to the DMA so it can be used for transferring
* additional descriptor information to the hardware.
*/
skb_push(skb, rt2x00dev->hw->extra_tx_headroom);
skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->skb_dma =
dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
......@@ -112,7 +112,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
/*
* Restore data pointer to original location again.
*/
skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);
skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
......@@ -134,7 +134,7 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* by the driver, but it was actually mapped to DMA.
*/
dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
skb->len + rt2x00dev->hw->extra_tx_headroom,
skb->len + rt2x00dev->ops->extra_tx_headroom,
DMA_TO_DEVICE);
skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
}
......
......@@ -987,12 +987,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
/* changed_flags is always populated but this driver
* doesn't support all FIF flags so its possible we don't
* need to do anything */
if (!changed_flags)
return;
/*
* If multicast parameter (as returned by zd_op_prepare_multicast)
* has changed, no bit in changed_flags is set. To handle this
* situation, we do not return when changed_flags is 0; returning early
* would break IPv6, which uses multicast for link-layer address
* resolution.
*/
if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
zd_mc_add_all(&hash);
......
......@@ -15,12 +15,14 @@
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "debugfs_netdev.h"
#include "mesh.h"
#include "led.h"
#include "driver-ops.h"
#include "wme.h"
/**
* DOC: Interface list locking
......@@ -658,6 +660,12 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
WARN_ON(flushed);
}
static u16 ieee80211_netdev_select_queue(struct net_device *dev,
struct sk_buff *skb)
{
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
}
static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
......@@ -666,8 +674,34 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_set_multicast_list = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_netdev_select_queue,
};
static u16 ieee80211_monitor_select_queue(struct net_device *dev,
struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
struct ieee80211_radiotap_header *rtap = (void *)skb->data;
if (local->hw.queues < 4)
return 0;
if (skb->len < 4 ||
skb->len < rtap->it_len + 2 /* frame control */)
return 0; /* doesn't matter, frame will be dropped */
hdr = (void *)((u8 *)skb->data + rtap->it_len);
if (!ieee80211_is_data(hdr->frame_control)) {
skb->priority = 7;
return ieee802_1d_to_ac[skb->priority];
}
return ieee80211_downgrade_queue(local, skb);
}
static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
......@@ -676,6 +710,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_set_multicast_list = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_select_queue = ieee80211_monitor_select_queue,
};
static void ieee80211_if_setup(struct net_device *dev)
......@@ -782,8 +817,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ASSERT_RTNL();
ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size,
name, ieee80211_if_setup);
ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size,
name, ieee80211_if_setup, local->hw.queues);
if (!ndev)
return -ENOMEM;
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
......
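The monitor-mode hook added above locates the 802.11 header by skipping the radiotap header that precedes injected frames. A self-contained sketch of that offset handling is shown below; frame_after_radiotap() is a hypothetical helper, and le16toh() stands in for the kernel's le16_to_cpu(), since radiotap's it_len field is little-endian.

#include <endian.h>
#include <stddef.h>
#include <stdint.h>

/* Abbreviated radiotap header layout. */
struct radiotap_hdr {
	uint8_t  it_version;
	uint8_t  it_pad;
	uint16_t it_len;	/* total radiotap header length, little-endian */
	uint32_t it_present;
} __attribute__((packed));

/* Return a pointer to the 802.11 header following the radiotap header,
 * or NULL if the buffer is too short to hold even the frame control
 * field -- the same "doesn't matter, frame will be dropped" case. */
static const uint8_t *frame_after_radiotap(const uint8_t *buf, size_t len)
{
	const struct radiotap_hdr *rtap = (const void *)buf;
	uint16_t rtap_len;

	if (len < sizeof(*rtap))
		return NULL;

	rtap_len = le16toh(rtap->it_len);
	if (len < (size_t)rtap_len + 2 /* frame control */)
		return NULL;

	return buf + rtap_len;
}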
......@@ -1665,7 +1665,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
ieee80211_select_queue(local, fwd_skb);
skb_set_queue_mapping(skb,
ieee80211_select_queue(rx->sdata, fwd_skb));
ieee80211_set_qos_hdr(local, skb);
if (is_multicast_ether_addr(fwd_hdr->addr1))
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_mcast);
......
......@@ -1511,7 +1511,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
return;
}
ieee80211_select_queue(local, skb);
ieee80211_set_qos_hdr(local, skb);
ieee80211_tx(sdata, skb, false);
rcu_read_unlock();
}
......@@ -2289,6 +2289,9 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
skb_set_network_header(skb, 0);
skb_set_transport_header(skb, 0);
/* send all internal mgmt frames on VO */
skb_set_queue_mapping(skb, 0);
/*
* The other path calling ieee80211_xmit is from the tasklet,
* and while we can handle concurrent transmissions locking
......
......@@ -268,6 +268,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
if (WARN_ON(queue >= hw->queues))
return;
......@@ -280,6 +281,11 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
if (!skb_queue_empty(&local->pending[queue]))
tasklet_schedule(&local->tx_pending_tasklet);
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
rcu_read_unlock();
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
......@@ -304,11 +310,17 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
if (WARN_ON(queue >= hw->queues))
return;
__set_bit(reason, &local->queue_stop_reasons[queue]);
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue));
rcu_read_unlock();
}
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
......
......@@ -45,21 +45,68 @@ static int wme_downgrade_ac(struct sk_buff *skb)
/* Indicate which queue to use. */
static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta = NULL;
u32 sta_flags = 0;
const u8 *ra = NULL;
bool qos = false;
if (!ieee80211_is_data(hdr->frame_control)) {
/* management frames go on AC_VO queue, but are sent
* without QoS control fields */
return 0;
if (local->hw.queues < 4 || skb->len < 6) {
skb->priority = 0; /* required for correct WPA/11i MIC */
return min_t(u16, local->hw.queues - 1,
ieee802_1d_to_ac[skb->priority]);
}
if (0 /* injected */) {
/* use AC from radiotap */
rcu_read_lock();
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
rcu_read_lock();
sta = rcu_dereference(sdata->u.vlan.sta);
if (sta)
sta_flags = get_sta_flags(sta);
rcu_read_unlock();
if (sta)
break;
case NL80211_IFTYPE_AP:
ra = skb->data;
break;
case NL80211_IFTYPE_WDS:
ra = sdata->u.wds.remote_addr;
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
/*
* XXX: This is clearly broken ... but already was before,
* because ieee80211_fill_mesh_addresses() would clear A1
* except for multicast addresses.
*/
break;
#endif
case NL80211_IFTYPE_STATION:
ra = sdata->u.mgd.bssid;
break;
case NL80211_IFTYPE_ADHOC:
ra = skb->data;
break;
default:
break;
}
if (!ieee80211_is_data_qos(hdr->frame_control)) {
if (!sta && ra && !is_multicast_ether_addr(ra)) {
sta = sta_info_get(sdata, ra);
if (sta)
sta_flags = get_sta_flags(sta);
}
if (sta_flags & WLAN_STA_WME)
qos = true;
rcu_read_unlock();
if (!qos) {
skb->priority = 0; /* required for correct WPA/11i MIC */
return ieee802_1d_to_ac[skb->priority];
}
......@@ -68,6 +115,12 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
* data frame has */
skb->priority = cfg80211_classify8021d(skb);
return ieee80211_downgrade_queue(local, skb);
}
u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
struct sk_buff *skb)
{
/* in case we are a client verify acm is not set for this ac */
while (unlikely(local->wmm_acm & BIT(skb->priority))) {
if (wme_downgrade_ac(skb)) {
......@@ -85,24 +138,17 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
return ieee802_1d_to_ac[skb->priority];
}
void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
u16 queue;
u8 tid;
struct ieee80211_hdr *hdr = (void *)skb->data;
queue = classify80211(local, skb);
if (unlikely(queue >= local->hw.queues))
queue = local->hw.queues - 1;
/*
* Now we know the 1d priority, fill in the QoS header if
* there is one (and we haven't done this before).
*/
/* Fill in the QoS header if there is one. */
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *p = ieee80211_get_qos_ctl(hdr);
u8 ack_policy = 0;
u8 ack_policy = 0, tid;
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
if (unlikely(local->wifi_wme_noack_test))
ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
QOS_CONTROL_ACK_POLICY_SHIFT;
......@@ -110,6 +156,4 @@ void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
*p++ = ack_policy | tid;
*p = 0;
}
skb_set_queue_mapping(skb, queue);
}
......@@ -20,7 +20,11 @@
extern const int ieee802_1d_to_ac[8];
void ieee80211_select_queue(struct ieee80211_local *local,
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
struct sk_buff *skb);
#endif /* _WME_H */
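The queue selection reworked above boils down to mapping an 802.1d priority (0-7) onto one of four ACs and, when the AP mandates admission control for that AC, stepping down to a lower one. The standalone sketch below shows that mapping and downgrade loop; it assumes the usual mac80211 convention where queue 0 is AC_VO and queue 3 is AC_BK, and acm_bitmap plays the role of local->wmm_acm (a bitmap of priorities whose AC has ACM set).

/* 802.1d priority -> AC queue: 0 = VO, 1 = VI, 2 = BE, 3 = BK. */
static const int prio_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

/* While the priority's AC requires admission control, downgrade
 * VO -> VI -> BE -> BK; if even BK requires ACM, stay on BK rather
 * than drop the frame. */
static int select_ac(unsigned int acm_bitmap, int priority)
{
	while (acm_bitmap & (1u << priority)) {
		switch (prio_to_ac[priority]) {
		case 0:
			priority = 5;	/* VO -> VI */
			break;
		case 1:
			priority = 3;	/* VI -> BE */
			break;
		case 2:
			priority = 2;	/* BE -> BK */
			break;
		default:
			return prio_to_ac[priority];
		}
	}
	return prio_to_ac[priority];
}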
......@@ -1690,7 +1690,7 @@ int regulatory_hint_user(const char *alpha2)
request->wiphy_idx = WIPHY_IDX_STALE;
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_USER,
request->initiator = NL80211_REGDOM_SET_BY_USER;
queue_regulatory_request(request);
......
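The last hunk swaps a stray comma operator for a semicolon. Because assignment binds tighter than the comma, the old line compiled to the same two operations, so this is a readability fix rather than a behavior change. A tiny illustration with made-up names:

#include <stdio.h>

static int initiator;

static void queue_request(void)
{
	printf("initiator = %d\n", initiator);
}

int main(void)
{
	/* Old form: parsed as (initiator = 1), queue_request(); --
	 * same effect, but the statement boundary is hidden. */
	initiator = 1, queue_request();

	/* Intended form after the fix. */
	initiator = 2;
	queue_request();

	return 0;
}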