Commit a622ab72 authored by Johannes Berg, committed by John W. Linville

mac80211: use RCU for TX aggregation

Currently we allocate some memory for each TX
aggregation session and additionally keep a
state bitmap indicating the state it is in.
By using RCU to protect the pointer, moving
the state into the structure and some locking
trickery we can avoid locking when the TX agg
session is fully operational.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent a87f736d
This diff is collapsed.
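The setup/teardown rework itself (presumably net/mac80211/agg-tx.c) sits in the collapsed part of this diff. As a rough, hypothetical sketch of what the commit message describes -- not code from this patch, the helper name is invented -- the TX hot path only needs an RCU dereference and an atomic bit test once a session is operational:

/* Illustrative only: the helper name is made up; the types and flags
 * match the patch below. */
static void tx_agg_fast_path_sketch(struct sta_info *sta, int tid,
				    struct ieee80211_tx_info *info)
{
	struct tid_ampdu_tx *tid_tx;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (tid_tx && test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
		info->flags |= IEEE80211_TX_CTL_AMPDU;	/* no spinlock taken */
	rcu_read_unlock();
}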
@@ -134,15 +134,15 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
 				sta->ampdu_mlme.tid_rx[i]->ssn : 0);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
-				sta->ampdu_mlme.tid_state_tx[i]);
+				!!sta->ampdu_mlme.tid_tx[i]);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
-				sta->ampdu_mlme.tid_state_tx[i] ?
+				sta->ampdu_mlme.tid_tx[i] ?
 				sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
-				sta->ampdu_mlme.tid_state_tx[i] ?
+				sta->ampdu_mlme.tid_tx[i] ?
 				sta->ampdu_mlme.tid_tx[i]->ssn : 0);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
-				sta->ampdu_mlme.tid_state_tx[i] ?
+				sta->ampdu_mlme.tid_tx[i] ?
 				skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\n");
 	}
...
@@ -176,13 +176,8 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
 	if (initiator == WLAN_BACK_INITIATOR)
 		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
-	else { /* WLAN_BACK_RECIPIENT */
-		spin_lock_bh(&sta->lock);
-		if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
-			___ieee80211_stop_tx_ba_session(sta, tid,
-							WLAN_BACK_RECIPIENT);
-		spin_unlock_bh(&sta->lock);
-	}
+	else
+		__ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT);
 }
 
 int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
...
@@ -1119,8 +1119,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				   enum ieee80211_back_parties initiator);
-int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-				    enum ieee80211_back_parties initiator);
 
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
...
@@ -365,7 +365,7 @@ minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, stru
 		return;
 
 	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
-	if (likely(sta->ampdu_mlme.tid_state_tx[tid] != HT_AGG_STATE_IDLE))
+	if (likely(sta->ampdu_mlme.tid_tx[tid]))
 		return;
 
 	ieee80211_start_tx_ba_session(pubsta, tid);
...
@@ -246,14 +246,12 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 	}
 
 	for (i = 0; i < STA_TID_NUM; i++) {
-		/* timer_to_tid must be initialized with identity mapping to
-		 * enable session_timer's data differentiation. refer to
-		 * sta_rx_agg_session_timer_expired for useage */
+		/*
+		 * timer_to_tid must be initialized with identity mapping
+		 * to enable session_timer's data differentiation. See
+		 * sta_rx_agg_session_timer_expired for usage.
+		 */
 		sta->timer_to_tid[i] = i;
-		/* tx */
-		sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
-		sta->ampdu_mlme.tid_tx[i] = NULL;
-		sta->ampdu_mlme.addba_req_num[i] = 0;
 	}
 	skb_queue_head_init(&sta->ps_tx_buf);
 	skb_queue_head_init(&sta->tx_filtered);
...
@@ -61,33 +61,40 @@ enum ieee80211_sta_info_flags {
 #define STA_TID_NUM 16
 #define ADDBA_RESP_INTERVAL HZ
-#define HT_AGG_MAX_RETRIES (0x3)
-
-#define HT_AGG_STATE_INITIATOR_SHIFT (4)
-
-#define HT_ADDBA_REQUESTED_MSK BIT(0)
-#define HT_ADDBA_DRV_READY_MSK BIT(1)
-#define HT_ADDBA_RECEIVED_MSK BIT(2)
-
-#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3)
-
-#define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT)
-
-#define HT_AGG_STATE_IDLE (0x0)
-#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \
-				   HT_ADDBA_DRV_READY_MSK | \
-				   HT_ADDBA_RECEIVED_MSK)
+#define HT_AGG_MAX_RETRIES 0x3
+
+#define HT_AGG_STATE_DRV_READY 0
+#define HT_AGG_STATE_RESPONSE_RECEIVED 1
+#define HT_AGG_STATE_OPERATIONAL 2
+#define HT_AGG_STATE_STOPPING 3
 
 /**
  * struct tid_ampdu_tx - TID aggregation information (Tx).
  *
+ * @rcu_head: rcu head for freeing structure
  * @addba_resp_timer: timer for peer's response to addba request
  * @pending: pending frames queue -- use sta's spinlock to protect
  * @ssn: Starting Sequence Number expected to be aggregated.
  * @dialog_token: dialog token for aggregation session
+ * @state: session state (see above)
+ * @stop_initiator: initiator of a session stop
+ *
+ * This structure is protected by RCU and the per-station
+ * spinlock. Assignments to the array holding it must hold
+ * the spinlock, only the TX path can access it under RCU
+ * lock-free if, and only if, the state has the flag
+ * %HT_AGG_STATE_OPERATIONAL set. Otherwise, the TX path
+ * must also acquire the spinlock and re-check the state,
+ * see comments in the tx code touching it.
  */
 struct tid_ampdu_tx {
+	struct rcu_head rcu_head;
 	struct timer_list addba_resp_timer;
 	struct sk_buff_head pending;
+	unsigned long state;
 	u16 ssn;
 	u8 dialog_token;
+	u8 stop_initiator;
 };
 
 /**
@@ -129,7 +136,6 @@ struct tid_ampdu_rx {
  * struct sta_ampdu_mlme - STA aggregation information.
  *
  * @tid_rx: aggregation info for Rx per TID -- RCU protected
- * @tid_state_tx: TID's state in Tx session state machine.
  * @tid_tx: aggregation info for Tx per TID
  * @addba_req_num: number of times addBA request has been sent.
  * @dialog_token_allocator: dialog token enumerator for each new session;
@@ -138,7 +144,6 @@ struct sta_ampdu_mlme {
 	/* rx */
 	struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
 	/* tx */
-	u8 tid_state_tx[STA_TID_NUM];
 	struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
 	u8 addba_req_num[STA_TID_NUM];
 	u8 dialog_token_allocator;
...
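The kernel-doc comment above states the rule: writers update the tid_tx array only under the station spinlock, and readers either see the OPERATIONAL bit set or fall back to that lock. A minimal, hypothetical sketch of the writer side follows; the real code is in the collapsed agg-tx.c portion of this diff, and the function names here are invented for illustration.

/* Sketch only: publish, tear down and free a TX aggregation session. */
static void tid_tx_free_sketch(struct rcu_head *head)
{
	/* free the session struct once all RCU readers are done with it */
	kfree(container_of(head, struct tid_ampdu_tx, rcu_head));
}

static void tid_tx_publish_sketch(struct sta_info *sta, int tid,
				  struct tid_ampdu_tx *tid_tx)
{
	spin_lock_bh(&sta->lock);
	/* the struct must be fully initialised before it becomes visible */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
	spin_unlock_bh(&sta->lock);
}

static void tid_tx_teardown_sketch(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];	/* assume a session exists */
	/* TX paths that already loaded the pointer re-check this bit,
	 * and the NULL pointer, under sta->lock */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
	spin_unlock_bh(&sta->lock);

	call_rcu(&tid_tx->rcu_head, tid_tx_free_sketch);
}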
@@ -1092,6 +1092,54 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
 	return true;
 }
 
+static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+				  struct sk_buff *skb,
+				  struct ieee80211_tx_info *info,
+				  struct tid_ampdu_tx *tid_tx,
+				  int tid)
+{
+	bool queued = false;
+
+	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+		info->flags |= IEEE80211_TX_CTL_AMPDU;
+	} else {
+		spin_lock(&tx->sta->lock);
+		/*
+		 * Need to re-check now, because we may get here
+		 *
+		 *  1) in the window during which the setup is actually
+		 *     already done, but not marked yet because not all
+		 *     packets are spliced over to the driver pending
+		 *     queue yet -- if this happened we acquire the lock
+		 *     either before or after the splice happens, but
+		 *     need to recheck which of these cases happened.
+		 *
+		 *  2) during session teardown, if the OPERATIONAL bit
+		 *     was cleared due to the teardown but the pointer
+		 *     hasn't been assigned NULL yet (or we loaded it
+		 *     before it was assigned) -- in this case it may
+		 *     now be NULL which means we should just let the
+		 *     packet pass through because splicing the frames
+		 *     back is already done.
+		 */
+		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
+
+		if (!tid_tx) {
+			/* do nothing, let packet pass through */
+		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+			info->flags |= IEEE80211_TX_CTL_AMPDU;
+		} else {
+			queued = true;
+			info->control.vif = &tx->sdata->vif;
+			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+			__skb_queue_tail(&tid_tx->pending, skb);
+		}
+		spin_unlock(&tx->sta->lock);
+	}
+
+	return queued;
+}
+
 /*
  * initialises @tx
  */
@@ -1104,8 +1152,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	int hdrlen, tid;
-	u8 *qc, *state;
-	bool queued = false;
+	u8 *qc;
 
 	memset(tx, 0, sizeof(*tx));
 	tx->skb = skb;
@@ -1157,36 +1204,17 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
-		spin_lock(&tx->sta->lock);
-		/*
-		 * XXX: This spinlock could be fairly expensive, but see the
-		 *	comment in agg-tx.c:ieee80211_agg_tx_operational().
-		 *	One way to solve this would be to do something RCU-like
-		 *	for managing the tid_tx struct and using atomic bitops
-		 *	for the actual state -- by introducing an actual
-		 *	'operational' bit that would be possible. It would
-		 *	require changing ieee80211_agg_tx_operational() to
-		 *	set that bit, and changing the way tid_tx is managed
-		 *	everywhere, including races between that bit and
-		 *	tid_tx going away (tid_tx being added can be easily
-		 *	committed to memory before the 'operational' bit).
-		 */
-		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
-		state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
-		if (*state == HT_AGG_STATE_OPERATIONAL) {
-			info->flags |= IEEE80211_TX_CTL_AMPDU;
-		} else if (*state != HT_AGG_STATE_IDLE) {
-			/* in progress */
-			queued = true;
-			info->control.vif = &sdata->vif;
-			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-			__skb_queue_tail(&tid_tx->pending, skb);
-		}
-		spin_unlock(&tx->sta->lock);
+		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+		if (tid_tx) {
+			bool queued;
 
-		if (unlikely(queued))
-			return TX_QUEUED;
+			queued = ieee80211_tx_prep_agg(tx, skb, info,
+						       tid_tx, tid);
+
+			if (unlikely(queued))
+				return TX_QUEUED;
+		}
 
 		if (is_multicast_ether_addr(hdr->addr1)) {
 			tx->flags &= ~IEEE80211_TX_UNICAST;
...
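For case (1) of the re-check comment in ieee80211_tx_prep_agg(), the setup side marks the session operational only after splicing its buffered frames back to the driver, all under the same station spinlock. A hedged sketch of that transition follows; the actual ieee80211_agg_tx_operational() is not shown in this view, and the splicing details are omitted.

/* Sketch only: flip a session to operational so the TX fast path
 * stops taking the spinlock. */
static void agg_tx_mark_operational_sketch(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	/* ... splice tid_tx->pending back to the driver queues here ... */

	/* a TX path that raced with the splice re-checks under sta->lock */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	spin_unlock_bh(&sta->lock);
}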