Commit 7f01d567 authored by Emmanuel Grumbach, committed by John W. Linville

iwlagn: move the disable agg logic to transport layer

Since all the check_empty logic is now in the transport layer,
the upper layer no longer needs to know anything about tx queues.
The disable-aggregation flow was the last piece of the upper layer
that still knew what a tx queue is, so move it to the transport
layer too.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 464021ff
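The net effect on layering: the upper layer now hands (context, station id, TID) to the transport, which resolves and tears down the tx queue internally. Below is a minimal user-space sketch of that ops-table split, with purely illustrative names (this is not the driver's API, just the shape of the boundary the patch draws):

#include <stdio.h>

/* transport ops table: note that no txq_id crosses this boundary */
struct trans_ops {
	int (*tx_agg_disable)(int ctx, int sta_id, int tid);
};

/* transport-private: the (sta_id, tid) -> tx queue mapping lives below
 * the API line (hypothetical formula, for illustration only) */
static int txq_for(int sta_id, int tid)
{
	return 10 + sta_id * 8 + tid;
}

static int pcie_tx_agg_disable(int ctx, int sta_id, int tid)
{
	int txq_id = txq_for(sta_id, tid); /* previously the upper layer's job */
	printf("ctx %d: draining and deactivating txq %d\n", ctx, txq_id);
	return 0;
}

static const struct trans_ops ops = {
	.tx_agg_disable = pcie_tx_agg_disable,
};

int main(void)
{
	/* "upper layer": no tx queue in sight, mirroring the new
	 * iwlagn_tx_agg_stop() */
	return ops.tx_agg_disable(0 /* ctx */, 2 /* sta_id */, 5 /* tid */);
}

In the patch itself the call chain is iwlagn_tx_agg_stop() -> iwl_trans_tx_agg_disable() -> trans->ops->tx_agg_disable(), as the hunks below show.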
@@ -408,10 +408,8 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta, u16 tid)
 {
-	int txq_id, sta_id, ssn;
-	struct iwl_tid_data *tid_data;
-	int write_ptr, read_ptr;
-	unsigned long flags;
+	int sta_id;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
 
 	sta_id = iwl_sta_id(sta);
@@ -420,61 +418,8 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 		return -ENXIO;
 	}
 
-	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
-
-	tid_data = &priv->shrd->tid_data[sta_id][tid];
-	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
-	txq_id = tid_data->agg.txq_id;
-
-	switch (priv->shrd->tid_data[sta_id][tid].agg.state) {
-	case IWL_EMPTYING_HW_QUEUE_ADDBA:
-		/*
-		 * This can happen if the peer stops aggregation
-		 * again before we've had a chance to drain the
-		 * queue we selected previously, i.e. before the
-		 * session was really started completely.
-		 */
-		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
-		goto turn_off;
-	case IWL_AGG_ON:
-		break;
-	default:
-		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
-	}
-
-	write_ptr = priv->txq[txq_id].q.write_ptr;
-	read_ptr = priv->txq[txq_id].q.read_ptr;
-
-	/* The queue is not empty */
-	if (write_ptr != read_ptr) {
-		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
-		priv->shrd->tid_data[sta_id][tid].agg.state =
-				IWL_EMPTYING_HW_QUEUE_DELBA;
-		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-		return 0;
-	}
-
-	IWL_DEBUG_HT(priv, "HW queue is empty\n");
-turn_off:
-	priv->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
-
-	/* do not restore/save irqs */
-	spin_unlock(&priv->shrd->sta_lock);
-	spin_lock(&priv->shrd->lock);
-
-	/*
-	 * the only reason this call can fail is queue number out of range,
-	 * which can happen if uCode is reloaded and all the station
-	 * information are lost. if it is outside the range, there is no need
-	 * to deactivate the uCode queue, just return "success" to allow
-	 * mac80211 to clean up it own data.
-	 */
-	iwl_trans_txq_agg_disable(trans(priv), txq_id);
-	spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
-	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-
-	return 0;
+	return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
+					sta_id, tid);
 }
 
 static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
...
@@ -189,7 +189,10 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
 void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 				       struct iwl_tx_queue *txq,
 				       u16 byte_cnt);
-int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id);
+void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
+				  enum iwl_rxon_context_id ctx, int sta_id,
+				  int tid);
 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
 void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
 				   struct iwl_tx_queue *txq,
...
@@ -560,32 +560,92 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
 	return 0;
 }
 
-int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id)
+void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
 {
-	struct iwl_trans *trans = trans(priv);
+	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+
+	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
+
+	priv(trans)->txq[txq_id].q.read_ptr = 0;
+	priv(trans)->txq[txq_id].q.write_ptr = 0;
+	/* supposes that ssn_idx is valid (!= 0xFFF) */
+	iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+
+	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+	iwl_txq_ctx_deactivate(priv(trans), txq_id);
+	iwl_trans_tx_queue_set_status(priv(trans),
+				      &priv(trans)->txq[txq_id], 0, 0);
+}
+
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
+				  enum iwl_rxon_context_id ctx, int sta_id,
+				  int tid)
+{
+	unsigned long flags;
+	int read_ptr, write_ptr;
+	struct iwl_tid_data *tid_data;
+	int txq_id;
+
+	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+
+	tid_data = &trans->shrd->tid_data[sta_id][tid];
+	txq_id = tid_data->agg.txq_id;
+
 	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
 	    (IWLAGN_FIRST_AMPDU_QUEUE +
-		hw_params(priv).num_ampdu_queues <= txq_id)) {
-		IWL_ERR(priv,
+		hw_params(trans).num_ampdu_queues <= txq_id)) {
+		IWL_ERR(trans,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
 			IWLAGN_FIRST_AMPDU_QUEUE +
-			hw_params(priv).num_ampdu_queues - 1);
+			hw_params(trans).num_ampdu_queues - 1);
+		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
 		return -EINVAL;
 	}
 
-	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
+		goto turn_off;
+	case IWL_AGG_ON:
+		break;
+	default:
+		IWL_WARN(trans, "Stopping AGG while state not ON "
+				"or starting\n");
+	}
 
-	iwl_clear_bits_prph(bus(priv), SCD_AGGR_SEL, (1 << txq_id));
+	write_ptr = priv(trans)->txq[txq_id].q.write_ptr;
+	read_ptr = priv(trans)->txq[txq_id].q.read_ptr;
 
-	priv->txq[txq_id].q.read_ptr = 0;
-	priv->txq[txq_id].q.write_ptr = 0;
-	/* supposes that ssn_idx is valid (!= 0xFFF) */
-	iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+	/* The queue is not empty */
+	if (write_ptr != read_ptr) {
+		IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
+		trans->shrd->tid_data[sta_id][tid].agg.state =
+				IWL_EMPTYING_HW_QUEUE_DELBA;
+		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+		return 0;
+	}
 
-	iwl_clear_bits_prph(bus(priv), SCD_INTERRUPT_MASK, (1 << txq_id));
-	iwl_txq_ctx_deactivate(priv, txq_id);
-	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], 0, 0);
+	IWL_DEBUG_HT(trans, "HW queue is empty\n");
+turn_off:
+	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+	/* do not restore/save irqs */
+	spin_unlock(&trans->shrd->sta_lock);
+	spin_lock(&trans->shrd->lock);
+
+	iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+
+	spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
 
 	return 0;
 }
...
@@ -1281,7 +1281,7 @@ static int iwlagn_txq_check_empty(struct iwl_trans *trans,
 		    (q->read_ptr == q->write_ptr)) {
 			IWL_DEBUG_HT(trans,
 				"HW queue empty: continue DELBA flow\n");
-			iwl_trans_pcie_txq_agg_disable(priv(trans), txq_id);
+			iwl_trans_pcie_txq_agg_disable(trans, txq_id);
 			tid_data->agg.state = IWL_AGG_OFF;
 			iwl_stop_tx_ba_trans_ready(priv(trans),
 						   NUM_IWL_RXON_CTX,
@@ -2015,7 +2015,7 @@ const struct iwl_trans_ops trans_ops_pcie = {
 	.tx = iwl_trans_pcie_tx,
 	.reclaim = iwl_trans_pcie_reclaim,
 
-	.txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
+	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
 	.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
 	.txq_agg_setup = iwl_trans_pcie_txq_agg_setup,
...
@@ -97,7 +97,7 @@ struct iwl_device_cmd;
  * @tx_agg_alloc: allocate resources for a TX BA session
  * @txq_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
  *	ready and a successful ADDBA response has been received.
- * @txq_agg_disable: de-configure a Tx queue to send AMPDUs
+ * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
  * @kick_nic: remove the RESET from the embedded CPU and let it run
  * @free: release all the ressource for the transport layer itself such as
  *	irq, tasklet etc...
@@ -127,7 +127,9 @@ struct iwl_trans_ops {
 		       int txq_id, int ssn, u32 status,
 		       struct sk_buff_head *skbs);
 
-	int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id);
+	int (*tx_agg_disable)(struct iwl_trans *trans,
+			      enum iwl_rxon_context_id ctx, int sta_id,
+			      int tid);
 	int (*tx_agg_alloc)(struct iwl_trans *trans,
 			    enum iwl_rxon_context_id ctx, int sta_id, int tid,
 			    u16 *ssn);
@@ -216,9 +218,11 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
 	trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
 }
 
-static inline int iwl_trans_txq_agg_disable(struct iwl_trans *trans, u16 txq_id)
+static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
+					   enum iwl_rxon_context_id ctx,
+					   int sta_id, int tid)
 {
-	return trans->ops->txq_agg_disable(priv(trans), txq_id);
+	return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
 }
 
 static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
...