Commit 7ffef13d authored by Johannes Berg, committed by Wey-Yi Guy

iwlagn: clean up TX aggregation code

Since the driver split, there is no longer any need
for function pointers for aggregation queue setup
and teardown, as all devices now share the same
code. Simplify this.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
parent d103e344
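
For orientation, here is a minimal, self-contained C sketch of the pattern this change applies. The names (dummy_priv, dummy_lib_ops, dummy_txq_agg_enable, the start_agg_* helpers) are hypothetical and stand in for the real iwlagn types: a per-device function-pointer hook is dropped once every device shares one implementation, the shared helper becomes static, and callers invoke it directly.

#include <stdio.h>

struct dummy_priv {
        int txq_count;
};

/* Before: aggregation setup/teardown went through a per-device ops table. */
struct dummy_lib_ops {
        int (*txq_agg_enable)(struct dummy_priv *priv, int txq_id, int tx_fifo);
};

/* After: a single shared helper, now file-local, is called directly. */
static int dummy_txq_agg_enable(struct dummy_priv *priv, int txq_id, int tx_fifo)
{
        if (txq_id >= priv->txq_count)
                return -1; /* queue number out of range */
        printf("enable aggregation: queue %d, fifo %d\n", txq_id, tx_fifo);
        return 0;
}

static int start_agg_old_style(struct dummy_priv *priv,
                               const struct dummy_lib_ops *ops,
                               int txq_id, int tx_fifo)
{
        /* the extra pointer hop only paid off while implementations differed */
        return ops->txq_agg_enable(priv, txq_id, tx_fifo);
}

static int start_agg_new_style(struct dummy_priv *priv, int txq_id, int tx_fifo)
{
        /* direct call; the helper no longer needs to be exported */
        return dummy_txq_agg_enable(priv, txq_id, tx_fifo);
}

int main(void)
{
        struct dummy_priv priv = { .txq_count = 20 };
        struct dummy_lib_ops ops = { .txq_agg_enable = dummy_txq_agg_enable };

        start_agg_old_style(&priv, &ops, 10, 3);
        return start_agg_new_style(&priv, 10, 3);
}

The diff below performs the same substitution in the driver: the txq_agg_enable/txq_agg_disable members are dropped from struct iwl_lib_ops, iwlagn_txq_agg_enable() and iwlagn_txq_agg_disable() become static, and the remaining call sites call them directly.
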
@@ -179,8 +179,6 @@ static struct iwl_lib_ops iwl1000_lib = {
         .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
         .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
         .txq_set_sched = iwlagn_txq_set_sched,
-        .txq_agg_enable = iwlagn_txq_agg_enable,
-        .txq_agg_disable = iwlagn_txq_agg_disable,
         .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
         .txq_free_tfd = iwl_hw_txq_free_tfd,
         .txq_init = iwl_hw_tx_queue_init,
@@ -259,8 +259,6 @@ static struct iwl_lib_ops iwl2000_lib = {
         .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
         .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
         .txq_set_sched = iwlagn_txq_set_sched,
-        .txq_agg_enable = iwlagn_txq_agg_enable,
-        .txq_agg_disable = iwlagn_txq_agg_disable,
         .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
         .txq_free_tfd = iwl_hw_txq_free_tfd,
         .txq_init = iwl_hw_tx_queue_init,
@@ -348,8 +348,6 @@ static struct iwl_lib_ops iwl5000_lib = {
         .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
         .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
         .txq_set_sched = iwlagn_txq_set_sched,
-        .txq_agg_enable = iwlagn_txq_agg_enable,
-        .txq_agg_disable = iwlagn_txq_agg_disable,
         .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
         .txq_free_tfd = iwl_hw_txq_free_tfd,
         .txq_init = iwl_hw_tx_queue_init,
@@ -416,8 +414,6 @@ static struct iwl_lib_ops iwl5150_lib = {
         .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
         .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
         .txq_set_sched = iwlagn_txq_set_sched,
-        .txq_agg_enable = iwlagn_txq_agg_enable,
-        .txq_agg_disable = iwlagn_txq_agg_disable,
         .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
         .txq_free_tfd = iwl_hw_txq_free_tfd,
         .txq_init = iwl_hw_tx_queue_init,
@@ -288,8 +288,6 @@ static struct iwl_lib_ops iwl6000_lib = {
         .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
         .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
         .txq_set_sched = iwlagn_txq_set_sched,
-        .txq_agg_enable = iwlagn_txq_agg_enable,
-        .txq_agg_disable = iwlagn_txq_agg_disable,
         .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
         .txq_free_tfd = iwl_hw_txq_free_tfd,
         .txq_init = iwl_hw_tx_queue_init,
@@ -357,8 +355,6 @@ static struct iwl_lib_ops iwl6030_lib = {
         .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
         .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
         .txq_set_sched = iwlagn_txq_set_sched,
-        .txq_agg_enable = iwlagn_txq_agg_enable,
-        .txq_agg_disable = iwlagn_txq_agg_disable,
         .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
         .txq_free_tfd = iwl_hw_txq_free_tfd,
         .txq_init = iwl_hw_tx_queue_init,
@@ -222,8 +222,8 @@ void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
                        scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
 }
 
-int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
-                          int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+                                 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
 {
         unsigned long flags;
         u16 ra_tid;
@@ -288,8 +288,8 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
         return 0;
 }
 
-int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
-                           u16 ssn_idx, u8 tx_fifo)
+static int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+                                  u16 ssn_idx, u8 tx_fifo)
 {
         if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
             (IWLAGN_FIRST_AMPDU_QUEUE +
@@ -1037,8 +1037,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
         iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
         spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-        ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
-                                                  sta_id, tid, *ssn);
+        ret = iwlagn_txq_agg_enable(priv, txq_id, tx_fifo, sta_id, tid, *ssn);
         if (ret)
                 return ret;
@@ -1125,8 +1124,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
          * to deactivate the uCode queue, just return "success" to allow
          * mac80211 to clean up it own data.
          */
-        priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
-                                             tx_fifo_id);
+        iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
         spin_unlock_irqrestore(&priv->lock, flags);
         ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -1155,8 +1153,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
                 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
                 int tx_fifo = get_fifo_from_tid(ctx, tid);
                 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
-                priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
-                                                     ssn, tx_fifo);
+                iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
                 tid_data->agg.state = IWL_AGG_OFF;
                 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
         }
@@ -133,10 +133,6 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
                                     u16 byte_cnt);
 void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
                                    struct iwl_tx_queue *txq);
-int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
-                          int tx_fifo, int sta_id, int tid, u16 ssn_idx);
-int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
-                           u16 ssn_idx, u8 tx_fifo);
 void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
 void iwl_free_tfds_in_queue(struct iwl_priv *priv,
                             int sta_id, int tid, int freed);
@@ -171,11 +171,6 @@ struct iwl_lib_ops {
                            struct iwl_tx_queue *txq);
         int (*txq_init)(struct iwl_priv *priv,
                         struct iwl_tx_queue *txq);
-        /* aggregations */
-        int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo,
-                              int sta_id, int tid, u16 ssn_idx);
-        int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id, u16 ssn_idx,
-                               u8 tx_fifo);
         /* setup Rx handler */
         void (*rx_handler_setup)(struct iwl_priv *priv);
         /* setup deferred work */