Commit 2e237319 authored by Andreas Fenkart's avatar Andreas Fenkart Committed by John W. Linville

mwifiex: replace ra_list_curr by list rotation.

After a packet is successfully transmitted, the ra list is rotated, so the ra
next to the one transmitted will be the first in the list. This way we
pick the ra in a round-robin fashion. This significantly simplifies
iteration in mwifiex_wmm_get_highest_priolist_ptr to a call to
list_for_each_entry.
List rotation is done via list_move, where the head itself is temporarily
removed and then re-inserted after the item just transferred.
Signed-off-by: Andreas Fenkart <andreas.fenkart@streamunlimited.com>
Acked-by: Bing Zhao <bzhao@marvell.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 1aac1e91
...@@ -296,19 +296,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, ...@@ -296,19 +296,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
break; break;
} }
if (ret != -EBUSY) { if (ret != -EBUSY) {
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); mwifiex_rotate_priolists(priv, pra_list, ptrindex);
if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
priv->wmm.packets_out[ptrindex]++;
priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list;
}
/* Now bss_prio_cur pointer points to next node */ /* Now bss_prio_cur pointer points to next node */
adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur = adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
list_first_entry( list_first_entry(
&adapter->bss_prio_tbl[priv->bss_priority] &adapter->bss_prio_tbl[priv->bss_priority]
.bss_prio_cur->list, .bss_prio_cur->list,
struct mwifiex_bss_prio_node, list); struct mwifiex_bss_prio_node, list);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
} }
return 0; return 0;
......
...@@ -213,7 +213,6 @@ struct mwifiex_ra_list_tbl { ...@@ -213,7 +213,6 @@ struct mwifiex_ra_list_tbl {
struct mwifiex_tid_tbl { struct mwifiex_tid_tbl {
struct list_head ra_list; struct list_head ra_list;
struct mwifiex_ra_list_tbl *ra_list_curr;
}; };
#define WMM_HIGHEST_PRIORITY 7 #define WMM_HIGHEST_PRIORITY 7
......
...@@ -191,9 +191,6 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra) ...@@ -191,9 +191,6 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
} }
list_add_tail(&ra_list->list, list_add_tail(&ra_list->list,
&priv->wmm.tid_tbl_ptr[i].ra_list); &priv->wmm.tid_tbl_ptr[i].ra_list);
if (!priv->wmm.tid_tbl_ptr[i].ra_list_curr)
priv->wmm.tid_tbl_ptr[i].ra_list_curr = ra_list;
} }
} }
...@@ -424,7 +421,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter) ...@@ -424,7 +421,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i]; priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i]; priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i]; priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
} }
priv->aggr_prio_tbl[6].amsdu priv->aggr_prio_tbl[6].amsdu
...@@ -530,8 +526,6 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv) ...@@ -530,8 +526,6 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
} }
INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list); INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
} }
} }
...@@ -883,7 +877,7 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, ...@@ -883,7 +877,7 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
struct mwifiex_private **priv, int *tid) struct mwifiex_private **priv, int *tid)
{ {
struct mwifiex_private *priv_tmp; struct mwifiex_private *priv_tmp;
struct mwifiex_ra_list_tbl *ptr, *head; struct mwifiex_ra_list_tbl *ptr;
struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head; struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head;
struct mwifiex_tid_tbl *tid_ptr; struct mwifiex_tid_tbl *tid_ptr;
atomic_t *hqp; atomic_t *hqp;
...@@ -926,51 +920,15 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, ...@@ -926,51 +920,15 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
tid_ptr = &(priv_tmp)->wmm. tid_ptr = &(priv_tmp)->wmm.
tid_tbl_ptr[tos_to_tid[i]]; tid_tbl_ptr[tos_to_tid[i]];
/* For non-STA ra_list_curr may be NULL */ /* iterate over receiver addresses */
if (!tid_ptr->ra_list_curr) list_for_each_entry(ptr, &tid_ptr->ra_list,
goto skip_wmm_queue; list) {
if (list_empty(&tid_ptr->ra_list))
goto skip_wmm_queue;
/*
* Always choose the next ra we transmitted
* last time, this way we pick the ra's in
* round robin fashion.
*/
ptr = list_first_entry(
&tid_ptr->ra_list_curr->list,
struct mwifiex_ra_list_tbl,
list);
head = ptr;
if (ptr == (struct mwifiex_ra_list_tbl *)
&tid_ptr->ra_list) {
/* Get next ra */
ptr = list_first_entry(&ptr->list,
struct mwifiex_ra_list_tbl, list);
head = ptr;
}
do {
if (!skb_queue_empty(&ptr->skb_head)) if (!skb_queue_empty(&ptr->skb_head))
/* holds both locks */ /* holds both locks */
goto found; goto found;
}
/* Get next ra */
ptr = list_first_entry(&ptr->list,
struct mwifiex_ra_list_tbl,
list);
if (ptr ==
(struct mwifiex_ra_list_tbl *)
&tid_ptr->ra_list)
ptr = list_first_entry(
&ptr->list,
struct mwifiex_ra_list_tbl,
list);
} while (ptr != head);
skip_wmm_queue:
spin_unlock_irqrestore(&priv_tmp->wmm. spin_unlock_irqrestore(&priv_tmp->wmm.
ra_list_spinlock, ra_list_spinlock,
flags_ra); flags_ra);
...@@ -1013,6 +971,35 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, ...@@ -1013,6 +971,35 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
return ptr; return ptr;
} }
/*
 * This function rotates ra (receiver address) lists so packets are picked
 * in round robin fashion.
 *
 * After a packet is successfully transmitted, rotate the ra list, so the ra
 * next to the one transmitted will come first in the list. This way we pick
 * the ra in a round robin fashion.
 *
 * The function also increments the wmm.packets_out counter for the given TID.
 *
 * @priv: private driver context owning the WMM tables and spinlock
 * @ra:   the ra list entry whose packet was just transmitted
 * @tid:  traffic identifier (index into wmm.tid_tbl_ptr / wmm.packets_out)
 *
 * Takes priv->wmm.ra_list_spinlock with IRQs disabled; safe to call from
 * any context that may run concurrently with the TX path.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
	unsigned long flags;

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	/* 'ra' may have been freed/invalidated while unlocked; re-validate
	 * under the lock before touching the list or counters.
	 */
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/*
		 * dirty trick: we remove 'head' temporarily and reinsert it
		 * after curr bss node. imagine list to stay fixed while only
		 * head is moved
		 */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
/* /*
* This function checks if 11n aggregation is possible. * This function checks if 11n aggregation is possible.
*/ */
...@@ -1099,11 +1086,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv, ...@@ -1099,11 +1086,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags); ra_list_flags);
} else { } else {
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); mwifiex_rotate_priolists(priv, ptr, ptr_index);
if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
priv->wmm.packets_out[ptr_index]++;
priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
}
adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur = adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
list_first_entry( list_first_entry(
&adapter->bss_prio_tbl[priv->bss_priority] &adapter->bss_prio_tbl[priv->bss_priority]
...@@ -1111,8 +1094,6 @@ mwifiex_send_single_packet(struct mwifiex_private *priv, ...@@ -1111,8 +1094,6 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
struct mwifiex_bss_prio_node, struct mwifiex_bss_prio_node,
list); list);
atomic_dec(&priv->wmm.tx_pkts_queued); atomic_dec(&priv->wmm.tx_pkts_queued);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
} }
} }
...@@ -1216,11 +1197,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, ...@@ -1216,11 +1197,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
break; break;
} }
if (ret != -EBUSY) { if (ret != -EBUSY) {
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); mwifiex_rotate_priolists(priv, ptr, ptr_index);
if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
priv->wmm.packets_out[ptr_index]++;
priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
}
adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur = adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
list_first_entry( list_first_entry(
&adapter->bss_prio_tbl[priv->bss_priority] &adapter->bss_prio_tbl[priv->bss_priority]
...@@ -1228,8 +1205,6 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, ...@@ -1228,8 +1205,6 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
struct mwifiex_bss_prio_node, struct mwifiex_bss_prio_node,
list); list);
atomic_dec(&priv->wmm.tx_pkts_queued); atomic_dec(&priv->wmm.tx_pkts_queued);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
} }
} }
......
...@@ -85,6 +85,9 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead) ...@@ -85,6 +85,9 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct sk_buff *skb); struct sk_buff *skb);
void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra); void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra,
int tid);
int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter); int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter); void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment