Commit 56319093 authored by Ganapathi Bhat, committed by Kalle Valo

mwifiex: replace rx_pkt_lock by rx_reorder_tbl_lock

At present the driver protects iteration of the list
rx_reorder_tbl_ptr with the spinlock rx_reorder_tbl_lock, and uses
a second spinlock, rx_pkt_lock, to protect the individual items in
that list. A single rx_reorder_tbl_lock can serve both purposes, so
this patch replaces rx_pkt_lock with rx_reorder_tbl_lock.
Signed-off-by: Ganapathi Bhat <gbhat@marvell.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent d0db2f7a
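
The change is easiest to see as a locking pattern. Below is a minimal sketch of the pattern after this patch, using simplified demo_* types rather than the driver's real structures: the single rx_reorder_tbl_lock now covers both the reorder table and its per-slot packet pointers, a slot is claimed and cleared under the lock, and the packet itself is handed up only after the lock is dropped. Before the patch, the per-slot accesses would have taken rx_pkt_lock while the list walk that located the table took rx_reorder_tbl_lock.

#include <linux/spinlock.h>

struct demo_reorder_tbl {
        void **rx_reorder_ptr;  /* one buffered packet pointer per window slot */
        int win_size;
};

struct demo_priv {
        spinlock_t rx_reorder_tbl_lock; /* now protects the table and its slots */
        struct demo_reorder_tbl *tbl;
};

/* Stand-in for mwifiex_11n_dispatch_pkt(): hand a packet up the stack. */
static void demo_dispatch(void *pkt)
{
        (void)pkt;
}

static void demo_dispatch_window(struct demo_priv *priv)
{
        struct demo_reorder_tbl *tbl = priv->tbl;
        unsigned long flags;
        void *pkt;
        int i;

        for (i = 0; i < tbl->win_size; ++i) {
                spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                pkt = tbl->rx_reorder_ptr[i];   /* claim the slot under the lock */
                tbl->rx_reorder_ptr[i] = NULL;
                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

                if (pkt)
                        demo_dispatch(pkt);     /* deliver with the lock dropped */
        }
}
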
@@ -118,18 +118,18 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
                tbl->win_size;
 
        for (i = 0; i < pkt_to_send; ++i) {
-               spin_lock_irqsave(&priv->rx_pkt_lock, flags);
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                rx_tmp_ptr = NULL;
                if (tbl->rx_reorder_ptr[i]) {
                        rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                        tbl->rx_reorder_ptr[i] = NULL;
                }
-               spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                if (rx_tmp_ptr)
                        mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
        }
 
-       spin_lock_irqsave(&priv->rx_pkt_lock, flags);
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        /*
         * We don't have a circular buffer, hence use rotation to simulate
         * circular buffer
@@ -140,7 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
        }
 
        tbl->start_win = start_win;
-       spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -160,18 +160,19 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
        unsigned long flags;
 
        for (i = 0; i < tbl->win_size; ++i) {
-               spin_lock_irqsave(&priv->rx_pkt_lock, flags);
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                if (!tbl->rx_reorder_ptr[i]) {
-                       spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
                        break;
                }
                rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                tbl->rx_reorder_ptr[i] = NULL;
-               spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
        }
 
-       spin_lock_irqsave(&priv->rx_pkt_lock, flags);
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        /*
         * We don't have a circular buffer, hence use rotation to simulate
         * circular buffer
@@ -184,7 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
                }
        }
        tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
-       spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
...
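
Both hunks above end just before the in-code comment about using "rotation to simulate circular buffer". The sketch below is purely illustrative (the driver's actual rotation code sits in the elided lines of these hunks): once the first shift slots of the window have been dispatched, the remaining entries slide to the front of the flat slot array and the freed tail is cleared; the caller then advances the window start with the MAX_TID_VALUE - 1 mask, as in the start_win line kept in the second hunk.

static void demo_rotate_window(void **slots, int win_size, int shift)
{
        int i;

        /* slide the still-buffered entries toward index 0 */
        for (i = 0; i < win_size - shift; ++i)
                slots[i] = slots[i + shift];

        /* the freed tail of the window is now empty */
        for (; i < win_size; ++i)
                slots[i] = NULL;
}
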
@@ -439,7 +439,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
        for (i = 0; i < adapter->priv_num; i++) {
                if (adapter->priv[i]) {
                        priv = adapter->priv[i];
-                       spin_lock_init(&priv->rx_pkt_lock);
                        spin_lock_init(&priv->wmm.ra_list_spinlock);
                        spin_lock_init(&priv->curr_bcn_buf_lock);
                        spin_lock_init(&priv->sta_list_spinlock);
...
@@ -616,9 +616,6 @@ struct mwifiex_private {
        struct list_head rx_reorder_tbl_ptr;
        /* spin lock for rx_reorder_tbl_ptr queue */
        spinlock_t rx_reorder_tbl_lock;
-       /* spin lock for Rx packets */
-       spinlock_t rx_pkt_lock;
-
 #define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
        u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
        u32 assoc_rsp_size;
...
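
With rx_pkt_lock removed from struct mwifiex_private, any path that both walks rx_reorder_tbl_ptr and modifies an entry now takes only rx_reorder_tbl_lock. A sketch of that usage, again with simplified demo_* types rather than the driver's real ones:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_tbl_entry {
        struct list_head list;
        int tid;
        void **rx_reorder_ptr;
};

struct demo_sta_priv {
        spinlock_t rx_reorder_tbl_lock;         /* one lock for list and entries */
        struct list_head rx_reorder_tbl_ptr;
};

static void demo_clear_slot(struct demo_sta_priv *priv, int tid, int idx)
{
        struct demo_tbl_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        list_for_each_entry(entry, &priv->rx_reorder_tbl_ptr, list) {
                if (entry->tid == tid) {
                        /* per-entry data touched under the same list lock */
                        entry->rx_reorder_ptr[idx] = NULL;
                        break;
                }
        }
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}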