Commit 2a30cb16 authored by Jérôme Pouiller, committed by Greg Kroah-Hartman

staging: wfx: associate tx_queues to vifs

The device handles 4 queues (one per AC) for each virtual interface (and
a maximum of 4 virtual interfaces). Until now, the driver unified the
queues of all the interfaces and handled only 4 queues for the whole
device.

This architecture did not allow the traffic to be balanced between the
vifs. So, this patch relocates the queues into the vif and changes the
API accordingly.
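
In short, the per-AC queues move from the device to the vif. A
simplified before/after sketch (both structs are abbreviated to the
members relevant here, cf. the wfx.h hunk below):

    /* Before: one set of queues shared by all the interfaces */
    struct wfx_dev {
            ...
            struct wfx_queue tx_queue[4];   /* one per AC */
            ...
    };

    /* After: each interface owns its own set of queues */
    struct wfx_vif {
            ...
            struct wfx_queue tx_queue[4];   /* one per AC */
            ...
    };

The scheduler (wfx_tx_queues_get_skb()) now collects the queues of all
the vifs and sorts them by number of pending frames, so the traffic of
one vif cannot starve the others.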
Signed-off-by: Jérôme Pouiller <jerome.pouiller@silabs.com>
Link: https://lore.kernel.org/r/20200701150707.222985-2-Jerome.Pouiller@silabs.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7566103e
@@ -408,7 +408,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
 
 	// Auxiliary operations
 	wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
-	wfx_tx_queues_put(wvif->wdev, skb);
+	wfx_tx_queues_put(wvif, skb);
 	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
 		schedule_work(&wvif->update_tim_work);
 	wfx_bh_request_tx(wvif->wdev);
@@ -539,7 +539,7 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
 	const struct wfx_tx_priv *tx_priv;
 	struct sk_buff *skb;
 
-	skb = wfx_pending_get(wvif->wdev, arg->packet_id);
+	skb = wfx_pending_get(wvif, arg->packet_id);
 	if (!skb) {
 		dev_warn(wvif->wdev->dev, "received unknown packet_id (%#.8x) from chip\n",
 			 arg->packet_id);
@@ -582,35 +582,51 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
 	wfx_skb_dtor(wvif, skb);
 }
 
-void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-	       u32 queues, bool drop)
+static void wfx_flush_vif(struct wfx_vif *wvif, u32 queues,
+			  struct sk_buff_head *dropped)
 {
-	struct wfx_dev *wdev = hw->priv;
-	struct sk_buff_head dropped;
 	struct wfx_queue *queue;
-	struct wfx_vif *wvif;
-	struct hif_msg *hif;
-	struct sk_buff *skb;
-	int vif_id = -1;
 	int i;
 
-	if (vif)
-		vif_id = ((struct wfx_vif *)vif->drv_priv)->id;
-	skb_queue_head_init(&dropped);
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 		if (!(BIT(i) & queues))
 			continue;
-		queue = &wdev->tx_queue[i];
-		if (drop)
-			wfx_tx_queue_drop(wdev, queue, vif_id, &dropped);
-		if (wdev->chip_frozen)
+		queue = &wvif->tx_queue[i];
+		if (dropped)
+			wfx_tx_queue_drop(wvif, queue, dropped);
+	}
+	if (wvif->wdev->chip_frozen)
+		return;
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		if (!(BIT(i) & queues))
 			continue;
-		if (wait_event_timeout(wdev->tx_dequeue,
-				       wfx_tx_queue_empty(wdev, queue, vif_id),
+		queue = &wvif->tx_queue[i];
+		if (wait_event_timeout(wvif->wdev->tx_dequeue,
+				       wfx_tx_queue_empty(wvif, queue),
 				       msecs_to_jiffies(1000)) <= 0)
-			dev_warn(wdev->dev,
+			dev_warn(wvif->wdev->dev,
 				 "frames queued while flushing tx queues?");
 	}
+}
+
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	       u32 queues, bool drop)
+{
+	struct wfx_dev *wdev = hw->priv;
+	struct sk_buff_head dropped;
+	struct wfx_vif *wvif;
+	struct hif_msg *hif;
+	struct sk_buff *skb;
+
+	skb_queue_head_init(&dropped);
+	if (vif) {
+		wvif = (struct wfx_vif *)vif->drv_priv;
+		wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
+	} else {
+		wvif = NULL;
+		while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+			wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
+	}
 	wfx_tx_flush(wdev);
 	if (wdev->chip_frozen)
 		wfx_pending_drop(wdev, &dropped);
@@ -623,4 +639,3 @@ void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			wfx_skb_dtor(wvif, skb);
 	}
 }
-
@@ -349,8 +349,9 @@ struct wfx_dev *wfx_init_common(struct device *dev,
 	init_completion(&wdev->firmware_ready);
 	INIT_DELAYED_WORK(&wdev->cooling_timeout_work,
 			  wfx_cooling_timeout_work);
+	skb_queue_head_init(&wdev->tx_pending);
+	init_waitqueue_head(&wdev->tx_dequeue);
 	wfx_init_hif_cmd(&wdev->hif_cmd);
-	wfx_tx_queues_init(wdev);
 
 	if (devm_add_action_or_reset(dev, wfx_free_common, wdev))
 		return NULL;
@@ -57,84 +57,57 @@ void wfx_tx_lock_flush(struct wfx_dev *wdev)
 	wfx_tx_flush(wdev);
 }
 
-void wfx_tx_queues_init(struct wfx_dev *wdev)
+void wfx_tx_queues_init(struct wfx_vif *wvif)
 {
 	int i;
 
-	skb_queue_head_init(&wdev->tx_pending);
-	init_waitqueue_head(&wdev->tx_dequeue);
 	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
-		skb_queue_head_init(&wdev->tx_queue[i].normal);
-		skb_queue_head_init(&wdev->tx_queue[i].cab);
+		skb_queue_head_init(&wvif->tx_queue[i].normal);
+		skb_queue_head_init(&wvif->tx_queue[i].cab);
 	}
 }
 
-void wfx_tx_queues_check_empty(struct wfx_dev *wdev)
+void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
 {
 	int i;
 
-	WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
 	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
-		WARN_ON(atomic_read(&wdev->tx_queue[i].pending_frames));
-		WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].normal));
-		WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].cab));
+		WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
+		WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].normal));
+		WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].cab));
 	}
 }
 
-static bool __wfx_tx_queue_empty(struct wfx_dev *wdev,
-				 struct sk_buff_head *skb_queue, int vif_id)
+bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
 {
-	struct hif_msg *hif_msg;
-	struct sk_buff *skb;
-
-	spin_lock_bh(&skb_queue->lock);
-	skb_queue_walk(skb_queue, skb) {
-		hif_msg = (struct hif_msg *)skb->data;
-		if (vif_id < 0 || hif_msg->interface == vif_id) {
-			spin_unlock_bh(&skb_queue->lock);
-			return false;
-		}
-	}
-	spin_unlock_bh(&skb_queue->lock);
-	return true;
+	return skb_queue_empty(&queue->normal) && skb_queue_empty(&queue->cab);
 }
-
-bool wfx_tx_queue_empty(struct wfx_dev *wdev,
-			struct wfx_queue *queue, int vif_id)
-{
-	return __wfx_tx_queue_empty(wdev, &queue->normal, vif_id) &&
-	       __wfx_tx_queue_empty(wdev, &queue->cab, vif_id);
-}
 
-static void __wfx_tx_queue_drop(struct wfx_dev *wdev,
-				struct sk_buff_head *skb_queue, int vif_id,
+static void __wfx_tx_queue_drop(struct wfx_vif *wvif,
+				struct sk_buff_head *skb_queue,
 				struct sk_buff_head *dropped)
 {
 	struct sk_buff *skb, *tmp;
-	struct hif_msg *hif_msg;
 
 	spin_lock_bh(&skb_queue->lock);
 	skb_queue_walk_safe(skb_queue, skb, tmp) {
-		hif_msg = (struct hif_msg *)skb->data;
-		if (vif_id < 0 || hif_msg->interface == vif_id) {
-			__skb_unlink(skb, skb_queue);
-			skb_queue_head(dropped, skb);
-		}
+		__skb_unlink(skb, skb_queue);
+		skb_queue_head(dropped, skb);
 	}
 	spin_unlock_bh(&skb_queue->lock);
 }
 
-void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
-		       int vif_id, struct sk_buff_head *dropped)
+void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
+		       struct sk_buff_head *dropped)
 {
-	__wfx_tx_queue_drop(wdev, &queue->cab, vif_id, dropped);
-	__wfx_tx_queue_drop(wdev, &queue->normal, vif_id, dropped);
-	wake_up(&wdev->tx_dequeue);
+	__wfx_tx_queue_drop(wvif, &queue->cab, dropped);
+	__wfx_tx_queue_drop(wvif, &queue->normal, dropped);
+	wake_up(&wvif->wdev->tx_dequeue);
}
 
-void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb)
+void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb)
 {
-	struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+	struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 
 	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
@@ -146,39 +119,45 @@ void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb)
 void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
 {
 	struct wfx_queue *queue;
+	struct wfx_vif *wvif;
+	struct hif_msg *hif;
 	struct sk_buff *skb;
 
 	WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
 	     __func__);
 	while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
-		queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
-		WARN_ON(skb_get_queue_mapping(skb) > 3);
-		WARN_ON(!atomic_read(&queue->pending_frames));
-		atomic_dec(&queue->pending_frames);
+		hif = (struct hif_msg *)skb->data;
+		wvif = wdev_to_wvif(wdev, hif->interface);
+		if (wvif) {
+			queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
+			WARN_ON(skb_get_queue_mapping(skb) > 3);
+			WARN_ON(!atomic_read(&queue->pending_frames));
+			atomic_dec(&queue->pending_frames);
+		}
 		skb_queue_head(dropped, skb);
 	}
 }
 
-struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
+struct sk_buff *wfx_pending_get(struct wfx_vif *wvif, u32 packet_id)
 {
 	struct wfx_queue *queue;
 	struct hif_req_tx *req;
 	struct sk_buff *skb;
 
-	spin_lock_bh(&wdev->tx_pending.lock);
-	skb_queue_walk(&wdev->tx_pending, skb) {
+	spin_lock_bh(&wvif->wdev->tx_pending.lock);
+	skb_queue_walk(&wvif->wdev->tx_pending, skb) {
 		req = wfx_skb_txreq(skb);
 		if (req->packet_id == packet_id) {
-			spin_unlock_bh(&wdev->tx_pending.lock);
-			queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+			spin_unlock_bh(&wvif->wdev->tx_pending.lock);
+			queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
 			WARN_ON(skb_get_queue_mapping(skb) > 3);
 			WARN_ON(!atomic_read(&queue->pending_frames));
 			atomic_dec(&queue->pending_frames);
-			skb_unlink(skb, &wdev->tx_pending);
+			skb_unlink(skb, &wvif->wdev->tx_pending);
 			return skb;
 		}
 	}
-	spin_unlock_bh(&wdev->tx_pending.lock);
+	spin_unlock_bh(&wvif->wdev->tx_pending.lock);
 	WARN(1, "cannot find packet in pending queue");
 	return NULL;
 }
@@ -221,7 +200,6 @@ unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
 bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
 {
-	struct wfx_dev *wdev = wvif->wdev;
 	int i;
 
 	if (wvif->vif->type != NL80211_IFTYPE_AP)
@@ -229,33 +207,39 @@ bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
 	for (i = 0; i < IEEE80211_NUM_ACS; ++i)
 		// Note: since only AP can have mcast frames in queue and only
 		// one vif can be AP, all queued frames has same interface id
-		if (!skb_queue_empty_lockless(&wdev->tx_queue[i].cab))
+		if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
 			return true;
 	return false;
 }
 
 static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
 {
-	struct wfx_queue *sorted_queues[IEEE80211_NUM_ACS];
+	struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
+	int i, j, num_queues = 0;
 	struct wfx_vif *wvif;
 	struct hif_msg *hif;
 	struct sk_buff *skb;
-	int i, j;
 
-	// bubble sort
-	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-		sorted_queues[i] = &wdev->tx_queue[i];
-		for (j = i; j > 0; j--)
-			if (atomic_read(&sorted_queues[j]->pending_frames) <
-			    atomic_read(&sorted_queues[j - 1]->pending_frames))
-				swap(sorted_queues[j - 1], sorted_queues[j]);
+	// sort the queues
+	wvif = NULL;
+	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+			WARN_ON(num_queues >= ARRAY_SIZE(queues));
+			queues[num_queues] = &wvif->tx_queue[i];
+			for (j = num_queues; j > 0; j--)
+				if (atomic_read(&queues[j]->pending_frames) <
+				    atomic_read(&queues[j - 1]->pending_frames))
+					swap(queues[j - 1], queues[j]);
+			num_queues++;
+		}
 	}
+
 	wvif = NULL;
 	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
 		if (!wvif->after_dtim_tx_allowed)
 			continue;
-		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-			skb = skb_dequeue(&sorted_queues[i]->cab);
+		for (i = 0; i < num_queues; i++) {
+			skb = skb_dequeue(&queues[i]->cab);
 			if (!skb)
 				continue;
 			// Note: since only AP can have mcast frames in queue
@@ -263,21 +247,20 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
 			// same interface id
 			hif = (struct hif_msg *)skb->data;
 			WARN_ON(hif->interface != wvif->id);
-			WARN_ON(sorted_queues[i] !=
-				&wdev->tx_queue[skb_get_queue_mapping(skb)]);
-			atomic_inc(&sorted_queues[i]->pending_frames);
+			WARN_ON(queues[i] !=
+				&wvif->tx_queue[skb_get_queue_mapping(skb)]);
+			atomic_inc(&queues[i]->pending_frames);
 			return skb;
 		}
 		// No more multicast to sent
 		wvif->after_dtim_tx_allowed = false;
 		schedule_work(&wvif->update_tim_work);
 	}
-	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-		skb = skb_dequeue(&sorted_queues[i]->normal);
+
+	for (i = 0; i < num_queues; i++) {
+		skb = skb_dequeue(&queues[i]->normal);
 		if (skb) {
-			WARN_ON(sorted_queues[i] !=
-				&wdev->tx_queue[skb_get_queue_mapping(skb)]);
-			atomic_inc(&sorted_queues[i]->pending_frames);
+			atomic_inc(&queues[i]->pending_frames);
 			return skb;
 		}
 	}
@@ -25,18 +25,17 @@ void wfx_tx_unlock(struct wfx_dev *wdev);
 void wfx_tx_flush(struct wfx_dev *wdev);
 void wfx_tx_lock_flush(struct wfx_dev *wdev);
 
-void wfx_tx_queues_init(struct wfx_dev *wdev);
-void wfx_tx_queues_check_empty(struct wfx_dev *wdev);
+void wfx_tx_queues_init(struct wfx_vif *wvif);
+void wfx_tx_queues_check_empty(struct wfx_vif *wvif);
 bool wfx_tx_queues_has_cab(struct wfx_vif *wvif);
-void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb);
+void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb);
 struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
 
-bool wfx_tx_queue_empty(struct wfx_dev *wdev, struct wfx_queue *queue,
-			int vif_id);
-void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
-		       int vif_id, struct sk_buff_head *dropped);
+bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue);
+void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
+		       struct sk_buff_head *dropped);
 
-struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
+struct sk_buff *wfx_pending_get(struct wfx_vif *wvif, u32 packet_id);
 void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped);
 unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
 					  struct sk_buff *skb);
@@ -805,6 +805,7 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 	hif_set_macaddr(wvif, vif->addr);
 
+	wfx_tx_queues_init(wvif);
 	wfx_tx_policy_init(wvif);
 	wvif = NULL;
 	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
@@ -823,6 +824,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 	struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
 
 	wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
+	wfx_tx_queues_check_empty(wvif);
 
 	mutex_lock(&wdev->conf_mutex);
 	WARN(wvif->link_id_map != 1, "corrupted state");
@@ -855,5 +857,5 @@ void wfx_stop(struct ieee80211_hw *hw)
 {
 	struct wfx_dev *wdev = hw->priv;
 
-	wfx_tx_queues_check_empty(wdev);
+	WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
 }
@@ -48,7 +48,6 @@ struct wfx_dev {
 	struct mutex conf_mutex;
 	struct wfx_hif_cmd hif_cmd;
-	struct wfx_queue tx_queue[4];
 	struct sk_buff_head tx_pending;
 	wait_queue_head_t tx_dequeue;
 	atomic_t tx_lock;
@@ -75,6 +74,7 @@ struct wfx_vif {
 	struct delayed_work beacon_loss_work;
 
+	struct wfx_queue tx_queue[4];
 	struct tx_policy_cache tx_policy_cache;
 	struct work_struct tx_policy_upload_work;