Commit 2a9269b1 authored by Lorenzo Bianconi, committed by Kalle Valo

mt7601u: use ieee80211_rx_list to pass frames to the network stack as a batch

Similar to the mt76 driver, rely on ieee80211_rx_list in order to
improve the icache footprint.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/5c72fa2dda45c1ae3f285af80c02f3db23341d85.1610580222.git.lorenzo@kernel.org
parent 6598f32d
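
For context, the pattern the hunks below adopt is the list-based RX batching that mac80211 exposes through ieee80211_rx_list(): each decoded frame is queued on a caller-owned list, and the whole burst is then flushed to the network stack with a single netif_receive_skb_list() call. The following is a minimal sketch of that flow, not the driver's actual code (which is in the diff); rx_burst() and the "frames" queue are hypothetical names used only for illustration, and the caller is assumed to run in softirq context (e.g. an RX tasklet), as mt7601u's RX processing does.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/mac80211.h>

/* Illustrative only: rx_burst() and the "frames" queue are made up for
 * this sketch; the driver's real per-segment logic is in the diff below.
 */
static void rx_burst(struct ieee80211_hw *hw, struct sk_buff_head *frames)
{
	struct sk_buff *skb;
	LIST_HEAD(list);	/* frames accumulated for this burst */

	local_bh_disable();
	rcu_read_lock();

	/* Queue each frame on the local list instead of delivering it
	 * immediately with ieee80211_rx().
	 */
	while ((skb = __skb_dequeue(frames)))
		ieee80211_rx_list(hw, NULL, skb, &list);

	rcu_read_unlock();
	local_bh_enable();

	/* Hand the whole batch to the stack in a single pass. */
	netif_receive_skb_list(&list);
}

Delivering the burst with one netif_receive_skb_list() call means the stack's receive path is traversed once per aggregate rather than once per frame, which is where the icache improvement cited in the commit message comes from.
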
@@ -74,7 +74,8 @@ mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
 }
 
 static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
-				   u32 seg_len, struct page *p)
+				   u32 seg_len, struct page *p,
+				   struct list_head *list)
 {
 	struct sk_buff *skb;
 	struct mt7601u_rxwi *rxwi;
@@ -104,9 +105,13 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
 	if (!skb)
 		return;
 
-	spin_lock(&dev->mac_lock);
-	ieee80211_rx(dev->hw, skb);
-	spin_unlock(&dev->mac_lock);
+	local_bh_disable();
+	rcu_read_lock();
+
+	ieee80211_rx_list(dev->hw, NULL, skb, list);
+
+	rcu_read_unlock();
+	local_bh_enable();
 }
 
 static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
@@ -130,6 +135,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
 	u32 seg_len, data_len = e->urb->actual_length;
 	u8 *data = page_address(e->p);
 	struct page *new_p = NULL;
+	LIST_HEAD(list);
 	int cnt = 0;
 
 	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
@@ -140,7 +146,8 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
 		new_p = dev_alloc_pages(MT_RX_ORDER);
 
 	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
-		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+		mt7601u_rx_process_seg(dev, data, seg_len,
+				       new_p ? e->p : NULL, &list);
 
 		data_len -= seg_len;
 		data += seg_len;
@@ -150,6 +157,8 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
 	if (cnt > 1)
 		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);
 
+	netif_receive_skb_list(&list);
+
 	if (new_p) {
 		/* we have one extra ref from the allocator */
 		__free_pages(e->p, MT_RX_ORDER);