Commit d7d4ea9a authored by Stanislaw Gruszka, committed by Felix Fietkau

mt76usb: remove mt76u_buf and use urb directly

Put urb pointer in mt76_queue_entry directly instead of mt76u_buf
structure.
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 279ade99
...@@ -89,10 +89,6 @@ struct mt76_tx_info { ...@@ -89,10 +89,6 @@ struct mt76_tx_info {
u32 info; u32 info;
}; };
struct mt76u_buf {
struct urb *urb;
};
struct mt76_queue_entry { struct mt76_queue_entry {
union { union {
void *buf; void *buf;
...@@ -100,7 +96,7 @@ struct mt76_queue_entry { ...@@ -100,7 +96,7 @@ struct mt76_queue_entry {
}; };
union { union {
struct mt76_txwi_cache *txwi; struct mt76_txwi_cache *txwi;
struct mt76u_buf ubuf; struct urb *urb;
}; };
enum mt76_txq_id qid; enum mt76_txq_id qid;
bool schedule; bool schedule;
......
...@@ -283,12 +283,11 @@ mt76u_set_endpoints(struct usb_interface *intf, ...@@ -283,12 +283,11 @@ mt76u_set_endpoints(struct usb_interface *intf,
} }
static int static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf, int nsgs, mt76u_fill_rx_sg(struct mt76_dev *dev, struct urb *urb, int nsgs,
gfp_t gfp) gfp_t gfp)
{ {
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int sglen = SKB_WITH_OVERHEAD(q->buf_size); int sglen = SKB_WITH_OVERHEAD(q->buf_size);
struct urb *urb = buf->urb;
int i; int i;
...@@ -323,44 +322,43 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf, int nsgs, ...@@ -323,44 +322,43 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf, int nsgs,
static int static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q, mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76u_buf *buf, int nsgs, gfp_t gfp) struct urb *urb, int nsgs, gfp_t gfp)
{ {
if (dev->usb.sg_en) { if (dev->usb.sg_en) {
return mt76u_fill_rx_sg(dev, buf, nsgs, gfp); return mt76u_fill_rx_sg(dev, urb, nsgs, gfp);
} else { } else {
buf->urb->transfer_buffer_length = urb->transfer_buffer_length = SKB_WITH_OVERHEAD(q->buf_size);
SKB_WITH_OVERHEAD(q->buf_size); urb->transfer_buffer = page_frag_alloc(&q->rx_page,
buf->urb->transfer_buffer = q->buf_size, gfp);
page_frag_alloc(&q->rx_page, q->buf_size, gfp); return urb->transfer_buffer ? 0 : -ENOMEM;
return buf->urb->transfer_buffer ? 0 : -ENOMEM;
} }
} }
static int static int
mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf) mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{ {
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb;
buf->urb = usb_alloc_urb(0, GFP_KERNEL); urb = usb_alloc_urb(0, GFP_KERNEL);
if (!buf->urb) if (!urb)
return -ENOMEM; return -ENOMEM;
e->urb = urb;
if (dev->usb.sg_en) { if (dev->usb.sg_en) {
buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE, urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
sizeof(*buf->urb->sg), sizeof(*urb->sg), GFP_KERNEL);
GFP_KERNEL); if (!urb->sg)
if (!buf->urb->sg)
return -ENOMEM; return -ENOMEM;
sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE); sg_init_table(urb->sg, MT_SG_MAX_SIZE);
} }
return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL); return mt76u_refill_rx(dev, q, urb, MT_SG_MAX_SIZE, GFP_KERNEL);
} }
static void mt76u_buf_free(struct mt76u_buf *buf) static void mt76u_urb_free(struct urb *urb)
{ {
struct urb *urb = buf->urb;
int i; int i;
for (i = 0; i < urb->num_sgs; i++) for (i = 0; i < urb->num_sgs; i++)
...@@ -369,12 +367,12 @@ static void mt76u_buf_free(struct mt76u_buf *buf) ...@@ -369,12 +367,12 @@ static void mt76u_buf_free(struct mt76u_buf *buf)
if (urb->transfer_buffer) if (urb->transfer_buffer)
skb_free_frag(urb->transfer_buffer); skb_free_frag(urb->transfer_buffer);
usb_free_urb(buf->urb); usb_free_urb(urb);
} }
static void static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index, mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
struct mt76u_buf *buf, usb_complete_t complete_fn, struct urb *urb, usb_complete_t complete_fn,
void *context) void *context)
{ {
struct usb_device *udev = to_usb_device(dev->dev); struct usb_device *udev = to_usb_device(dev->dev);
...@@ -385,27 +383,27 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index, ...@@ -385,27 +383,27 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
else else
pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]); pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
buf->urb->dev = udev; urb->dev = udev;
buf->urb->pipe = pipe; urb->pipe = pipe;
buf->urb->complete = complete_fn; urb->complete = complete_fn;
buf->urb->context = context; urb->context = context;
} }
static inline struct mt76u_buf static inline struct urb *
*mt76u_get_next_rx_entry(struct mt76_queue *q) mt76u_get_next_rx_entry(struct mt76_queue *q)
{ {
struct mt76u_buf *buf = NULL; struct urb *urb = NULL;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&q->lock, flags); spin_lock_irqsave(&q->lock, flags);
if (q->queued > 0) { if (q->queued > 0) {
buf = &q->entry[q->head].ubuf; urb = q->entry[q->head].urb;
q->head = (q->head + 1) % q->ndesc; q->head = (q->head + 1) % q->ndesc;
q->queued--; q->queued--;
} }
spin_unlock_irqrestore(&q->lock, flags); spin_unlock_irqrestore(&q->lock, flags);
return buf; return urb;
} }
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len) static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
...@@ -424,10 +422,9 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len) ...@@ -424,10 +422,9 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
} }
static int static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf) mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{ {
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb = buf->urb;
u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer; u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length; int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
int len, nsgs = 1; int len, nsgs = 1;
...@@ -488,7 +485,7 @@ static void mt76u_complete_rx(struct urb *urb) ...@@ -488,7 +485,7 @@ static void mt76u_complete_rx(struct urb *urb)
} }
spin_lock_irqsave(&q->lock, flags); spin_lock_irqsave(&q->lock, flags);
if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch")) if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
goto out; goto out;
q->tail = (q->tail + 1) % q->ndesc; q->tail = (q->tail + 1) % q->ndesc;
...@@ -499,37 +496,37 @@ static void mt76u_complete_rx(struct urb *urb) ...@@ -499,37 +496,37 @@ static void mt76u_complete_rx(struct urb *urb)
} }
static int static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct mt76u_buf *buf) mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{ {
mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, buf, mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
mt76u_complete_rx, dev); mt76u_complete_rx, dev);
trace_submit_urb(dev, buf->urb); trace_submit_urb(dev, urb);
return usb_submit_urb(buf->urb, GFP_ATOMIC); return usb_submit_urb(urb, GFP_ATOMIC);
} }
static void mt76u_rx_tasklet(unsigned long data) static void mt76u_rx_tasklet(unsigned long data)
{ {
struct mt76_dev *dev = (struct mt76_dev *)data; struct mt76_dev *dev = (struct mt76_dev *)data;
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct mt76u_buf *buf; struct urb *urb;
int err, count; int err, count;
rcu_read_lock(); rcu_read_lock();
while (true) { while (true) {
buf = mt76u_get_next_rx_entry(q); urb = mt76u_get_next_rx_entry(q);
if (!buf) if (!urb)
break; break;
count = mt76u_process_rx_entry(dev, buf); count = mt76u_process_rx_entry(dev, urb);
if (count > 0) { if (count > 0) {
err = mt76u_refill_rx(dev, q, buf, count, err = mt76u_refill_rx(dev, q, urb, count,
GFP_ATOMIC); GFP_ATOMIC);
if (err < 0) if (err < 0)
break; break;
} }
mt76u_submit_rx_buf(dev, buf); mt76u_submit_rx_buf(dev, urb);
} }
mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL); mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
...@@ -544,7 +541,7 @@ int mt76u_submit_rx_buffers(struct mt76_dev *dev) ...@@ -544,7 +541,7 @@ int mt76u_submit_rx_buffers(struct mt76_dev *dev)
spin_lock_irqsave(&q->lock, flags); spin_lock_irqsave(&q->lock, flags);
for (i = 0; i < q->ndesc; i++) { for (i = 0; i < q->ndesc; i++) {
err = mt76u_submit_rx_buf(dev, &q->entry[i].ubuf); err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
if (err < 0) if (err < 0)
break; break;
} }
...@@ -576,7 +573,7 @@ static int mt76u_alloc_rx(struct mt76_dev *dev) ...@@ -576,7 +573,7 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE; q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
q->ndesc = MT_NUM_RX_ENTRIES; q->ndesc = MT_NUM_RX_ENTRIES;
for (i = 0; i < q->ndesc; i++) { for (i = 0; i < q->ndesc; i++) {
err = mt76u_buf_alloc(dev, &q->entry[i].ubuf); err = mt76u_urb_alloc(dev, &q->entry[i]);
if (err < 0) if (err < 0)
return err; return err;
} }
...@@ -591,7 +588,7 @@ static void mt76u_free_rx(struct mt76_dev *dev) ...@@ -591,7 +588,7 @@ static void mt76u_free_rx(struct mt76_dev *dev)
int i; int i;
for (i = 0; i < q->ndesc; i++) for (i = 0; i < q->ndesc; i++)
mt76u_buf_free(&q->entry[i].ubuf); mt76u_urb_free(q->entry[i].urb);
if (!q->rx_page.va) if (!q->rx_page.va)
return; return;
...@@ -607,7 +604,7 @@ static void mt76u_stop_rx(struct mt76_dev *dev) ...@@ -607,7 +604,7 @@ static void mt76u_stop_rx(struct mt76_dev *dev)
int i; int i;
for (i = 0; i < q->ndesc; i++) for (i = 0; i < q->ndesc; i++)
usb_kill_urb(q->entry[i].ubuf.urb); usb_kill_urb(q->entry[i].urb);
} }
static void mt76u_tx_tasklet(unsigned long data) static void mt76u_tx_tasklet(unsigned long data)
...@@ -724,7 +721,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, ...@@ -724,7 +721,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct ieee80211_sta *sta) struct ieee80211_sta *sta)
{ {
struct mt76_queue *q = dev->q_tx[qid].q; struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76u_buf *buf; struct urb *urb;
u16 idx = q->tail; u16 idx = q->tail;
int err; int err;
...@@ -737,13 +734,13 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, ...@@ -737,13 +734,13 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
return err; return err;
q->entry[idx].done = false; q->entry[idx].done = false;
buf = &q->entry[idx].ubuf; urb = q->entry[idx].urb;
err = mt76u_tx_setup_buffers(dev, skb, buf->urb); err = mt76u_tx_setup_buffers(dev, skb, urb);
if (err < 0) if (err < 0)
return err; return err;
mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx), mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
buf, mt76u_complete_tx, &q->entry[idx]); urb, mt76u_complete_tx, &q->entry[idx]);
q->tail = (q->tail + 1) % q->ndesc; q->tail = (q->tail + 1) % q->ndesc;
q->entry[idx].skb = skb; q->entry[idx].skb = skb;
...@@ -754,14 +751,14 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, ...@@ -754,14 +751,14 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{ {
struct mt76u_buf *buf; struct urb *urb;
int err; int err;
while (q->first != q->tail) { while (q->first != q->tail) {
buf = &q->entry[q->first].ubuf; urb = q->entry[q->first].urb;
trace_submit_urb(dev, buf->urb); trace_submit_urb(dev, urb);
err = usb_submit_urb(buf->urb, GFP_ATOMIC); err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) { if (err < 0) {
if (err == -ENODEV) if (err == -ENODEV)
set_bit(MT76_REMOVED, &dev->state); set_bit(MT76_REMOVED, &dev->state);
...@@ -776,7 +773,7 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) ...@@ -776,7 +773,7 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
static int mt76u_alloc_tx(struct mt76_dev *dev) static int mt76u_alloc_tx(struct mt76_dev *dev)
{ {
struct mt76u_buf *buf; struct urb *urb;
struct mt76_queue *q; struct mt76_queue *q;
int i, j; int i, j;
...@@ -804,19 +801,18 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) ...@@ -804,19 +801,18 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
q->ndesc = MT_NUM_TX_ENTRIES; q->ndesc = MT_NUM_TX_ENTRIES;
for (j = 0; j < q->ndesc; j++) { for (j = 0; j < q->ndesc; j++) {
buf = &q->entry[j].ubuf; urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
buf->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!buf->urb)
return -ENOMEM; return -ENOMEM;
q->entry[j].urb = urb;
if (!dev->usb.sg_en) if (!dev->usb.sg_en)
continue; continue;
buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE, urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
sizeof(struct scatterlist), sizeof(struct scatterlist),
GFP_KERNEL); GFP_KERNEL);
if (!buf->urb->sg) if (!urb->sg)
return -ENOMEM; return -ENOMEM;
} }
} }
...@@ -831,7 +827,7 @@ static void mt76u_free_tx(struct mt76_dev *dev) ...@@ -831,7 +827,7 @@ static void mt76u_free_tx(struct mt76_dev *dev)
for (i = 0; i < IEEE80211_NUM_ACS; i++) { for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->q_tx[i].q; q = dev->q_tx[i].q;
for (j = 0; j < q->ndesc; j++) for (j = 0; j < q->ndesc; j++)
usb_free_urb(q->entry[j].ubuf.urb); usb_free_urb(q->entry[j].urb);
} }
} }
...@@ -843,7 +839,7 @@ static void mt76u_stop_tx(struct mt76_dev *dev) ...@@ -843,7 +839,7 @@ static void mt76u_stop_tx(struct mt76_dev *dev)
for (i = 0; i < IEEE80211_NUM_ACS; i++) { for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->q_tx[i].q; q = dev->q_tx[i].q;
for (j = 0; j < q->ndesc; j++) for (j = 0; j < q->ndesc; j++)
usb_kill_urb(q->entry[j].ubuf.urb); usb_kill_urb(q->entry[j].urb);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment