Commit c757096e authored by Marc Kleine-Budde

can: rx-offload: add skb queue for use during ISR

Adding an skb to the skb_queue in rx-offload requires taking a lock.

This commit avoids the per-skb locking by adding an unlocked skb queue
that is spliced into the locked queue once, at the end of the ISR.
Taking the lock a single time at the end of the ISR should be OK, as
the hardware is empty at that point and not about to overflow.

Link: https://lore.kernel.org/r/20210724204745.736053-2-mkl@pengutronix.de
Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
Co-developed-by: Kurt Van Dijck <dev.kurt@vandijck-laurijssen.be>
Signed-off-by: Kurt Van Dijck <dev.kurt@vandijck-laurijssen.be>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
parent a08ec5fe
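
The resulting driver-side pattern is sketched below. The driver, its
priv struct and the IRQ handler name are hypothetical; only the two
can_rx_offload_*() calls are the real API touched by this series:

/* Hypothetical ISR showing the intended call pattern: skbs are queued
 * onto the unlocked skb_irq_queue while the hardware is drained, and
 * the single locked splice plus NAPI schedule happens in
 * can_rx_offload_irq_finish().
 */
static irqreturn_t foo_can_irq(int irq, void *dev_id)
{
        struct foo_priv *priv = dev_id;

        /* fills offload->skb_irq_queue, no skb_queue.lock taken */
        can_rx_offload_irq_offload_fifo(&priv->offload);

        /* one spin_lock_irqsave() + splice, then NAPI is scheduled */
        can_rx_offload_irq_finish(&priv->offload);

        return IRQ_HANDLED;
}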
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2014      Protonic Holland,
  *                         David Jander
- * Copyright (C) 2014-2017 Pengutronix,
+ * Copyright (C) 2014-2021 Pengutronix,
  *                         Marc Kleine-Budde <kernel@pengutronix.de>
  */
@@ -174,10 +174,8 @@ can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
 int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
                                          u64 pending)
 {
-        struct sk_buff_head skb_queue;
         unsigned int i;
-
-        __skb_queue_head_init(&skb_queue);
+        int received = 0;

         for (i = offload->mb_first;
              can_rx_offload_le(offload, i, offload->mb_last);
@@ -191,26 +189,12 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
                 if (IS_ERR_OR_NULL(skb))
                         continue;

-                __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
-        }
-
-        if (!skb_queue_empty(&skb_queue)) {
-                unsigned long flags;
-                u32 queue_len;
-
-                spin_lock_irqsave(&offload->skb_queue.lock, flags);
-                skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
-                spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
-                queue_len = skb_queue_len(&offload->skb_queue);
-                if (queue_len > offload->skb_queue_len_max / 8)
-                        netdev_dbg(offload->dev, "%s: queue_len=%d\n",
-                                   __func__, queue_len);
-
-                can_rx_offload_schedule(offload);
-        }
+                __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+                                     can_rx_offload_compare);
+                received++;
+        }

-        return skb_queue_len(&skb_queue);
+        return received;
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
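
__skb_queue_add_sort() keeps the per-mailbox skbs ordered by capture
time so frames are delivered in order. The compare callback it is given
is unchanged by this patch; paraphrased from the same file, it looks
roughly like this:

/* Paraphrase of the existing compare callback (not part of this diff):
 * subtracting two u32 timestamps and returning the result as int keeps
 * the ordering stable across u32 overflow of the hardware counter.
 */
static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
        const struct can_rx_offload_cb *cb_a = can_rx_offload_get_cb(a);
        const struct can_rx_offload_cb *cb_b = can_rx_offload_get_cb(b);

        return cb_b->timestamp - cb_a->timestamp;
}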
@@ -226,13 +210,10 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
                 if (!skb)
                         break;

-                skb_queue_tail(&offload->skb_queue, skb);
+                __skb_queue_tail(&offload->skb_irq_queue, skb);
                 received++;
         }

-        if (received)
-                can_rx_offload_schedule(offload);
-
         return received;
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
@@ -241,7 +222,6 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
                                 struct sk_buff *skb, u32 timestamp)
 {
         struct can_rx_offload_cb *cb;
-        unsigned long flags;

         if (skb_queue_len(&offload->skb_queue) >
             offload->skb_queue_len_max) {
@@ -252,11 +232,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
         cb = can_rx_offload_get_cb(skb);
         cb->timestamp = timestamp;

-        spin_lock_irqsave(&offload->skb_queue.lock, flags);
-        __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
-        spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
-        can_rx_offload_schedule(offload);
+        __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+                             can_rx_offload_compare);

         return 0;
 }
@@ -295,13 +272,33 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                 return -ENOBUFS;
         }

-        skb_queue_tail(&offload->skb_queue, skb);
-        can_rx_offload_schedule(offload);
+        __skb_queue_tail(&offload->skb_irq_queue, skb);

         return 0;
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

+void can_rx_offload_irq_finish(struct can_rx_offload *offload)
+{
+        unsigned long flags;
+        int queue_len;
+
+        if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+                return;
+
+        spin_lock_irqsave(&offload->skb_queue.lock, flags);
+        skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+        spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+        queue_len = skb_queue_len(&offload->skb_queue);
+        if (queue_len > offload->skb_queue_len_max / 8)
+                netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+                           __func__, queue_len);
+
+        can_rx_offload_schedule(offload);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);
+
 static int can_rx_offload_init_queue(struct net_device *dev,
                                      struct can_rx_offload *offload,
                                      unsigned int weight)
@@ -312,6 +309,7 @@ static int can_rx_offload_init_queue(struct net_device *dev,
         offload->skb_queue_len_max = 2 << fls(weight);
         offload->skb_queue_len_max *= 4;
         skb_queue_head_init(&offload->skb_queue);
+        __skb_queue_head_init(&offload->skb_irq_queue);

         netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
@@ -373,5 +371,6 @@ void can_rx_offload_del(struct can_rx_offload *offload)
 {
         netif_napi_del(&offload->napi);
         skb_queue_purge(&offload->skb_queue);
+        __skb_queue_purge(&offload->skb_irq_queue);
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_del);
@@ -1198,6 +1198,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                 }
         }

+        if (handled)
+                can_rx_offload_irq_finish(&priv->offload);
+
         return handled;
 }
...
@@ -1058,6 +1058,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
                 }
         }

+        if (cdev->is_peripheral)
+                can_rx_offload_irq_finish(&cdev->offload);
+
         return IRQ_HANDLED;
 }
...
@@ -2195,8 +2195,10 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
                         FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
                                   priv->regs_status.intf);

-                if (!(intf_pending))
+                if (!(intf_pending)) {
+                        can_rx_offload_irq_finish(&priv->offload);
                         return handled;
+                }

                 /* Some interrupts must be ACKed in the
                  * MCP251XFD_REG_INT register.
@@ -2296,6 +2298,8 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
         } while (1);

 out_fail:
+        can_rx_offload_irq_finish(&priv->offload);
+
         netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
                    err, priv->regs_status.intf);
         mcp251xfd_dump(priv);
...
@@ -786,6 +786,8 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
                 int_status = hecc_read(priv, HECC_CANGIF0);
         }

+        can_rx_offload_irq_finish(&priv->offload);
+
         return IRQ_HANDLED;
 }
...
@@ -20,6 +20,7 @@ struct can_rx_offload {
                             bool drop);

         struct sk_buff_head skb_queue;
+        struct sk_buff_head skb_irq_queue;
         u32 skb_queue_len_max;

         unsigned int mb_first;
@@ -48,6 +49,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
                                          unsigned int *frame_len_ptr);
 int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                               struct sk_buff *skb);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload);
 void can_rx_offload_del(struct can_rx_offload *offload);
 void can_rx_offload_enable(struct can_rx_offload *offload);
...
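
The patch leans on the locked/unlocked split in the generic
sk_buff_head API: the plain helpers take the queue's spinlock
internally, while the __-prefixed ones leave serialization to the
caller (here, the hard IRQ handler is the only producer of
skb_irq_queue). Simplified from net/core/skbuff.c, not part of this
commit:

/* skb_queue_tail() is just __skb_queue_tail() under list->lock; the
 * unlocked variant is what the ISR paths above call directly.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}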