Commit e668673e authored by Markus Schneider-Pargmann, committed by Marc Kleine-Budde

can: m_can: Use the workqueue as queue

The current implementation uses a workqueue for peripheral chips to
submit work. Only a single work item is queued and in use at any time.

To be able to keep more than one transmit in flight at a time, prepare
the workqueue to support multiple concurrent work items.

Each work item now has separate storage for an skb and a pointer to the
cdev. This ensures that each work item can be processed individually.
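
Concretely, each transmit gets its own small container, taken
round-robin from an array of tx_fifo_size entries (this mirrors the
m_can.h hunk below):

	struct m_can_tx_op {
		struct m_can_classdev *cdev;
		struct work_struct work;
		struct sk_buff *skb;
	};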

The workqueue is replaced by an ordered workqueue, which makes sure that
only a single worker processes the queued items and that they are
processed in the order they were enqueued. This removes most of the
concurrency a workqueue normally offers, which is not necessary for this
driver.
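
For reference, the allocation this switches to (an ordered workqueue has
max_active == 1, so items execute strictly one at a time, in queueing
order):

	cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
					      WQ_FREEZABLE | WQ_MEM_RECLAIM);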

The cleanup functions have to be adapted a bit to handle this new
mechanism.
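
As a sketch of the adapted cleanup (matching the first m_can.c hunk
below), m_can_clean() now drops every pending tx_op and frees all echo
skbs instead of handling a single tx_skb:

	if (cdev->tx_ops) {
		for (int i = 0; i != cdev->tx_fifo_size; ++i) {
			if (!cdev->tx_ops[i].skb)
				continue;

			net->stats.tx_errors++;
			cdev->tx_ops[i].skb = NULL;
		}
	}

	for (int i = 0; i != cdev->can.echo_skb_max; ++i)
		can_free_echo_skb(cdev->net, i, NULL);
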
Signed-off-by: Markus Schneider-Pargmann <msp@baylibre.com>
Link: https://lore.kernel.org/all/20240207093220.2681425-11-msp@baylibre.com
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
parent 80c5bac0
drivers/net/can/m_can/m_can.c

@@ -485,17 +485,18 @@ static void m_can_clean(struct net_device *net)
 {
 	struct m_can_classdev *cdev = netdev_priv(net);
 
-	if (cdev->tx_skb) {
-		u32 putidx = 0;
+	if (cdev->tx_ops) {
+		for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+			if (!cdev->tx_ops[i].skb)
+				continue;
 
-		net->stats.tx_errors++;
-		if (cdev->version > 30)
-			putidx = FIELD_GET(TXFQS_TFQPI_MASK,
-					   m_can_read(cdev, M_CAN_TXFQS));
-		can_free_echo_skb(cdev->net, putidx, NULL);
-		cdev->tx_skb = NULL;
+			net->stats.tx_errors++;
+			cdev->tx_ops[i].skb = NULL;
+		}
 	}
 
+	for (int i = 0; i != cdev->can.echo_skb_max; ++i)
+		can_free_echo_skb(cdev->net, i, NULL);
 }
 
 /* For peripherals, pass skb to rx-offload, which will push skb from
@@ -1685,8 +1686,9 @@ static int m_can_close(struct net_device *dev)
 	m_can_clk_stop(cdev);
 	free_irq(dev->irq, dev);
 
+	m_can_clean(dev);
+
 	if (cdev->is_peripheral) {
-		cdev->tx_skb = NULL;
 		destroy_workqueue(cdev->tx_wq);
 		cdev->tx_wq = NULL;
 		can_rx_offload_disable(&cdev->offload);
@@ -1713,20 +1715,18 @@ static int m_can_next_echo_skb_occupied(struct net_device *dev, u32 putidx)
 	return !!cdev->can.echo_skb[next_idx];
 }
 
-static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
+				    struct sk_buff *skb)
 {
-	struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
 	u8 len_padded = DIV_ROUND_UP(cf->len, 4);
 	struct m_can_fifo_element fifo_element;
 	struct net_device *dev = cdev->net;
-	struct sk_buff *skb = cdev->tx_skb;
 	u32 cccr, fdflags;
 	u32 txfqs;
 	int err;
 	u32 putidx;
 
-	cdev->tx_skb = NULL;
-
 	/* Generate ID field for TX buffer Element */
 	/* Common to all supported M_CAN versions */
 	if (cf->can_id & CAN_EFF_FLAG) {
@@ -1850,10 +1850,31 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
 
 static void m_can_tx_work_queue(struct work_struct *ws)
 {
-	struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
-						   tx_work);
+	struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
+	struct m_can_classdev *cdev = op->cdev;
+	struct sk_buff *skb = op->skb;
 
-	m_can_tx_handler(cdev);
+	op->skb = NULL;
+
+	m_can_tx_handler(cdev, skb);
+}
+
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb)
+{
+	cdev->tx_ops[cdev->next_tx_op].skb = skb;
+	queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
+
+	++cdev->next_tx_op;
+	if (cdev->next_tx_op >= cdev->tx_fifo_size)
+		cdev->next_tx_op = 0;
+}
+
+static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
+					       struct sk_buff *skb)
+{
+	netif_stop_queue(cdev->net);
+
+	m_can_tx_queue_skb(cdev, skb);
+
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
@@ -1864,30 +1885,15 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
 	if (can_dev_dropped_skb(dev, skb))
 		return NETDEV_TX_OK;
 
-	if (cdev->is_peripheral) {
-		if (cdev->tx_skb) {
-			netdev_err(dev, "hard_xmit called while tx busy\n");
-			return NETDEV_TX_BUSY;
-		}
-
-		if (cdev->can.state == CAN_STATE_BUS_OFF) {
-			m_can_clean(dev);
-		} else {
-			/* Need to stop the queue to avoid numerous requests
-			 * from being sent. Suggested improvement is to create
-			 * a queueing mechanism that will queue the skbs and
-			 * process them in order.
-			 */
-			cdev->tx_skb = skb;
-			netif_stop_queue(cdev->net);
-			queue_work(cdev->tx_wq, &cdev->tx_work);
-		}
-	} else {
-		cdev->tx_skb = skb;
-		return m_can_tx_handler(cdev);
+	if (cdev->can.state == CAN_STATE_BUS_OFF) {
+		m_can_clean(cdev->net);
+		return NETDEV_TX_OK;
 	}
 
-	return NETDEV_TX_OK;
+	if (cdev->is_peripheral)
+		return m_can_start_peripheral_xmit(cdev, skb);
+	else
+		return m_can_tx_handler(cdev, skb);
 }
 
 static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
@@ -1927,15 +1933,17 @@ static int m_can_open(struct net_device *dev)
 
 	/* register interrupt handler */
 	if (cdev->is_peripheral) {
-		cdev->tx_skb = NULL;
-		cdev->tx_wq = alloc_workqueue("mcan_wq",
-					      WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+		cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
+						      WQ_FREEZABLE | WQ_MEM_RECLAIM);
 		if (!cdev->tx_wq) {
 			err = -ENOMEM;
 			goto out_wq_fail;
 		}
 
-		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+		for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+			cdev->tx_ops[i].cdev = cdev;
+			INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
+		}
 
 		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
 					   IRQF_ONESHOT,
@@ -2228,6 +2236,19 @@ int m_can_class_register(struct m_can_classdev *cdev)
 {
 	int ret;
 
+	cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
+					cdev->mcfg[MRAM_TXE].num));
+	if (cdev->is_peripheral) {
+		cdev->tx_ops =
+			devm_kzalloc(cdev->dev,
+				     cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
+				     GFP_KERNEL);
+		if (!cdev->tx_ops) {
+			dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n");
+			return -ENOMEM;
+		}
+	}
+
 	if (cdev->pm_clock_support) {
 		ret = m_can_clk_start(cdev);
 		if (ret)
drivers/net/can/m_can/m_can.h

@@ -70,6 +70,12 @@ struct m_can_ops {
 	int (*init)(struct m_can_classdev *cdev);
 };
 
+struct m_can_tx_op {
+	struct m_can_classdev *cdev;
+	struct work_struct work;
+	struct sk_buff *skb;
+};
+
 struct m_can_classdev {
 	struct can_priv can;
 	struct can_rx_offload offload;
@@ -80,8 +86,6 @@ struct m_can_classdev {
 	struct clk *cclk;
 
 	struct workqueue_struct *tx_wq;
-	struct work_struct tx_work;
-	struct sk_buff *tx_skb;
 	struct phy *transceiver;
 
 	ktime_t irq_timer_wait;
@@ -102,7 +106,11 @@ struct m_can_classdev {
 	u32 tx_coalesce_usecs_irq;
 
 	// Store this internally to avoid fetch delays on peripheral chips
-	int tx_fifo_putidx;
+	u32 tx_fifo_putidx;
+
+	struct m_can_tx_op *tx_ops;
+	int tx_fifo_size;
+	int next_tx_op;
 
 	struct mram_cfg mcfg[MRAM_CFG_NUM];