Commit c5e12ac3 authored by Vladimir Oltean, committed by David S. Miller

net: mscc: ocelot: serialize access to the injection/extraction groups

As explained by Horatiu Vultur in commit 603ead96 ("net: sparx5: Add
spinlock for frame transmission from CPU"), which is for a similar
hardware design, multiple CPUs can simultaneously perform injection
or extraction. There are only 2 register groups for injection and 2
for extraction, and the driver only uses one of each. So we'd better
serialize access using spin locks, otherwise frame corruption is
possible.

Note that unlike in sparx5, FDMA in ocelot does not have this issue
because struct ocelot_fdma_tx_ring already contains an xmit_lock.

I guess this is mostly a problem for NXP LS1028A, as that is dual core.
I don't think VSC7514 is. So I'm blaming the commit where LS1028A (aka
the felix DSA driver) started using register-based packet injection and
extraction.

Fixes: 0a6f17c6 ("net: dsa: tag_ocelot_8021q: add support for PTP timestamping")
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e1b9e802
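
For context, here is a minimal sketch (not part of this commit) of the caller-side pattern the new helpers enable, mirroring felix_port_deferred_xmit() in the diff below. The function name example_inject() and its error handling are hypothetical; the helpers themselves are the ones added by this patch.

static void example_inject(struct ocelot *ocelot, int port, u32 rew_op,
			   struct sk_buff *skb)
{
	/* Serialize all CPU access to injection register group 0 */
	ocelot_lock_inj_grp(ocelot, 0);

	if (!ocelot_can_inject(ocelot, 0)) {
		/* FIFO not ready; give up rather than interleave with
		 * another CPU's frame words.
		 */
		ocelot_unlock_inj_grp(ocelot, 0);
		kfree_skb(skb);
		return;
	}

	/* The whole frame is written to the injection registers while
	 * the lock is held, so another CPU cannot corrupt it.
	 */
	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
	ocelot_unlock_inj_grp(ocelot, 0);

	consume_skb(skb);
}
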
@@ -528,7 +528,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
* so we need to be careful that there are no extra frames to be
* dequeued over MMIO, since we would never know to discard them.
*/
ocelot_lock_xtr_grp_bh(ocelot, 0);
ocelot_drain_cpu_queue(ocelot, 0);
ocelot_unlock_xtr_grp_bh(ocelot, 0);
return 0;
}
@@ -1518,6 +1520,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
int port = xmit_work->dp->index;
int retries = 10;
ocelot_lock_inj_grp(ocelot, 0);
do {
if (ocelot_can_inject(ocelot, 0))
break;
@@ -1526,6 +1530,7 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
} while (--retries);
if (!retries) {
ocelot_unlock_inj_grp(ocelot, 0);
dev_err(ocelot->dev, "port %d failed to inject skb\n",
port);
ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
@@ -1535,6 +1540,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
ocelot_unlock_inj_grp(ocelot, 0);
consume_skb(skb);
kfree(xmit_work);
}
@@ -1694,6 +1701,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot)
if (!felix->info->quirk_no_xtr_irq)
return false;
ocelot_lock_xtr_grp(ocelot, grp);
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
@@ -1730,6 +1739,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot)
ocelot_drain_cpu_queue(ocelot, 0);
}
ocelot_unlock_xtr_grp(ocelot, grp);
return true;
}
@@ -1099,6 +1099,48 @@ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
}
EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp)
__acquires(&ocelot->inj_lock)
{
spin_lock(&ocelot->inj_lock);
}
EXPORT_SYMBOL_GPL(ocelot_lock_inj_grp);
void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp)
__releases(&ocelot->inj_lock)
{
spin_unlock(&ocelot->inj_lock);
}
EXPORT_SYMBOL_GPL(ocelot_unlock_inj_grp);
void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp)
__acquires(&ocelot->xtr_lock)
{
spin_lock(&ocelot->xtr_lock);
}
EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp);
void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp)
__releases(&ocelot->xtr_lock)
{
spin_unlock(&ocelot->xtr_lock);
}
EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp);
void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp)
__acquires(&ocelot->xtr_lock)
{
spin_lock_bh(&ocelot->xtr_lock);
}
EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp_bh);
void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp)
__releases(&ocelot->xtr_lock)
{
spin_unlock_bh(&ocelot->xtr_lock);
}
EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp_bh);
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
{
u64 timestamp, src_port, len;
@@ -1109,6 +1151,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
u32 val, *buf;
int err;
lockdep_assert_held(&ocelot->xtr_lock);
err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
if (err)
return err;
@@ -1184,6 +1228,8 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
{
u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
lockdep_assert_held(&ocelot->inj_lock);
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
return false;
if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
@@ -1236,6 +1282,8 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 ifh[OCELOT_TAG_LEN / 4];
unsigned int i, count, last;
lockdep_assert_held(&ocelot->inj_lock);
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
@@ -1272,6 +1320,8 @@ EXPORT_SYMBOL(ocelot_port_inject_frame);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
{
lockdep_assert_held(&ocelot->xtr_lock);
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
ocelot_read_rix(ocelot, QS_XTR_RD, grp);
}
@@ -2954,6 +3004,8 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->fwd_domain_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
spin_lock_init(&ocelot->inj_lock);
spin_lock_init(&ocelot->xtr_lock);
ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
if (!ocelot->owq)
@@ -51,6 +51,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
struct ocelot *ocelot = arg;
int grp = 0, err;
ocelot_lock_xtr_grp(ocelot, grp);
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
@@ -69,6 +71,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
if (err < 0)
ocelot_drain_cpu_queue(ocelot, 0);
ocelot_unlock_xtr_grp(ocelot, grp);
return IRQ_HANDLED;
}
@@ -813,6 +813,9 @@ struct ocelot {
const u32 *const *map;
struct list_head stats_regions;
spinlock_t inj_lock;
spinlock_t xtr_lock;
u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
int packet_buffer_size;
int num_frame_refs;
@@ -966,6 +969,12 @@ void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 val, u32 reg, u32 offset);
/* Packet I/O */
void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp);
void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp);
void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp);
void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp);
void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp);
void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp);
bool ocelot_can_inject(struct ocelot *ocelot, int grp);
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb);