Commit 8d94a873 authored by David S. Miller

Merge branch 'PTP-support-for-the-SJA1105-DSA-driver'

Vladimir Oltean says:

====================
PTP support for the SJA1105 DSA driver

This patchset adds the following:

 - A timecounter/cyclecounter based PHC for the free-running
   timestamping clock of this switch (a minimal sketch of this
   abstraction follows the list below).

 - A state machine implemented in the DSA tagger for SJA1105, which
   keeps track of metadata follow-up Ethernet frames (the switch's way
   of transmitting RX timestamps). A sketch of the pairing logic is
   also included a bit further below.
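
To illustrate the first point, here is a minimal, hypothetical sketch of
how a free-running hardware counter can be exposed through the kernel's
timecounter/cyclecounter abstraction. This is not the driver code itself;
the foo_* names and the stubbed register read are placeholders, while the
8 ns tick and the mult/shift choice mirror the values used later in
sja1105_ptp.c:

  #include <linux/timecounter.h>
  #include <linux/ktime.h>

  /* Stand-in for an SPI read of the switch's free-running counter */
  static u64 foo_read_hw_counter(const struct cyclecounter *cc)
  {
          return 0;
  }

  static struct cyclecounter foo_cc = {
          .read  = foo_read_hw_counter,
          .mask  = CYCLECOUNTER_MASK(64),
          /* mult/shift chosen so that one counter tick counts as 8 ns */
          .mult  = 8U << 28,
          .shift = 28,
  };

  static struct timecounter foo_tc;

  /* Start the software time domain at the current system time */
  static void foo_phc_init(void)
  {
          timecounter_init(&foo_tc, &foo_cc, ktime_to_ns(ktime_get_real()));
  }

  /* Translate a raw counter sample into nanoseconds in that domain */
  static u64 foo_cycles_to_ns(u64 cycles)
  {
          return timecounter_cyc2time(&foo_tc, cycles);
  }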

Clock manipulations on the actual hardware PTP clock will have to be
implemented anyway, for the TTEthernet block and the time-based ingress
policer.
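
Below is a minimal sketch of the meta-frame handling mentioned in the
second bullet above: a timestampable frame is held back by the tagger
until its follow-up meta frame arrives, and the partial RX timestamp is
then attached to it. All names here (foo_tagger_state, is_meta_frame(),
frame_is_timestampable(), meta_tstamp() and the use of skb->cb) are
hypothetical and only meant to show the shape of the state machine, not
the actual tagger code:

  #include <linux/skbuff.h>
  #include <linux/spinlock.h>

  struct foo_tagger_state {
          struct sk_buff *stampable_skb;  /* frame waiting for its meta */
          spinlock_t lock;
  };

  /* Hypothetical helpers: classify frames and extract the partial RX
   * timestamp carried in a meta frame's payload.
   */
  bool is_meta_frame(const struct sk_buff *skb);
  bool frame_is_timestampable(const struct sk_buff *skb);
  u64 meta_tstamp(const struct sk_buff *skb);

  /* Returns the skb to deliver up the stack, or NULL if it is held back
   * or consumed.
   */
  static struct sk_buff *foo_tagger_rcv(struct foo_tagger_state *st,
                                        struct sk_buff *skb)
  {
          spin_lock(&st->lock);

          if (is_meta_frame(skb)) {
                  struct sk_buff *held = st->stampable_skb;

                  st->stampable_skb = NULL;
                  if (held)
                          /* Attach the partial timestamp to the frame
                           * received right before this meta frame.
                           */
                          *(u64 *)held->cb = meta_tstamp(skb);
                  kfree_skb(skb);         /* the meta frame is consumed */
                  spin_unlock(&st->lock);
                  return held;
          }

          if (frame_is_timestampable(skb)) {
                  /* Hold it until its meta follow-up arrives, dropping
                   * any stale frame still waiting.
                   */
                  kfree_skb(st->stampable_skb);
                  st->stampable_skb = skb;
                  skb = NULL;
          }

          spin_unlock(&st->lock);
          return skb;     /* ordinary traffic passes through unchanged */
  }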

v3 patchset can be found at:
https://lkml.org/lkml/2019/6/4/954

Changes from v3:

- Made it compile with the SJA1105 DSA driver and PTP driver as modules.

- Reworked/simplified/fixed some issues in 03/17
  (dsa_8021q_remove_header) and added an ASCII image that
  illustrates the transformation that is taking place.

- Removed a useless check for sja1105_is_link_local from 16/17 (RX
  timestamping), which made the previous 08/17 patch ("Move
  sja1105_is_link_local to include/linux") unnecessary, so that patch was
  dropped.

v2 patchset can be found at:
https://lkml.org/lkml/2019/6/2/146

Changes from v2:

- Broke previous 09/10 patch (timestamping) into multiple smaller
  patches.

- Every patch in the series compiles.

v1 patchset can be found at:
https://lkml.org/lkml/2019/5/28/1093

Changes from v1:

- Removed the addition of the DSA .can_timestamp callback.

- Waiting for meta frames is done completely inside the tagger, and all
  frames emitted on RX are already partially timestamped.

- Added a global data structure for the tagger common to all ports.

- Made PTP work with ports in standalone mode, by limiting use of the
  DMAC-mangling "incl_srcpt" mode to the case where ports are bridged,
  i.e. when the DSA master is already promiscuous and can receive
  anything. Also changed meta frames to be sent to the
  01-80-C2-00-00-0E DMAC.

- Made some progress w.r.t. observed negative path delay.  Apparently it
  only appears when the delay mechanism is the delay request-response
  (end-to-end) one. If peer delay is used (-P), the path delay is
  positive and appears reasonable for a 1000Base-T link (485 ns in
  steady state).

  SJA1105 as PTP slave (OC) with E2E path delay:

ptp4l[55.600]: master offset          8 s2 freq  +83677 path delay     -2390
ptp4l[56.600]: master offset         17 s2 freq  +83688 path delay     -2391
ptp4l[57.601]: master offset          6 s2 freq  +83682 path delay     -2391
ptp4l[58.601]: master offset         -1 s2 freq  +83677 path delay     -2391

  SJA1105 as PTP slave (OC) with P2P path delay:

ptp4l[48.343]: master offset          5 s2 freq  +83715 path delay       484
ptp4l[48.468]: master offset         -3 s2 freq  +83705 path delay       485
ptp4l[48.593]: master offset          0 s2 freq  +83708 path delay       485
ptp4l[48.718]: master offset          1 s2 freq  +83710 path delay       485
ptp4l[48.844]: master offset          1 s2 freq  +83710 path delay       485
ptp4l[48.969]: master offset         -5 s2 freq  +83702 path delay       485
ptp4l[49.094]: master offset          3 s2 freq  +83712 path delay       485
ptp4l[49.219]: master offset          4 s2 freq  +83714 path delay       485
ptp4l[49.344]: master offset         -5 s2 freq  +83702 path delay       485
ptp4l[49.469]: master offset          3 s2 freq  +83713 path delay       487
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a6cdeeb1 a602afd2
@@ -16,3 +16,10 @@ tristate "NXP SJA1105 Ethernet switch family support"
- SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
- SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
- SJA1105S (Gen. 2, SGMII, TT-Ethernet)
config NET_DSA_SJA1105_PTP
tristate "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
depends on NET_DSA_SJA1105
help
This enables support for timestamping and PTP clock manipulations in
the SJA1105 DSA driver.
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
obj-$(CONFIG_NET_DSA_SJA1105_PTP) += sja1105_ptp.o
sja1105-objs := \
sja1105_spi.o \
...
@@ -5,6 +5,8 @@
#ifndef _SJA1105_H
#define _SJA1105_H
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/dsa/sja1105.h>
#include <net/dsa.h>
#include <linux/mutex.h>
@@ -27,6 +29,11 @@ struct sja1105_regs {
u64 rgu;
u64 config;
u64 rmii_pll1;
u64 ptp_control;
u64 ptpclk;
u64 ptpclkrate;
u64 ptptsclk;
u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
u64 cgu_idiv[SJA1105_NUM_PORTS];
u64 rgmii_pad_mii_tx[SJA1105_NUM_PORTS];
@@ -50,9 +57,19 @@ struct sja1105_info {
* switch core and device_id)
*/
u64 part_no;
/* E/T and P/Q/R/S have partial timestamps of different sizes.
* They must be reconstructed on both families anyway to get the full
* 64-bit values back.
*/
int ptp_ts_bits;
/* Also SPI commands are of different sizes to retrieve
* the egress timestamps.
*/
int ptpegr_ts_bytes;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
int (*ptp_cmd)(const void *ctx, const void *data);
int (*reset_cmd)(const void *ctx, const void *data);
int (*setup_rgmii_delay)(const void *ctx, int port);
/* Prototypes from include/net/dsa.h */
@@ -72,13 +89,25 @@ struct sja1105_private {
struct spi_device *spidev;
struct dsa_switch *ds;
struct sja1105_port ports[SJA1105_NUM_PORTS];
struct ptp_clock_info ptp_caps;
struct ptp_clock *clock;
/* The cycle counter translates the PTP timestamps (based on
* a free-running counter) into a software time domain.
*/
struct cyclecounter tstamp_cc;
struct timecounter tstamp_tc;
struct delayed_work refresh_work;
/* Serializes all operations on the cycle counter */
struct mutex ptp_lock;
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
struct sja1105_tagger_data tagger_data;
};
#include "sja1105_dynamic_config.h"
#include "sja1105_ptp.h"
struct sja1105_spi_message {
u64 access;
...
@@ -378,6 +378,7 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
.addr = 0x38,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {0},
[BLK_IDX_AVB_PARAMS] = {0},
[BLK_IDX_GENERAL_PARAMS] = {
.entry_packing = sja1105et_general_params_entry_packing,
.cmd_packing = sja1105et_general_params_cmd_packing,
@@ -441,6 +442,7 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
.addr = 0x38,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {0},
[BLK_IDX_AVB_PARAMS] = {0},
[BLK_IDX_GENERAL_PARAMS] = {
.entry_packing = sja1105et_general_params_entry_packing,
.cmd_packing = sja1105et_general_params_cmd_packing,
...
@@ -389,14 +389,14 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.mirr_ptacu = 0,
.switchid = priv->ds->index,
/* Priority queue for link-local frames trapped to CPU */
-.hostprio = 0,
+.hostprio = 7,
.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
-.incl_srcpt1 = true,
+.incl_srcpt1 = false,
.send_meta1 = false,
.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
-.incl_srcpt0 = true,
+.incl_srcpt0 = false,
.send_meta0 = false,
/* The destination for traffic matching mac_fltres1 and
* mac_fltres0 on all ports except host_port. Such traffic
@@ -508,6 +508,39 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
return 0;
}
static int sja1105_init_avb_params(struct sja1105_private *priv,
bool on)
{
struct sja1105_avb_params_entry *avb;
struct sja1105_table *table;
table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
/* Discard previous AVB Parameters Table */
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
/* Configure the reception of meta frames only if requested */
if (!on)
return 0;
table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
avb = table->entries;
avb->destmeta = SJA1105_META_DMAC;
avb->srcmeta = SJA1105_META_SMAC;
return 0;
}
static int sja1105_static_config_load(struct sja1105_private *priv,
struct sja1105_dt_port *ports)
{
@@ -546,6 +579,9 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
if (rc < 0)
return rc;
rc = sja1105_init_general_params(priv);
if (rc < 0)
return rc;
rc = sja1105_init_avb_params(priv, false);
if (rc < 0)
return rc;
@@ -1289,23 +1325,6 @@ static int sja1105_static_config_reload(struct sja1105_private *priv)
return rc;
}
/* The TPID setting belongs to the General Parameters table,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
*/
static int sja1105_change_tpid(struct sja1105_private *priv,
u16 tpid, u16 tpid2)
{
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
general_params->tpid = tpid;
general_params->tpid2 = tpid2;
return sja1105_static_config_reload(priv);
}
static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
struct sja1105_mac_config_entry *mac;
@@ -1424,17 +1443,41 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
/* The TPID setting belongs to the General Parameters table,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
*/
static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
u16 tpid, tpid2;
int rc;
-if (enabled)
+if (enabled) {
/* Enable VLAN filtering. */
-rc = sja1105_change_tpid(priv, ETH_P_8021Q, ETH_P_8021AD);
+tpid = ETH_P_8021AD;
-else
+tpid2 = ETH_P_8021Q;
} else {
/* Disable VLAN filtering. */
-rc = sja1105_change_tpid(priv, ETH_P_SJA1105, ETH_P_SJA1105);
+tpid = ETH_P_SJA1105;
tpid2 = ETH_P_SJA1105;
}
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
general_params->tpid = tpid;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
general_params->tpid2 = tpid2;
/* When VLAN filtering is on, we need to at least be able to
* decode management traffic through the "backup plan".
*/
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
rc = sja1105_static_config_reload(priv);
if (rc)
dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
@@ -1523,6 +1566,11 @@ static int sja1105_setup(struct dsa_switch *ds)
return rc;
}
rc = sja1105_ptp_clock_register(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
return rc;
}
/* Create and send configuration down to device */
rc = sja1105_static_config_load(priv, ports);
if (rc < 0) {
@@ -1552,8 +1600,16 @@ static int sja1105_setup(struct dsa_switch *ds)
return sja1105_setup_8021q_tagging(ds, true);
}
static void sja1105_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
cancel_work_sync(&priv->tagger_data.rxtstamp_work);
skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
}
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
-struct sk_buff *skb)
+struct sk_buff *skb, bool takets)
{
struct sja1105_mgmt_entry mgmt_route = {0};
struct sja1105_private *priv = ds->priv;
@@ -1566,6 +1622,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
mgmt_route.destports = BIT(port);
mgmt_route.enfport = 1;
mgmt_route.tsreg = 0;
mgmt_route.takets = takets;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
slot, &mgmt_route, true);
@@ -1617,7 +1675,11 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_port *sp = &priv->ports[port];
struct skb_shared_hwtstamps shwt = {0};
int slot = sp->mgmt_slot;
struct sk_buff *clone;
u64 now, ts;
int rc;
/* The tragic fact about the switch having 4x2 slots for installing
* management routes is that all of them except one are actually
@@ -1635,8 +1697,36 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
*/
mutex_lock(&priv->mgmt_lock);
-sja1105_mgmt_xmit(ds, port, slot, skb);
+/* The clone, if there, was made by dsa_skb_tx_timestamp */
clone = DSA_SKB_CB(skb)->clone;
sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
if (!clone)
goto out;
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
mutex_lock(&priv->ptp_lock);
now = priv->tstamp_cc.read(&priv->tstamp_cc);
rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
if (rc < 0) {
dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
kfree_skb(clone);
goto out_unlock_ptp;
}
ts = sja1105_tstamp_reconstruct(priv, now, ts);
ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
shwt.hwtstamp = ns_to_ktime(ts);
skb_complete_tx_timestamp(clone, &shwt);
out_unlock_ptp:
mutex_unlock(&priv->ptp_lock);
out:
mutex_unlock(&priv->mgmt_lock);
return NETDEV_TX_OK;
}
@@ -1665,15 +1755,178 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
return sja1105_static_config_reload(priv);
}
/* Caller must hold priv->tagger_data.meta_lock */
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
bool on)
{
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
int rc;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
general_params->send_meta1 = on;
general_params->send_meta0 = on;
rc = sja1105_init_avb_params(priv, on);
if (rc < 0)
return rc;
/* Initialize the meta state machine to a known state */
if (priv->tagger_data.stampable_skb) {
kfree_skb(priv->tagger_data.stampable_skb);
priv->tagger_data.stampable_skb = NULL;
}
return sja1105_static_config_reload(priv);
}
static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
bool rx_on;
int rc;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
priv->ports[port].hwts_tx_en = false;
break;
case HWTSTAMP_TX_ON:
priv->ports[port].hwts_tx_en = true;
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
rx_on = false;
break;
default:
rx_on = true;
break;
}
if (rx_on != priv->tagger_data.hwts_rx_en) {
spin_lock(&priv->tagger_data.meta_lock);
rc = sja1105_change_rxtstamping(priv, rx_on);
spin_unlock(&priv->tagger_data.meta_lock);
if (rc < 0) {
dev_err(ds->dev,
"Failed to change RX timestamping: %d\n", rc);
return -EFAULT;
}
priv->tagger_data.hwts_rx_en = rx_on;
}
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
return -EFAULT;
return 0;
}
static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
config.flags = 0;
if (priv->ports[port].hwts_tx_en)
config.tx_type = HWTSTAMP_TX_ON;
else
config.tx_type = HWTSTAMP_TX_OFF;
if (priv->tagger_data.hwts_rx_en)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
config.rx_filter = HWTSTAMP_FILTER_NONE;
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
#define to_tagger(d) \
container_of((d), struct sja1105_tagger_data, rxtstamp_work)
#define to_sja1105(d) \
container_of((d), struct sja1105_private, tagger_data)
static void sja1105_rxtstamp_work(struct work_struct *work)
{
struct sja1105_tagger_data *data = to_tagger(work);
struct sja1105_private *priv = to_sja1105(data);
struct sk_buff *skb;
u64 now;
mutex_lock(&priv->ptp_lock);
now = priv->tstamp_cc.read(&priv->tstamp_cc);
while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
u64 ts;
*shwt = (struct skb_shared_hwtstamps) {0};
ts = SJA1105_SKB_CB(skb)->meta_tstamp;
ts = sja1105_tstamp_reconstruct(priv, now, ts);
ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
shwt->hwtstamp = ns_to_ktime(ts);
netif_rx_ni(skb);
}
mutex_unlock(&priv->ptp_lock);
}
/* Called from dsa_skb_defer_rx_timestamp */
bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_tagger_data *data = &priv->tagger_data;
if (!data->hwts_rx_en)
return false;
/* We need to read the full PTP clock to reconstruct the Rx
* timestamp. For that we need a sleepable context.
*/
skb_queue_tail(&data->skb_rxtstamp_queue, skb);
schedule_work(&data->rxtstamp_work);
return true;
}
/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
* the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
* callback, where we will timestamp it synchronously.
*/
bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_port *sp = &priv->ports[port];
if (!sp->hwts_tx_en)
return false;
return true;
}
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
.teardown = sja1105_teardown,
.set_ageing_time = sja1105_set_ageing_time,
.phylink_validate = sja1105_phylink_validate,
.phylink_mac_config = sja1105_mac_config,
.get_strings = sja1105_get_strings,
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
@@ -1688,6 +1941,10 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
.port_deferred_xmit = sja1105_port_deferred_xmit,
.port_hwtstamp_get = sja1105_hwtstamp_get,
.port_hwtstamp_set = sja1105_hwtstamp_set,
.port_rxtstamp = sja1105_port_rxtstamp,
.port_txtstamp = sja1105_port_txtstamp,
};
static int sja1105_check_device_id(struct sja1105_private *priv)
@@ -1728,6 +1985,7 @@ static int sja1105_check_device_id(struct sja1105_private *priv)
static int sja1105_probe(struct spi_device *spi)
{
struct sja1105_tagger_data *tagger_data;
struct device *dev = &spi->dev;
struct sja1105_private *priv;
struct dsa_switch *ds;
@@ -1782,12 +2040,17 @@ static int sja1105_probe(struct spi_device *spi)
ds->priv = priv;
priv->ds = ds;
tagger_data = &priv->tagger_data;
skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
/* Connections between dsa_port and sja1105_port */
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
struct sja1105_port *sp = &priv->ports[i];
ds->ports[i].priv = sp;
sp->dp = &ds->ports[i];
sp->data = tagger_data;
}
mutex_init(&priv->mgmt_lock);
@@ -1798,6 +2061,7 @@ static int sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
sja1105_ptp_clock_unregister(priv);
dsa_unregister_switch(priv->ds);
sja1105_static_config_free(&priv->static_config);
return 0;
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#include "sja1105.h"
/* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and
* therefore scaled_ppm between [-2,147,483,648, 2,147,483,647].
* Set the maximum supported ppb to a round value smaller than the maximum.
*
* Percentually speaking, this is a +/- 0.032x adjustment of the
* free-running counter (0.968x to 1.032x).
*/
#define SJA1105_MAX_ADJ_PPB 32000000
#define SJA1105_SIZE_PTP_CMD 4
/* Timestamps are in units of 8 ns clock ticks (equivalent to a fixed
* 125 MHz clock) so the scale factor (MULT / SHIFT) needs to be 8.
* Furthermore, wisely pick SHIFT as 28 bits, which translates
* MULT into 2^31 (0x80000000). This is the same value around which
* the hardware PTPCLKRATE is centered, so the same ppb conversion
* arithmetic can be reused.
*/
#define SJA1105_CC_SHIFT 28
#define SJA1105_CC_MULT (8 << SJA1105_CC_SHIFT)
/* Having 33 bits of cycle counter left until a 64-bit overflow during delta
* conversion, we multiply this by the 8 ns counter resolution and arrive at
* a comfortable 68.71 second refresh interval until the delta would cause
* an integer overflow, in absence of any other readout.
* Approximate to 1 minute.
*/
#define SJA1105_REFRESH_INTERVAL (HZ * 60)
/* This range is actually +/- SJA1105_MAX_ADJ_PPB
* divided by 1000 (ppb -> ppm) and with a 16-bit
* "fractional" part (actually fixed point).
* |
* v
* Convert scaled_ppm from the +/- ((10^6) << 16) range
* into the +/- (1 << 31) range.
*
* This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC)
* and defines the scaling factor between scaled_ppm and the actual
* frequency adjustments (both cycle counter and hardware).
*
* ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16)
* simplifies to
* ptpclkrate = scaled_ppm * 2^9 / 5^6
*/
#define SJA1105_CC_MULT_NUM (1 << 9)
#define SJA1105_CC_MULT_DEM 15625
#define ptp_to_sja1105(d) container_of((d), struct sja1105_private, ptp_caps)
#define cc_to_sja1105(d) container_of((d), struct sja1105_private, tstamp_cc)
#define dw_to_sja1105(d) container_of((d), struct sja1105_private, refresh_work)
struct sja1105_ptp_cmd {
u64 resptp; /* reset */
};
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
struct sja1105_private *priv = ds->priv;
/* Called during cleanup */
if (!priv->clock)
return -ENODEV;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
info->phc_index = ptp_clock_index(priv->clock);
return 0;
}
EXPORT_SYMBOL_GPL(sja1105_get_ts_info);
int sja1105et_ptp_cmd(const void *ctx, const void *data)
{
const struct sja1105_ptp_cmd *cmd = data;
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
const int size = SJA1105_SIZE_PTP_CMD;
u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
/* No need to keep this as part of the structure */
u64 valid = 1;
sja1105_pack(buf, &valid, 31, 31, size);
sja1105_pack(buf, &cmd->resptp, 2, 2, size);
return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
buf, SJA1105_SIZE_PTP_CMD);
}
EXPORT_SYMBOL_GPL(sja1105et_ptp_cmd);
int sja1105pqrs_ptp_cmd(const void *ctx, const void *data)
{
const struct sja1105_ptp_cmd *cmd = data;
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
const int size = SJA1105_SIZE_PTP_CMD;
u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
/* No need to keep this as part of the structure */
u64 valid = 1;
sja1105_pack(buf, &valid, 31, 31, size);
sja1105_pack(buf, &cmd->resptp, 3, 3, size);
return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
buf, SJA1105_SIZE_PTP_CMD);
}
EXPORT_SYMBOL_GPL(sja1105pqrs_ptp_cmd);
/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
* around in 0.135 seconds, and 32 bits for P/Q/R/S, wrapping around in 34.35
* seconds).
*
* This receives the RX or TX MAC timestamps, provided by hardware as
* the lower bits of the cycle counter, sampled at the time the timestamp was
* collected.
*
* To reconstruct into a full 64-bit-wide timestamp, the cycle counter is
* read and the high-order bits are filled in.
*
* Must be called within one wraparound period of the partial timestamp since
* it was generated by the MAC.
*/
u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
u64 ts_partial)
{
u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits);
u64 ts_reconstructed;
ts_reconstructed = (now & ~partial_tstamp_mask) | ts_partial;
/* Check lower bits of current cycle counter against the timestamp.
* If the current cycle counter is lower than the partial timestamp,
* then wraparound surely occurred and must be accounted for.
*/
if ((now & partial_tstamp_mask) <= ts_partial)
ts_reconstructed -= (partial_tstamp_mask + 1);
return ts_reconstructed;
}
EXPORT_SYMBOL_GPL(sja1105_tstamp_reconstruct);
/* Reads the SPI interface for an egress timestamp generated by the switch
* for frames sent using management routes.
*
* SJA1105 E/T layout of the 4-byte SPI payload:
*
* 31 23 15 7 0
* | | | | |
* +-----+-----+-----+ ^
* ^ |
* | |
* 24-bit timestamp Update bit
*
*
* SJA1105 P/Q/R/S layout of the 8-byte SPI payload:
*
* 31 23 15 7 0 63 55 47 39 32
* | | | | | | | | | |
* ^ +-----+-----+-----+-----+
* | ^
* | |
* Update bit 32-bit timestamp
*
* Notice that the update bit is in the same place.
* To have common code for E/T and P/Q/R/S for reading the timestamp,
* we need to juggle with the offset and the bit indices.
*/
int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
{
const struct sja1105_regs *regs = priv->info->regs;
int tstamp_bit_start, tstamp_bit_end;
int timeout = 10;
u8 packed_buf[8];
u64 update;
int rc;
do {
rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
regs->ptpegr_ts[port],
packed_buf,
priv->info->ptpegr_ts_bytes);
if (rc < 0)
return rc;
sja1105_unpack(packed_buf, &update, 0, 0,
priv->info->ptpegr_ts_bytes);
if (update)
break;
usleep_range(10, 50);
} while (--timeout);
if (!timeout)
return -ETIMEDOUT;
/* Point the end bit to the second 32-bit word on P/Q/R/S,
* no-op on E/T.
*/
tstamp_bit_end = (priv->info->ptpegr_ts_bytes - 4) * 8;
/* Shift the 24-bit timestamp on E/T to be collected from 31:8.
* No-op on P/Q/R/S.
*/
tstamp_bit_end += 32 - priv->info->ptp_ts_bits;
tstamp_bit_start = tstamp_bit_end + priv->info->ptp_ts_bits - 1;
*ts = 0;
sja1105_unpack(packed_buf, ts, tstamp_bit_start, tstamp_bit_end,
priv->info->ptpegr_ts_bytes);
return 0;
}
EXPORT_SYMBOL_GPL(sja1105_ptpegr_ts_poll);
int sja1105_ptp_reset(struct sja1105_private *priv)
{
struct dsa_switch *ds = priv->ds;
struct sja1105_ptp_cmd cmd = {0};
int rc;
mutex_lock(&priv->ptp_lock);
cmd.resptp = 1;
dev_dbg(ds->dev, "Resetting PTP clock\n");
rc = priv->info->ptp_cmd(priv, &cmd);
timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc,
ktime_to_ns(ktime_get_real()));
mutex_unlock(&priv->ptp_lock);
return rc;
}
EXPORT_SYMBOL_GPL(sja1105_ptp_reset);
static int sja1105_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct sja1105_private *priv = ptp_to_sja1105(ptp);
u64 ns;
mutex_lock(&priv->ptp_lock);
ns = timecounter_read(&priv->tstamp_tc);
mutex_unlock(&priv->ptp_lock);
*ts = ns_to_timespec64(ns);
return 0;
}
static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct sja1105_private *priv = ptp_to_sja1105(ptp);
u64 ns = timespec64_to_ns(ts);
mutex_lock(&priv->ptp_lock);
timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc, ns);
mutex_unlock(&priv->ptp_lock);
return 0;
}
static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct sja1105_private *priv = ptp_to_sja1105(ptp);
s64 clkrate;
clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM;
clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM);
mutex_lock(&priv->ptp_lock);
/* Force a readout to update the timer *before* changing its frequency.
*
* This way, its corrected time curve can at all times be modeled
* as a linear "A * x + B" function, where:
*
* - B are past frequency adjustments and offset shifts, all
* accumulated into the cycle_last variable.
*
* - A is the new frequency adjustments we're just about to set.
*
* Reading now makes B accumulate the correct amount of time,
* corrected at the old rate, before changing it.
*
* Hardware timestamps then become simple points on the curve and
* are approximated using the above function. This is still better
* than letting the switch take the timestamps using the hardware
* rate-corrected clock (PTPCLKVAL) - the comparison in this case would
* be that we're shifting the ruler at the same time as we're taking
* measurements with it.
*
* The disadvantage is that it's possible to receive timestamps when
* a frequency adjustment took place in the near past.
* In this case they will be approximated using the new ppb value
* instead of a compound function made of two segments (one at the old
* and the other at the new rate) - introducing some inaccuracy.
*/
timecounter_read(&priv->tstamp_tc);
priv->tstamp_cc.mult = SJA1105_CC_MULT + clkrate;
mutex_unlock(&priv->ptp_lock);
return 0;
}
static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct sja1105_private *priv = ptp_to_sja1105(ptp);
mutex_lock(&priv->ptp_lock);
timecounter_adjtime(&priv->tstamp_tc, delta);
mutex_unlock(&priv->ptp_lock);
return 0;
}
static u64 sja1105_ptptsclk_read(const struct cyclecounter *cc)
{
struct sja1105_private *priv = cc_to_sja1105(cc);
const struct sja1105_regs *regs = priv->info->regs;
u64 ptptsclk = 0;
int rc;
rc = sja1105_spi_send_int(priv, SPI_READ, regs->ptptsclk,
&ptptsclk, 8);
if (rc < 0)
dev_err_ratelimited(priv->ds->dev,
"failed to read ptp cycle counter: %d\n",
rc);
return ptptsclk;
}
static void sja1105_ptp_overflow_check(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct sja1105_private *priv = dw_to_sja1105(dw);
struct timespec64 ts;
sja1105_ptp_gettime(&priv->ptp_caps, &ts);
schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
}
static const struct ptp_clock_info sja1105_ptp_caps = {
.owner = THIS_MODULE,
.name = "SJA1105 PHC",
.adjfine = sja1105_ptp_adjfine,
.adjtime = sja1105_ptp_adjtime,
.gettime64 = sja1105_ptp_gettime,
.settime64 = sja1105_ptp_settime,
.max_adj = SJA1105_MAX_ADJ_PPB,
};
int sja1105_ptp_clock_register(struct sja1105_private *priv)
{
struct dsa_switch *ds = priv->ds;
/* Set up the cycle counter */
priv->tstamp_cc = (struct cyclecounter) {
.read = sja1105_ptptsclk_read,
.mask = CYCLECOUNTER_MASK(64),
.shift = SJA1105_CC_SHIFT,
.mult = SJA1105_CC_MULT,
};
mutex_init(&priv->ptp_lock);
INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
priv->ptp_caps = sja1105_ptp_caps;
priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev);
if (IS_ERR_OR_NULL(priv->clock))
return PTR_ERR(priv->clock);
return sja1105_ptp_reset(priv);
}
EXPORT_SYMBOL_GPL(sja1105_ptp_clock_register);
void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
{
if (IS_ERR_OR_NULL(priv->clock))
return;
ptp_clock_unregister(priv->clock);
priv->clock = NULL;
}
EXPORT_SYMBOL_GPL(sja1105_ptp_clock_unregister);
MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_DESCRIPTION("SJA1105 PHC Driver");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_PTP_H
#define _SJA1105_PTP_H
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
int sja1105_ptp_clock_register(struct sja1105_private *priv);
void sja1105_ptp_clock_unregister(struct sja1105_private *priv);
int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts);
int sja1105et_ptp_cmd(const void *ctx, const void *data);
int sja1105pqrs_ptp_cmd(const void *ctx, const void *data);
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *ts);
u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
u64 ts_partial);
int sja1105_ptp_reset(struct sja1105_private *priv);
#else
static inline int sja1105_ptp_clock_register(struct sja1105_private *priv)
{
return 0;
}
static inline void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
{
return;
}
static inline int
sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
{
return 0;
}
static inline u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv,
u64 now, u64 ts_partial)
{
return 0;
}
static inline int sja1105_ptp_reset(struct sja1105_private *priv)
{
return 0;
}
#define sja1105et_ptp_cmd NULL
#define sja1105pqrs_ptp_cmd NULL
#define sja1105_get_ts_info NULL
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
@@ -100,6 +100,7 @@ int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
return 0;
}
EXPORT_SYMBOL_GPL(sja1105_spi_send_packed_buf);
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
@@ -135,6 +136,7 @@ int sja1105_spi_send_int(const struct sja1105_private *priv,
return rc;
}
EXPORT_SYMBOL_GPL(sja1105_spi_send_int);
/* Should be used if a @packed_buf larger than SJA1105_SIZE_SPI_MSG_MAXLEN
* must be sent/received. Splitting the buffer into chunks and assembling
@@ -478,7 +480,12 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
}
rc = sja1105_ptp_reset(priv);
if (rc < 0)
dev_err(dev, "Failed to reset PTP clock: %d\n", rc);
dev_info(dev, "Reset switch and programmed static config\n"); dev_info(dev, "Reset switch and programmed static config\n");
out: out:
kfree(config_buf); kfree(config_buf);
return rc; return rc;
...@@ -507,6 +514,11 @@ static struct sja1105_regs sja1105et_regs = { ...@@ -507,6 +514,11 @@ static struct sja1105_regs sja1105et_regs = {
.rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032}, .rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
.rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031}, .rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034}, .rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
.ptp_control = 0x17,
.ptpclk = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
.ptptsclk = 0x1B, /* Spans 0x1B to 0x1C */
};
static struct sja1105_regs sja1105pqrs_regs = {
@@ -533,6 +545,11 @@ static struct sja1105_regs sja1105pqrs_regs = {
.rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
.ptp_control = 0x18,
.ptpclk = 0x19,
.ptpclkrate = 0x1B,
.ptptsclk = 0x1C,
};
struct sja1105_info sja1105e_info = {
@@ -540,9 +557,12 @@ struct sja1105_info sja1105e_info = {
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd = sja1105et_ptp_cmd,
.regs = &sja1105et_regs,
.name = "SJA1105E",
};
@@ -551,9 +571,12 @@ struct sja1105_info sja1105t_info = {
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd = sja1105et_ptp_cmd,
.regs = &sja1105et_regs,
.name = "SJA1105T",
};
@@ -562,9 +585,12 @@ struct sja1105_info sja1105p_info = {
.part_no = SJA1105P_PART_NO,
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd = sja1105pqrs_ptp_cmd,
.regs = &sja1105pqrs_regs,
.name = "SJA1105P",
};
@@ -573,9 +599,12 @@ struct sja1105_info sja1105q_info = {
.part_no = SJA1105Q_PART_NO,
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd = sja1105pqrs_ptp_cmd,
.regs = &sja1105pqrs_regs,
.name = "SJA1105Q",
};
@@ -584,9 +613,12 @@ struct sja1105_info sja1105r_info = {
.part_no = SJA1105R_PART_NO,
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd = sja1105pqrs_ptp_cmd,
.regs = &sja1105pqrs_regs,
.name = "SJA1105R",
};
@@ -596,8 +628,11 @@ struct sja1105_info sja1105s_info = {
.static_ops = sja1105s_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd = sja1105pqrs_ptp_cmd,
.name = "SJA1105S", .name = "SJA1105S",
}; };
...@@ -35,6 +35,7 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len) ...@@ -35,6 +35,7 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
} }
dump_stack(); dump_stack();
} }
EXPORT_SYMBOL_GPL(sja1105_pack);
void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len) void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len)
{ {
...@@ -52,6 +53,7 @@ void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len) ...@@ -52,6 +53,7 @@ void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len)
start, end); start, end);
dump_stack(); dump_stack();
} }
EXPORT_SYMBOL_GPL(sja1105_unpack);
void sja1105_packing(void *buf, u64 *val, int start, int end, void sja1105_packing(void *buf, u64 *val, int start, int end,
size_t len, enum packing_op op) size_t len, enum packing_op op)
...@@ -74,6 +76,7 @@ void sja1105_packing(void *buf, u64 *val, int start, int end, ...@@ -74,6 +76,7 @@ void sja1105_packing(void *buf, u64 *val, int start, int end,
} }
dump_stack(); dump_stack();
} }
EXPORT_SYMBOL_GPL(sja1105_packing);
/* Little-endian Ethernet CRC32 of data packed as big-endian u32 words */ /* Little-endian Ethernet CRC32 of data packed as big-endian u32 words */
u32 sja1105_crc32(const void *buf, size_t len) u32 sja1105_crc32(const void *buf, size_t len)
...@@ -91,6 +94,28 @@ u32 sja1105_crc32(const void *buf, size_t len) ...@@ -91,6 +94,28 @@ u32 sja1105_crc32(const void *buf, size_t len)
return ~crc; return ~crc;
} }
static size_t sja1105et_avb_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY;
struct sja1105_avb_params_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->destmeta, 95, 48, size, op);
sja1105_packing(buf, &entry->srcmeta, 47, 0, size, op);
return size;
}
static size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
struct sja1105_avb_params_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->destmeta, 125, 78, size, op);
sja1105_packing(buf, &entry->srcmeta, 77, 30, size, op);
return size;
}
static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -423,6 +448,7 @@ static u64 blk_id_map[BLK_IDX_MAX] = {
[BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
[BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
[BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
[BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS,
[BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
[BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
};
@@ -624,6 +650,12 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105et_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105et_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -682,6 +714,12 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105et_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105et_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -740,6 +778,12 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105pqrs_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -798,6 +842,12 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105pqrs_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -856,6 +906,12 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105pqrs_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -914,6 +970,12 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105pqrs_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
...
@@ -20,10 +20,12 @@
#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28
#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4
#define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40
#define SJA1105ET_SIZE_AVB_PARAMS_ENTRY 12
#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
#define SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY 16
/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
enum {
@@ -34,6 +36,7 @@ enum {
BLKID_MAC_CONFIG = 0x09,
BLKID_L2_LOOKUP_PARAMS = 0x0D,
BLKID_L2_FORWARDING_PARAMS = 0x0E,
BLKID_AVB_PARAMS = 0x10,
BLKID_GENERAL_PARAMS = 0x11,
BLKID_XMII_PARAMS = 0x4E,
};
@@ -46,6 +49,7 @@ enum sja1105_blk_idx {
BLK_IDX_MAC_CONFIG,
BLK_IDX_L2_LOOKUP_PARAMS,
BLK_IDX_L2_FORWARDING_PARAMS,
BLK_IDX_AVB_PARAMS,
BLK_IDX_GENERAL_PARAMS,
BLK_IDX_XMII_PARAMS,
BLK_IDX_MAX,
@@ -64,6 +68,7 @@ enum sja1105_blk_idx {
#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1
#define SJA1105_MAX_GENERAL_PARAMS_COUNT 1
#define SJA1105_MAX_XMII_PARAMS_COUNT 1
#define SJA1105_MAX_AVB_PARAMS_COUNT 1
#define SJA1105_MAX_FRAME_MEMORY 929
@@ -179,6 +184,11 @@ struct sja1105_l2_policing_entry {
u64 partition;
};
struct sja1105_avb_params_entry {
u64 destmeta;
u64 srcmeta;
};
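The new entry is serialized by a per-device packing helper like the ones referenced in the table_ops arrays above. As a rough illustration only (not this patch's actual code), an E/T variant could look as follows; the bit offsets (two back-to-back 48-bit MAC fields in the 12-byte entry) and the packing quirks are assumptions made for the sketch, with packing() being the generic helper from <linux/packing.h>:

#include <linux/packing.h>

static size_t sja1105et_avb_params_entry_packing(void *buf, void *entry_ptr,
						 enum packing_op op)
{
	/* Assumed field layout: destmeta in bits 95:48, srcmeta in bits
	 * 47:0 of the 96-bit E/T entry. The quirks mirror the
	 * little-endian, 32-bit word-swapped layout used for the other
	 * static config tables.
	 */
	const size_t size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY;
	struct sja1105_avb_params_entry *entry = entry_ptr;

	packing(buf, &entry->destmeta, 95, 48, size, op,
		QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN);
	packing(buf, &entry->srcmeta, 47, 0, size, op,
		QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN);
	return size;
}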
struct sja1105_mac_config_entry {
u64 top[8];
u64 base[8];
...
@@ -20,9 +20,6 @@ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
-struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev,
-struct packet_type *pt, u16 *tpid, u16 *tci);
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
@@ -31,6 +28,8 @@ int dsa_8021q_rx_switch_id(u16 vid);
int dsa_8021q_rx_source_port(u16 vid);
struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
#else
int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
@@ -45,12 +44,6 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
return NULL;
}
-struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev,
-struct packet_type *pt, u16 *tpid, u16 *tci)
-{
-return NULL;
-}
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
{
return 0;
@@ -71,6 +64,11 @@ int dsa_8021q_rx_source_port(u16 vid)
return 0;
}
struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
{
return NULL;
}
#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
#endif /* _NET_DSA_8021Q_H */
...
@@ -12,6 +12,7 @@
#include <net/dsa.h>
#define ETH_P_SJA1105 ETH_P_DSA_8021Q
#define ETH_P_SJA1105_META 0x0008
/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
@@ -20,8 +21,41 @@
#define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull
#define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull
/* Source and Destination MAC of follow-up meta frames.
* Whereas the choice of SMAC only affects the unique identification of the
* switch as sender of meta frames, the DMAC must be an address that is present
* in the DSA master port's multicast MAC filter.
* 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588
* over L2 use this address for some purpose already.
*/
#define SJA1105_META_SMAC 0x222222222222ull
#define SJA1105_META_DMAC 0x0180C200000Eull
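Keeping that requirement satisfied is the switch driver's job. A minimal sketch of one way to do it, assuming the driver has a handle on the DSA master netdevice and that the caller runs under the RTNL lock (the function name and hook point are made up for illustration; u64_to_ether_addr() and dev_mc_add() are existing kernel helpers):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int example_add_meta_dmac(struct net_device *master)
{
	u8 meta_dmac[ETH_ALEN];

	/* Convert the u64 constant to a byte array and install it in the
	 * master's multicast filter so meta frames are not dropped.
	 */
	u64_to_ether_addr(SJA1105_META_DMAC, meta_dmac);

	return dev_mc_add(master, meta_dmac);
}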
/* Global tagger data: each struct sja1105_port has a reference to
* the structure defined in struct sja1105_private.
*/
struct sja1105_tagger_data {
struct sk_buff_head skb_rxtstamp_queue;
struct work_struct rxtstamp_work;
struct sk_buff *stampable_skb;
/* Protects concurrent access to the meta state machine
* from taggers running on multiple ports on SMP systems
*/
spinlock_t meta_lock;
bool hwts_rx_en;
};
struct sja1105_skb_cb {
u32 meta_tstamp;
};
#define SJA1105_SKB_CB(skb) \
((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
struct sja1105_port {
struct sja1105_tagger_data *data;
struct dsa_port *dp;
bool hwts_tx_en;
int mgmt_slot;
};
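As a rough sketch of how the switch driver could initialize the shared tagger data once and point every port at it, as the comment above describes (shown as one function for brevity; the init part runs once, the wiring once per port, and the work handler plus the exact location of the shared structure inside struct sja1105_private are assumptions):

static void example_rxtstamp_work(struct work_struct *work)
{
	/* Would drain skb_rxtstamp_queue and reconstruct full RX
	 * timestamps here (see the reconstruction sketch further below).
	 */
}

static void example_tagger_wire_up(struct sja1105_tagger_data *data,
				   struct sja1105_port *sp,
				   struct dsa_port *dp)
{
	/* One-time initialization of the shared state. */
	skb_queue_head_init(&data->skb_rxtstamp_queue);
	INIT_WORK(&data->rxtstamp_work, example_rxtstamp_work);
	spin_lock_init(&data->meta_lock);
	data->stampable_skb = NULL;
	data->hwts_rx_en = false;

	/* Per-port wiring that lets the tagger reach the shared data via
	 * dsa_slave_to_port(skb->dev)->priv, as sja1105_rcv does below.
	 */
	sp->data = data;
	sp->dp = dp;
	sp->hwts_tx_en = false;
	dp->priv = sp;
}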
...
@@ -357,6 +357,7 @@ struct dsa_switch_ops {
int port);
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
...
@@ -408,6 +408,9 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
dsa_switch_unregister_notifier(ds);
if (ds->ops->teardown)
ds->ops->teardown(ds);
if (ds->devlink) {
devlink_unregister(ds->devlink);
devlink_free(ds->devlink);
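For a driver like sja1105, the new hook is the natural place to undo tagger-related state created in .setup before DSA frees the ports. A hedged sketch only (the sja1105_private member name is an assumption, the rest is generic kernel API):

static void example_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tagger_data *data = &priv->tagger_data; /* assumed member */

	/* Stop deferred RX timestamp processing and drop anything that is
	 * still queued.
	 */
	cancel_work_sync(&data->rxtstamp_work);
	skb_queue_purge(&data->skb_rxtstamp_queue);
}

/* Hooked up as .teardown = example_teardown in the dsa_switch_ops. */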
...
@@ -423,6 +423,8 @@ static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
if (!clone)
return;
DSA_SKB_CB(skb)->clone = clone;
if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
return;
@@ -460,6 +462,7 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_end(&s->syncp);
DSA_SKB_CB(skb)->deferred_xmit = false;
DSA_SKB_CB(skb)->clone = NULL;
/* Identify PTP protocol packets, clone them, and pass them to the
* switch driver
...
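The clone stored in DSA_SKB_CB(skb)->clone is what the driver's .port_txtstamp callback sees; returning true keeps DSA from freeing the clone so the driver (or the tagger, which can reach it through the cb) can complete the timestamp later. A minimal sketch, with the per-port private lookup being an assumption:

static bool example_port_txtstamp(struct dsa_switch *ds, int port,
				  struct sk_buff *clone, unsigned int type)
{
	struct sja1105_port *sp = dsa_to_port(ds, port)->priv;

	if (!sp->hwts_tx_en)
		return false;	/* DSA disposes of the clone */

	/* Take ownership: the TX path can later attach the hardware
	 * timestamp to this clone and call skb_complete_tx_timestamp().
	 */
	return true;
}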
@@ -235,31 +235,48 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
-struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev,
-struct packet_type *pt, u16 *tpid, u16 *tci)
-{
-struct vlan_ethhdr *tag;
-if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
-return NULL;
-tag = vlan_eth_hdr(skb);
-*tpid = ntohs(tag->h_vlan_proto);
-*tci = ntohs(tag->h_vlan_TCI);
-/* skb->data points in the middle of the VLAN tag,
-* after tpid and before tci. This is because so far,
-* ETH_HLEN (DMAC, SMAC, EtherType) bytes were pulled.
/* In the DSA packet_type handler, skb->data points in the middle of the VLAN
* tag, after tpid and before tci. This is because so far, ETH_HLEN
* (DMAC, SMAC, EtherType) bytes were pulled.
* There are 2 bytes of VLAN tag left in skb->data, and upper
* layers expect the 'real' EtherType to be consumed as well.
* Coincidentally, a VLAN header is also of the same size as
* the number of bytes that need to be pulled.
*
* skb_mac_header skb->data
* | |
* v v
* | | | | | | | | | | | | | | | | | | |
* +-----------------------+-----------------------+-------+-------+-------+
* | Destination MAC | Source MAC | TPID | TCI | EType |
* +-----------------------+-----------------------+-------+-------+-------+
* ^ | |
* |<--VLAN_HLEN-->to <---VLAN_HLEN--->
* from |
* >>>>>>> v
* >>>>>>> | | | | | | | | | | | | | | |
* >>>>>>> +-----------------------+-----------------------+-------+
* >>>>>>> | Destination MAC | Source MAC | EType |
* +-----------------------+-----------------------+-------+
* ^ ^
* (now part of | |
* skb->head) skb_mac_header skb->data
*/ */
-skb_pull_rcsum(skb, VLAN_HLEN);
struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
{
u8 *from = skb_mac_header(skb);
u8 *dest = from + VLAN_HLEN;
memmove(dest, from, ETH_HLEN - VLAN_HLEN);
skb_pull(skb, VLAN_HLEN);
skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
skb_pull_rcsum(skb, ETH_HLEN);
return skb;
}
-EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
EXPORT_SYMBOL_GPL(dsa_8021q_remove_header);
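A usage sketch from a caller's perspective; the pskb_may_pull() guard mirrors the check the removed dsa_8021q_rcv() performed and is shown as a defensive assumption, not something this patch requires of callers:

static struct sk_buff *example_strip_8021q_header(struct sk_buff *skb)
{
	/* Make sure the remaining two VLAN tag bytes are in the linear
	 * area before rewriting the header in place.
	 */
	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		return NULL;

	return dsa_8021q_remove_header(skb);
}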
static const struct dsa_device_ops dsa_8021q_netdev_ops = {
.name = "8021q",
...
@@ -13,6 +13,8 @@ static inline bool sja1105_is_link_local(const struct sk_buff *skb)
const struct ethhdr *hdr = eth_hdr(skb);
u64 dmac = ether_addr_to_u64(hdr->h_dest);
if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
return false;
if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
SJA1105_LINKLOCAL_FILTER_A)
return true;
@@ -22,15 +24,61 @@ static inline bool sja1105_is_link_local(const struct sk_buff *skb)
return false;
}
struct sja1105_meta {
u64 tstamp;
u64 dmac_byte_4;
u64 dmac_byte_3;
u64 source_port;
u64 switch_id;
};
static void sja1105_meta_unpack(const struct sk_buff *skb,
struct sja1105_meta *meta)
{
u8 *buf = skb_mac_header(skb) + ETH_HLEN;
/* UM10944.pdf section 4.2.17 AVB Parameters:
* Structure of the meta-data follow-up frame.
* It is in network byte order, so there are no quirks
* while unpacking the meta frame.
*
* Also SJA1105 E/T only populates bits 23:0 of the timestamp
* whereas P/Q/R/S does 32 bits. Since the structure is the
* same and the E/T puts zeroes in the high-order byte, use
* a unified unpacking command for both device series.
*/
packing(buf, &meta->tstamp, 31, 0, 4, UNPACK, 0);
packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
}
static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 smac = ether_addr_to_u64(hdr->h_source);
u64 dmac = ether_addr_to_u64(hdr->h_dest);
if (smac != SJA1105_META_SMAC)
return false;
if (dmac != SJA1105_META_DMAC)
return false;
if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
return false;
return true;
}
/* This is the first time the tagger sees the frame on RX.
-* Figure out if we can decode it, and if we can, annotate skb->cb with how we
-* plan to do that, so we don't need to check again in the rcv function.
* Figure out if we can decode it.
*/
static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
{
if (!dsa_port_is_vlan_filtering(dev->dsa_ptr))
return true;
if (sja1105_is_link_local(skb))
return true;
-if (!dsa_port_is_vlan_filtering(dev->dsa_ptr))
if (sja1105_is_meta_frame(skb))
return true;
return false;
}
@@ -62,25 +110,152 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}
static void sja1105_transfer_meta(struct sk_buff *skb,
const struct sja1105_meta *meta)
{
struct ethhdr *hdr = eth_hdr(skb);
hdr->h_dest[3] = meta->dmac_byte_3;
hdr->h_dest[4] = meta->dmac_byte_4;
SJA1105_SKB_CB(skb)->meta_tstamp = meta->tstamp;
}
/* This is a simple state machine which follows the hardware mechanism of
* generating RX timestamps:
*
* After each timestampable skb (all traffic for which send_meta1 and
* send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
* containing a partial timestamp is immediately generated by the switch and
* sent as a follow-up to the link-local frame on the CPU port.
*
* The meta frames have no unique identifier (such as sequence number) by which
* one may pair them to the correct timestampable frame.
* Instead, the switch has internal logic that ensures no frames are sent on
* the CPU port between a link-local timestampable frame and its corresponding
* meta follow-up. It also ensures strict ordering between ports (lower ports
* have higher priority towards the CPU port). For this reason, a per-port
* data structure is not needed/desirable.
*
* This function pairs the link-local frame with its partial timestamp from the
* meta follow-up frame. The full timestamp will be reconstructed later in a
* work queue.
*/
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
struct sja1105_meta *meta,
bool is_link_local,
bool is_meta)
{
struct sja1105_port *sp;
struct dsa_port *dp;
dp = dsa_slave_to_port(skb->dev);
sp = dp->priv;
/* Step 1: A timestampable frame was received.
* Buffer it until we get its meta frame.
*/
if (is_link_local && sp->data->hwts_rx_en) {
spin_lock(&sp->data->meta_lock);
/* Was this a link-local frame instead of the meta
* that we were expecting?
*/
if (sp->data->stampable_skb) {
dev_err_ratelimited(dp->ds->dev,
"Expected meta frame, is %12llx "
"in the DSA master multicast filter?\n",
SJA1105_META_DMAC);
}
/* Hold a reference to avoid dsa_switch_rcv
* from freeing the skb.
*/
sp->data->stampable_skb = skb_get(skb);
spin_unlock(&sp->data->meta_lock);
/* Tell DSA we got nothing */
return NULL;
/* Step 2: The meta frame arrived.
* Time to take the stampable skb out of the closet, annotate it
* with the partial timestamp, and pretend that we received it
* just now (basically masquerade the buffered frame as the meta
* frame, which serves no further purpose).
*/
} else if (is_meta) {
struct sk_buff *stampable_skb;
spin_lock(&sp->data->meta_lock);
stampable_skb = sp->data->stampable_skb;
sp->data->stampable_skb = NULL;
/* Was this a meta frame instead of the link-local
* that we were expecting?
*/
if (!stampable_skb) {
dev_err_ratelimited(dp->ds->dev,
"Unexpected meta frame\n");
spin_unlock(&sp->data->meta_lock);
return NULL;
}
if (stampable_skb->dev != skb->dev) {
dev_err_ratelimited(dp->ds->dev,
"Meta frame on wrong port\n");
spin_unlock(&sp->data->meta_lock);
return NULL;
}
/* Free the meta frame and give DSA the buffered stampable_skb
* for further processing up the network stack.
*/
kfree_skb(skb);
skb = skb_copy(stampable_skb, GFP_ATOMIC);
if (!skb) {
dev_err_ratelimited(dp->ds->dev,
"Failed to copy stampable skb\n");
return NULL;
}
sja1105_transfer_meta(skb, meta);
/* The cached copy will be freed now */
skb_unref(stampable_skb);
spin_unlock(&sp->data->meta_lock);
}
return skb;
}
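The "reconstructed later in a work queue" step mentioned in the comment above is not part of this file. As a sketch, the pairing of SJA1105_SKB_CB(skb)->meta_tstamp with a full 64-bit counter value read from the switch could look like this; the 32-bit partial width and how 'now' is obtained are assumptions of the sketch:

static u64 example_tstamp_reconstruct(u64 now, u32 ts_partial)
{
	/* Splice the partial bits into the current full counter value. */
	u64 ts = (now & GENMASK_ULL(63, 32)) | ts_partial;

	/* The partial stamp was taken before 'now' was read; if the low
	 * 32 bits of 'now' have already wrapped past it, step back one
	 * wraparound period.
	 */
	if (ts > now)
		ts -= (1ULL << 32);

	return ts;
}

The work item would then convert the reconstructed counter value to nanoseconds (for example via timecounter_cyc2time() on the free-running PHC) and deliver it through skb_hwtstamps(skb) before handing the skb to the stack.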
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
-struct ethhdr *hdr = eth_hdr(skb);
-u64 source_port, switch_id;
-struct sk_buff *nskb;
struct sja1105_meta meta = {0};
int source_port, switch_id;
struct vlan_ethhdr *hdr;
u16 tpid, vid, tci;
bool is_link_local;
bool is_tagged;
bool is_meta;
-nskb = dsa_8021q_rcv(skb, netdev, pt, &tpid, &tci);
-is_tagged = (nskb && tpid == ETH_P_SJA1105);
-skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-vid = tci & VLAN_VID_MASK;
hdr = vlan_eth_hdr(skb);
tpid = ntohs(hdr->h_vlan_proto);
is_tagged = (tpid == ETH_P_SJA1105);
is_link_local = sja1105_is_link_local(skb);
is_meta = sja1105_is_meta_frame(skb);
skb->offload_fwd_mark = 1;
-if (sja1105_is_link_local(skb)) {
if (is_tagged) {
/* Normal traffic path. */
tci = ntohs(hdr->h_vlan_TCI);
vid = tci & VLAN_VID_MASK;
source_port = dsa_8021q_rx_source_port(vid);
switch_id = dsa_8021q_rx_switch_id(vid);
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
} else if (is_link_local) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
* the incl_srcpt options.
@@ -90,10 +265,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
/* Clear the DMAC bytes that were mangled by the switch */
hdr->h_dest[3] = 0;
hdr->h_dest[4] = 0;
} else if (is_meta) {
sja1105_meta_unpack(skb, &meta);
source_port = meta.source_port;
switch_id = meta.switch_id;
} else {
-/* Normal traffic path. */
-source_port = dsa_8021q_rx_source_port(vid);
-switch_id = dsa_8021q_rx_switch_id(vid);
return NULL;
}
skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
@@ -106,10 +283,10 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
* it there, see dsa_switch_rcv: skb_push(skb, ETH_HLEN).
*/
if (is_tagged)
-memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
-ETH_HLEN - VLAN_HLEN);
skb = dsa_8021q_remove_header(skb);
-return skb;
return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
is_meta);
}
static struct dsa_device_ops sja1105_netdev_ops = {
...