Commit e345e58a authored by David S. Miller

Merge branch 'dsa-hsr-offload'

George McCollister says:

====================
add HSR offloading support for DSA switches

Add support for offloading HSR/PRP (IEC 62439-3) tag insertion, tag
removal, forwarding and duplication on DSA switches.
This series adds offloading to the xrs700x DSA driver.

Changes since RFC:
 * Split hsr and dsa patches. (Florian Fainelli)

Changes since v1:
 * Fixed some typos/wording. (Vladimir Oltean)
 * Eliminate IFF_HSR and use is_hsr_master instead. (Vladimir Oltean)
 * Make hsr_handle_sup_frame handle skb_std as well (required when offloading).
 * Don't add HSR tag for HSR v0 supervisory frames.
 * Fixed tag insertion offloading for PRP.

Changes since v2:
 * Return -EOPNOTSUPP instead of 0 in dsa_switch_hsr_join and
   dsa_switch_hsr_leave. (Vladimir Oltean)
 * Only allow ports 1 and 2 to be HSR/PRP redundant ports. (Tobias Waldekranz)
 * Set and remove HSR features for both redundant ports. (Vladimir Oltean)
 * Change port_hsr_leave() to return int instead of void.
 * Remove hsr_init_skb() proto argument. (Vladimir Oltean)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f8a7e014 bd62e6f5
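
For orientation before the diff: the driver-facing pattern this series introduces boils down to implementing the new port_hsr_join()/port_hsr_leave() DSA hooks and toggling the new NETIF_F_HW_HSR_* feature bits on the affected slave netdevs. The sketch below is illustrative only; everything named foo_* is a hypothetical stand-in for what the xrs700x changes further down do in full.

#include <net/dsa.h>
#include <linux/if_hsr.h>
#include <linux/netdev_features.h>

/* Hypothetical driver: a minimal opt-in to HSR/PRP offload. */
static int foo_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
{
	enum hsr_version ver;
	int err;

	err = hsr_get_version(hsr, &ver);
	if (err)
		return err;

	/* Returning -EOPNOTSUPP makes DSA fall back to software HSR/PRP. */
	if (ver != HSR_V1 && ver != PRP_V1)
		return -EOPNOTSUPP;

	/* ... program tag insertion/removal, forwarding and duplication ... */

	/* Tell the hsr module which steps the hardware now performs. */
	dsa_to_port(ds, port)->slave->features |=
		NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM |
		NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP;
	return 0;
}

static int foo_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
{
	/* ... undo the hardware configuration ... */
	dsa_to_port(ds, port)->slave->features &=
		~(NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM |
		  NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP);
	return 0;
}

static const struct dsa_switch_ops foo_switch_ops = {
	/* ... the usual DSA ops ... */
	.port_hsr_join	= foo_hsr_join,
	.port_hsr_leave	= foo_hsr_leave,
};
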
......@@ -182,3 +182,24 @@ stricter than Hardware LRO. A packet stream merged by Hardware GRO must
be re-segmentable by GSO or TSO back to the exact original packet stream.
Hardware GRO is dependent on RXCSUM since every packet successfully merged
by hardware must also have the checksum verified by hardware.
* hsr-tag-ins-offload
This should be set for devices which insert an HSR (High-availability Seamless
Redundancy) or PRP (Parallel Redundancy Protocol) tag automatically.
* hsr-tag-rm-offload
This should be set for devices which remove HSR (High-availability Seamless
Redundancy) or PRP (Parallel Redundancy Protocol) tags automatically.
* hsr-fwd-offload
This should be set for devices which forward HSR (High-availability Seamless
Redundancy) frames from one port to another in hardware.
* hsr-dup-offload
This should be set for devices which automatically duplicate outgoing HSR
(High-availability Seamless Redundancy) or PRP (Parallel Redundancy Protocol)
frames in hardware.
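
For illustration only (this snippet is not part of the patch): a stack component that offloads one of these steps checks the corresponding feature bit on the egress device before doing the same work in software; the hsr module changes later in this series apply this pattern for each of the four flags. hsr_sw_insert_tag() below is a hypothetical stand-in for the software tagging path.

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>

struct sk_buff *hsr_sw_insert_tag(struct sk_buff *skb);	/* hypothetical software tagger */

/* Sketch: skip software tag insertion when the lower device (e.g. a DSA
 * slave port) advertises hsr-tag-ins-offload / NETIF_F_HW_HSR_TAG_INS.
 */
static struct sk_buff *foo_tag_for_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_HSR_TAG_INS)
		return skb;			/* hardware adds the tag */

	return hsr_sw_insert_tag(skb);		/* software fallback */
}
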
......@@ -7,11 +7,17 @@
#include <net/dsa.h>
#include <linux/if_bridge.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
#include <linux/if_hsr.h>
#include "xrs700x.h"
#include "xrs700x_reg.h"
#define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)
#define XRS7000X_SUPPORTED_HSR_FEATURES \
(NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM | \
NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP)
#define XRS7003E_ID 0x100
#define XRS7003F_ID 0x101
#define XRS7004E_ID 0x200
......@@ -496,6 +502,119 @@ static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
xrs700x_bridge_common(ds, port, bridge, false);
}
static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
struct net_device *hsr)
{
unsigned int val = XRS_HSR_CFG_HSR_PRP;
struct dsa_port *partner = NULL, *dp;
struct xrs700x *priv = ds->priv;
struct net_device *slave;
int ret, i, hsr_pair[2];
enum hsr_version ver;
ret = hsr_get_version(hsr, &ver);
if (ret)
return ret;
/* Only ports 1 and 2 can be HSR/PRP redundant ports. */
if (port != 1 && port != 2)
return -EOPNOTSUPP;
if (ver == HSR_V1)
val |= XRS_HSR_CFG_HSR;
else if (ver == PRP_V1)
val |= XRS_HSR_CFG_PRP;
else
return -EOPNOTSUPP;
dsa_hsr_foreach_port(dp, ds, hsr) {
partner = dp;
}
/* We can't enable redundancy on the switch until both
* redundant ports have signed up.
*/
if (!partner)
return 0;
regmap_fields_write(priv->ps_forward, partner->index,
XRS_PORT_DISABLED);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);
regmap_write(priv->regmap, XRS_HSR_CFG(partner->index),
val | XRS_HSR_CFG_LANID_A);
regmap_write(priv->regmap, XRS_HSR_CFG(port),
val | XRS_HSR_CFG_LANID_B);
/* Clear bits for both redundant ports (HSR only) and the CPU port to
* enable forwarding.
*/
val = GENMASK(ds->num_ports - 1, 0);
if (ver == HSR_V1) {
val &= ~BIT(partner->index);
val &= ~BIT(port);
}
val &= ~BIT(dsa_upstream_port(ds, port));
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
regmap_fields_write(priv->ps_forward, partner->index,
XRS_PORT_FORWARDING);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
slave = dsa_to_port(ds, hsr_pair[i])->slave;
slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
}
return 0;
}
static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
struct net_device *hsr)
{
struct dsa_port *partner = NULL, *dp;
struct xrs700x *priv = ds->priv;
struct net_device *slave;
int i, hsr_pair[2];
unsigned int val;
dsa_hsr_foreach_port(dp, ds, hsr) {
partner = dp;
}
if (!partner)
return 0;
regmap_fields_write(priv->ps_forward, partner->index,
XRS_PORT_DISABLED);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);
regmap_write(priv->regmap, XRS_HSR_CFG(partner->index), 0);
regmap_write(priv->regmap, XRS_HSR_CFG(port), 0);
/* Clear bit for the CPU port to enable forwarding. */
val = GENMASK(ds->num_ports - 1, 0);
val &= ~BIT(dsa_upstream_port(ds, port));
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
regmap_fields_write(priv->ps_forward, partner->index,
XRS_PORT_FORWARDING);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
slave = dsa_to_port(ds, hsr_pair[i])->slave;
slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
}
return 0;
}
static const struct dsa_switch_ops xrs700x_ops = {
.get_tag_protocol = xrs700x_get_tag_protocol,
.setup = xrs700x_setup,
......@@ -509,6 +628,8 @@ static const struct dsa_switch_ops xrs700x_ops = {
.get_stats64 = xrs700x_get_stats64,
.port_bridge_join = xrs700x_bridge_join,
.port_bridge_leave = xrs700x_bridge_leave,
.port_hsr_join = xrs700x_hsr_join,
.port_hsr_leave = xrs700x_hsr_leave,
};
static int xrs700x_detect(struct xrs700x *priv)
......
......@@ -49,6 +49,11 @@
/* Port Configuration Registers - HSR/PRP */
#define XRS_HSR_CFG(x) (XRS_PORT_HSR_BASE(x) + 0x0)
#define XRS_HSR_CFG_HSR_PRP BIT(0)
#define XRS_HSR_CFG_HSR 0
#define XRS_HSR_CFG_PRP BIT(8)
#define XRS_HSR_CFG_LANID_A 0
#define XRS_HSR_CFG_LANID_B BIT(10)
/* Port Configuration Registers - PTP */
#define XRS_PTP_RX_SYNC_DELAY_NS_LO(x) (XRS_PORT_PTP_BASE(x) + 0x2)
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IF_HSR_H_
#define _LINUX_IF_HSR_H_
/* used to differentiate various protocols */
enum hsr_version {
HSR_V0 = 0,
HSR_V1,
PRP_V1,
};
#if IS_ENABLED(CONFIG_HSR)
extern bool is_hsr_master(struct net_device *dev);
extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
#else
static inline bool is_hsr_master(struct net_device *dev)
{
return false;
}
static inline int hsr_get_version(struct net_device *dev,
enum hsr_version *ver)
{
return -EINVAL;
}
#endif /* CONFIG_HSR */
#endif /*_LINUX_IF_HSR_H_*/
......@@ -86,6 +86,11 @@ enum {
NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */
NETIF_F_GRO_UDP_FWD_BIT, /* Allow UDP GRO for forwarding */
NETIF_F_HW_HSR_TAG_INS_BIT, /* Offload HSR tag insertion */
NETIF_F_HW_HSR_TAG_RM_BIT, /* Offload HSR tag removal */
NETIF_F_HW_HSR_FWD_BIT, /* Offload HSR forwarding */
NETIF_F_HW_HSR_DUP_BIT, /* Offload HSR duplication */
/*
* Add your fresh new feature above and remember to update
* netdev_features_strings[] in net/core/ethtool.c and maybe
......@@ -159,6 +164,10 @@ enum {
#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC)
#define NETIF_F_GRO_UDP_FWD __NETIF_F(GRO_UDP_FWD)
#define NETIF_F_HW_HSR_TAG_INS __NETIF_F(HW_HSR_TAG_INS)
#define NETIF_F_HW_HSR_TAG_RM __NETIF_F(HW_HSR_TAG_RM)
#define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD)
#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP)
/* Finds the next feature with the highest number of the range of start till 0.
*/
......
......@@ -172,6 +172,10 @@ struct dsa_switch_tree {
list_for_each_entry((_dp), &(_dst)->ports, list) \
if ((_dp)->lag_dev == (_lag))
#define dsa_hsr_foreach_port(_dp, _ds, _hsr) \
list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
unsigned int id)
{
......@@ -264,6 +268,7 @@ struct dsa_port {
struct phylink_config pl_config;
struct net_device *lag_dev;
bool lag_tx_enabled;
struct net_device *hsr_dev;
struct list_head list;
......@@ -769,6 +774,14 @@ struct dsa_switch_ops {
struct netdev_lag_upper_info *info);
int (*port_lag_leave)(struct dsa_switch *ds, int port,
struct net_device *lag);
/*
* HSR integration
*/
int (*port_hsr_join)(struct dsa_switch *ds, int port,
struct net_device *hsr);
int (*port_hsr_leave)(struct dsa_switch *ds, int port,
struct net_device *hsr);
};
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
......
......@@ -20,6 +20,8 @@ enum {
DSA_NOTIFIER_BRIDGE_LEAVE,
DSA_NOTIFIER_FDB_ADD,
DSA_NOTIFIER_FDB_DEL,
DSA_NOTIFIER_HSR_JOIN,
DSA_NOTIFIER_HSR_LEAVE,
DSA_NOTIFIER_LAG_CHANGE,
DSA_NOTIFIER_LAG_JOIN,
DSA_NOTIFIER_LAG_LEAVE,
......@@ -100,6 +102,13 @@ struct dsa_switchdev_event_work {
u16 vid;
};
/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
struct net_device *hsr;
int sw_index;
int port;
};
struct dsa_slave_priv {
/* Copy of CPU port xmit for faster access in slave transmit hot path */
struct sk_buff * (*xmit)(struct sk_buff *skb,
......@@ -183,6 +192,8 @@ int dsa_port_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
static inline bool dsa_port_offloads_netdev(struct dsa_port *dp,
......
......@@ -868,3 +868,37 @@ int dsa_port_get_phy_sset_count(struct dsa_port *dp)
return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
struct dsa_notifier_hsr_info info = {
.sw_index = dp->ds->index,
.port = dp->index,
.hsr = hsr,
};
int err;
dp->hsr_dev = hsr;
err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
if (err)
dp->hsr_dev = NULL;
return err;
}
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
struct dsa_notifier_hsr_info info = {
.sw_index = dp->ds->index,
.port = dp->index,
.hsr = hsr,
};
int err;
dp->hsr_dev = NULL;
err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
if (err)
pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
}
......@@ -17,6 +17,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <linux/netpoll.h>
#include <linux/ptp_classify.h>
......@@ -1938,6 +1939,19 @@ static int dsa_slave_changeupper(struct net_device *dev,
dsa_port_lag_leave(dp, info->upper_dev);
err = NOTIFY_OK;
}
} else if (is_hsr_master(info->upper_dev)) {
if (info->linking) {
err = dsa_port_hsr_join(dp, info->upper_dev);
if (err == -EOPNOTSUPP) {
NL_SET_ERR_MSG_MOD(info->info.extack,
"Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
} else {
dsa_port_hsr_leave(dp, info->upper_dev);
err = NOTIFY_OK;
}
}
return err;
......
......@@ -166,6 +166,24 @@ static int dsa_switch_fdb_del(struct dsa_switch *ds,
return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
}
static int dsa_switch_hsr_join(struct dsa_switch *ds,
struct dsa_notifier_hsr_info *info)
{
if (ds->index == info->sw_index && ds->ops->port_hsr_join)
return ds->ops->port_hsr_join(ds, info->port, info->hsr);
return -EOPNOTSUPP;
}
static int dsa_switch_hsr_leave(struct dsa_switch *ds,
struct dsa_notifier_hsr_info *info)
{
if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
return ds->ops->port_hsr_leave(ds, info->port, info->hsr);
return -EOPNOTSUPP;
}
static int dsa_switch_lag_change(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
......@@ -371,6 +389,12 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_FDB_DEL:
err = dsa_switch_fdb_del(ds, info);
break;
case DSA_NOTIFIER_HSR_JOIN:
err = dsa_switch_hsr_join(ds, info);
break;
case DSA_NOTIFIER_HSR_LEAVE:
err = dsa_switch_hsr_leave(ds, info);
break;
case DSA_NOTIFIER_LAG_CHANGE:
err = dsa_switch_lag_change(ds, info);
break;
......
......@@ -11,12 +11,17 @@
static struct sk_buff *xrs700x_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_port *partner, *dp = dsa_slave_to_port(dev);
u8 *trailer;
trailer = skb_put(skb, 1);
trailer[0] = BIT(dp->index);
if (dp->hsr_dev)
dsa_hsr_foreach_port(partner, dp->ds, dp->hsr_dev)
if (partner != dp)
trailer[0] |= BIT(partner->index);
return skb;
}
......
......@@ -69,6 +69,10 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_GRO_FRAGLIST_BIT] = "rx-gro-list",
[NETIF_F_HW_MACSEC_BIT] = "macsec-hw-offload",
[NETIF_F_GRO_UDP_FWD_BIT] = "rx-udp-gro-forwarding",
[NETIF_F_HW_HSR_TAG_INS_BIT] = "hsr-tag-ins-offload",
[NETIF_F_HW_HSR_TAG_RM_BIT] = "hsr-tag-rm-offload",
[NETIF_F_HW_HSR_FWD_BIT] = "hsr-fwd-offload",
[NETIF_F_HW_HSR_DUP_BIT] = "hsr-dup-offload",
};
const char
......
......@@ -230,7 +230,7 @@ static const struct header_ops hsr_header_ops = {
.parse = eth_header_parse,
};
static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto)
static struct sk_buff *hsr_init_skb(struct hsr_port *master)
{
struct hsr_priv *hsr = master->hsr;
struct sk_buff *skb;
......@@ -242,8 +242,7 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto)
* being, for PRP it is a trailer and for HSR it is a
* header
*/
skb = dev_alloc_skb(sizeof(struct hsr_tag) +
sizeof(struct hsr_sup_tag) +
skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
sizeof(struct hsr_sup_payload) + hlen + tlen);
if (!skb)
......@@ -251,10 +250,9 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto)
skb_reserve(skb, hlen);
skb->dev = master->dev;
skb->protocol = htons(proto);
skb->priority = TC_PRIO_CONTROL;
if (dev_hard_header(skb, skb->dev, proto,
if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
hsr->sup_multicast_addr,
skb->dev->dev_addr, skb->len) <= 0)
goto out;
......@@ -275,12 +273,10 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
{
struct hsr_priv *hsr = master->hsr;
__u8 type = HSR_TLV_LIFE_CHECK;
struct hsr_tag *hsr_tag = NULL;
struct hsr_sup_payload *hsr_sp;
struct hsr_sup_tag *hsr_stag;
unsigned long irqflags;
struct sk_buff *skb;
u16 proto;
*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
if (hsr->announce_count < 3 && hsr->prot_version == 0) {
......@@ -289,23 +285,12 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
hsr->announce_count++;
}
if (!hsr->prot_version)
proto = ETH_P_PRP;
else
proto = ETH_P_HSR;
skb = hsr_init_skb(master, proto);
skb = hsr_init_skb(master);
if (!skb) {
WARN_ONCE(1, "HSR: Could not send supervision frame\n");
return;
}
if (hsr->prot_version > 0) {
hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
hsr_tag->encap_proto = htons(ETH_P_PRP);
set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE);
}
hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);
......@@ -315,8 +300,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
if (hsr->prot_version > 0) {
hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
hsr->sup_sequence_nr++;
hsr_tag->sequence_nr = htons(hsr->sequence_nr);
hsr->sequence_nr++;
} else {
hsr_stag->sequence_nr = htons(hsr->sequence_nr);
hsr->sequence_nr++;
......@@ -332,7 +315,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
if (skb_put_padto(skb, ETH_ZLEN))
return;
hsr_forward_skb(skb, master);
......@@ -348,10 +331,8 @@ static void send_prp_supervision_frame(struct hsr_port *master,
struct hsr_sup_tag *hsr_stag;
unsigned long irqflags;
struct sk_buff *skb;
struct prp_rct *rct;
u8 *tail;
skb = hsr_init_skb(master, ETH_P_PRP);
skb = hsr_init_skb(master);
if (!skb) {
WARN_ONCE(1, "PRP: Could not send supervision frame\n");
return;
......@@ -373,17 +354,11 @@ static void send_prp_supervision_frame(struct hsr_port *master,
hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) {
if (skb_put_padto(skb, ETH_ZLEN)) {
spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
return;
}
tail = skb_tail_pointer(skb) - HSR_HLEN;
rct = (struct prp_rct *)tail;
rct->PRP_suffix = htons(ETH_P_PRP);
set_prp_LSDU_size(rct, HSR_V1_SUP_LSDUSIZE);
rct->sequence_nr = htons(hsr->sequence_nr);
hsr->sequence_nr++;
spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
hsr_forward_skb(skb, master);
......@@ -442,6 +417,7 @@ static struct hsr_proto_ops hsr_ops = {
.send_sv_frame = send_hsr_supervision_frame,
.create_tagged_frame = hsr_create_tagged_frame,
.get_untagged_frame = hsr_get_untagged_frame,
.drop_frame = hsr_drop_frame,
.fill_frame_info = hsr_fill_frame_info,
.invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
};
......@@ -489,10 +465,11 @@ void hsr_dev_setup(struct net_device *dev)
/* Return true if dev is a HSR master; return false otherwise.
*/
inline bool is_hsr_master(struct net_device *dev)
bool is_hsr_master(struct net_device *dev)
{
return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
}
EXPORT_SYMBOL(is_hsr_master);
/* Default multicast address for HSR Supervision frames */
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
......@@ -545,16 +522,6 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
hsr->prot_version = protocol_version;
/* FIXME: should I modify the value of these?
*
* - hsr_dev->flags - i.e.
* IFF_MASTER/SLAVE?
* - hsr_dev->priv_flags - i.e.
* IFF_EBRIDGE?
* IFF_TX_SKB_SHARING?
* IFF_HSR_MASTER/SLAVE?
*/
/* Make sure the 1st call to netif_carrier_on() gets through */
netif_carrier_off(hsr_dev);
......
......@@ -19,6 +19,5 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
unsigned char multicast_spec, u8 protocol_version,
struct netlink_ext_ack *extack);
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
bool is_hsr_master(struct net_device *dev);
int hsr_get_max_mtu(struct hsr_priv *hsr);
#endif /* __HSR_DEVICE_H */
......@@ -186,6 +186,7 @@ static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
set_prp_LSDU_size(trailer, lsdu_size);
trailer->sequence_nr = htons(frame->sequence_nr);
trailer->PRP_suffix = htons(ETH_P_PRP);
skb->protocol = eth_hdr(skb)->h_proto;
return skb;
}
......@@ -226,6 +227,7 @@ static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
ETH_P_HSR : ETH_P_PRP);
skb->protocol = hsr_ethhdr->ethhdr.h_proto;
return skb;
}
......@@ -247,6 +249,8 @@ struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
/* set the lane id properly */
hsr_set_path_id(hsr_ethhdr, port);
return skb_clone(frame->skb_hsr, GFP_ATOMIC);
} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
/* Create the new skb with enough headroom to fit the HSR tag */
......@@ -289,6 +293,8 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
return NULL;
}
return skb_clone(frame->skb_prp, GFP_ATOMIC);
} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
skb = skb_copy_expand(frame->skb_std, 0,
......@@ -341,6 +347,14 @@ bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
port->type == HSR_PT_SLAVE_A));
}
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
if (port->dev->features & NETIF_F_HW_HSR_FWD)
return prp_drop_frame(frame, port);
return false;
}
/* Forward the frame through all devices except:
* - Back through the receiving device
* - If it's a HSR frame: through a device where it has passed before
......@@ -357,6 +371,7 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
{
struct hsr_port *port;
struct sk_buff *skb;
bool sent = false;
hsr_for_each_port(frame->port_rcv->hsr, port) {
struct hsr_priv *hsr = port->hsr;
......@@ -372,6 +387,12 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
continue;
/* If hardware duplicate generation is enabled, only send out of
 * one port.
 */
if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
continue;
/* Don't send frame over port where it has been sent before.
* Also for SAN, this shouldn't be done.
*/
......@@ -403,10 +424,12 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
}
skb->dev = port->dev;
if (port->type == HSR_PT_MASTER)
if (port->type == HSR_PT_MASTER) {
hsr_deliver_master(skb, port->dev, frame->node_src);
else
hsr_xmit(skb, port, frame);
} else {
if (!hsr_xmit(skb, port, frame))
sent = true;
}
}
}
......@@ -454,7 +477,11 @@ static void handle_std_frame(struct sk_buff *skb,
void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
struct hsr_frame_info *frame)
{
if (proto == htons(ETH_P_PRP) ||
struct hsr_port *port = frame->port_rcv;
struct hsr_priv *hsr = port->hsr;
/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
proto == htons(ETH_P_HSR)) {
/* HSR tagged frame :- Data or Supervision */
frame->skb_std = NULL;
......
......@@ -23,6 +23,7 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
struct hsr_port *port);
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
struct hsr_frame_info *frame);
void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
......
......@@ -277,6 +277,8 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
skb = frame->skb_hsr;
else if (frame->skb_prp)
skb = frame->skb_prp;
else if (frame->skb_std)
skb = frame->skb_std;
if (!skb)
return;
......
......@@ -131,6 +131,17 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
return NULL;
}
int hsr_get_version(struct net_device *dev, enum hsr_version *ver)
{
struct hsr_priv *hsr;
hsr = netdev_priv(dev);
*ver = hsr->prot_version;
return 0;
}
EXPORT_SYMBOL(hsr_get_version);
static struct notifier_block hsr_nb = {
.notifier_call = hsr_netdev_notify, /* Slave event notifications */
};
......
......@@ -13,6 +13,7 @@
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/if_vlan.h>
#include <linux/if_hsr.h>
/* Time constants as specified in the HSR specification (IEC-62439-3 2010)
* Table 8.
......@@ -171,13 +172,6 @@ struct hsr_port {
enum hsr_port_type type;
};
/* used by driver internally to differentiate various protocols */
enum hsr_version {
HSR_V0 = 0,
HSR_V1,
PRP_V1,
};
struct hsr_frame_info;
struct hsr_node;
......
......@@ -48,12 +48,14 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
goto finish_consume;
}
/* For HSR, only tagged frames are expected, but for PRP
* there could be non tagged frames as well from Single
* attached nodes (SANs).
/* For HSR, only tagged frames are expected (unless the device offloads
* HSR tag removal), but for PRP there could be non tagged frames as
* well from Single attached nodes (SANs).
*/
protocol = eth_hdr(skb)->h_proto;
if (hsr->proto_ops->invalid_dan_ingress_frame &&
if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
hsr->proto_ops->invalid_dan_ingress_frame &&
hsr->proto_ops->invalid_dan_ingress_frame(protocol))
goto finish_pass;
......