Commit 7c6a86b4 authored by David S. Miller

Merge branch 'rmnet-Rewrite-some-existing-functionality'

Subash Abhinov Kasiviswanathan says:

====================
net: qualcomm: rmnet: Rewrite some existing functionality

This series fixes some of the broken rmnet functionality.
Bridge mode is rewritten and made usable, and the muxed_ep is converted to an hlist.

Patches 1-5 are cleanups in preparation for these changes.
Patch 6 does the hlist conversion.
Patch 7 has the implementation of the rmnet bridge mode.

v1->v2: Fix the warning and code style issue in rmnet_rx_handler as
mentioned by David.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5ef9d78e 60d58f97
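
For context on the hlist conversion mentioned in the cover letter, the series keeps one list head per logical endpoint slot in struct rmnet_port and looks endpoints up by walking the bucket for a given mux id (see rmnet_get_endpoint() in the diff below). The following is a minimal user-space sketch of that lookup pattern, not kernel code: the names mirror the driver, but the plain singly-linked list here only stands in for the kernel's hlist/RCU machinery.

	/* User-space sketch of the per-mux-id bucket lookup used by the series. */
	#include <stdio.h>

	#define RMNET_MAX_LOGICAL_EP 255

	struct rmnet_endpoint {
		unsigned char mux_id;
		struct rmnet_endpoint *next;	/* stands in for struct hlist_node */
	};

	struct rmnet_port {
		struct rmnet_endpoint *muxed_ep[RMNET_MAX_LOGICAL_EP];
	};

	/* Analogue of rmnet_get_endpoint(): walk the bucket for this mux_id. */
	static struct rmnet_endpoint *get_endpoint(struct rmnet_port *port,
						   unsigned char mux_id)
	{
		struct rmnet_endpoint *ep;

		for (ep = port->muxed_ep[mux_id]; ep; ep = ep->next)
			if (ep->mux_id == mux_id)
				return ep;
		return NULL;
	}

	/* Analogue of the hlist_add_head_rcu() call in rmnet_newlink(). */
	static void add_endpoint(struct rmnet_port *port, struct rmnet_endpoint *ep)
	{
		ep->next = port->muxed_ep[ep->mux_id];
		port->muxed_ep[ep->mux_id] = ep;
	}

	int main(void)
	{
		struct rmnet_port port = { { NULL } };
		struct rmnet_endpoint ep = { .mux_id = 7, .next = NULL };

		add_endpoint(&port, &ep);
		printf("lookup mux 7: %s\n", get_endpoint(&port, 7) ? "found" : "missing");
		printf("lookup mux 3: %s\n", get_endpoint(&port, 3) ? "found" : "missing");
		return 0;
	}
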
@@ -61,23 +61,6 @@ rmnet_get_port_rtnl(const struct net_device *real_dev)
 	return rtnl_dereference(real_dev->rx_handler_data);
 }
 
-static struct rmnet_endpoint*
-rmnet_get_endpoint(struct net_device *dev, int config_id)
-{
-	struct rmnet_endpoint *ep;
-	struct rmnet_port *port;
-
-	if (!rmnet_is_real_dev_registered(dev)) {
-		ep = rmnet_vnd_get_endpoint(dev);
-	} else {
-		port = rmnet_get_port_rtnl(dev);
-
-		ep = &port->muxed_ep[config_id];
-	}
-
-	return ep;
-}
-
 static int rmnet_unregister_real_device(struct net_device *real_dev,
 					struct rmnet_port *port)
 {
@@ -98,7 +81,7 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
 static int rmnet_register_real_device(struct net_device *real_dev)
 {
 	struct rmnet_port *port;
-	int rc;
+	int rc, entry;
 
 	ASSERT_RTNL();
@@ -119,27 +102,41 @@ static int rmnet_register_real_device(struct net_device *real_dev)
 	/* hold on to real dev for MAP data */
 	dev_hold(real_dev);
 
+	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
+		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
+
 	netdev_dbg(real_dev, "registered with rmnet\n");
 	return 0;
 }
 
-static void rmnet_set_endpoint_config(struct net_device *dev,
-				      u8 mux_id, u8 rmnet_mode,
-				      struct net_device *egress_dev)
-{
-	struct rmnet_endpoint *ep;
-
-	netdev_dbg(dev, "id %d mode %d dev %s\n",
-		   mux_id, rmnet_mode, egress_dev->name);
-
-	ep = rmnet_get_endpoint(dev, mux_id);
-	/* This config is cleared on every set, so its ok to not
-	 * clear it on a device delete.
-	 */
-	memset(ep, 0, sizeof(struct rmnet_endpoint));
-	ep->rmnet_mode = rmnet_mode;
-	ep->egress_dev = egress_dev;
-	ep->mux_id = mux_id;
+static void rmnet_unregister_bridge(struct net_device *dev,
+				    struct rmnet_port *port)
+{
+	struct net_device *rmnet_dev, *bridge_dev;
+	struct rmnet_port *bridge_port;
+
+	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
+		return;
+
+	/* bridge slave handling */
+	if (!port->nr_rmnet_devs) {
+		rmnet_dev = netdev_master_upper_dev_get_rcu(dev);
+		netdev_upper_dev_unlink(dev, rmnet_dev);
+
+		bridge_dev = port->bridge_ep;
+
+		bridge_port = rmnet_get_port_rtnl(bridge_dev);
+		bridge_port->bridge_ep = NULL;
+		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
+	} else {
+		bridge_dev = port->bridge_ep;
+
+		bridge_port = rmnet_get_port_rtnl(bridge_dev);
+		rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev);
+		netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
+
+		rmnet_unregister_real_device(bridge_dev, bridge_port);
+	}
 }
 
 static int rmnet_newlink(struct net *src_net, struct net_device *dev,
@@ -153,6 +150,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 			    RMNET_EGRESS_FORMAT_MAP;
 	struct net_device *real_dev;
 	int mode = RMNET_EPMODE_VND;
+	struct rmnet_endpoint *ep;
 	struct rmnet_port *port;
 	int err = 0;
 	u16 mux_id;
@@ -164,6 +162,10 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 	if (!data[IFLA_VLAN_ID])
 		return -EINVAL;
 
+	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+	if (!ep)
+		return -ENOMEM;
+
 	mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
 
 	err = rmnet_register_real_device(real_dev);
@@ -171,7 +173,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 		goto err0;
 
 	port = rmnet_get_port_rtnl(real_dev);
-	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev);
+	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
 	if (err)
 		goto err1;
@@ -183,13 +185,13 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 		   ingress_format, egress_format);
 	port->egress_data_format = egress_format;
 	port->ingress_data_format = ingress_format;
+	port->rmnet_mode = mode;
 
-	rmnet_set_endpoint_config(real_dev, mux_id, mode, dev);
-	rmnet_set_endpoint_config(dev, mux_id, mode, real_dev);
+	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
 	return 0;
 
 err2:
-	rmnet_vnd_dellink(mux_id, port);
+	rmnet_vnd_dellink(mux_id, port, ep);
 err1:
 	rmnet_unregister_real_device(real_dev, port);
 err0:
@@ -199,6 +201,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 static void rmnet_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct net_device *real_dev;
+	struct rmnet_endpoint *ep;
 	struct rmnet_port *port;
 	u8 mux_id;
@@ -212,8 +215,15 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
 	port = rmnet_get_port_rtnl(real_dev);
 
 	mux_id = rmnet_vnd_get_mux(dev);
-	rmnet_vnd_dellink(mux_id, port);
 	netdev_upper_dev_unlink(dev, real_dev);
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (ep) {
+		hlist_del_init_rcu(&ep->hlnode);
+		rmnet_unregister_bridge(dev, port);
+		rmnet_vnd_dellink(mux_id, port, ep);
+		kfree(ep);
+	}
+
 	rmnet_unregister_real_device(real_dev, port);
 
 	unregister_netdevice_queue(dev, head);
@@ -222,11 +232,16 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
 static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
 {
 	struct rmnet_walk_data *d = data;
+	struct rmnet_endpoint *ep;
 	u8 mux_id;
 
 	mux_id = rmnet_vnd_get_mux(rmnet_dev);
+	ep = rmnet_get_endpoint(d->port, mux_id);
 
-	rmnet_vnd_dellink(mux_id, d->port);
+	if (ep) {
+		hlist_del_init_rcu(&ep->hlnode);
+		rmnet_vnd_dellink(mux_id, d->port, ep);
+		kfree(ep);
+	}
 	netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
 	unregister_netdevice_queue(rmnet_dev, d->head);
@@ -252,6 +267,8 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
 	d.port = port;
 
 	rcu_read_lock();
+	rmnet_unregister_bridge(dev, port);
+
 	netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d);
 	rcu_read_unlock();
 	unregister_netdevice_many(&list);
@@ -324,6 +341,77 @@ struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
 	return NULL;
 }
 
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
+{
+	struct rmnet_endpoint *ep;
+
+	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
+		if (ep->mux_id == mux_id)
+			return ep;
+	}
+
+	return NULL;
+}
+
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev,
+		     struct netlink_ext_ack *extack)
+{
+	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+	struct net_device *real_dev = priv->real_dev;
+	struct rmnet_port *port, *slave_port;
+	int err;
+
+	port = rmnet_get_port(real_dev);
+
+	/* If there is more than one rmnet dev attached, it's probably being
+	 * used for muxing. Skip the bridging in that case.
+	 */
+	if (port->nr_rmnet_devs > 1)
+		return -EINVAL;
+
+	if (rmnet_is_real_dev_registered(slave_dev))
+		return -EBUSY;
+
+	err = rmnet_register_real_device(slave_dev);
+	if (err)
+		return -EBUSY;
+
+	err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
+					   extack);
+	if (err)
+		return -EINVAL;
+
+	slave_port = rmnet_get_port(slave_dev);
+	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+	slave_port->bridge_ep = real_dev;
+
+	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+	port->bridge_ep = slave_dev;
+
+	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
+	return 0;
+}
+
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev)
+{
+	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+	struct net_device *real_dev = priv->real_dev;
+	struct rmnet_port *port, *slave_port;
+
+	port = rmnet_get_port(real_dev);
+	port->rmnet_mode = RMNET_EPMODE_VND;
+	port->bridge_ep = NULL;
+
+	netdev_upper_dev_unlink(slave_dev, rmnet_dev);
+	slave_port = rmnet_get_port(slave_dev);
+	rmnet_unregister_real_device(slave_dev, slave_port);
+
+	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
+	return 0;
+}
+
 /* Startup/Shutdown */
 
 static int __init rmnet_init(void)
...
@@ -20,13 +20,10 @@
 
 #define RMNET_MAX_LOGICAL_EP 255
 
-/* Information about the next device to deliver the packet to.
- * Exact usage of this parameter depends on the rmnet_mode.
- */
 struct rmnet_endpoint {
-	u8 rmnet_mode;
 	u8 mux_id;
 	struct net_device *egress_dev;
+	struct hlist_node hlnode;
 };
 
 /* One instance of this structure is instantiated for each real_dev associated
@@ -34,22 +31,26 @@ struct rmnet_endpoint {
 */
 struct rmnet_port {
 	struct net_device *dev;
-	struct rmnet_endpoint local_ep;
-	struct rmnet_endpoint muxed_ep[RMNET_MAX_LOGICAL_EP];
 	u32 ingress_data_format;
 	u32 egress_data_format;
-	struct net_device *rmnet_devices[RMNET_MAX_LOGICAL_EP];
 	u8 nr_rmnet_devs;
+	u8 rmnet_mode;
+	struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
+	struct net_device *bridge_ep;
};
 
 extern struct rtnl_link_ops rmnet_link_ops;
 
 struct rmnet_priv {
-	struct rmnet_endpoint local_ep;
 	u8 mux_id;
 	struct net_device *real_dev;
 };
 
 struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
+
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev,
+		     struct netlink_ext_ack *extack);
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev);
 
 #endif /* _RMNET_CONFIG_H_ */
@@ -44,56 +44,18 @@ static void rmnet_set_skb_proto(struct sk_buff *skb)
 /* Generic handler */
 
 static rx_handler_result_t
-rmnet_bridge_handler(struct sk_buff *skb, struct rmnet_endpoint *ep)
-{
-	if (!ep->egress_dev)
-		kfree_skb(skb);
-	else
-		rmnet_egress_handler(skb, ep);
-
-	return RX_HANDLER_CONSUMED;
-}
-
-static rx_handler_result_t
-rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_endpoint *ep)
-{
-	switch (ep->rmnet_mode) {
-	case RMNET_EPMODE_NONE:
-		return RX_HANDLER_PASS;
-
-	case RMNET_EPMODE_BRIDGE:
-		return rmnet_bridge_handler(skb, ep);
-
-	case RMNET_EPMODE_VND:
-		skb_reset_transport_header(skb);
-		skb_reset_network_header(skb);
-		rmnet_vnd_rx_fixup(skb, skb->dev);
-
-		skb->pkt_type = PACKET_HOST;
-		skb_set_mac_header(skb, 0);
-		netif_receive_skb(skb);
-		return RX_HANDLER_CONSUMED;
-
-	default:
-		kfree_skb(skb);
-		return RX_HANDLER_CONSUMED;
-	}
-}
-
-static rx_handler_result_t
-rmnet_ingress_deliver_packet(struct sk_buff *skb,
-			     struct rmnet_port *port)
+rmnet_deliver_skb(struct sk_buff *skb)
 {
-	if (!port) {
-		kfree_skb(skb);
-		return RX_HANDLER_CONSUMED;
-	}
-
-	skb->dev = port->local_ep.egress_dev;
-
-	return rmnet_deliver_skb(skb, &port->local_ep);
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	rmnet_vnd_rx_fixup(skb, skb->dev);
+
+	skb->pkt_type = PACKET_HOST;
+	skb_set_mac_header(skb, 0);
+	netif_receive_skb(skb);
+	return RX_HANDLER_CONSUMED;
 }
 
 /* MAP handler */
 
 static rx_handler_result_t
@@ -109,19 +71,18 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
 			return rmnet_map_command(skb, port);
 
-		kfree_skb(skb);
-		return RX_HANDLER_CONSUMED;
+		goto free_skb;
 	}
 
 	mux_id = RMNET_MAP_GET_MUX_ID(skb);
 	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
 
-	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
-		kfree_skb(skb);
-		return RX_HANDLER_CONSUMED;
-	}
+	if (mux_id >= RMNET_MAX_LOGICAL_EP)
+		goto free_skb;
 
-	ep = &port->muxed_ep[mux_id];
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep)
+		goto free_skb;
 
 	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
 		skb->dev = ep->egress_dev;
@@ -130,7 +91,11 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 	skb_pull(skb, sizeof(struct rmnet_map_header));
 	skb_trim(skb, len);
 	rmnet_set_skb_proto(skb);
-	return rmnet_deliver_skb(skb, ep);
+	return rmnet_deliver_skb(skb);
+
+free_skb:
+	kfree_skb(skb);
+	return RX_HANDLER_CONSUMED;
 }
 
 static rx_handler_result_t
@@ -154,8 +119,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
 }
 
 static int rmnet_map_egress_handler(struct sk_buff *skb,
-				    struct rmnet_port *port,
-				    struct rmnet_endpoint *ep,
+				    struct rmnet_port *port, u8 mux_id,
 				    struct net_device *orig_dev)
 {
 	int required_headroom, additional_header_len;
@@ -174,10 +138,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 		return RMNET_MAP_CONSUMED;
 
 	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
-		if (ep->mux_id == 0xff)
+		if (mux_id == 0xff)
 			map_header->mux_id = 0;
 		else
-			map_header->mux_id = ep->mux_id;
+			map_header->mux_id = mux_id;
 	}
 
 	skb->protocol = htons(ETH_P_MAP);
@@ -185,6 +149,17 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	return RMNET_MAP_SUCCESS;
 }
 
+static rx_handler_result_t
+rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
+{
+	if (bridge_dev) {
+		skb->dev = bridge_dev;
+		dev_queue_xmit(skb);
+	}
+
+	return RX_HANDLER_CONSUMED;
+}
+
 /* Ingress / Egress Entry Points */
 
 /* Processes packet as per ingress data format for receiving device. Logical
@@ -193,10 +168,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 */
 rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 {
-	struct rmnet_port *port;
+	int rc = RX_HANDLER_CONSUMED;
 	struct sk_buff *skb = *pskb;
+	struct rmnet_port *port;
 	struct net_device *dev;
-	int rc;
 
 	if (!skb)
 		return RX_HANDLER_CONSUMED;
@@ -204,28 +179,14 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 	dev = skb->dev;
 	port = rmnet_get_port(dev);
 
-	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
-		rc = rmnet_map_ingress_handler(skb, port);
-	} else {
-		switch (ntohs(skb->protocol)) {
-		case ETH_P_MAP:
-			if (port->local_ep.rmnet_mode ==
-			    RMNET_EPMODE_BRIDGE) {
-				rc = rmnet_ingress_deliver_packet(skb, port);
-			} else {
-				kfree_skb(skb);
-				rc = RX_HANDLER_CONSUMED;
-			}
-			break;
-
-		case ETH_P_IP:
-		case ETH_P_IPV6:
-			rc = rmnet_ingress_deliver_packet(skb, port);
-			break;
-
-		default:
-			rc = RX_HANDLER_PASS;
-		}
+	switch (port->rmnet_mode) {
+	case RMNET_EPMODE_VND:
+		if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
+			rc = rmnet_map_ingress_handler(skb, port);
+		break;
+	case RMNET_EPMODE_BRIDGE:
+		rc = rmnet_bridge_handler(skb, port->bridge_ep);
+		break;
 	}
 
 	return rc;
@@ -235,14 +196,17 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
-void rmnet_egress_handler(struct sk_buff *skb,
-			  struct rmnet_endpoint *ep)
+void rmnet_egress_handler(struct sk_buff *skb)
 {
 	struct net_device *orig_dev;
 	struct rmnet_port *port;
+	struct rmnet_priv *priv;
+	u8 mux_id;
 
 	orig_dev = skb->dev;
-	skb->dev = ep->egress_dev;
+	priv = netdev_priv(orig_dev);
+	skb->dev = priv->real_dev;
+	mux_id = priv->mux_id;
 
 	port = rmnet_get_port(skb->dev);
 	if (!port) {
@@ -251,7 +215,7 @@ void rmnet_egress_handler(struct sk_buff *skb,
 	}
 
 	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
-		switch (rmnet_map_egress_handler(skb, port, ep, orig_dev)) {
+		switch (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) {
 		case RMNET_MAP_CONSUMED:
 			return;
@@ -264,8 +228,7 @@ void rmnet_egress_handler(struct sk_buff *skb,
 		}
 	}
 
-	if (ep->rmnet_mode == RMNET_EPMODE_VND)
-		rmnet_vnd_tx_fixup(skb, orig_dev);
+	rmnet_vnd_tx_fixup(skb, orig_dev);
 
 	dev_queue_xmit(skb);
 }
@@ -18,8 +18,7 @@
 
 #include "rmnet_config.h"
 
-void rmnet_egress_handler(struct sk_buff *skb,
-			  struct rmnet_endpoint *ep);
+void rmnet_egress_handler(struct sk_buff *skb);
 
 rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
...
@@ -17,7 +17,7 @@
 #include "rmnet_vnd.h"
 
 static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
-				    struct rmnet_port *rdinfo,
+				    struct rmnet_port *port,
 				    int enable)
 {
 	struct rmnet_map_control_command *cmd;
@@ -37,7 +37,7 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
 		return RX_HANDLER_CONSUMED;
 	}
 
-	ep = &rdinfo->muxed_ep[mux_id];
+	ep = rmnet_get_endpoint(port, mux_id);
 	vnd = ep->egress_dev;
 
 	ip_family = cmd->flow_control.ip_family;
...
@@ -19,23 +19,15 @@
 #define RMNET_TX_QUEUE_LEN 1000
 
 /* Constants */
-#define RMNET_EGRESS_FORMAT__RESERVED__ BIT(0)
 #define RMNET_EGRESS_FORMAT_MAP BIT(1)
 #define RMNET_EGRESS_FORMAT_AGGREGATION BIT(2)
 #define RMNET_EGRESS_FORMAT_MUXING BIT(3)
-#define RMNET_EGRESS_FORMAT_MAP_CKSUMV3 BIT(4)
-#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(5)
 
-#define RMNET_INGRESS_FIX_ETHERNET BIT(0)
 #define RMNET_INGRESS_FORMAT_MAP BIT(1)
 #define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(2)
 #define RMNET_INGRESS_FORMAT_DEMUXING BIT(3)
 #define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(4)
-#define RMNET_INGRESS_FORMAT_MAP_CKSUMV3 BIT(5)
-#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(6)
 
-/* Pass the frame up the stack with no modifications to skb->dev */
-#define RMNET_EPMODE_NONE (0)
 /* Replace skb->dev to a virtual rmnet device and pass up the stack */
 #define RMNET_EPMODE_VND (1)
 /* Pass the frame directly to another device with dev_queue_xmit() */
...
@@ -45,8 +45,8 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
 	struct rmnet_priv *priv;
 
 	priv = netdev_priv(dev);
-	if (priv->local_ep.egress_dev) {
-		rmnet_egress_handler(skb, &priv->local_ep);
+	if (priv->real_dev) {
+		rmnet_egress_handler(skb);
 	} else {
 		dev->stats.tx_dropped++;
 		kfree_skb(skb);
@@ -74,6 +74,8 @@ static const struct net_device_ops rmnet_vnd_ops = {
 	.ndo_start_xmit = rmnet_vnd_start_xmit,
 	.ndo_change_mtu = rmnet_vnd_change_mtu,
 	.ndo_get_iflink = rmnet_vnd_get_iflink,
+	.ndo_add_slave = rmnet_add_bridge,
+	.ndo_del_slave = rmnet_del_bridge,
 };
 
 /* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
@@ -100,17 +102,19 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
 
 int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 		      struct rmnet_port *port,
-		      struct net_device *real_dev)
+		      struct net_device *real_dev,
+		      struct rmnet_endpoint *ep)
 {
 	struct rmnet_priv *priv;
 	int rc;
 
-	if (port->rmnet_devices[id])
+	if (ep->egress_dev)
 		return -EINVAL;
 
 	rc = register_netdevice(rmnet_dev);
 	if (!rc) {
-		port->rmnet_devices[id] = rmnet_dev;
+		ep->egress_dev = rmnet_dev;
+		ep->mux_id = id;
+
 		port->nr_rmnet_devs++;
 
 		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
@@ -125,12 +129,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 	return rc;
 }
 
-int rmnet_vnd_dellink(u8 id, struct rmnet_port *port)
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+		      struct rmnet_endpoint *ep)
 {
-	if (id >= RMNET_MAX_LOGICAL_EP || !port->rmnet_devices[id])
+	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
 		return -EINVAL;
 
-	port->rmnet_devices[id] = NULL;
+	ep->egress_dev = NULL;
 	port->nr_rmnet_devs--;
 
 	return 0;
 }
@@ -143,21 +148,6 @@ u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
 	return priv->mux_id;
 }
 
-/* Gets the logical endpoint configuration for a RmNet virtual network device
- * node. Caller should confirm that devices is a RmNet VND before calling.
- */
-struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *rmnet_dev)
-{
-	struct rmnet_priv *priv;
-
-	if (!rmnet_dev)
-		return NULL;
-
-	priv = netdev_priv(rmnet_dev);
-	return &priv->local_ep;
-}
-
 int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
 {
 	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
...
@@ -17,11 +17,12 @@
 #define _RMNET_VND_H_
 
 int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
-struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *dev);
 int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 		      struct rmnet_port *port,
-		      struct net_device *real_dev);
-int rmnet_vnd_dellink(u8 id, struct rmnet_port *port);
+		      struct net_device *real_dev,
+		      struct rmnet_endpoint *ep);
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+		      struct rmnet_endpoint *ep);
 void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
 void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
 u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
...