Commit f9f9ab17 authored by David S. Miller's avatar David S. Miller

Merge tag 'batadv-next-for-davem-20160812' of git://git.open-mesh.org/linux-merge

Simon Wunderlich says:

====================
This feature patchset includes the following changes (in mostly
chronological order):

 - bump version strings, by Simon Wunderlich

 - kerneldoc clean up, by Sven Eckelmann

 - enable automatic module loading via RTNL and corresponding
   documentation changes, by Sven Eckelmann (2 patches)

 - fix/improve interface removal and associated locking, by
   Sven Eckelmann (3 patches)

 - clean up unused variables, by Linus Luessing

 - implement gateway selection code for B.A.T.M.A.N. V, by
   Antonio Quartulli (4 patches)

 - rewrite TQ comparison, by Markus Pargmann

 - fix Coccinelle warnings on bool vs integers (by Fengguang Wu/Intel's
   kbuild test robot) and bitwise arithmetic operations (by Linus
   Luessing)

 - rewrite packet creation for forwarding for readability and to avoid
   reference count mistakes, by Linus Luessing

 - use kmem_cache for translation table, which results in more efficient
   storing of translation table entries, by Sven Eckelmann

 - rewrite/clarify reference handling for send_skb_unicast, by Sven
   Eckelmann

 - fix debug messages when updating routes, by Sven Eckelmann
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 85be21bd b5dcbad2
@@ -43,10 +43,15 @@ new interfaces to verify the compatibility. There is no need to
 reload the module if you plug your USB wifi adapter into your ma-
 chine after batman advanced was initially loaded.
-To activate a given interface simply write "bat0" into its
-"mesh_iface" file inside the batman_adv subfolder:
-# echo bat0 > /sys/class/net/eth0/batman_adv/mesh_iface
+The batman-adv soft-interface can be created using the iproute2
+tool "ip"
+# ip link add name bat0 type batadv
+To activate a given interface simply attach it to the "bat0"
+interface
+# ip link set dev eth0 master bat0
 Repeat this step for all interfaces you wish to add. Now batman
 starts using/broadcasting on this/these interface(s).
@@ -56,10 +61,10 @@ By reading the "iface_status" file you can check its status:
 # cat /sys/class/net/eth0/batman_adv/iface_status
 # active
-To deactivate an interface you have to write "none" into its
-"mesh_iface" file:
-# echo none > /sys/class/net/eth0/batman_adv/mesh_iface
+To deactivate an interface you have to detach it from the
+"bat0" interface:
+# ip link set dev eth0 nomaster
 All mesh wide settings can be found in batman's own interface
......
@@ -51,6 +51,7 @@
 #include "bat_algo.h"
 #include "bitarray.h"
+#include "gateway_client.h"
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
@@ -528,36 +529,25 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
 static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 {
 	struct net_device *soft_iface;
-	struct batadv_priv *bat_priv;
-	struct batadv_hard_iface *primary_if = NULL;
 	if (!forw_packet->if_incoming) {
 		pr_err("Error - can't forward packet: incoming iface not specified\n");
-		goto out;
+		return;
 	}
 	soft_iface = forw_packet->if_incoming->soft_iface;
-	bat_priv = netdev_priv(soft_iface);
 	if (WARN_ON(!forw_packet->if_outgoing))
-		goto out;
+		return;
 	if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
-		goto out;
+		return;
 	if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
-		goto out;
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
+		return;
 	/* only for one specific outgoing interface */
 	batadv_iv_ogm_send_to_if(forw_packet, forw_packet->if_outgoing);
-out:
-	if (primary_if)
-		batadv_hardif_put(primary_if);
 }
/** /**
@@ -685,19 +675,12 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	struct batadv_forw_packet *forw_packet_aggr;
 	unsigned char *skb_buff;
 	unsigned int skb_size;
+	atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left;
-	/* own packet should always be scheduled */
-	if (!own_packet) {
-		if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
-			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-				   "batman packet queue full\n");
-			return;
-		}
-	}
-	forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
+	forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing,
+						    queue_left, bat_priv);
 	if (!forw_packet_aggr)
-		goto out_nomem;
+		return;
 	if (atomic_read(&bat_priv->aggregated_ogms) &&
 	    packet_len < BATADV_MAX_AGGREGATION_BYTES)
@@ -708,8 +691,11 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 		skb_size += ETH_HLEN;
 	forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size);
-	if (!forw_packet_aggr->skb)
-		goto out_free_forw_packet;
+	if (!forw_packet_aggr->skb) {
+		batadv_forw_packet_free(forw_packet_aggr);
+		return;
+	}
 	forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
 	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
@@ -717,12 +703,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	forw_packet_aggr->packet_len = packet_len;
 	memcpy(skb_buff, packet_buff, packet_len);
-	kref_get(&if_incoming->refcount);
-	kref_get(&if_outgoing->refcount);
 	forw_packet_aggr->own = own_packet;
-	forw_packet_aggr->if_incoming = if_incoming;
-	forw_packet_aggr->if_outgoing = if_outgoing;
-	forw_packet_aggr->num_packets = 0;
 	forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
 	forw_packet_aggr->send_time = send_time;
@@ -741,13 +722,6 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	queue_delayed_work(batadv_event_workqueue,
 			   &forw_packet_aggr->delayed_work,
 			   send_time - jiffies);
-	return;
-out_free_forw_packet:
-	kfree(forw_packet_aggr);
-out_nomem:
-	if (!own_packet)
-		atomic_inc(&bat_priv->batman_queue_left);
 }
/* aggregate a new packet into the existing ogm packet */ /* aggregate a new packet into the existing ogm packet */
@@ -1830,10 +1804,6 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 		batadv_iv_ogm_schedule(forw_packet->if_incoming);
 out:
-	/* don't count own packet */
-	if (!forw_packet->own)
-		atomic_inc(&bat_priv->batman_queue_left);
 	batadv_forw_packet_free(forw_packet);
 }
@@ -2029,35 +1999,40 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
 }
 /**
- * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * batadv_iv_ogm_neigh_diff - calculate tq difference of two neighbors
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
  * @if_outgoing2: outgoing interface for the second neighbor
+ * @diff: pointer to integer receiving the calculated difference
  *
- * Return: a value less, equal to or greater than 0 if the metric via neigh1 is
- * lower, the same as or higher than the metric via neigh2
+ * The content of *@diff is only valid when this function returns true.
+ * It is less, equal to or greater than 0 if the metric via neigh1 is lower,
+ * the same as or higher than the metric via neigh2
+ *
+ * Return: true when the difference could be calculated, false otherwise
  */
-static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
+static bool batadv_iv_ogm_neigh_diff(struct batadv_neigh_node *neigh1,
 				   struct batadv_hard_iface *if_outgoing1,
 				   struct batadv_neigh_node *neigh2,
-				   struct batadv_hard_iface *if_outgoing2)
+				   struct batadv_hard_iface *if_outgoing2,
+				   int *diff)
 {
 	struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
 	u8 tq1, tq2;
-	int diff;
+	bool ret = true;
 	neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
 	neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
 	if (!neigh1_ifinfo || !neigh2_ifinfo) {
-		diff = 0;
+		ret = false;
 		goto out;
 	}
 	tq1 = neigh1_ifinfo->bat_iv.tq_avg;
 	tq2 = neigh2_ifinfo->bat_iv.tq_avg;
-	diff = tq1 - tq2;
+	*diff = (int)tq1 - (int)tq2;
 out:
 	if (neigh1_ifinfo)
@@ -2065,6 +2040,32 @@ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
 	if (neigh2_ifinfo)
 		batadv_neigh_ifinfo_put(neigh2_ifinfo);
+	return ret;
+}
+/**
+ * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * @neigh1: the first neighbor object of the comparison
+ * @if_outgoing1: outgoing interface for the first neighbor
+ * @neigh2: the second neighbor object of the comparison
+ * @if_outgoing2: outgoing interface for the second neighbor
+ *
+ * Return: a value less, equal to or greater than 0 if the metric via neigh1 is
+ * lower, the same as or higher than the metric via neigh2
+ */
+static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
+				   struct batadv_hard_iface *if_outgoing1,
+				   struct batadv_neigh_node *neigh2,
+				   struct batadv_hard_iface *if_outgoing2)
+{
+	bool ret;
+	int diff;
+	ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2,
+				       if_outgoing2, &diff);
+	if (!ret)
+		return 0;
 	return diff;
 }
...@@ -2085,36 +2086,235 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1, ...@@ -2085,36 +2086,235 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
struct batadv_neigh_node *neigh2, struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2) struct batadv_hard_iface *if_outgoing2)
{ {
struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
u8 tq1, tq2;
bool ret; bool ret;
int diff;
neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1); ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2,
neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2); if_outgoing2, &diff);
if (!ret)
return false;
/* we can't say that the metric is better */ ret = diff > -BATADV_TQ_SIMILARITY_THRESHOLD;
if (!neigh1_ifinfo || !neigh2_ifinfo) { return ret;
ret = false; }
static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
{
/* begin scheduling originator messages on that interface */
batadv_iv_ogm_schedule(hard_iface);
}
static struct batadv_gw_node *
batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
struct batadv_neigh_node *router;
struct batadv_neigh_ifinfo *router_ifinfo;
struct batadv_gw_node *gw_node, *curr_gw = NULL;
u64 max_gw_factor = 0;
u64 tmp_gw_factor = 0;
u8 max_tq = 0;
u8 tq_avg;
struct batadv_orig_node *orig_node;
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
orig_node = gw_node->orig_node;
router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
if (!router)
continue;
router_ifinfo = batadv_neigh_ifinfo_get(router,
BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto next;
if (!kref_get_unless_zero(&gw_node->refcount))
goto next;
tq_avg = router_ifinfo->bat_iv.tq_avg;
switch (atomic_read(&bat_priv->gw.sel_class)) {
case 1: /* fast connection */
tmp_gw_factor = tq_avg * tq_avg;
tmp_gw_factor *= gw_node->bandwidth_down;
tmp_gw_factor *= 100 * 100;
tmp_gw_factor >>= 18;
if ((tmp_gw_factor > max_gw_factor) ||
((tmp_gw_factor == max_gw_factor) &&
(tq_avg > max_tq))) {
if (curr_gw)
batadv_gw_node_put(curr_gw);
curr_gw = gw_node;
kref_get(&curr_gw->refcount);
}
break;
default: /* 2: stable connection (use best statistic)
* 3: fast-switch (use best statistic but change as
* soon as a better gateway appears)
* XX: late-switch (use best statistic but change as
* soon as a better gateway appears which has
* $routing_class more tq points)
*/
if (tq_avg > max_tq) {
if (curr_gw)
batadv_gw_node_put(curr_gw);
curr_gw = gw_node;
kref_get(&curr_gw->refcount);
}
break;
}
if (tq_avg > max_tq)
max_tq = tq_avg;
if (tmp_gw_factor > max_gw_factor)
max_gw_factor = tmp_gw_factor;
batadv_gw_node_put(gw_node);
next:
batadv_neigh_node_put(router);
if (router_ifinfo)
batadv_neigh_ifinfo_put(router_ifinfo);
}
rcu_read_unlock();
return curr_gw;
}
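As a rough illustration of the sel_class 1 ("fast connection") metric computed above, the following standalone userspace sketch (not part of the patch; the sample values are made up) runs the same arithmetic on a TQ average (0-255) and an advertised downlink bandwidth given in 0.1 Mbit/s units, as printed by the gateway table:

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as the "fast connection" branch above */
static uint64_t gw_factor(uint8_t tq_avg, uint32_t bandwidth_down)
{
	uint64_t tmp_gw_factor;

	tmp_gw_factor = (uint64_t)tq_avg * tq_avg; /* squared link quality */
	tmp_gw_factor *= bandwidth_down;           /* advertised downlink, 0.1 Mbit/s units */
	tmp_gw_factor *= 100 * 100;
	tmp_gw_factor >>= 18;

	return tmp_gw_factor;
}

int main(void)
{
	/* a perfect link to a 10.0 Mbit/s gateway vs. a weaker link to 50.0 Mbit/s */
	printf("tq 255, 10.0 MBit: %llu\n", (unsigned long long)gw_factor(255, 100));
	printf("tq 200, 50.0 MBit: %llu\n", (unsigned long long)gw_factor(200, 500));
	return 0;
}

With these sample numbers the weaker link to the faster gateway still wins; only when the factors are equal does the tq_avg comparison above break the tie.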
static bool batadv_iv_gw_is_eligible(struct batadv_priv *bat_priv,
struct batadv_orig_node *curr_gw_orig,
struct batadv_orig_node *orig_node)
{
struct batadv_neigh_ifinfo *router_orig_ifinfo = NULL;
struct batadv_neigh_ifinfo *router_gw_ifinfo = NULL;
struct batadv_neigh_node *router_gw = NULL;
struct batadv_neigh_node *router_orig = NULL;
u8 gw_tq_avg, orig_tq_avg;
bool ret = false;
/* dynamic re-election is performed only on fast or late switch */
if (atomic_read(&bat_priv->gw.sel_class) <= 2)
return false;
router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT);
if (!router_gw) {
ret = true;
goto out; goto out;
} }
tq1 = neigh1_ifinfo->bat_iv.tq_avg; router_gw_ifinfo = batadv_neigh_ifinfo_get(router_gw,
tq2 = neigh2_ifinfo->bat_iv.tq_avg; BATADV_IF_DEFAULT);
ret = (tq1 - tq2) > -BATADV_TQ_SIMILARITY_THRESHOLD; if (!router_gw_ifinfo) {
ret = true;
goto out;
}
router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
if (!router_orig)
goto out;
router_orig_ifinfo = batadv_neigh_ifinfo_get(router_orig,
BATADV_IF_DEFAULT);
if (!router_orig_ifinfo)
goto out;
gw_tq_avg = router_gw_ifinfo->bat_iv.tq_avg;
orig_tq_avg = router_orig_ifinfo->bat_iv.tq_avg;
/* the TQ value has to be better */
if (orig_tq_avg < gw_tq_avg)
goto out;
/* if the routing class is greater than 3 the value tells us how much
* greater the TQ value of the new gateway must be
*/
if ((atomic_read(&bat_priv->gw.sel_class) > 3) &&
(orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class)))
goto out;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
gw_tq_avg, orig_tq_avg);
ret = true;
out: out:
if (neigh1_ifinfo) if (router_gw_ifinfo)
batadv_neigh_ifinfo_put(neigh1_ifinfo); batadv_neigh_ifinfo_put(router_gw_ifinfo);
if (neigh2_ifinfo) if (router_orig_ifinfo)
batadv_neigh_ifinfo_put(neigh2_ifinfo); batadv_neigh_ifinfo_put(router_orig_ifinfo);
if (router_gw)
batadv_neigh_node_put(router_gw);
if (router_orig)
batadv_neigh_node_put(router_orig);
return ret; return ret;
} }
static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface) /* fails if orig_node has no router */
static int batadv_iv_gw_write_buffer_text(struct batadv_priv *bat_priv,
struct seq_file *seq,
const struct batadv_gw_node *gw_node)
{ {
/* begin scheduling originator messages on that interface */ struct batadv_gw_node *curr_gw;
batadv_iv_ogm_schedule(hard_iface); struct batadv_neigh_node *router;
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
int ret = -1;
router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
if (!router)
goto out;
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto out;
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
(curr_gw == gw_node ? "=>" : " "),
gw_node->orig_node->orig,
router_ifinfo->bat_iv.tq_avg, router->addr,
router->if_incoming->net_dev->name,
gw_node->bandwidth_down / 10,
gw_node->bandwidth_down % 10,
gw_node->bandwidth_up / 10,
gw_node->bandwidth_up % 10);
ret = seq_has_overflowed(seq) ? -1 : 0;
if (curr_gw)
batadv_gw_node_put(curr_gw);
out:
if (router_ifinfo)
batadv_neigh_ifinfo_put(router_ifinfo);
if (router)
batadv_neigh_node_put(router);
return ret;
}
static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
struct seq_file *seq)
{
struct batadv_gw_node *gw_node;
int gw_count = 0;
seq_puts(seq,
" Gateway (#/255) Nexthop [outgoingIF]: advertised uplink bandwidth\n");
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
/* fails if orig_node has no router */
if (batadv_iv_gw_write_buffer_text(bat_priv, seq, gw_node) < 0)
continue;
gw_count++;
}
rcu_read_unlock();
if (gw_count == 0)
seq_puts(seq, "No gateways in range ...\n");
} }
static struct batadv_algo_ops batadv_batman_iv __read_mostly = { static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
@@ -2137,6 +2337,11 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
 		.add_if = batadv_iv_ogm_orig_add_if,
 		.del_if = batadv_iv_ogm_orig_del_if,
 	},
+	.gw = {
+		.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
+		.is_eligible = batadv_iv_gw_is_eligible,
+		.print = batadv_iv_gw_print,
+	},
 };
int __init batadv_iv_init(void) int __init batadv_iv_init(void)
......
@@ -21,8 +21,11 @@
 #include <linux/atomic.h>
 #include <linux/bug.h>
 #include <linux/cache.h>
+#include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/netdevice.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
@@ -34,8 +37,11 @@
 #include "bat_algo.h"
 #include "bat_v_elp.h"
 #include "bat_v_ogm.h"
+#include "gateway_client.h"
+#include "gateway_common.h"
 #include "hard-interface.h"
 #include "hash.h"
+#include "log.h"
 #include "originator.h"
 #include "packet.h"
@@ -320,6 +326,239 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
 	return ret;
 }
static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
char *buff, size_t count)
{
u32 old_class, class;
if (!batadv_parse_throughput(bat_priv->soft_iface, buff,
"B.A.T.M.A.N. V GW selection class",
&class))
return -EINVAL;
old_class = atomic_read(&bat_priv->gw.sel_class);
atomic_set(&bat_priv->gw.sel_class, class);
if (old_class != class)
batadv_gw_reselect(bat_priv);
return count;
}
static ssize_t batadv_v_show_sel_class(struct batadv_priv *bat_priv, char *buff)
{
u32 class = atomic_read(&bat_priv->gw.sel_class);
return sprintf(buff, "%u.%u MBit\n", class / 10, class % 10);
}
/**
* batadv_v_gw_throughput_get - retrieve the GW-bandwidth for a given GW
* @gw_node: the GW to retrieve the metric for
* @bw: the pointer where the metric will be stored. The metric is computed as
* the minimum between the GW advertised throughput and the path throughput to
* it in the mesh
*
* Return: 0 on success, -1 on failure
*/
static int batadv_v_gw_throughput_get(struct batadv_gw_node *gw_node, u32 *bw)
{
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
struct batadv_orig_node *orig_node;
struct batadv_neigh_node *router;
int ret = -1;
orig_node = gw_node->orig_node;
router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
if (!router)
goto out;
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto out;
/* the GW metric is computed as the minimum between the path throughput
* to reach the GW itself and the advertised bandwidth.
* This gives us an approximation of the effective throughput that the
* client can expect via this particular GW node
*/
*bw = router_ifinfo->bat_v.throughput;
*bw = min_t(u32, *bw, gw_node->bandwidth_down);
ret = 0;
out:
if (router)
batadv_neigh_node_put(router);
if (router_ifinfo)
batadv_neigh_ifinfo_put(router_ifinfo);
return ret;
}
/**
* batadv_v_gw_get_best_gw_node - retrieve the best GW node
* @bat_priv: the bat priv with all the soft interface information
*
* Return: the GW node having the best GW-metric, NULL if no GW is known
*/
static struct batadv_gw_node *
batadv_v_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *gw_node, *curr_gw = NULL;
u32 max_bw = 0, bw;
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
if (!kref_get_unless_zero(&gw_node->refcount))
continue;
if (batadv_v_gw_throughput_get(gw_node, &bw) < 0)
goto next;
if (curr_gw && (bw <= max_bw))
goto next;
if (curr_gw)
batadv_gw_node_put(curr_gw);
curr_gw = gw_node;
kref_get(&curr_gw->refcount);
max_bw = bw;
next:
batadv_gw_node_put(gw_node);
}
rcu_read_unlock();
return curr_gw;
}
/**
* batadv_v_gw_is_eligible - check if a originator would be selected as GW
* @bat_priv: the bat priv with all the soft interface information
* @curr_gw_orig: originator representing the currently selected GW
* @orig_node: the originator representing the new candidate
*
* Return: true if orig_node can be selected as current GW, false otherwise
*/
static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
struct batadv_orig_node *curr_gw_orig,
struct batadv_orig_node *orig_node)
{
struct batadv_gw_node *curr_gw = NULL, *orig_gw = NULL;
u32 gw_throughput, orig_throughput, threshold;
bool ret = false;
threshold = atomic_read(&bat_priv->gw.sel_class);
curr_gw = batadv_gw_node_get(bat_priv, curr_gw_orig);
if (!curr_gw) {
ret = true;
goto out;
}
if (batadv_v_gw_throughput_get(curr_gw, &gw_throughput) < 0) {
ret = true;
goto out;
}
orig_gw = batadv_gw_node_get(bat_priv, orig_node);
if (!orig_node)
goto out;
if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
goto out;
if (orig_throughput < gw_throughput)
goto out;
if ((orig_throughput - gw_throughput) < threshold)
goto out;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Restarting gateway selection: better gateway found (throughput curr: %u, throughput new: %u)\n",
gw_throughput, orig_throughput);
ret = true;
out:
if (curr_gw)
batadv_gw_node_put(curr_gw);
if (orig_gw)
batadv_gw_node_put(orig_gw);
return ret;
}
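The threshold check above amounts to a simple hysteresis rule; the sketch below (not kernel code, with made-up sample values; gw_switch is a hypothetical helper name) mirrors it in userspace, with all throughputs in the same 0.1 Mbit/s units used by the selection class:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors batadv_v_gw_is_eligible(): switch only when the candidate beats
 * the current gateway by at least the configured selection class
 */
static bool gw_switch(uint32_t curr, uint32_t candidate, uint32_t threshold)
{
	if (candidate < curr)
		return false;

	return (candidate - curr) >= threshold;
}

int main(void)
{
	/* default threshold of 50 (5.0 Mbit/s), as set in batadv_v_mesh_init() */
	printf("%d\n", gw_switch(200, 240, 50)); /* 24.0 vs 20.0 Mbit/s -> 0, keep current */
	printf("%d\n", gw_switch(200, 260, 50)); /* 26.0 vs 20.0 Mbit/s -> 1, reselect */
	return 0;
}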
/* fails if orig_node has no router */
static int batadv_v_gw_write_buffer_text(struct batadv_priv *bat_priv,
struct seq_file *seq,
const struct batadv_gw_node *gw_node)
{
struct batadv_gw_node *curr_gw;
struct batadv_neigh_node *router;
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
int ret = -1;
router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
if (!router)
goto out;
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto out;
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
seq_printf(seq, "%s %pM (%9u.%1u) %pM [%10s]: %u.%u/%u.%u MBit\n",
(curr_gw == gw_node ? "=>" : " "),
gw_node->orig_node->orig,
router_ifinfo->bat_v.throughput / 10,
router_ifinfo->bat_v.throughput % 10, router->addr,
router->if_incoming->net_dev->name,
gw_node->bandwidth_down / 10,
gw_node->bandwidth_down % 10,
gw_node->bandwidth_up / 10,
gw_node->bandwidth_up % 10);
ret = seq_has_overflowed(seq) ? -1 : 0;
if (curr_gw)
batadv_gw_node_put(curr_gw);
out:
if (router_ifinfo)
batadv_neigh_ifinfo_put(router_ifinfo);
if (router)
batadv_neigh_node_put(router);
return ret;
}
/**
* batadv_v_gw_print - print the gateway list
* @bat_priv: the bat priv with all the soft interface information
* @seq: gateway table seq_file struct
*/
static void batadv_v_gw_print(struct batadv_priv *bat_priv,
struct seq_file *seq)
{
struct batadv_gw_node *gw_node;
int gw_count = 0;
seq_puts(seq,
" Gateway ( throughput) Nexthop [outgoingIF]: advertised uplink bandwidth\n");
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
/* fails if orig_node has no router */
if (batadv_v_gw_write_buffer_text(bat_priv, seq, gw_node) < 0)
continue;
gw_count++;
}
rcu_read_unlock();
if (gw_count == 0)
seq_puts(seq, "No gateways in range ...\n");
}
 static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 	.name = "BATMAN_V",
 	.iface = {
@@ -338,6 +577,13 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 	.orig = {
 		.print = batadv_v_orig_print,
 	},
+	.gw = {
+		.store_sel_class = batadv_v_store_sel_class,
+		.show_sel_class = batadv_v_show_sel_class,
+		.get_best_gw_node = batadv_v_gw_get_best_gw_node,
+		.is_eligible = batadv_v_gw_is_eligible,
+		.print = batadv_v_gw_print,
+	},
 };
 /**
@@ -363,7 +609,16 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
  */
 int batadv_v_mesh_init(struct batadv_priv *bat_priv)
 {
-	return batadv_v_ogm_init(bat_priv);
+	int ret = 0;
+	ret = batadv_v_ogm_init(bat_priv);
+	if (ret < 0)
+		return ret;
+	/* set default throughput difference threshold to 5Mbps */
+	atomic_set(&bat_priv->gw.sel_class, 50);
+	return 0;
 }
 /**
......
@@ -1148,7 +1148,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
 	/* Let the loopdetect frames on the mesh in any case. */
 	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
-		return 0;
+		return false;
 	/* check if it is a claim frame. */
 	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
......
@@ -80,12 +80,12 @@ static void batadv_gw_node_release(struct kref *ref)
  * batadv_gw_node_put - decrement the gw_node refcounter and possibly release it
  * @gw_node: gateway node to free
  */
-static void batadv_gw_node_put(struct batadv_gw_node *gw_node)
+void batadv_gw_node_put(struct batadv_gw_node *gw_node)
 {
 	kref_put(&gw_node->refcount, batadv_gw_node_release);
 }
-static struct batadv_gw_node *
+struct batadv_gw_node *
 batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
 {
 	struct batadv_gw_node *gw_node;
...@@ -164,86 +164,6 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv) ...@@ -164,86 +164,6 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv)
atomic_set(&bat_priv->gw.reselect, 1); atomic_set(&bat_priv->gw.reselect, 1);
} }
static struct batadv_gw_node *
batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
struct batadv_neigh_node *router;
struct batadv_neigh_ifinfo *router_ifinfo;
struct batadv_gw_node *gw_node, *curr_gw = NULL;
u64 max_gw_factor = 0;
u64 tmp_gw_factor = 0;
u8 max_tq = 0;
u8 tq_avg;
struct batadv_orig_node *orig_node;
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
orig_node = gw_node->orig_node;
router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
if (!router)
continue;
router_ifinfo = batadv_neigh_ifinfo_get(router,
BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto next;
if (!kref_get_unless_zero(&gw_node->refcount))
goto next;
tq_avg = router_ifinfo->bat_iv.tq_avg;
switch (atomic_read(&bat_priv->gw.sel_class)) {
case 1: /* fast connection */
tmp_gw_factor = tq_avg * tq_avg;
tmp_gw_factor *= gw_node->bandwidth_down;
tmp_gw_factor *= 100 * 100;
tmp_gw_factor >>= 18;
if ((tmp_gw_factor > max_gw_factor) ||
((tmp_gw_factor == max_gw_factor) &&
(tq_avg > max_tq))) {
if (curr_gw)
batadv_gw_node_put(curr_gw);
curr_gw = gw_node;
kref_get(&curr_gw->refcount);
}
break;
default: /* 2: stable connection (use best statistic)
* 3: fast-switch (use best statistic but change as
* soon as a better gateway appears)
* XX: late-switch (use best statistic but change as
* soon as a better gateway appears which has
* $routing_class more tq points)
*/
if (tq_avg > max_tq) {
if (curr_gw)
batadv_gw_node_put(curr_gw);
curr_gw = gw_node;
kref_get(&curr_gw->refcount);
}
break;
}
if (tq_avg > max_tq)
max_tq = tq_avg;
if (tmp_gw_factor > max_gw_factor)
max_gw_factor = tmp_gw_factor;
batadv_gw_node_put(gw_node);
next:
batadv_neigh_node_put(router);
if (router_ifinfo)
batadv_neigh_ifinfo_put(router_ifinfo);
}
rcu_read_unlock();
return curr_gw;
}
 /**
  * batadv_gw_check_client_stop - check if client mode has been switched off
  * @bat_priv: the bat priv with all the soft interface information
@@ -287,12 +207,19 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
 	if (atomic_read(&bat_priv->gw.mode) != BATADV_GW_MODE_CLIENT)
 		goto out;
+	if (!bat_priv->algo_ops->gw.get_best_gw_node)
+		goto out;
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 	if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
 		goto out;
-	next_gw = batadv_gw_get_best_gw_node(bat_priv);
+	/* if gw.reselect is set to 1 it means that a previous call to
+	 * gw.is_eligible() said that we have a new best GW, therefore it can
+	 * now be picked from the list and selected
+	 */
+	next_gw = bat_priv->algo_ops->gw.get_best_gw_node(bat_priv);
 	if (curr_gw == next_gw)
 		goto out;
...@@ -360,70 +287,31 @@ void batadv_gw_election(struct batadv_priv *bat_priv) ...@@ -360,70 +287,31 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
void batadv_gw_check_election(struct batadv_priv *bat_priv, void batadv_gw_check_election(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node) struct batadv_orig_node *orig_node)
{ {
struct batadv_neigh_ifinfo *router_orig_tq = NULL;
struct batadv_neigh_ifinfo *router_gw_tq = NULL;
struct batadv_orig_node *curr_gw_orig; struct batadv_orig_node *curr_gw_orig;
struct batadv_neigh_node *router_gw = NULL;
struct batadv_neigh_node *router_orig = NULL; /* abort immediately if the routing algorithm does not support gateway
u8 gw_tq_avg, orig_tq_avg; * election
*/
if (!bat_priv->algo_ops->gw.is_eligible)
return;
curr_gw_orig = batadv_gw_get_selected_orig(bat_priv); curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
if (!curr_gw_orig) if (!curr_gw_orig)
goto reselect; goto reselect;
router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT);
if (!router_gw)
goto reselect;
router_gw_tq = batadv_neigh_ifinfo_get(router_gw,
BATADV_IF_DEFAULT);
if (!router_gw_tq)
goto reselect;
/* this node already is the gateway */ /* this node already is the gateway */
if (curr_gw_orig == orig_node) if (curr_gw_orig == orig_node)
goto out; goto out;
router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!bat_priv->algo_ops->gw.is_eligible(bat_priv, curr_gw_orig,
if (!router_orig) orig_node))
goto out;
router_orig_tq = batadv_neigh_ifinfo_get(router_orig,
BATADV_IF_DEFAULT);
if (!router_orig_tq)
goto out;
gw_tq_avg = router_gw_tq->bat_iv.tq_avg;
orig_tq_avg = router_orig_tq->bat_iv.tq_avg;
/* the TQ value has to be better */
if (orig_tq_avg < gw_tq_avg)
goto out;
/* if the routing class is greater than 3 the value tells us how much
* greater the TQ value of the new gateway must be
*/
if ((atomic_read(&bat_priv->gw.sel_class) > 3) &&
(orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class)))
goto out; goto out;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
gw_tq_avg, orig_tq_avg);
reselect: reselect:
batadv_gw_reselect(bat_priv); batadv_gw_reselect(bat_priv);
out: out:
if (curr_gw_orig) if (curr_gw_orig)
batadv_orig_node_put(curr_gw_orig); batadv_orig_node_put(curr_gw_orig);
if (router_gw)
batadv_neigh_node_put(router_gw);
if (router_orig)
batadv_neigh_node_put(router_orig);
if (router_gw_tq)
batadv_neigh_ifinfo_put(router_gw_tq);
if (router_orig_tq)
batadv_neigh_ifinfo_put(router_orig_tq);
} }
/** /**
@@ -472,8 +360,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
  *
  * Return: gateway node if found or NULL otherwise.
  */
-static struct batadv_gw_node *
-batadv_gw_node_get(struct batadv_priv *bat_priv,
-		   struct batadv_orig_node *orig_node)
+struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig_node)
 {
 	struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;
...@@ -585,80 +472,31 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv) ...@@ -585,80 +472,31 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
spin_unlock_bh(&bat_priv->gw.list_lock); spin_unlock_bh(&bat_priv->gw.list_lock);
} }
/* fails if orig_node has no router */
static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
struct seq_file *seq,
const struct batadv_gw_node *gw_node)
{
struct batadv_gw_node *curr_gw;
struct batadv_neigh_node *router;
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
int ret = -1;
router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
if (!router)
goto out;
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto out;
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
(curr_gw == gw_node ? "=>" : " "),
gw_node->orig_node->orig,
router_ifinfo->bat_iv.tq_avg, router->addr,
router->if_incoming->net_dev->name,
gw_node->bandwidth_down / 10,
gw_node->bandwidth_down % 10,
gw_node->bandwidth_up / 10,
gw_node->bandwidth_up % 10);
ret = seq_has_overflowed(seq) ? -1 : 0;
if (curr_gw)
batadv_gw_node_put(curr_gw);
out:
if (router_ifinfo)
batadv_neigh_ifinfo_put(router_ifinfo);
if (router)
batadv_neigh_node_put(router);
return ret;
}
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
 	struct batadv_hard_iface *primary_if;
-	struct batadv_gw_node *gw_node;
-	int gw_count = 0;
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
-		goto out;
+		return 0;
-	seq_printf(seq,
-		   " Gateway (#/255) Nexthop [outgoingIF]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
 		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
-		   primary_if->net_dev->dev_addr, net_dev->name);
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
-		/* fails if orig_node has no router */
-		if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
-			continue;
-		gw_count++;
+		   primary_if->net_dev->dev_addr, net_dev->name,
+		   bat_priv->algo_ops->name);
+	batadv_hardif_put(primary_if);
+	if (!bat_priv->algo_ops->gw.print) {
+		seq_puts(seq,
+			 "No printing function for this routing protocol\n");
+		return 0;
 	}
-	rcu_read_unlock();
-	if (gw_count == 0)
-		seq_puts(seq, "No gateways in range ...\n");
-out:
-	if (primary_if)
-		batadv_hardif_put(primary_if);
+	bat_priv->algo_ops->gw.print(bat_priv, seq);
 	return 0;
 }
......
...@@ -39,10 +39,15 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, ...@@ -39,10 +39,15 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
void batadv_gw_node_delete(struct batadv_priv *bat_priv, void batadv_gw_node_delete(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node); struct batadv_orig_node *orig_node);
void batadv_gw_node_free(struct batadv_priv *bat_priv); void batadv_gw_node_free(struct batadv_priv *bat_priv);
void batadv_gw_node_put(struct batadv_gw_node *gw_node);
struct batadv_gw_node *
batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv);
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb); bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
enum batadv_dhcp_recipient enum batadv_dhcp_recipient
batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
u8 *chaddr); u8 *chaddr);
struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
@@ -241,10 +241,9 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 	batadv_gw_node_update(bat_priv, orig, &gateway);
-	/* restart gateway selection if fast or late switching was enabled */
+	/* restart gateway selection */
 	if ((gateway.bandwidth_down != 0) &&
-	    (atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT) &&
-	    (atomic_read(&bat_priv->gw.sel_class) > 2))
+	    (atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT))
 		batadv_gw_check_election(bat_priv, orig);
 }
......
...@@ -35,7 +35,6 @@ ...@@ -35,7 +35,6 @@
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "bat_v.h" #include "bat_v.h"
#include "bridge_loop_avoidance.h" #include "bridge_loop_avoidance.h"
...@@ -625,25 +624,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, ...@@ -625,25 +624,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
batadv_hardif_put(primary_if); batadv_hardif_put(primary_if);
} }
/**
* batadv_hardif_remove_interface_finish - cleans up the remains of a hardif
* @work: work queue item
*
* Free the parts of the hard interface which can not be removed under
* rtnl lock (to prevent deadlock situations).
*/
static void batadv_hardif_remove_interface_finish(struct work_struct *work)
{
struct batadv_hard_iface *hard_iface;
hard_iface = container_of(work, struct batadv_hard_iface,
cleanup_work);
batadv_debugfs_del_hardif(hard_iface);
batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
batadv_hardif_put(hard_iface);
}
static struct batadv_hard_iface * static struct batadv_hard_iface *
batadv_hardif_add_interface(struct net_device *net_dev) batadv_hardif_add_interface(struct net_device *net_dev)
{ {
...@@ -676,8 +656,6 @@ batadv_hardif_add_interface(struct net_device *net_dev) ...@@ -676,8 +656,6 @@ batadv_hardif_add_interface(struct net_device *net_dev)
INIT_LIST_HEAD(&hard_iface->list); INIT_LIST_HEAD(&hard_iface->list);
INIT_HLIST_HEAD(&hard_iface->neigh_list); INIT_HLIST_HEAD(&hard_iface->neigh_list);
INIT_WORK(&hard_iface->cleanup_work,
batadv_hardif_remove_interface_finish);
spin_lock_init(&hard_iface->neigh_list_lock); spin_lock_init(&hard_iface->neigh_list_lock);
...@@ -719,7 +697,9 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface) ...@@ -719,7 +697,9 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
return; return;
hard_iface->if_status = BATADV_IF_TO_BE_REMOVED; hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
queue_work(batadv_event_workqueue, &hard_iface->cleanup_work); batadv_debugfs_del_hardif(hard_iface);
batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
batadv_hardif_put(hard_iface);
} }
void batadv_hardif_remove_interfaces(void) void batadv_hardif_remove_interfaces(void)
......
...@@ -82,6 +82,12 @@ static void batadv_recv_handler_init(void); ...@@ -82,6 +82,12 @@ static void batadv_recv_handler_init(void);
static int __init batadv_init(void) static int __init batadv_init(void)
{ {
int ret;
ret = batadv_tt_cache_init();
if (ret < 0)
return ret;
INIT_LIST_HEAD(&batadv_hardif_list); INIT_LIST_HEAD(&batadv_hardif_list);
batadv_algo_init(); batadv_algo_init();
...@@ -93,9 +99,8 @@ static int __init batadv_init(void) ...@@ -93,9 +99,8 @@ static int __init batadv_init(void)
batadv_tp_meter_init(); batadv_tp_meter_init();
batadv_event_workqueue = create_singlethread_workqueue("bat_events"); batadv_event_workqueue = create_singlethread_workqueue("bat_events");
if (!batadv_event_workqueue) if (!batadv_event_workqueue)
return -ENOMEM; goto err_create_wq;
batadv_socket_init(); batadv_socket_init();
batadv_debugfs_init(); batadv_debugfs_init();
...@@ -108,6 +113,11 @@ static int __init batadv_init(void) ...@@ -108,6 +113,11 @@ static int __init batadv_init(void)
BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION); BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
return 0; return 0;
err_create_wq:
batadv_tt_cache_destroy();
return -ENOMEM;
} }
static void __exit batadv_exit(void) static void __exit batadv_exit(void)
...@@ -123,6 +133,8 @@ static void __exit batadv_exit(void) ...@@ -123,6 +133,8 @@ static void __exit batadv_exit(void)
batadv_event_workqueue = NULL; batadv_event_workqueue = NULL;
rcu_barrier(); rcu_barrier();
batadv_tt_cache_destroy();
} }
int batadv_mesh_init(struct net_device *soft_iface) int batadv_mesh_init(struct net_device *soft_iface)
...@@ -638,3 +650,4 @@ MODULE_AUTHOR(BATADV_DRIVER_AUTHOR); ...@@ -638,3 +650,4 @@ MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC); MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE); MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION); MODULE_VERSION(BATADV_SOURCE_VERSION);
MODULE_ALIAS_RTNL_LINK("batadv");
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2016.3"
+#define BATADV_SOURCE_VERSION "2016.4"
 #endif
 /* B.A.T.M.A.N. parameters */
......
@@ -528,7 +528,7 @@ static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
 	}
 	return !(mcast_data.flags &
-		 (BATADV_MCAST_WANT_ALL_IPV4 + BATADV_MCAST_WANT_ALL_IPV6));
+		 (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
 }
 /**
......
@@ -74,11 +74,23 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
 	if (!orig_ifinfo)
 		return;
-	rcu_read_lock();
-	curr_router = rcu_dereference(orig_ifinfo->router);
-	if (curr_router && !kref_get_unless_zero(&curr_router->refcount))
-		curr_router = NULL;
-	rcu_read_unlock();
+	spin_lock_bh(&orig_node->neigh_list_lock);
+	/* curr_router used earlier may not be the current orig_ifinfo->router
+	 * anymore because it was dereferenced outside of the neigh_list_lock
+	 * protected region. After the new best neighbor has replace the current
+	 * best neighbor the reference counter needs to decrease. Consequently,
+	 * the code needs to ensure the curr_router variable contains a pointer
+	 * to the replaced best neighbor.
+	 */
+	curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
+	/* increase refcount of new best neighbor */
+	if (neigh_node)
+		kref_get(&neigh_node->refcount);
+	rcu_assign_pointer(orig_ifinfo->router, neigh_node);
+	spin_unlock_bh(&orig_node->neigh_list_lock);
+	batadv_orig_ifinfo_put(orig_ifinfo);
 	/* route deleted */
 	if ((curr_router) && (!neigh_node)) {
...@@ -100,27 +112,6 @@ static void _batadv_update_route(struct batadv_priv *bat_priv, ...@@ -100,27 +112,6 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
curr_router->addr); curr_router->addr);
} }
if (curr_router)
batadv_neigh_node_put(curr_router);
spin_lock_bh(&orig_node->neigh_list_lock);
/* curr_router used earlier may not be the current orig_ifinfo->router
* anymore because it was dereferenced outside of the neigh_list_lock
* protected region. After the new best neighbor has replace the current
* best neighbor the reference counter needs to decrease. Consequently,
* the code needs to ensure the curr_router variable contains a pointer
* to the replaced best neighbor.
*/
curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
/* increase refcount of new best neighbor */
if (neigh_node)
kref_get(&neigh_node->refcount);
rcu_assign_pointer(orig_ifinfo->router, neigh_node);
spin_unlock_bh(&orig_node->neigh_list_lock);
batadv_orig_ifinfo_put(orig_ifinfo);
/* decrease refcount of previous best neighbor */ /* decrease refcount of previous best neighbor */
if (curr_router) if (curr_router)
batadv_neigh_node_put(curr_router); batadv_neigh_node_put(curr_router);
......
@@ -315,8 +315,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
  *
  * Wrap the given skb into a batman-adv unicast or unicast-4addr header
  * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
- * as packet_type. Then send this frame to the given orig_node and release a
- * reference to this orig_node.
+ * as packet_type. Then send this frame to the given orig_node.
  *
  * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
@@ -370,8 +369,6 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
 	ret = NET_XMIT_SUCCESS;
 out:
-	if (orig_node)
-		batadv_orig_node_put(orig_node);
 	if (ret == NET_XMIT_DROP)
 		kfree_skb(skb);
 	return ret;
@@ -403,6 +400,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct batadv_orig_node *orig_node;
 	u8 *src, *dst;
+	int ret;
 	src = ethhdr->h_source;
 	dst = ethhdr->h_dest;
@@ -414,8 +412,13 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 	}
 	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
-	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
-				       packet_subtype, orig_node, vid);
+	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
+				      packet_subtype, orig_node, vid);
+	if (orig_node)
+		batadv_orig_node_put(orig_node);
+	return ret;
 }
 /**
@@ -433,12 +436,25 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
 			   unsigned short vid)
 {
 	struct batadv_orig_node *orig_node;
+	int ret;
 	orig_node = batadv_gw_get_selected_orig(bat_priv);
-	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
-				       BATADV_P_DATA, orig_node, vid);
+	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+				      BATADV_P_DATA, orig_node, vid);
+	if (orig_node)
+		batadv_orig_node_put(orig_node);
+	return ret;
 }
/**
* batadv_forw_packet_free - free a forwarding packet
* @forw_packet: The packet to free
*
* This frees a forwarding packet and releases any resources it might
* have claimed.
*/
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet) void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{ {
kfree_skb(forw_packet->skb); kfree_skb(forw_packet->skb);
...@@ -446,9 +462,73 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet) ...@@ -446,9 +462,73 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
batadv_hardif_put(forw_packet->if_incoming); batadv_hardif_put(forw_packet->if_incoming);
if (forw_packet->if_outgoing) if (forw_packet->if_outgoing)
batadv_hardif_put(forw_packet->if_outgoing); batadv_hardif_put(forw_packet->if_outgoing);
if (forw_packet->queue_left)
atomic_inc(forw_packet->queue_left);
kfree(forw_packet); kfree(forw_packet);
} }
/**
* batadv_forw_packet_alloc - allocate a forwarding packet
* @if_incoming: The (optional) if_incoming to be grabbed
* @if_outgoing: The (optional) if_outgoing to be grabbed
* @queue_left: The (optional) queue counter to decrease
* @bat_priv: The bat_priv for the mesh of this forw_packet
*
* Allocates a forwarding packet and tries to get a reference to the
* (optional) if_incoming, if_outgoing and queue_left. If queue_left
* is NULL then bat_priv is optional, too.
*
* Return: An allocated forwarding packet on success, NULL otherwise.
*/
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing,
atomic_t *queue_left,
struct batadv_priv *bat_priv)
{
struct batadv_forw_packet *forw_packet;
const char *qname;
if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
qname = "unknown";
if (queue_left == &bat_priv->bcast_queue_left)
qname = "bcast";
if (queue_left == &bat_priv->batman_queue_left)
qname = "batman";
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"%s queue is full\n", qname);
return NULL;
}
forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
if (!forw_packet)
goto err;
if (if_incoming)
kref_get(&if_incoming->refcount);
if (if_outgoing)
kref_get(&if_outgoing->refcount);
forw_packet->skb = NULL;
forw_packet->queue_left = queue_left;
forw_packet->if_incoming = if_incoming;
forw_packet->if_outgoing = if_outgoing;
forw_packet->num_packets = 0;
return forw_packet;
err:
if (queue_left)
atomic_inc(queue_left);
return NULL;
}
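To show how the new helper pairs with batadv_forw_packet_free(), here is a sketch of a caller modelled on batadv_add_bcast_packet_to_list() further down (example_queue_bcast is a made-up name, not part of the patch):

static int example_queue_bcast(struct batadv_priv *bat_priv,
			       struct batadv_hard_iface *primary_if,
			       struct sk_buff *skb)
{
	struct batadv_forw_packet *forw_packet;

	/* grabs a reference on primary_if and decrements bcast_queue_left */
	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv);
	if (!forw_packet)
		return NETDEV_TX_BUSY;	/* queue full or allocation failed */

	forw_packet->skb = skb_copy(skb, GFP_ATOMIC);
	if (!forw_packet->skb) {
		/* drops the interface reference and re-increments queue_left */
		batadv_forw_packet_free(forw_packet);
		return NETDEV_TX_BUSY;
	}

	/* ... schedule forw_packet via delayed work as the real callers do ... */
	return NETDEV_TX_OK;
}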
static void static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
struct batadv_forw_packet *forw_packet, struct batadv_forw_packet *forw_packet,
...@@ -487,24 +567,20 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, ...@@ -487,24 +567,20 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
struct batadv_bcast_packet *bcast_packet; struct batadv_bcast_packet *bcast_packet;
struct sk_buff *newskb; struct sk_buff *newskb;
if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"bcast packet queue full\n");
goto out;
}
primary_if = batadv_primary_if_get_selected(bat_priv); primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) if (!primary_if)
goto out_and_inc; goto err;
forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
&bat_priv->bcast_queue_left,
bat_priv);
batadv_hardif_put(primary_if);
if (!forw_packet) if (!forw_packet)
goto out_and_inc; goto err;
newskb = skb_copy(skb, GFP_ATOMIC); newskb = skb_copy(skb, GFP_ATOMIC);
if (!newskb) if (!newskb)
goto packet_free; goto err_packet_free;
/* as we have a copy now, it is safe to decrease the TTL */ /* as we have a copy now, it is safe to decrease the TTL */
bcast_packet = (struct batadv_bcast_packet *)newskb->data; bcast_packet = (struct batadv_bcast_packet *)newskb->data;
...@@ -513,11 +589,6 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, ...@@ -513,11 +589,6 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
skb_reset_mac_header(newskb); skb_reset_mac_header(newskb);
forw_packet->skb = newskb; forw_packet->skb = newskb;
forw_packet->if_incoming = primary_if;
forw_packet->if_outgoing = NULL;
/* how often did we send the bcast packet ? */
forw_packet->num_packets = 0;
INIT_DELAYED_WORK(&forw_packet->delayed_work, INIT_DELAYED_WORK(&forw_packet->delayed_work,
batadv_send_outstanding_bcast_packet); batadv_send_outstanding_bcast_packet);
...@@ -525,13 +596,9 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, ...@@ -525,13 +596,9 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay); _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
return NETDEV_TX_OK; return NETDEV_TX_OK;
packet_free: err_packet_free:
kfree(forw_packet); batadv_forw_packet_free(forw_packet);
out_and_inc: err:
atomic_inc(&bat_priv->bcast_queue_left);
out:
if (primary_if)
batadv_hardif_put(primary_if);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
...@@ -592,7 +659,6 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) ...@@ -592,7 +659,6 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
out: out:
batadv_forw_packet_free(forw_packet); batadv_forw_packet_free(forw_packet);
atomic_inc(&bat_priv->bcast_queue_left);
} }
void void
...@@ -633,9 +699,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, ...@@ -633,9 +699,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) { if (pending) {
hlist_del(&forw_packet->list); hlist_del(&forw_packet->list);
if (!forw_packet->own)
atomic_inc(&bat_priv->bcast_queue_left);
batadv_forw_packet_free(forw_packet); batadv_forw_packet_free(forw_packet);
} }
} }
...@@ -663,9 +726,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, ...@@ -663,9 +726,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) { if (pending) {
hlist_del(&forw_packet->list); hlist_del(&forw_packet->list);
if (!forw_packet->own)
atomic_inc(&bat_priv->batman_queue_left);
batadv_forw_packet_free(forw_packet); batadv_forw_packet_free(forw_packet);
} }
} }
......
...@@ -28,6 +28,12 @@ ...@@ -28,6 +28,12 @@
struct sk_buff; struct sk_buff;
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet); void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet);
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing,
atomic_t *queue_left,
struct batadv_priv *bat_priv);
int batadv_send_skb_to_orig(struct sk_buff *skb, int batadv_send_skb_to_orig(struct sk_buff *skb,
struct batadv_orig_node *orig_node, struct batadv_orig_node *orig_node,
struct batadv_hard_iface *recv_if); struct batadv_hard_iface *recv_if);
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/random.h> #include <linux/random.h>
#include <linux/rculist.h> #include <linux/rculist.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/socket.h> #include <linux/socket.h>
...@@ -46,7 +47,6 @@ ...@@ -46,7 +47,6 @@
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/workqueue.h>
#include "bat_algo.h" #include "bat_algo.h"
#include "bridge_loop_avoidance.h" #include "bridge_loop_avoidance.h"
...@@ -57,6 +57,7 @@ ...@@ -57,6 +57,7 @@
#include "hard-interface.h" #include "hard-interface.h"
#include "multicast.h" #include "multicast.h"
#include "network-coding.h" #include "network-coding.h"
#include "originator.h"
#include "packet.h" #include "packet.h"
#include "send.h" #include "send.h"
#include "sysfs.h" #include "sysfs.h"
...@@ -377,6 +378,8 @@ static int batadv_interface_tx(struct sk_buff *skb, ...@@ -377,6 +378,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
dropped_freed: dropped_freed:
batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED); batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end: end:
if (mcast_single_orig)
batadv_orig_node_put(mcast_single_orig);
if (primary_if) if (primary_if)
batadv_hardif_put(primary_if); batadv_hardif_put(primary_if);
return NETDEV_TX_OK; return NETDEV_TX_OK;
...@@ -746,34 +749,6 @@ static void batadv_set_lockdep_class(struct net_device *dev) ...@@ -746,34 +749,6 @@ static void batadv_set_lockdep_class(struct net_device *dev)
netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL); netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
} }
/**
* batadv_softif_destroy_finish - cleans up the remains of a softif
* @work: work queue item
*
* Free the parts of the soft interface which can not be removed under
* rtnl lock (to prevent deadlock situations).
*/
static void batadv_softif_destroy_finish(struct work_struct *work)
{
struct batadv_softif_vlan *vlan;
struct batadv_priv *bat_priv;
struct net_device *soft_iface;
bat_priv = container_of(work, struct batadv_priv,
cleanup_work);
soft_iface = bat_priv->soft_iface;
/* destroy the "untagged" VLAN */
vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
if (vlan) {
batadv_softif_destroy_vlan(bat_priv, vlan);
batadv_softif_vlan_put(vlan);
}
batadv_sysfs_del_meshif(soft_iface);
unregister_netdev(soft_iface);
}
/** /**
* batadv_softif_init_late - late stage initialization of soft interface * batadv_softif_init_late - late stage initialization of soft interface
* @dev: registered network device to modify * @dev: registered network device to modify
...@@ -791,7 +766,6 @@ static int batadv_softif_init_late(struct net_device *dev) ...@@ -791,7 +766,6 @@ static int batadv_softif_init_late(struct net_device *dev)
bat_priv = netdev_priv(dev); bat_priv = netdev_priv(dev);
bat_priv->soft_iface = dev; bat_priv->soft_iface = dev;
INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish);
/* batadv_interface_stats() needs to be available as soon as /* batadv_interface_stats() needs to be available as soon as
* register_netdevice() has been called * register_netdevice() has been called
...@@ -1028,8 +1002,19 @@ struct net_device *batadv_softif_create(struct net *net, const char *name) ...@@ -1028,8 +1002,19 @@ struct net_device *batadv_softif_create(struct net *net, const char *name)
void batadv_softif_destroy_sysfs(struct net_device *soft_iface) void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
{ {
struct batadv_priv *bat_priv = netdev_priv(soft_iface); struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_softif_vlan *vlan;
ASSERT_RTNL();
/* destroy the "untagged" VLAN */
vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
if (vlan) {
batadv_softif_destroy_vlan(bat_priv, vlan);
batadv_softif_vlan_put(vlan);
}
queue_work(batadv_event_workqueue, &bat_priv->cleanup_work); batadv_sysfs_del_meshif(soft_iface);
unregister_netdevice(soft_iface);
} }
/** /**
......
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/stringify.h> #include <linux/stringify.h>
#include <linux/workqueue.h>
#include "bridge_loop_avoidance.h" #include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h" #include "distributed-arp-table.h"
...@@ -428,6 +429,13 @@ static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr, ...@@ -428,6 +429,13 @@ static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
int bytes_written; int bytes_written;
/* GW mode is not available if the routing algorithm in use does not
* implement the GW API
*/
if (!bat_priv->algo_ops->gw.get_best_gw_node ||
!bat_priv->algo_ops->gw.is_eligible)
return -ENOENT;
switch (atomic_read(&bat_priv->gw.mode)) { switch (atomic_read(&bat_priv->gw.mode)) {
case BATADV_GW_MODE_CLIENT: case BATADV_GW_MODE_CLIENT:
bytes_written = sprintf(buff, "%s\n", bytes_written = sprintf(buff, "%s\n",
...@@ -455,6 +463,13 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj, ...@@ -455,6 +463,13 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
char *curr_gw_mode_str; char *curr_gw_mode_str;
int gw_mode_tmp = -1; int gw_mode_tmp = -1;
/* toggling GW mode is allowed only if the routing algorithm in use
* provides the GW API
*/
if (!bat_priv->algo_ops->gw.get_best_gw_node ||
!bat_priv->algo_ops->gw.is_eligible)
return -EINVAL;
if (buff[count - 1] == '\n') if (buff[count - 1] == '\n')
buff[count - 1] = '\0'; buff[count - 1] = '\0';
...@@ -514,6 +529,50 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj, ...@@ -514,6 +529,50 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
return count; return count;
} }
static ssize_t batadv_show_gw_sel_class(struct kobject *kobj,
struct attribute *attr, char *buff)
{
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
/* GW selection class is not available if the routing algorithm in use
* does not implement the GW API
*/
if (!bat_priv->algo_ops->gw.get_best_gw_node ||
!bat_priv->algo_ops->gw.is_eligible)
return -ENOENT;
if (bat_priv->algo_ops->gw.show_sel_class)
return bat_priv->algo_ops->gw.show_sel_class(bat_priv, buff);
return sprintf(buff, "%i\n", atomic_read(&bat_priv->gw.sel_class));
}
static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
struct attribute *attr, char *buff,
size_t count)
{
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
/* setting the GW selection class is allowed only if the routing
* algorithm in use implements the GW API
*/
if (!bat_priv->algo_ops->gw.get_best_gw_node ||
!bat_priv->algo_ops->gw.is_eligible)
return -EINVAL;
if (buff[count - 1] == '\n')
buff[count - 1] = '\0';
if (bat_priv->algo_ops->gw.store_sel_class)
return bat_priv->algo_ops->gw.store_sel_class(bat_priv, buff,
count);
return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
batadv_post_gw_reselect, attr,
&bat_priv->gw.sel_class,
bat_priv->soft_iface);
}
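The two handlers above only dispatch: an algorithm that provides store_sel_class/show_sel_class fully owns parsing and formatting of the selection class, otherwise the generic integer handling is used. A hedged sketch of such a callback pair (all example_ names are illustrative, not taken from this patchset):

static ssize_t example_gw_store_sel_class(struct batadv_priv *bat_priv,
                                          char *buff, size_t count)
{
        u32 class;

        /* interpret the value in algorithm specific units */
        if (kstrtou32(buff, 10, &class) || class == 0)
                return -EINVAL;

        atomic_set(&bat_priv->gw.sel_class, class);
        return count;
}

static ssize_t example_gw_show_sel_class(struct batadv_priv *bat_priv,
                                         char *buff)
{
        return sprintf(buff, "%u\n", atomic_read(&bat_priv->gw.sel_class));
}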
static ssize_t batadv_show_gw_bwidth(struct kobject *kobj, static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
struct attribute *attr, char *buff) struct attribute *attr, char *buff)
{ {
...@@ -625,8 +684,8 @@ BATADV_ATTR_SIF_UINT(orig_interval, orig_interval, S_IRUGO | S_IWUSR, ...@@ -625,8 +684,8 @@ BATADV_ATTR_SIF_UINT(orig_interval, orig_interval, S_IRUGO | S_IWUSR,
2 * BATADV_JITTER, INT_MAX, NULL); 2 * BATADV_JITTER, INT_MAX, NULL);
BATADV_ATTR_SIF_UINT(hop_penalty, hop_penalty, S_IRUGO | S_IWUSR, 0, BATADV_ATTR_SIF_UINT(hop_penalty, hop_penalty, S_IRUGO | S_IWUSR, 0,
BATADV_TQ_MAX_VALUE, NULL); BATADV_TQ_MAX_VALUE, NULL);
BATADV_ATTR_SIF_UINT(gw_sel_class, gw.sel_class, S_IRUGO | S_IWUSR, 1, static BATADV_ATTR(gw_sel_class, S_IRUGO | S_IWUSR, batadv_show_gw_sel_class,
BATADV_TQ_MAX_VALUE, batadv_post_gw_reselect); batadv_store_gw_sel_class);
static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth, static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
batadv_store_gw_bwidth); batadv_store_gw_bwidth);
#ifdef CONFIG_BATMAN_ADV_MCAST #ifdef CONFIG_BATMAN_ADV_MCAST
...@@ -712,6 +771,8 @@ int batadv_sysfs_add_meshif(struct net_device *dev) ...@@ -712,6 +771,8 @@ int batadv_sysfs_add_meshif(struct net_device *dev)
for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr) for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr)); sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
kobject_uevent(bat_priv->mesh_obj, KOBJ_REMOVE);
kobject_del(bat_priv->mesh_obj);
kobject_put(bat_priv->mesh_obj); kobject_put(bat_priv->mesh_obj);
bat_priv->mesh_obj = NULL; bat_priv->mesh_obj = NULL;
out: out:
...@@ -726,6 +787,8 @@ void batadv_sysfs_del_meshif(struct net_device *dev) ...@@ -726,6 +787,8 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr) for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr)); sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
kobject_uevent(bat_priv->mesh_obj, KOBJ_REMOVE);
kobject_del(bat_priv->mesh_obj);
kobject_put(bat_priv->mesh_obj); kobject_put(bat_priv->mesh_obj);
bat_priv->mesh_obj = NULL; bat_priv->mesh_obj = NULL;
} }
...@@ -781,6 +844,10 @@ int batadv_sysfs_add_vlan(struct net_device *dev, ...@@ -781,6 +844,10 @@ int batadv_sysfs_add_vlan(struct net_device *dev,
for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr) for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr)); sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
if (vlan->kobj != bat_priv->mesh_obj) {
kobject_uevent(vlan->kobj, KOBJ_REMOVE);
kobject_del(vlan->kobj);
}
kobject_put(vlan->kobj); kobject_put(vlan->kobj);
vlan->kobj = NULL; vlan->kobj = NULL;
out: out:
...@@ -800,6 +867,10 @@ void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv, ...@@ -800,6 +867,10 @@ void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr) for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr)); sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
if (vlan->kobj != bat_priv->mesh_obj) {
kobject_uevent(vlan->kobj, KOBJ_REMOVE);
kobject_del(vlan->kobj);
}
kobject_put(vlan->kobj); kobject_put(vlan->kobj);
vlan->kobj = NULL; vlan->kobj = NULL;
} }
...@@ -828,31 +899,31 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj, ...@@ -828,31 +899,31 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
return length; return length;
} }
static ssize_t batadv_store_mesh_iface(struct kobject *kobj, /**
struct attribute *attr, char *buff, * batadv_store_mesh_iface_finish - store new hardif mesh_iface state
size_t count) * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
* @ifname: name of soft-interface to modify
*
* Changes the parts of the hard+soft interface which cannot be modified under
* the sysfs lock (to prevent deadlock situations).
*
* Return: 0 on success, < 0 on failure
*/
static int batadv_store_mesh_iface_finish(struct net_device *net_dev,
char ifname[IFNAMSIZ])
{ {
struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
struct net *net = dev_net(net_dev); struct net *net = dev_net(net_dev);
struct batadv_hard_iface *hard_iface; struct batadv_hard_iface *hard_iface;
int status_tmp = -1; int status_tmp;
int ret = count; int ret = 0;
ASSERT_RTNL();
hard_iface = batadv_hardif_get_by_netdev(net_dev); hard_iface = batadv_hardif_get_by_netdev(net_dev);
if (!hard_iface) if (!hard_iface)
return count; return 0;
if (buff[count - 1] == '\n')
buff[count - 1] = '\0';
if (strlen(buff) >= IFNAMSIZ) {
pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
buff);
batadv_hardif_put(hard_iface);
return -EINVAL;
}
if (strncmp(buff, "none", 4) == 0) if (strncmp(ifname, "none", 4) == 0)
status_tmp = BATADV_IF_NOT_IN_USE; status_tmp = BATADV_IF_NOT_IN_USE;
else else
status_tmp = BATADV_IF_I_WANT_YOU; status_tmp = BATADV_IF_I_WANT_YOU;
...@@ -861,15 +932,13 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj, ...@@ -861,15 +932,13 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
goto out; goto out;
if ((hard_iface->soft_iface) && if ((hard_iface->soft_iface) &&
(strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0)) (strncmp(hard_iface->soft_iface->name, ifname, IFNAMSIZ) == 0))
goto out; goto out;
rtnl_lock();
if (status_tmp == BATADV_IF_NOT_IN_USE) { if (status_tmp == BATADV_IF_NOT_IN_USE) {
batadv_hardif_disable_interface(hard_iface, batadv_hardif_disable_interface(hard_iface,
BATADV_IF_CLEANUP_AUTO); BATADV_IF_CLEANUP_AUTO);
goto unlock; goto out;
} }
/* if the interface already is in use */ /* if the interface already is in use */
...@@ -877,15 +946,71 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj, ...@@ -877,15 +946,71 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
batadv_hardif_disable_interface(hard_iface, batadv_hardif_disable_interface(hard_iface,
BATADV_IF_CLEANUP_AUTO); BATADV_IF_CLEANUP_AUTO);
ret = batadv_hardif_enable_interface(hard_iface, net, buff); ret = batadv_hardif_enable_interface(hard_iface, net, ifname);
unlock:
rtnl_unlock();
out: out:
batadv_hardif_put(hard_iface); batadv_hardif_put(hard_iface);
return ret; return ret;
} }
/**
* batadv_store_mesh_iface_work - store new hardif mesh_iface state
* @work: work queue item
*
* Changes the parts of the hard+soft interface which cannot be modified under
* the sysfs lock (to prevent deadlock situations).
*/
static void batadv_store_mesh_iface_work(struct work_struct *work)
{
struct batadv_store_mesh_work *store_work;
int ret;
store_work = container_of(work, struct batadv_store_mesh_work, work);
rtnl_lock();
ret = batadv_store_mesh_iface_finish(store_work->net_dev,
store_work->soft_iface_name);
rtnl_unlock();
if (ret < 0)
pr_err("Failed to store new mesh_iface state %s for %s: %d\n",
store_work->soft_iface_name, store_work->net_dev->name,
ret);
dev_put(store_work->net_dev);
kfree(store_work);
}
static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
struct attribute *attr, char *buff,
size_t count)
{
struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
struct batadv_store_mesh_work *store_work;
if (buff[count - 1] == '\n')
buff[count - 1] = '\0';
if (strlen(buff) >= IFNAMSIZ) {
pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
buff);
return -EINVAL;
}
store_work = kmalloc(sizeof(*store_work), GFP_KERNEL);
if (!store_work)
return -ENOMEM;
dev_hold(net_dev);
INIT_WORK(&store_work->work, batadv_store_mesh_iface_work);
store_work->net_dev = net_dev;
strlcpy(store_work->soft_iface_name, buff,
sizeof(store_work->soft_iface_name));
queue_work(batadv_event_workqueue, &store_work->work);
return count;
}
static ssize_t batadv_show_iface_status(struct kobject *kobj, static ssize_t batadv_show_iface_status(struct kobject *kobj,
struct attribute *attr, char *buff) struct attribute *attr, char *buff)
{ {
...@@ -1048,6 +1173,8 @@ int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev) ...@@ -1048,6 +1173,8 @@ int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
void batadv_sysfs_del_hardif(struct kobject **hardif_obj) void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
{ {
kobject_uevent(*hardif_obj, KOBJ_REMOVE);
kobject_del(*hardif_obj);
kobject_put(*hardif_obj); kobject_put(*hardif_obj);
*hardif_obj = NULL; *hardif_obj = NULL;
} }
......
...@@ -22,12 +22,14 @@ ...@@ -22,12 +22,14 @@
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/byteorder/generic.h> #include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/crc32c.h> #include <linux/crc32c.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/if_ether.h> #include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jhash.h> #include <linux/jhash.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -54,6 +56,13 @@ ...@@ -54,6 +56,13 @@
#include "soft-interface.h" #include "soft-interface.h"
#include "tvlv.h" #include "tvlv.h"
static struct kmem_cache *batadv_tl_cache __read_mostly;
static struct kmem_cache *batadv_tg_cache __read_mostly;
static struct kmem_cache *batadv_tt_orig_cache __read_mostly;
static struct kmem_cache *batadv_tt_change_cache __read_mostly;
static struct kmem_cache *batadv_tt_req_cache __read_mostly;
static struct kmem_cache *batadv_tt_roam_cache __read_mostly;
/* hash class keys */ /* hash class keys */
static struct lock_class_key batadv_tt_local_hash_lock_class_key; static struct lock_class_key batadv_tt_local_hash_lock_class_key;
static struct lock_class_key batadv_tt_global_hash_lock_class_key; static struct lock_class_key batadv_tt_global_hash_lock_class_key;
...@@ -204,6 +213,20 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr, ...@@ -204,6 +213,20 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
return tt_global_entry; return tt_global_entry;
} }
/**
* batadv_tt_local_entry_free_rcu - free the tt_local_entry
* @rcu: rcu pointer of the tt_local_entry
*/
static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
{
struct batadv_tt_local_entry *tt_local_entry;
tt_local_entry = container_of(rcu, struct batadv_tt_local_entry,
common.rcu);
kmem_cache_free(batadv_tl_cache, tt_local_entry);
}
/** /**
* batadv_tt_local_entry_release - release tt_local_entry from lists and queue * batadv_tt_local_entry_release - release tt_local_entry from lists and queue
* for free after rcu grace period * for free after rcu grace period
...@@ -218,7 +241,7 @@ static void batadv_tt_local_entry_release(struct kref *ref) ...@@ -218,7 +241,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
batadv_softif_vlan_put(tt_local_entry->vlan); batadv_softif_vlan_put(tt_local_entry->vlan);
kfree_rcu(tt_local_entry, common.rcu); call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu);
} }
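Entries carved out of a dedicated kmem_cache have to be returned with kmem_cache_free(), so the release paths switch from kfree_rcu() to call_rcu() with an explicit callback, as above. The general pattern, as a hedged sketch with illustrative names:

struct example_entry {
        struct rcu_head rcu;
        /* ... payload ... */
};

static struct kmem_cache *example_cache __read_mostly;

static void example_entry_free_rcu(struct rcu_head *rcu)
{
        struct example_entry *entry;

        entry = container_of(rcu, struct example_entry, rcu);
        kmem_cache_free(example_cache, entry);
}

static void example_entry_release(struct example_entry *entry)
{
        /* defer the cache free until after the RCU grace period */
        call_rcu(&entry->rcu, example_entry_free_rcu);
}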
/** /**
...@@ -233,6 +256,20 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry) ...@@ -233,6 +256,20 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
batadv_tt_local_entry_release); batadv_tt_local_entry_release);
} }
/**
* batadv_tt_global_entry_free_rcu - free the tt_global_entry
* @rcu: rcu pointer of the tt_global_entry
*/
static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
{
struct batadv_tt_global_entry *tt_global_entry;
tt_global_entry = container_of(rcu, struct batadv_tt_global_entry,
common.rcu);
kmem_cache_free(batadv_tg_cache, tt_global_entry);
}
/** /**
* batadv_tt_global_entry_release - release tt_global_entry from lists and queue * batadv_tt_global_entry_release - release tt_global_entry from lists and queue
* for free after rcu grace period * for free after rcu grace period
...@@ -246,7 +283,8 @@ static void batadv_tt_global_entry_release(struct kref *ref) ...@@ -246,7 +283,8 @@ static void batadv_tt_global_entry_release(struct kref *ref)
common.refcount); common.refcount);
batadv_tt_global_del_orig_list(tt_global_entry); batadv_tt_global_del_orig_list(tt_global_entry);
kfree_rcu(tt_global_entry, common.rcu);
call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu);
} }
/** /**
...@@ -383,6 +421,19 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node, ...@@ -383,6 +421,19 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
batadv_tt_global_size_mod(orig_node, vid, -1); batadv_tt_global_size_mod(orig_node, vid, -1);
} }
/**
* batadv_tt_orig_list_entry_free_rcu - free the orig_entry
* @rcu: rcu pointer of the orig_entry
*/
static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
struct batadv_tt_orig_list_entry *orig_entry;
orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
kmem_cache_free(batadv_tt_orig_cache, orig_entry);
}
/** /**
* batadv_tt_orig_list_entry_release - release tt orig entry from lists and * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
* queue for free after rcu grace period * queue for free after rcu grace period
...@@ -396,7 +447,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref) ...@@ -396,7 +447,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
refcount); refcount);
batadv_orig_node_put(orig_entry->orig_node); batadv_orig_node_put(orig_entry->orig_node);
kfree_rcu(orig_entry, rcu); call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
} }
/** /**
...@@ -426,7 +477,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, ...@@ -426,7 +477,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
bool event_removed = false; bool event_removed = false;
bool del_op_requested, del_op_entry; bool del_op_requested, del_op_entry;
tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC); tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC);
if (!tt_change_node) if (!tt_change_node)
return; return;
...@@ -467,8 +518,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, ...@@ -467,8 +518,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
continue; continue;
del: del:
list_del(&entry->list); list_del(&entry->list);
kfree(entry); kmem_cache_free(batadv_tt_change_cache, entry);
kfree(tt_change_node); kmem_cache_free(batadv_tt_change_cache, tt_change_node);
event_removed = true; event_removed = true;
goto unlock; goto unlock;
} }
...@@ -646,7 +697,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, ...@@ -646,7 +697,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
goto out; goto out;
} }
tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC); tt_local = kmem_cache_alloc(batadv_tl_cache, GFP_ATOMIC);
if (!tt_local) if (!tt_local)
goto out; goto out;
...@@ -656,7 +707,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, ...@@ -656,7 +707,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
net_ratelimited_function(batadv_info, soft_iface, net_ratelimited_function(batadv_info, soft_iface,
"adding TT local entry %pM to non-existent VLAN %d\n", "adding TT local entry %pM to non-existent VLAN %d\n",
addr, BATADV_PRINT_VID(vid)); addr, BATADV_PRINT_VID(vid));
kfree(tt_local); kmem_cache_free(batadv_tl_cache, tt_local);
tt_local = NULL; tt_local = NULL;
goto out; goto out;
} }
...@@ -959,7 +1010,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) ...@@ -959,7 +1010,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
tt_diff_entries_count++; tt_diff_entries_count++;
} }
list_del(&entry->list); list_del(&entry->list);
kfree(entry); kmem_cache_free(batadv_tt_change_cache, entry);
} }
spin_unlock_bh(&bat_priv->tt.changes_list_lock); spin_unlock_bh(&bat_priv->tt.changes_list_lock);
...@@ -1259,7 +1310,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv) ...@@ -1259,7 +1310,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) { list) {
list_del(&entry->list); list_del(&entry->list);
kfree(entry); kmem_cache_free(batadv_tt_change_cache, entry);
} }
atomic_set(&bat_priv->tt.local_changes, 0); atomic_set(&bat_priv->tt.local_changes, 0);
...@@ -1341,7 +1392,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, ...@@ -1341,7 +1392,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
goto out; goto out;
} }
orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC); orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
if (!orig_entry) if (!orig_entry)
goto out; goto out;
...@@ -1411,7 +1462,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, ...@@ -1411,7 +1462,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
goto out; goto out;
if (!tt_global_entry) { if (!tt_global_entry) {
tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC); tt_global_entry = kmem_cache_zalloc(batadv_tg_cache,
GFP_ATOMIC);
if (!tt_global_entry) if (!tt_global_entry)
goto out; goto out;
...@@ -2280,7 +2332,7 @@ static void batadv_tt_req_node_release(struct kref *ref) ...@@ -2280,7 +2332,7 @@ static void batadv_tt_req_node_release(struct kref *ref)
tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount); tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
kfree(tt_req_node); kmem_cache_free(batadv_tt_req_cache, tt_req_node);
} }
/** /**
...@@ -2367,7 +2419,7 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv, ...@@ -2367,7 +2419,7 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
goto unlock; goto unlock;
} }
tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC); tt_req_node = kmem_cache_alloc(batadv_tt_req_cache, GFP_ATOMIC);
if (!tt_req_node) if (!tt_req_node)
goto unlock; goto unlock;
...@@ -3104,7 +3156,7 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv) ...@@ -3104,7 +3156,7 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) { list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
list_del(&node->list); list_del(&node->list);
kfree(node); kmem_cache_free(batadv_tt_roam_cache, node);
} }
spin_unlock_bh(&bat_priv->tt.roam_list_lock); spin_unlock_bh(&bat_priv->tt.roam_list_lock);
...@@ -3121,7 +3173,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) ...@@ -3121,7 +3173,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
continue; continue;
list_del(&node->list); list_del(&node->list);
kfree(node); kmem_cache_free(batadv_tt_roam_cache, node);
} }
spin_unlock_bh(&bat_priv->tt.roam_list_lock); spin_unlock_bh(&bat_priv->tt.roam_list_lock);
} }
...@@ -3162,7 +3214,8 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client) ...@@ -3162,7 +3214,8 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
} }
if (!ret) { if (!ret) {
tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC); tt_roam_node = kmem_cache_alloc(batadv_tt_roam_cache,
GFP_ATOMIC);
if (!tt_roam_node) if (!tt_roam_node)
goto unlock; goto unlock;
...@@ -3865,3 +3918,85 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv, ...@@ -3865,3 +3918,85 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
return ret; return ret;
} }
/**
* batadv_tt_cache_init - Initialize tt memory object cache
*
* Return: 0 on success or negative error number in case of failure.
*/
int __init batadv_tt_cache_init(void)
{
size_t tl_size = sizeof(struct batadv_tt_local_entry);
size_t tg_size = sizeof(struct batadv_tt_global_entry);
size_t tt_orig_size = sizeof(struct batadv_tt_orig_list_entry);
size_t tt_change_size = sizeof(struct batadv_tt_change_node);
size_t tt_req_size = sizeof(struct batadv_tt_req_node);
size_t tt_roam_size = sizeof(struct batadv_tt_roam_node);
batadv_tl_cache = kmem_cache_create("batadv_tl_cache", tl_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tl_cache)
return -ENOMEM;
batadv_tg_cache = kmem_cache_create("batadv_tg_cache", tg_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tg_cache)
goto err_tt_tl_destroy;
batadv_tt_orig_cache = kmem_cache_create("batadv_tt_orig_cache",
tt_orig_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tt_orig_cache)
goto err_tt_tg_destroy;
batadv_tt_change_cache = kmem_cache_create("batadv_tt_change_cache",
tt_change_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tt_change_cache)
goto err_tt_orig_destroy;
batadv_tt_req_cache = kmem_cache_create("batadv_tt_req_cache",
tt_req_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tt_req_cache)
goto err_tt_change_destroy;
batadv_tt_roam_cache = kmem_cache_create("batadv_tt_roam_cache",
tt_roam_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tt_roam_cache)
goto err_tt_req_destroy;
return 0;
err_tt_req_destroy:
kmem_cache_destroy(batadv_tt_req_cache);
batadv_tt_req_cache = NULL;
err_tt_change_destroy:
kmem_cache_destroy(batadv_tt_change_cache);
batadv_tt_change_cache = NULL;
err_tt_orig_destroy:
kmem_cache_destroy(batadv_tt_orig_cache);
batadv_tt_orig_cache = NULL;
err_tt_tg_destroy:
kmem_cache_destroy(batadv_tg_cache);
batadv_tg_cache = NULL;
err_tt_tl_destroy:
kmem_cache_destroy(batadv_tl_cache);
batadv_tl_cache = NULL;
return -ENOMEM;
}
/**
* batadv_tt_cache_destroy - Destroy tt memory object cache
*/
void batadv_tt_cache_destroy(void)
{
kmem_cache_destroy(batadv_tl_cache);
kmem_cache_destroy(batadv_tg_cache);
kmem_cache_destroy(batadv_tt_orig_cache);
kmem_cache_destroy(batadv_tt_change_cache);
kmem_cache_destroy(batadv_tt_req_cache);
kmem_cache_destroy(batadv_tt_roam_cache);
}
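Both helpers are meant to run exactly once around module load and unload. A hedged sketch of the expected hook-up (the example_ function names are illustrative; the real call sites are expected to live in the module's central init/exit path):

static int __init example_module_init(void)
{
        int ret;

        ret = batadv_tt_cache_init();
        if (ret < 0)
                return ret;

        /* ... register packet handlers, netlink, debugfs, ... */

        return 0;
}

static void __exit example_module_exit(void)
{
        /* ... tear down everything else and flush workqueues first ... */

        /* safe only once no translation table entry can be allocated or
         * freed anymore
         */
        batadv_tt_cache_destroy();
}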
...@@ -59,4 +59,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, ...@@ -59,4 +59,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv, bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
const u8 *addr, unsigned short vid); const u8 *addr, unsigned short vid);
int batadv_tt_cache_init(void);
void batadv_tt_cache_destroy(void);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
...@@ -132,7 +132,6 @@ struct batadv_hard_iface_bat_v { ...@@ -132,7 +132,6 @@ struct batadv_hard_iface_bat_v {
* @rcu: struct used for freeing in an RCU-safe manner * @rcu: struct used for freeing in an RCU-safe manner
* @bat_iv: per hard-interface B.A.T.M.A.N. IV data * @bat_iv: per hard-interface B.A.T.M.A.N. IV data
* @bat_v: per hard-interface B.A.T.M.A.N. V data * @bat_v: per hard-interface B.A.T.M.A.N. V data
* @cleanup_work: work queue callback item for hard-interface deinit
* @debug_dir: dentry for nc subdir in batman-adv directory in debugfs * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
* @neigh_list: list of unique single hop neighbors via this interface * @neigh_list: list of unique single hop neighbors via this interface
* @neigh_list_lock: lock protecting neigh_list * @neigh_list_lock: lock protecting neigh_list
...@@ -152,7 +151,6 @@ struct batadv_hard_iface { ...@@ -152,7 +151,6 @@ struct batadv_hard_iface {
#ifdef CONFIG_BATMAN_ADV_BATMAN_V #ifdef CONFIG_BATMAN_ADV_BATMAN_V
struct batadv_hard_iface_bat_v bat_v; struct batadv_hard_iface_bat_v bat_v;
#endif #endif
struct work_struct cleanup_work;
struct dentry *debug_dir; struct dentry *debug_dir;
struct hlist_head neigh_list; struct hlist_head neigh_list;
/* neigh_list_lock protects: neigh_list */ /* neigh_list_lock protects: neigh_list */
...@@ -1015,7 +1013,6 @@ struct batadv_priv_bat_v { ...@@ -1015,7 +1013,6 @@ struct batadv_priv_bat_v {
* @forw_bcast_list_lock: lock protecting forw_bcast_list * @forw_bcast_list_lock: lock protecting forw_bcast_list
* @tp_list_lock: spinlock protecting @tp_list * @tp_list_lock: spinlock protecting @tp_list
* @orig_work: work queue callback item for orig node purging * @orig_work: work queue callback item for orig node purging
* @cleanup_work: work queue callback item for soft-interface deinit
* @primary_if: one of the hard-interfaces assigned to this mesh interface * @primary_if: one of the hard-interfaces assigned to this mesh interface
* becomes the primary interface * becomes the primary interface
* @algo_ops: routing algorithm used by this mesh interface * @algo_ops: routing algorithm used by this mesh interface
...@@ -1074,7 +1071,6 @@ struct batadv_priv { ...@@ -1074,7 +1071,6 @@ struct batadv_priv {
spinlock_t tp_list_lock; /* protects tp_list */ spinlock_t tp_list_lock; /* protects tp_list */
atomic_t tp_num; atomic_t tp_num;
struct delayed_work orig_work; struct delayed_work orig_work;
struct work_struct cleanup_work;
struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
struct batadv_algo_ops *algo_ops; struct batadv_algo_ops *algo_ops;
struct hlist_head softif_vlan_list; struct hlist_head softif_vlan_list;
...@@ -1379,6 +1375,7 @@ struct batadv_skb_cb { ...@@ -1379,6 +1375,7 @@ struct batadv_skb_cb {
* locally generated packet * locally generated packet
* @if_outgoing: packet where the packet should be sent to, or NULL if * @if_outgoing: packet where the packet should be sent to, or NULL if
* unspecified * unspecified
* @queue_left: the queue (counter) this packet was charged against
*/ */
struct batadv_forw_packet { struct batadv_forw_packet {
struct hlist_node list; struct hlist_node list;
...@@ -1391,11 +1388,13 @@ struct batadv_forw_packet { ...@@ -1391,11 +1388,13 @@ struct batadv_forw_packet {
struct delayed_work delayed_work; struct delayed_work delayed_work;
struct batadv_hard_iface *if_incoming; struct batadv_hard_iface *if_incoming;
struct batadv_hard_iface *if_outgoing; struct batadv_hard_iface *if_outgoing;
atomic_t *queue_left;
}; };
/** /**
* struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific) * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific)
* @activate: start routing mechanisms when hard-interface is brought up * @activate: start routing mechanisms when hard-interface is brought up
* (optional)
* @enable: init routing info when hard-interface is enabled * @enable: init routing info when hard-interface is enabled
* @disable: de-init routing info when hard-interface is disabled * @disable: de-init routing info when hard-interface is disabled
* @update_mac: (re-)init mac addresses of the protocol information * @update_mac: (re-)init mac addresses of the protocol information
...@@ -1413,6 +1412,7 @@ struct batadv_algo_iface_ops { ...@@ -1413,6 +1412,7 @@ struct batadv_algo_iface_ops {
/** /**
* struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific) * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific)
* @hardif_init: called on creation of single hop entry * @hardif_init: called on creation of single hop entry
* (optional)
* @cmp: compare the metrics of two neighbors for their respective outgoing * @cmp: compare the metrics of two neighbors for their respective outgoing
* interfaces * interfaces
* @is_similar_or_better: check if neigh1 is equally similar or better than * @is_similar_or_better: check if neigh1 is equally similar or better than
...@@ -1435,11 +1435,11 @@ struct batadv_algo_neigh_ops { ...@@ -1435,11 +1435,11 @@ struct batadv_algo_neigh_ops {
/** /**
* struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific) * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
* @free: free the resources allocated by the routing algorithm for an orig_node * @free: free the resources allocated by the routing algorithm for an orig_node
* object * object (optional)
* @add_if: ask the routing algorithm to apply the needed changes to the * @add_if: ask the routing algorithm to apply the needed changes to the
* orig_node due to a new hard-interface being added into the mesh * orig_node due to a new hard-interface being added into the mesh (optional)
* @del_if: ask the routing algorithm to apply the needed changes to the * @del_if: ask the routing algorithm to apply the needed changes to the
* orig_node due to an hard-interface being removed from the mesh * orig_node due to an hard-interface being removed from the mesh (optional)
* @print: print the originator table (optional) * @print: print the originator table (optional)
*/ */
struct batadv_algo_orig_ops { struct batadv_algo_orig_ops {
...@@ -1451,6 +1451,28 @@ struct batadv_algo_orig_ops { ...@@ -1451,6 +1451,28 @@ struct batadv_algo_orig_ops {
struct batadv_hard_iface *hard_iface); struct batadv_hard_iface *hard_iface);
}; };
/**
* struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
* @store_sel_class: parses and stores a new GW selection class (optional)
* @show_sel_class: prints the current GW selection class (optional)
* @get_best_gw_node: select the best GW from the list of available nodes
* (optional)
* @is_eligible: check if a newly discovered GW is a potential candidate for
* the election as best GW (optional)
* @print: print the gateway table (optional)
*/
struct batadv_algo_gw_ops {
ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
size_t count);
ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
struct batadv_gw_node *(*get_best_gw_node)
(struct batadv_priv *bat_priv);
bool (*is_eligible)(struct batadv_priv *bat_priv,
struct batadv_orig_node *curr_gw_orig,
struct batadv_orig_node *orig_node);
void (*print)(struct batadv_priv *bat_priv, struct seq_file *seq);
};
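A routing algorithm opts into gateway selection by filling this sub-structure inside its batadv_algo_ops. A hedged sketch with illustrative example_ names (it assumes the usual name member of batadv_algo_ops):

static struct batadv_gw_node *
example_gw_get_best(struct batadv_priv *bat_priv)
{
        return NULL; /* stub: a real implementation picks one known gateway */
}

static bool example_gw_is_eligible(struct batadv_priv *bat_priv,
                                   struct batadv_orig_node *curr_gw_orig,
                                   struct batadv_orig_node *orig_node)
{
        return false; /* stub: compare orig_node against the current GW */
}

static struct batadv_algo_ops example_algo_ops = {
        .name = "EXAMPLE",
        .gw = {
                .get_best_gw_node = example_gw_get_best,
                .is_eligible = example_gw_is_eligible,
                /* store_sel_class/show_sel_class are left NULL, so the
                 * generic sysfs handling shown earlier is used for the
                 * selection class
                 */
        },
};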
/** /**
* struct batadv_algo_ops - mesh algorithm callbacks * struct batadv_algo_ops - mesh algorithm callbacks
* @list: list node for the batadv_algo_list * @list: list node for the batadv_algo_list
...@@ -1458,6 +1480,7 @@ struct batadv_algo_orig_ops { ...@@ -1458,6 +1480,7 @@ struct batadv_algo_orig_ops {
* @iface: callbacks related to interface handling * @iface: callbacks related to interface handling
* @neigh: callbacks related to neighbors handling * @neigh: callbacks related to neighbors handling
* @orig: callbacks related to originators handling * @orig: callbacks related to originators handling
* @gw: callbacks related to GW mode
*/ */
struct batadv_algo_ops { struct batadv_algo_ops {
struct hlist_node list; struct hlist_node list;
...@@ -1465,6 +1488,7 @@ struct batadv_algo_ops { ...@@ -1465,6 +1488,7 @@ struct batadv_algo_ops {
struct batadv_algo_iface_ops iface; struct batadv_algo_iface_ops iface;
struct batadv_algo_neigh_ops neigh; struct batadv_algo_neigh_ops neigh;
struct batadv_algo_orig_ops orig; struct batadv_algo_orig_ops orig;
struct batadv_algo_gw_ops gw;
}; };
/** /**
...@@ -1564,4 +1588,17 @@ enum batadv_tvlv_handler_flags { ...@@ -1564,4 +1588,17 @@ enum batadv_tvlv_handler_flags {
BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2), BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
}; };
/**
* struct batadv_store_mesh_work - work queue item to detach an interface
* add/del request from the sysfs lock
* @net_dev: netdevice to add/remove to/from batman-adv soft-interface
* @soft_iface_name: name of soft-interface to modify
* @work: work queue item
*/
struct batadv_store_mesh_work {
struct net_device *net_dev;
char soft_iface_name[IFNAMSIZ];
struct work_struct work;
};
#endif /* _NET_BATMAN_ADV_TYPES_H_ */ #endif /* _NET_BATMAN_ADV_TYPES_H_ */