Commit 18889231 authored by Javier Cardona, committed by John W. Linville

mac80211: Move mpath and mpp growth to mesh workqueue.

This prevents calling synchronize_rcu() from within the tx path by moving the
table growth code to the mesh workqueue.

Move mesh_table_free and mesh_table_grow from mesh.c to mesh_pathtbl.c, and
declare mesh_table_grow static.

Also, re-enable mesh in Kconfig and update the configuration description.
Signed-off-by: Javier Cardona <javier@cozybit.com>
Tested-by: Andrey Yurovsky <andrey@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 5b365834
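The change follows a common deferred-work pattern: the hot (possibly atomic) path only records that a table needs to grow and schedules work, while the work handler, running in process context, performs the allocation, the RCU pointer swap and the blocking synchronize_rcu(). The sketch below is a minimal, self-contained illustration of that pattern, not mac80211 code; every identifier in it (example_dev, EXAMPLE_WORK_GROW_TABLE, example_table_grow, example_table_free) is hypothetical.

/*
 * Minimal sketch of the deferred-grow pattern (illustrative only,
 * not mac80211 code; all names are hypothetical).
 */
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>

struct example_table;				/* opaque RCU-protected hash table */
struct example_table *example_table_grow(struct example_table *tbl);
void example_table_free(struct example_table *tbl);

enum example_deferred_tasks {
	EXAMPLE_WORK_GROW_TABLE,		/* table is full and needs to grow */
};

struct example_dev {
	unsigned long wrkq_flags;		/* deferred-task bits */
	struct work_struct work;
	struct example_table *tbl;		/* readers dereference under rcu_read_lock() */
};

/* Hot path, possibly atomic context: no allocation, no blocking here. */
static void example_request_grow(struct example_dev *dev)
{
	set_bit(EXAMPLE_WORK_GROW_TABLE, &dev->wrkq_flags);
	schedule_work(&dev->work);		/* mac80211 uses ieee80211_queue_work() */
}

/* Work handler, process context: sleeping and synchronize_rcu() are allowed. */
static void example_work(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, work);
	struct example_table *oldtbl, *newtbl;

	if (!test_and_clear_bit(EXAMPLE_WORK_GROW_TABLE, &dev->wrkq_flags))
		return;

	oldtbl = dev->tbl;
	newtbl = example_table_grow(oldtbl);	/* may sleep; NULL if no growth needed */
	if (!newtbl)
		return;

	rcu_assign_pointer(dev->tbl, newtbl);	/* publish the new table to readers */
	synchronize_rcu();			/* wait out readers still using oldtbl */
	example_table_free(oldtbl);
}

In the patch below the same split appears as the MESH_WORK_GROW_MPATH_TABLE and MESH_WORK_GROW_MPP_TABLE bits in ifmsh->wrkq_flags, ieee80211_queue_work() calls in the path-add functions, and mesh_mpath_table_grow() / mesh_mpp_table_grow() invoked from ieee80211_mesh_work(); concurrent growers are additionally serialized by pathtbl_resize_lock.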
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -66,12 +66,12 @@ endmenu
 config MAC80211_MESH
 	bool "Enable mac80211 mesh networking (pre-802.11s) support"
 	depends on MAC80211 && EXPERIMENTAL
-	depends on BROKEN
 	---help---
 	This options enables support of Draft 802.11s mesh networking.
-	The implementation is based on Draft 1.08 of the Mesh Networking
-	amendment. For more information visit http://o11s.org/.
+	The implementation is based on Draft 2.08 of the Mesh Networking
+	amendment. However, no compliance with that draft is claimed or even
+	possible, as drafts leave a number of identifiers to be defined after
+	ratification. For more information visit http://o11s.org/.
 
 config MAC80211_LEDS
 	bool "Enable LED triggers"
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -355,7 +355,7 @@ struct ieee80211_if_mesh {
 	unsigned long timers_running;
 
-	bool housekeeping;
+	unsigned long wrkq_flags;
 
 	u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
 	size_t mesh_id_len;
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -47,7 +47,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
-	ifmsh->housekeeping = true;
+	ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING;
 
 	if (local->quiescing) {
 		set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
@@ -320,30 +320,6 @@ struct mesh_table *mesh_table_alloc(int size_order)
 	return newtbl;
 }
 
-static void __mesh_table_free(struct mesh_table *tbl)
-{
-	kfree(tbl->hash_buckets);
-	kfree(tbl->hashwlock);
-	kfree(tbl);
-}
-
-void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
-{
-	struct hlist_head *mesh_hash;
-	struct hlist_node *p, *q;
-	int i;
-
-	mesh_hash = tbl->hash_buckets;
-	for (i = 0; i <= tbl->hash_mask; i++) {
-		spin_lock(&tbl->hashwlock[i]);
-		hlist_for_each_safe(p, q, &mesh_hash[i]) {
-			tbl->free_node(p, free_leafs);
-			atomic_dec(&tbl->entries);
-		}
-		spin_unlock(&tbl->hashwlock[i]);
-	}
-	__mesh_table_free(tbl);
-}
-
 static void ieee80211_mesh_path_timer(unsigned long data)
 {
@@ -360,44 +336,6 @@ static void ieee80211_mesh_path_timer(unsigned long data)
 	ieee80211_queue_work(&local->hw, &ifmsh->work);
 }
 
-struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
-{
-	struct mesh_table *newtbl;
-	struct hlist_head *oldhash;
-	struct hlist_node *p, *q;
-	int i;
-
-	if (atomic_read(&tbl->entries)
-			< tbl->mean_chain_len * (tbl->hash_mask + 1))
-		goto endgrow;
-
-	newtbl = mesh_table_alloc(tbl->size_order + 1);
-	if (!newtbl)
-		goto endgrow;
-
-	newtbl->free_node = tbl->free_node;
-	newtbl->mean_chain_len = tbl->mean_chain_len;
-	newtbl->copy_node = tbl->copy_node;
-	atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
-
-	oldhash = tbl->hash_buckets;
-	for (i = 0; i <= tbl->hash_mask; i++)
-		hlist_for_each(p, &oldhash[i])
-			if (tbl->copy_node(p, newtbl) < 0)
-				goto errcopy;
-
-	return newtbl;
-
-errcopy:
-	for (i = 0; i <= newtbl->hash_mask; i++) {
-		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
-			tbl->free_node(p, 0);
-	}
-	__mesh_table_free(newtbl);
-endgrow:
-	return NULL;
-}
-
 /**
  * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame
  * @hdr: 802.11 frame header
@@ -487,7 +425,6 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
 	if (free_plinks != sdata->u.mesh.accepting_plinks)
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 
-	ifmsh->housekeeping = false;
 	mod_timer(&ifmsh->housekeeping_timer,
 		  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
 }
@@ -524,8 +461,8 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
 
-	ifmsh->housekeeping = true;
-	queue_work(local->hw, &ifmsh->work);
+	ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING;
+	ieee80211_queue_work(&local->hw, &ifmsh->work);
 	sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
 					  BSS_CHANGED_BEACON_ENABLED |
@@ -664,7 +601,13 @@ static void ieee80211_mesh_work(struct work_struct *work)
 	    ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
 		mesh_path_start_discovery(sdata);
 
-	if (ifmsh->housekeeping)
+	if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
+		mesh_mpath_table_grow();
+
+	if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
+		mesh_mpp_table_grow();
+
+	if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
 		ieee80211_mesh_housekeeping(sdata, ifmsh);
 }
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -43,6 +43,23 @@ enum mesh_path_flags {
 	MESH_PATH_RESOLVED =	BIT(4),
 };
 
+/**
+ * enum mesh_deferred_task_flags - mac80211 mesh deferred tasks
+ *
+ * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks
+ * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
+ *	to grow.
+ * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs
+ *	to grow.
+ */
+enum mesh_deferred_task_flags {
+	MESH_WORK_HOUSEKEEPING,
+	MESH_WORK_GROW_MPATH_TABLE,
+	MESH_WORK_GROW_MPP_TABLE,
+};
+
 /**
  * struct mesh_path - mac80211 mesh path structure
  *
@@ -250,7 +267,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
 /* Mesh tables */
 struct mesh_table *mesh_table_alloc(int size_order);
 void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
-struct mesh_table *mesh_table_grow(struct mesh_table *tbl);
+void mesh_mpath_table_grow(void);
+void mesh_mpp_table_grow(void);
 u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
 		    struct mesh_table *tbl);
 /* Mesh paths */
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -39,6 +39,69 @@ static struct mesh_table *mesh_paths;
 static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 
 int mesh_paths_generation;
 
+static void __mesh_table_free(struct mesh_table *tbl)
+{
+	kfree(tbl->hash_buckets);
+	kfree(tbl->hashwlock);
+	kfree(tbl);
+}
+
+void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
+{
+	struct hlist_head *mesh_hash;
+	struct hlist_node *p, *q;
+	int i;
+
+	mesh_hash = tbl->hash_buckets;
+	for (i = 0; i <= tbl->hash_mask; i++) {
+		spin_lock(&tbl->hashwlock[i]);
+		hlist_for_each_safe(p, q, &mesh_hash[i]) {
+			tbl->free_node(p, free_leafs);
+			atomic_dec(&tbl->entries);
+		}
+		spin_unlock(&tbl->hashwlock[i]);
+	}
+	__mesh_table_free(tbl);
+}
+
+static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
+{
+	struct mesh_table *newtbl;
+	struct hlist_head *oldhash;
+	struct hlist_node *p, *q;
+	int i;
+
+	if (atomic_read(&tbl->entries)
+			< tbl->mean_chain_len * (tbl->hash_mask + 1))
+		goto endgrow;
+
+	newtbl = mesh_table_alloc(tbl->size_order + 1);
+	if (!newtbl)
+		goto endgrow;
+
+	newtbl->free_node = tbl->free_node;
+	newtbl->mean_chain_len = tbl->mean_chain_len;
+	newtbl->copy_node = tbl->copy_node;
+	atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
+
+	oldhash = tbl->hash_buckets;
+	for (i = 0; i <= tbl->hash_mask; i++)
+		hlist_for_each(p, &oldhash[i])
+			if (tbl->copy_node(p, newtbl) < 0)
+				goto errcopy;
+
+	return newtbl;
+
+errcopy:
+	for (i = 0; i <= newtbl->hash_mask; i++) {
+		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
+			tbl->free_node(p, 0);
+	}
+	__mesh_table_free(newtbl);
+endgrow:
+	return NULL;
+}
+
 /* This lock will have the grow table function as writer and add / delete nodes
  * as readers. When reading the table (i.e. doing lookups) we are well protected
@@ -187,6 +250,8 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
  */
 int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct ieee80211_local *local = sdata->local;
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
@@ -195,8 +260,6 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	int err = 0;
 	u32 hash_idx;
 
-	might_sleep();
-
 	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
@@ -208,11 +271,11 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 		return -ENOSPC;
 
 	err = -ENOMEM;
-	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
 	if (!new_mpath)
 		goto err_path_alloc;
 
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
 	if (!new_node)
 		goto err_node_alloc;
@@ -250,20 +313,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
 	read_unlock(&pathtbl_resize_lock);
 	if (grow) {
-		struct mesh_table *oldtbl, *newtbl;
-
-		write_lock(&pathtbl_resize_lock);
-		oldtbl = mesh_paths;
-		newtbl = mesh_table_grow(mesh_paths);
-		if (!newtbl) {
-			write_unlock(&pathtbl_resize_lock);
-			return 0;
-		}
-		rcu_assign_pointer(mesh_paths, newtbl);
-		write_unlock(&pathtbl_resize_lock);
-		synchronize_rcu();
-		mesh_table_free(oldtbl, false);
+		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
+		ieee80211_queue_work(&local->hw, &ifmsh->work);
 	}
 	return 0;
@@ -278,9 +329,46 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return err;
 }
 
+void mesh_mpath_table_grow(void)
+{
+	struct mesh_table *oldtbl, *newtbl;
+
+	write_lock(&pathtbl_resize_lock);
+	oldtbl = mesh_paths;
+	newtbl = mesh_table_grow(mesh_paths);
+	if (!newtbl) {
+		write_unlock(&pathtbl_resize_lock);
+		return;
+	}
+	rcu_assign_pointer(mesh_paths, newtbl);
+	write_unlock(&pathtbl_resize_lock);
+	synchronize_rcu();
+	mesh_table_free(oldtbl, false);
+}
+
+void mesh_mpp_table_grow(void)
+{
+	struct mesh_table *oldtbl, *newtbl;
+
+	write_lock(&pathtbl_resize_lock);
+	oldtbl = mpp_paths;
+	newtbl = mesh_table_grow(mpp_paths);
+	if (!newtbl) {
+		write_unlock(&pathtbl_resize_lock);
+		return;
+	}
+	rcu_assign_pointer(mpp_paths, newtbl);
+	write_unlock(&pathtbl_resize_lock);
+	synchronize_rcu();
+	mesh_table_free(oldtbl, false);
+}
+
 int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 {
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct ieee80211_local *local = sdata->local;
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
@@ -289,8 +377,6 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	int err = 0;
 	u32 hash_idx;
 
-	might_sleep();
-
 	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
@@ -299,11 +385,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 		return -ENOTSUPP;
 
 	err = -ENOMEM;
-	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
 	if (!new_mpath)
 		goto err_path_alloc;
 
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
 	if (!new_node)
 		goto err_node_alloc;
@@ -337,20 +423,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
 	read_unlock(&pathtbl_resize_lock);
 	if (grow) {
-		struct mesh_table *oldtbl, *newtbl;
-
-		write_lock(&pathtbl_resize_lock);
-		oldtbl = mpp_paths;
-		newtbl = mesh_table_grow(mpp_paths);
-		if (!newtbl) {
-			write_unlock(&pathtbl_resize_lock);
-			return 0;
-		}
-		rcu_assign_pointer(mpp_paths, newtbl);
-		write_unlock(&pathtbl_resize_lock);
-		synchronize_rcu();
-		mesh_table_free(oldtbl, false);
+		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
+		ieee80211_queue_work(&local->hw, &ifmsh->work);
 	}
 	return 0;