Commit 005842ef authored by Alexandre Bounine, committed by Linus Torvalds

rapidio: run discovery as an asynchronous process

Modify the mport initialization routine to run the RapidIO discovery
process asynchronously.  This allows an arbitrary order of enumerating
and discovering ports in systems with multiple RapidIO controllers,
without creating a deadlock when an enumerator port is registered after
a discovering one.

Matching the net ID to the mport ID ensures consistent net ID assignment
in multiport RapidIO systems with an asynchronous discovery process (the
global-counter implementation is subject to a race between threads).
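To make that race concrete, here is a small, hypothetical user-space C program (not part of the patch) that mimics the two ID-assignment strategies: an unprotected shared counter like the removed next_net++, and an ID derived from the per-port index like the new net->id = port->id.  Only the identifiers next_net and the port ID correspond to the kernel code; everything else is illustrative.

/*
 * Hypothetical illustration only -- not kernel code.  Several "discovery"
 * threads run concurrently, as the patch allows; the shared counter is an
 * unsynchronized read-modify-write and can hand out duplicate IDs, while
 * the per-port ID needs no shared state at all.
 */
#include <pthread.h>
#include <stdio.h>

static int next_net;			/* shared counter, no lock (racy) */

static void *disc_thread(void *arg)
{
	int port_id = (int)(long)arg;

	int racy_id = next_net++;	/* two threads may read the same value */
	int safe_id = port_id;		/* port IDs are already unique */

	printf("port %d: counter-based net id %d, port-based net id %d\n",
	       port_id, racy_id, safe_id);
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, disc_thread, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}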

[akpm@linux-foundation.org: tweak code layout]
Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Li Yang <leoli@freescale.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a7071efc
@@ -44,7 +44,6 @@ static void rio_init_em(struct rio_dev *rdev);
 DEFINE_SPINLOCK(rio_global_list_lock);
 
 static int next_destid = 0;
-static int next_net = 0;
 static int next_comptag = 1;
 
 static int rio_mport_phys_table[] = {
@@ -1062,7 +1061,7 @@ static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port)
 		INIT_LIST_HEAD(&net->mports);
 		list_add_tail(&port->nnode, &net->mports);
 		net->hport = port;
-		net->id = next_net++;
+		net->id = port->id;
 	}
 	return net;
 }
@@ -1260,15 +1260,62 @@ static int __devinit rio_init(void)
 	return 0;
 }
 
+static struct workqueue_struct *rio_wq;
+
+struct rio_disc_work {
+	struct work_struct	work;
+	struct rio_mport	*mport;
+};
+
+static void __devinit disc_work_handler(struct work_struct *_work)
+{
+	struct rio_disc_work *work;
+
+	work = container_of(_work, struct rio_disc_work, work);
+	pr_debug("RIO: discovery work for mport %d %s\n",
+		 work->mport->id, work->mport->name);
+	rio_disc_mport(work->mport);
+	kfree(work);
+}
+
 int __devinit rio_init_mports(void)
 {
 	struct rio_mport *port;
+	struct rio_disc_work *work;
+	int no_disc = 0;
 
 	list_for_each_entry(port, &rio_mports, node) {
 		if (port->host_deviceid >= 0)
 			rio_enum_mport(port);
-		else
-			rio_disc_mport(port);
+		else if (!no_disc) {
+			if (!rio_wq) {
+				rio_wq = alloc_workqueue("riodisc", 0, 0);
+				if (!rio_wq) {
+					pr_err("RIO: unable allocate rio_wq\n");
+					no_disc = 1;
+					continue;
+				}
+			}
+
+			work = kzalloc(sizeof *work, GFP_KERNEL);
+			if (!work) {
+				pr_err("RIO: no memory for work struct\n");
+				no_disc = 1;
+				continue;
+			}
+
+			work->mport = port;
+			INIT_WORK(&work->work, disc_work_handler);
+			queue_work(rio_wq, &work->work);
+		}
+	}
+
+	if (rio_wq) {
+		pr_debug("RIO: flush discovery workqueue\n");
+		flush_workqueue(rio_wq);
+		pr_debug("RIO: flush discovery workqueue finished\n");
+		destroy_workqueue(rio_wq);
 	}
 
 	rio_init();
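For readers less familiar with the workqueue API used in the hunk above, the following stand-alone module sketch shows the same pattern under assumed names (probe_wq, probe_work, a fake dev_id standing in for struct rio_mport): allocate a workqueue, queue one work item per device, then flush so the caller does not continue until every job has finished.  Only the workqueue calls themselves (alloc_workqueue, INIT_WORK, queue_work, flush_workqueue, destroy_workqueue) mirror the patch; the rest is illustrative.

/*
 * Stand-alone sketch (hypothetical module, not part of the patch).
 * Pattern: one work item per device, queued on a dedicated workqueue,
 * then flushed so initialization continues only after all jobs finish.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

static struct workqueue_struct *probe_wq;	/* plays the role of rio_wq */

struct probe_work {
	struct work_struct work;
	int dev_id;			/* stands in for struct rio_mport * */
};

static void probe_work_handler(struct work_struct *_work)
{
	struct probe_work *w = container_of(_work, struct probe_work, work);

	pr_debug("probing device %d\n", w->dev_id);	/* real discovery would go here */
	kfree(w);
}

static int __init probe_async_init(void)
{
	struct probe_work *w;
	int i;

	probe_wq = alloc_workqueue("probe_wq", 0, 0);
	if (!probe_wq)
		return -ENOMEM;

	for (i = 0; i < 4; i++) {	/* one job per (fake) device */
		w = kzalloc(sizeof(*w), GFP_KERNEL);
		if (!w)
			break;
		w->dev_id = i;
		INIT_WORK(&w->work, probe_work_handler);
		queue_work(probe_wq, &w->work);
	}

	flush_workqueue(probe_wq);	/* wait for every queued job */
	destroy_workqueue(probe_wq);
	return 0;
}
module_init(probe_async_init);

MODULE_LICENSE("GPL");

In the patch itself, that flush is what preserves the original ordering guarantee of rio_init_mports(): rio_init() still runs only after every port has been enumerated or discovered, while the discovery jobs themselves proceed in parallel and in any registration order.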