Commit c63e09ec authored by Al Viro

Make allocation of anon devices cheaper

Standard trick - add a new variable (start) such that
for each n < start, n is known to be busy.  Allocation can
skip checking everything in [0..start), and if it returns
n, we can set start to n + 1.  Freeing below start sets
start to what we'd just freed.

Of course, it still sucks if we do something like
	free 0
	allocate
	allocate
in a loop - still O(n^2) time.  However, on saner loads it
improves things a lot, and the pathological case is not worth
the trouble of switching to something with better worst-case
behaviour.
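
A minimal userspace sketch of the trick described above, assuming a plain
bitmap allocator rather than the kernel's IDA; anon_alloc(), anon_free()
and MAX_ANON are made-up names for illustration, not anything from this
patch:

#include <stdio.h>
#include <stdbool.h>

#define MAX_ANON 1024

static bool busy[MAX_ANON];     /* busy[n] => id n is currently allocated */
static int start;               /* every id below start is known to be busy */

/* Allocate the lowest free id, skipping the known-busy prefix [0..start). */
static int anon_alloc(void)
{
        for (int n = start; n < MAX_ANON; n++) {
                if (!busy[n]) {
                        busy[n] = true;
                        start = n + 1;  /* everything below n + 1 is now busy */
                        return n;
                }
        }
        return -1;                      /* id space exhausted */
}

/* Free an id; freeing below start lowers the hint so the id can be reused. */
static void anon_free(int n)
{
        busy[n] = false;
        if (n < start)
                start = n;
}

int main(void)
{
        int a = anon_alloc();           /* 0, start becomes 1 */
        int b = anon_alloc();           /* 1, start becomes 2 */
        int c = anon_alloc();           /* 2, found without re-checking 0 and 1 */

        anon_free(b);                   /* 1 < start, so start drops back to 1 */
        int d = anon_alloc();           /* 1 again */

        /* The degenerate load from the message: free 0, allocate, allocate.
         * Each free(0) pulls start back to 0, so the second allocation has
         * to walk the whole busy prefix again -- O(n^2) over the loop. */
        for (int i = 0; i < 16; i++) {
                anon_free(0);
                anon_alloc();           /* returns 0, start back to 1 */
                anon_alloc();           /* rescans the busy prefix each time */
        }

        printf("a=%d b=%d c=%d d=%d start=%d\n", a, b, c, d, start);
        return 0;
}

The patch below applies the same hint to the IDA: ida_get_new_above()
starts searching at unnamed_dev_start, and both ida_remove() call sites
pull the hint back down when a lower id is released.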
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 7e325d3a
fs/super.c
@@ -608,6 +608,7 @@ void emergency_remount(void)
 
 static DEFINE_IDA(unnamed_dev_ida);
 static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
+static int unnamed_dev_start = 0; /* don't bother trying below it */
 
 int set_anon_super(struct super_block *s, void *data)
 {
@@ -618,7 +619,8 @@ int set_anon_super(struct super_block *s, void *data)
 	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
 		return -ENOMEM;
 	spin_lock(&unnamed_dev_lock);
-	error = ida_get_new(&unnamed_dev_ida, &dev);
+	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
+	unnamed_dev_start = dev + 1;
 	spin_unlock(&unnamed_dev_lock);
 	if (error == -EAGAIN)
 		/* We raced and lost with another CPU. */
@@ -629,6 +631,7 @@ int set_anon_super(struct super_block *s, void *data)
 	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
 		spin_lock(&unnamed_dev_lock);
 		ida_remove(&unnamed_dev_ida, dev);
+		unnamed_dev_start = dev;
 		spin_unlock(&unnamed_dev_lock);
 		return -EMFILE;
 	}
@@ -645,6 +648,8 @@ void kill_anon_super(struct super_block *sb)
 	generic_shutdown_super(sb);
 	spin_lock(&unnamed_dev_lock);
 	ida_remove(&unnamed_dev_ida, slot);
+	if (slot < unnamed_dev_start)
+		unnamed_dev_start = slot;
 	spin_unlock(&unnamed_dev_lock);
 }