Commit e81ce85f authored by Joonsoo Kim's avatar Joonsoo Kim Committed by Linus Torvalds

mm, vmalloc: iterate vmap_area_list, instead of vmlist in vread/vwrite()

Now, while we hold vmap_area_lock, va->vm cannot be discarded.  So we can
safely access va->vm when iterating over vmap_area_list while holding
vmap_area_lock.  With this property, change the code in vread/vwrite()
that iterates over vmlist to iterate over vmap_area_list instead.

There is a small difference related to locking: vmlist_lock is a rwlock
(taken with read_lock()), but vmap_area_lock is a spinlock.  This may
introduce spinning overhead while vread/vwrite() is executing.  But these
are debug-oriented functions, so this overhead is not a real problem for
the common case.
Signed-off-by: default avatarJoonsoo Kim <js1304@gmail.com>
Signed-off-by: default avatarJoonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Dave Anderson <anderson@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent c69480ad
...@@ -2012,7 +2012,8 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count) ...@@ -2012,7 +2012,8 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
long vread(char *buf, char *addr, unsigned long count) long vread(char *buf, char *addr, unsigned long count)
{ {
struct vm_struct *tmp; struct vmap_area *va;
struct vm_struct *vm;
char *vaddr, *buf_start = buf; char *vaddr, *buf_start = buf;
unsigned long buflen = count; unsigned long buflen = count;
unsigned long n; unsigned long n;
...@@ -2021,10 +2022,17 @@ long vread(char *buf, char *addr, unsigned long count) ...@@ -2021,10 +2022,17 @@ long vread(char *buf, char *addr, unsigned long count)
if ((unsigned long) addr + count < count) if ((unsigned long) addr + count < count)
count = -(unsigned long) addr; count = -(unsigned long) addr;
read_lock(&vmlist_lock); spin_lock(&vmap_area_lock);
for (tmp = vmlist; count && tmp; tmp = tmp->next) { list_for_each_entry(va, &vmap_area_list, list) {
vaddr = (char *) tmp->addr; if (!count)
if (addr >= vaddr + tmp->size - PAGE_SIZE) break;
if (!(va->flags & VM_VM_AREA))
continue;
vm = va->vm;
vaddr = (char *) vm->addr;
if (addr >= vaddr + vm->size - PAGE_SIZE)
continue; continue;
while (addr < vaddr) { while (addr < vaddr) {
if (count == 0) if (count == 0)
...@@ -2034,10 +2042,10 @@ long vread(char *buf, char *addr, unsigned long count) ...@@ -2034,10 +2042,10 @@ long vread(char *buf, char *addr, unsigned long count)
addr++; addr++;
count--; count--;
} }
n = vaddr + tmp->size - PAGE_SIZE - addr; n = vaddr + vm->size - PAGE_SIZE - addr;
if (n > count) if (n > count)
n = count; n = count;
if (!(tmp->flags & VM_IOREMAP)) if (!(vm->flags & VM_IOREMAP))
aligned_vread(buf, addr, n); aligned_vread(buf, addr, n);
else /* IOREMAP area is treated as memory hole */ else /* IOREMAP area is treated as memory hole */
memset(buf, 0, n); memset(buf, 0, n);
...@@ -2046,7 +2054,7 @@ long vread(char *buf, char *addr, unsigned long count) ...@@ -2046,7 +2054,7 @@ long vread(char *buf, char *addr, unsigned long count)
count -= n; count -= n;
} }
finished: finished:
read_unlock(&vmlist_lock); spin_unlock(&vmap_area_lock);
if (buf == buf_start) if (buf == buf_start)
return 0; return 0;
...@@ -2085,7 +2093,8 @@ long vread(char *buf, char *addr, unsigned long count) ...@@ -2085,7 +2093,8 @@ long vread(char *buf, char *addr, unsigned long count)
long vwrite(char *buf, char *addr, unsigned long count) long vwrite(char *buf, char *addr, unsigned long count)
{ {
struct vm_struct *tmp; struct vmap_area *va;
struct vm_struct *vm;
char *vaddr; char *vaddr;
unsigned long n, buflen; unsigned long n, buflen;
int copied = 0; int copied = 0;
...@@ -2095,10 +2104,17 @@ long vwrite(char *buf, char *addr, unsigned long count) ...@@ -2095,10 +2104,17 @@ long vwrite(char *buf, char *addr, unsigned long count)
count = -(unsigned long) addr; count = -(unsigned long) addr;
buflen = count; buflen = count;
read_lock(&vmlist_lock); spin_lock(&vmap_area_lock);
for (tmp = vmlist; count && tmp; tmp = tmp->next) { list_for_each_entry(va, &vmap_area_list, list) {
vaddr = (char *) tmp->addr; if (!count)
if (addr >= vaddr + tmp->size - PAGE_SIZE) break;
if (!(va->flags & VM_VM_AREA))
continue;
vm = va->vm;
vaddr = (char *) vm->addr;
if (addr >= vaddr + vm->size - PAGE_SIZE)
continue; continue;
while (addr < vaddr) { while (addr < vaddr) {
if (count == 0) if (count == 0)
...@@ -2107,10 +2123,10 @@ long vwrite(char *buf, char *addr, unsigned long count) ...@@ -2107,10 +2123,10 @@ long vwrite(char *buf, char *addr, unsigned long count)
addr++; addr++;
count--; count--;
} }
n = vaddr + tmp->size - PAGE_SIZE - addr; n = vaddr + vm->size - PAGE_SIZE - addr;
if (n > count) if (n > count)
n = count; n = count;
if (!(tmp->flags & VM_IOREMAP)) { if (!(vm->flags & VM_IOREMAP)) {
aligned_vwrite(buf, addr, n); aligned_vwrite(buf, addr, n);
copied++; copied++;
} }
...@@ -2119,7 +2135,7 @@ long vwrite(char *buf, char *addr, unsigned long count) ...@@ -2119,7 +2135,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
count -= n; count -= n;
} }
finished: finished:
read_unlock(&vmlist_lock); spin_unlock(&vmap_area_lock);
if (!copied) if (!copied)
return 0; return 0;
return buflen; return buflen;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment