Commit 61ecdb80 authored by Peter Zijlstra, committed by Linus Torvalds

mm: strictly nested kmap_atomic()

Ensure kmap_atomic() usage is strictly nested: pages are unmapped in the reverse order they were mapped, so the atomic kmap slots behave like a stack.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2e30244a
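Every hunk below applies the same rule: the page mapped last with kmap_atomic() must be the first one released with kunmap_atomic(), so the mappings nest like a stack. A minimal sketch of the pattern, modeled on the copy_highpage() hunk further down (the wrapper name is illustrative, and it uses the KM_USERx slot API that was current at the time of this commit):

#include <linux/highmem.h>

/* Illustrative helper: copy one highmem page to another with
 * strictly nested atomic mappings. */
static void copy_highpage_nested(struct page *to, struct page *from)
{
	void *vfrom = kmap_atomic(from, KM_USER0);	/* mapped first */
	void *vto   = kmap_atomic(to, KM_USER1);	/* mapped second */

	copy_page(vto, vfrom);

	kunmap_atomic(vto, KM_USER1);	/* unmapped first: last in, first out */
	kunmap_atomic(vfrom, KM_USER0);	/* unmapped last */
}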
@@ -83,8 +83,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	memcpy(dest_buf, src_buf, len);
-	kunmap_atomic(dest_buf, KM_USER0);
 	kunmap_atomic(src_buf, KM_USER1);
+	kunmap_atomic(dest_buf, KM_USER0);
 	async_tx_sync_epilog(submit);
 }
@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		blkcipher_unmap_dst(walk);
 	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
-		blkcipher_unmap_src(walk);
 		if (walk->flags & BLKCIPHER_WALK_DIFF)
 			blkcipher_unmap_dst(walk);
+		blkcipher_unmap_src(walk);
 	}
 	scatterwalk_advance(&walk->in, n);
@@ -101,8 +101,8 @@ static int transfer_none(struct loop_device *lo, int cmd,
 	else
 		memcpy(raw_buf, loop_buf, size);
-	kunmap_atomic(raw_buf, KM_USER0);
 	kunmap_atomic(loop_buf, KM_USER1);
+	kunmap_atomic(raw_buf, KM_USER0);
 	cond_resched();
 	return 0;
 }
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
 	for (i = 0; i < size; i++)
 		*out++ = *in++ ^ key[(i & 511) % keysize];
-	kunmap_atomic(raw_buf, KM_USER0);
 	kunmap_atomic(loop_buf, KM_USER1);
+	kunmap_atomic(raw_buf, KM_USER0);
 	cond_resched();
 	return 0;
 }
@@ -201,8 +201,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
 	vfrom = kmap_atomic(from, KM_USER0);
 	vto = kmap_atomic(to, KM_USER1);
 	copy_user_page(vto, vfrom, vaddr, to);
-	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vfrom, KM_USER0);
 }
 #endif
@@ -214,8 +214,8 @@ static inline void copy_highpage(struct page *to, struct page *from)
 	vfrom = kmap_atomic(from, KM_USER0);
 	vto = kmap_atomic(to, KM_USER1);
 	copy_page(vto, vfrom);
-	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vfrom, KM_USER0);
 }
 #endif /* _LINUX_HIGHMEM_H */
@@ -984,8 +984,8 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 		src = kmap_atomic(s_page, KM_USER0);
 		dst = kmap_atomic(d_page, KM_USER1);
 		do_copy_page(dst, src);
-		kunmap_atomic(src, KM_USER0);
 		kunmap_atomic(dst, KM_USER1);
+		kunmap_atomic(src, KM_USER0);
 	} else {
 		if (PageHighMem(d_page)) {
 			/* Page pointed to by src may contain some kernel
@@ -2273,8 +2273,8 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 	memcpy(buf, kaddr1, PAGE_SIZE);
 	memcpy(kaddr1, kaddr2, PAGE_SIZE);
 	memcpy(kaddr2, buf, PAGE_SIZE);
-	kunmap_atomic(kaddr1, KM_USER0);
 	kunmap_atomic(kaddr2, KM_USER1);
+	kunmap_atomic(kaddr1, KM_USER0);
 }
 /**