Commit ca219452 authored by Laura Abbott, committed by Will Deacon

arm64: Correctly bounds check virt_addr_valid

virt_addr_valid is supposed to return true if and only if virt_to_page
returns a valid page structure. The current macro does the math on
whatever address is given and passes the result to pfn_valid to verify.
vmalloc and module addresses can generate a pfn that happens to be
valid. Fix this by only performing the pfn_valid check on addresses that
have the potential to be valid.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 0edfa839
@@ -214,7 +214,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #else
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
 #define __page_to_voff(kaddr)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
@@ -222,11 +222,15 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
 
-#define virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
+#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
 					   + PHYS_OFFSET) >> PAGE_SHIFT)
 #endif
 #endif
 
+#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
+					 _virt_addr_valid(kaddr))
+
 #include <asm-generic/memory_model.h>
 
 #endif
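
To make the failure mode concrete, here is a minimal userspace sketch of the check before and after the patch. Everything in it is an illustrative stand-in: PAGE_OFFSET, PHYS_OFFSET, MAX_PFN, pa() and pfn_valid() are invented values and helpers modelled loosely on the macros above, not the real arm64 definitions.

/*
 * Minimal userspace model of the virt_addr_valid() check (a sketch,
 * not kernel code).  The constants below are invented stand-ins; only
 * the shape of the arithmetic matters.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_OFFSET  0xffff800000000000ULL  /* start of the linear map (illustrative) */
#define PHYS_OFFSET  0x40000000ULL          /* physical base of RAM (illustrative) */
#define MAX_PFN      0x100000ULL            /* pretend RAM spans this many pages */

/* Stand-in for the __pa() translation of a linear-map address. */
static uint64_t pa(uint64_t kaddr)
{
	return (kaddr & ~PAGE_OFFSET) + PHYS_OFFSET;
}

/* Stand-in for pfn_valid(); the real kernel consults the memory map. */
static bool pfn_valid(uint64_t pfn)
{
	return pfn < MAX_PFN;
}

/* Old behaviour: the pfn check is applied to whatever address is given. */
static bool virt_addr_valid_old(uint64_t kaddr)
{
	return pfn_valid(pa(kaddr) >> PAGE_SHIFT);
}

/* New behaviour: addresses outside the linear map are rejected up front. */
static bool virt_addr_valid_new(uint64_t kaddr)
{
	return kaddr >= PAGE_OFFSET &&
	       pfn_valid(pa(kaddr) >> PAGE_SHIFT);
}

int main(void)
{
	/* An address below PAGE_OFFSET, i.e. vmalloc/module space in this model. */
	uint64_t not_linear = 0xffff000008000000ULL;

	/* The old check reports this address as valid... */
	printf("old: %d\n", virt_addr_valid_old(not_linear));  /* prints 1 */
	/* ...while the patched check refuses it before touching pfn_valid(). */
	printf("new: %d\n", virt_addr_valid_new(not_linear));  /* prints 0 */
	return 0;
}

With these numbers the old check accepts an address that is not in the linear map, because the mask-and-offset arithmetic happens to land on a pfn that exists; the new check mirrors the patch by requiring the address to be at or above PAGE_OFFSET before consulting pfn_valid().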