Commit f58bea2f authored by Oleg Nesterov, committed by Ingo Molnar

uprobes: Fix the usage of install_special_mapping()

install_special_mapping(pages) expects that "pages" is the zero-
terminated array while xol_add_vma() passes &area->page, this
means that special_mapping_fault() can wrongly use the next
member in xol_area (vaddr) as "struct page *".

Fortunately, this area is not expandable so pgoff != 0 isn't
possible (modulo bugs in special_mapping_vmops), but still this
does not look good.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Pratyush Anand <panand@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150721134031.GA4789@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent db087ef6
...@@ -99,7 +99,7 @@ struct xol_area { ...@@ -99,7 +99,7 @@ struct xol_area {
wait_queue_head_t wq; /* if all slots are busy */ wait_queue_head_t wq; /* if all slots are busy */
atomic_t slot_count; /* number of in-use slots */ atomic_t slot_count; /* number of in-use slots */
unsigned long *bitmap; /* 0 = free slot */ unsigned long *bitmap; /* 0 = free slot */
struct page *page; struct page *pages[2];
/* /*
* We keep the vma's vm_start rather than a pointer to the vma * We keep the vma's vm_start rather than a pointer to the vma
...@@ -1142,7 +1142,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) ...@@ -1142,7 +1142,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
} }
ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE, ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page); VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, area->pages);
if (ret) if (ret)
goto fail; goto fail;
...@@ -1168,21 +1168,22 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) ...@@ -1168,21 +1168,22 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
if (!area->bitmap) if (!area->bitmap)
goto free_area; goto free_area;
area->page = alloc_page(GFP_HIGHUSER); area->pages[0] = alloc_page(GFP_HIGHUSER);
if (!area->page) if (!area->pages[0])
goto free_bitmap; goto free_bitmap;
area->pages[1] = NULL;
area->vaddr = vaddr; area->vaddr = vaddr;
init_waitqueue_head(&area->wq); init_waitqueue_head(&area->wq);
/* Reserve the 1st slot for get_trampoline_vaddr() */ /* Reserve the 1st slot for get_trampoline_vaddr() */
set_bit(0, area->bitmap); set_bit(0, area->bitmap);
atomic_set(&area->slot_count, 1); atomic_set(&area->slot_count, 1);
copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE); copy_to_page(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
if (!xol_add_vma(mm, area)) if (!xol_add_vma(mm, area))
return area; return area;
__free_page(area->page); __free_page(area->pages[0]);
free_bitmap: free_bitmap:
kfree(area->bitmap); kfree(area->bitmap);
free_area: free_area:
...@@ -1220,7 +1221,7 @@ void uprobe_clear_state(struct mm_struct *mm) ...@@ -1220,7 +1221,7 @@ void uprobe_clear_state(struct mm_struct *mm)
if (!area) if (!area)
return; return;
put_page(area->page); put_page(area->pages[0]);
kfree(area->bitmap); kfree(area->bitmap);
kfree(area); kfree(area);
} }
...@@ -1289,7 +1290,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe) ...@@ -1289,7 +1290,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
if (unlikely(!xol_vaddr)) if (unlikely(!xol_vaddr))
return 0; return 0;
arch_uprobe_copy_ixol(area->page, xol_vaddr, arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
&uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
return xol_vaddr; return xol_vaddr;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.