Commit 2abbcc09 authored by Oleg Nesterov, committed by Andrew Morton

uprobes: turn xol_area->pages[2] into xol_area->page

Now that xol_mapping has its own ->fault() method, we no longer need
xol_area->pages[1] == NULL; a single page is enough.
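
For context, a minimal sketch of the mechanism the message refers to; it is
based on the parent commit 6d27a31e rather than this diff, so treat the exact
field values as assumptions:

	/*
	 * Sketch (parent commit's special mapping, assumed): because
	 * ->fault() is set, special_mapping_fault() calls it and gets
	 * area->page back directly, and never walks a NULL-terminated
	 * ->pages[] array, which is what made pages[1] == NULL necessary
	 * before.
	 */
	static const struct vm_special_mapping xol_mapping = {
		.name  = "[uprobes]",	/* name shown in /proc/<pid>/maps */
		.fault = xol_fault,	/* see the hunk at line 1437 below */
	};

With the fault handler in place the two-entry array degenerates to a single
page pointer, which is what this patch does.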

Link: https://lkml.kernel.org/r/20240911131437.GC3448@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6d27a31e
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -99,7 +99,7 @@ struct xol_area {
 	atomic_t 		slot_count;	/* number of in-use slots */
 	unsigned long 		*bitmap;	/* 0 = free slot */
 
-	struct page 		*pages[2];
+	struct page 		*page;
 	/*
 	 * We keep the vma's vm_start rather than a pointer to the vma
 	 * itself.  The probed process or a naughty kernel module could make
@@ -1437,7 +1437,7 @@ static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
 {
 	struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;
 
-	vmf->page = area->pages[0];
+	vmf->page = area->page;
 	get_page(vmf->page);
 	return 0;
 }
@@ -1512,10 +1512,9 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 	if (!area->bitmap)
 		goto free_area;
 
-	area->pages[0] = alloc_page(GFP_HIGHUSER);
-	if (!area->pages[0])
+	area->page = alloc_page(GFP_HIGHUSER);
+	if (!area->page)
 		goto free_bitmap;
-	area->pages[1] = NULL;
 
 	area->vaddr = vaddr;
 	init_waitqueue_head(&area->wq);
@@ -1523,12 +1522,12 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 	set_bit(0, area->bitmap);
 	atomic_set(&area->slot_count, 1);
 	insns = arch_uprobe_trampoline(&insns_size);
-	arch_uprobe_copy_ixol(area->pages[0], 0, insns, insns_size);
+	arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);
 
 	if (!xol_add_vma(mm, area))
 		return area;
 
-	__free_page(area->pages[0]);
+	__free_page(area->page);
  free_bitmap:
 	kfree(area->bitmap);
  free_area:
@@ -1570,7 +1569,7 @@ void uprobe_clear_state(struct mm_struct *mm)
 	if (!area)
 		return;
 
-	put_page(area->pages[0]);
+	put_page(area->page);
 	kfree(area->bitmap);
 	kfree(area);
 }
@@ -1637,7 +1636,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 	if (unlikely(!xol_vaddr))
 		return 0;
 
-	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
+	arch_uprobe_copy_ixol(area->page, xol_vaddr,
 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
 
 	return xol_vaddr;