Commit 1cb81b14 authored by Markus Metzger, committed by Ingo Molnar

x86, bts, mm: clean up buffer allocation

The current mm interface is asymmetric: one function allocates a locked
buffer, while another only refunds the memory.

Change this to have two functions for accounting and refunding locked
memory, respectively; and do the actual buffer allocation in ptrace.

[ Impact: refactor BTS buffer allocation code ]
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090424095143.A30265@sedona.ch.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7e0bfad2
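
For orientation, here is a minimal sketch (not part of the commit) of the calling pattern the new interface enables: account the locked memory up front, do the allocation yourself, and refund the accounting if the allocation fails. It mirrors what alloc_bts_buffer() does in the diff below.

        err = account_locked_memory(current->mm, current->signal->rlim, size);
        if (err < 0)
                return err;                       /* would exceed the rlimits */

        buffer = kzalloc(size, GFP_KERNEL);
        if (!buffer) {
                refund_locked_memory(current->mm, size);  /* undo accounting */
                return -ENOMEM;
        }
        /* use buffer; teardown is kfree(buffer) + refund_locked_memory() */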
arch/x86/kernel/ptrace.c
@@ -617,17 +617,28 @@ struct bts_context {
         struct work_struct      work;
 };
 
-static inline void alloc_bts_buffer(struct bts_context *context,
-                                    unsigned int size)
+static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
 {
-        void *buffer;
+        void *buffer = NULL;
+        int err = -ENOMEM;
 
-        buffer = alloc_locked_buffer(size);
-        if (buffer) {
-                context->buffer = buffer;
-                context->size = size;
-                context->mm = get_task_mm(current);
-        }
+        err = account_locked_memory(current->mm, current->signal->rlim, size);
+        if (err < 0)
+                return err;
+
+        buffer = kzalloc(size, GFP_KERNEL);
+        if (!buffer)
+                goto out_refund;
+
+        context->buffer = buffer;
+        context->size = size;
+        context->mm = get_task_mm(current);
+
+        return 0;
+
+ out_refund:
+        refund_locked_memory(current->mm, size);
+        return err;
 }
 
 static inline void free_bts_buffer(struct bts_context *context)
@@ -638,7 +649,7 @@ static inline void free_bts_buffer(struct bts_context *context)
         kfree(context->buffer);
         context->buffer = NULL;
 
-        refund_locked_buffer_memory(context->mm, context->size);
+        refund_locked_memory(context->mm, context->size);
         context->size = 0;
 
         mmput(context->mm);
@@ -786,13 +797,15 @@ static int ptrace_bts_config(struct task_struct *child,
         context->tracer = NULL;
 
         if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
+                int err;
+
                 free_bts_buffer(context);
                 if (!cfg.size)
                         return 0;
 
-                alloc_bts_buffer(context, cfg.size);
-                if (!context->buffer)
-                        return -ENOMEM;
+                err = alloc_bts_buffer(context, cfg.size);
+                if (err < 0)
+                        return err;
         }
 
         if (cfg.flags & PTRACE_BTS_O_TRACE)
include/linux/mm.h
@@ -19,6 +19,7 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
+struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -1319,7 +1320,8 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern void *alloc_locked_buffer(size_t size);
-extern void refund_locked_buffer_memory(struct mm_struct *mm, size_t size);
+extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+                                 size_t size);
+extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
mm/mlock.c
@@ -629,38 +629,36 @@ void user_shm_unlock(size_t size, struct user_struct *user)
         free_uid(user);
 }
 
-void *alloc_locked_buffer(size_t size)
+int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+                          size_t size)
 {
-        unsigned long rlim, vm, pgsz;
-        void *buffer = NULL;
+        unsigned long lim, vm, pgsz;
+        int error = -ENOMEM;
 
         pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-        down_write(&current->mm->mmap_sem);
-
-        rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-        vm   = current->mm->total_vm + pgsz;
-        if (rlim < vm)
-                goto out;
+        down_write(&mm->mmap_sem);
 
-        rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-        vm   = current->mm->locked_vm + pgsz;
-        if (rlim < vm)
+        lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+        vm   = mm->total_vm + pgsz;
+        if (lim < vm)
                 goto out;
 
-        buffer = kzalloc(size, GFP_KERNEL);
-        if (!buffer)
+        lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+        vm   = mm->locked_vm + pgsz;
+        if (lim < vm)
                 goto out;
 
-        current->mm->total_vm  += pgsz;
-        current->mm->locked_vm += pgsz;
+        mm->total_vm  += pgsz;
+        mm->locked_vm += pgsz;
 
+        error = 0;
 out:
-        up_write(&current->mm->mmap_sem);
-        return buffer;
+        up_write(&mm->mmap_sem);
+        return error;
 }
 
-void refund_locked_buffer_memory(struct mm_struct *mm, size_t size)
+void refund_locked_memory(struct mm_struct *mm, size_t size)
 {
         unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
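
A note on the arithmetic above: account_locked_memory() charges whole pages, rounding the byte count up via PAGE_ALIGN(size) >> PAGE_SHIFT before comparing against the RLIMIT_AS and RLIMIT_MEMLOCK limits (both also expressed in pages). A standalone userspace sketch of that rounding, assuming 4 KiB pages (PAGE_SHIFT == 12):

        #include <stdio.h>

        #define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)
        #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

        int main(void)
        {
                unsigned long size = 10000;     /* hypothetical buffer size */

                /* 10000 bytes align up to 12288, i.e. 3 pages. */
                printf("%lu bytes -> %lu pages\n", size,
                       PAGE_ALIGN(size) >> PAGE_SHIFT);
                return 0;
        }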