Commit 7d55718b authored by Ingo Molnar

Merge branches 'tracing/core', 'x86/urgent' and 'x86/ptrace' into tracing/hw-branch-tracing

This pulls together all the topic branches that are needed
for the DS/BTS/PEBS tracing work.
diff --git a/arch/x86/boot/tty.c b/arch/x86/boot/tty.c
@@ -74,7 +74,7 @@ static int kbd_pending(void)
 {
     u8 pending;
     asm volatile("int $0x16; setnz %0"
-             : "=rm" (pending)
+             : "=qm" (pending)
             : "a" (0x0100));
     return pending;
 }
...
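Note on the constraint fix: setnz can only write an 8-bit register or a byte in memory. "=rm" let GCC pick any general-purpose register, including %esi/%edi/%ebp, which have no byte subregister in 32-bit mode; "=qm" restricts the choice to %al/%bl/%cl/%dl (or memory), so the generated code always assembles. A minimal standalone sketch of the same constraint, not taken from the patch:

    /* Returns 1 iff v is non-zero, extracted via the ZF flag.
     * "=q" forces a byte-addressable register, a legal setnz target. */
    static inline int nonzero(unsigned int v)
    {
        unsigned char out;

        asm("test %1, %1; setnz %0" : "=q" (out) : "r" (v));
        return out;
    }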
diff --git a/arch/x86/include/asm/ds.h b/arch/x86/include/asm/ds.h
@@ -23,12 +23,13 @@
 #ifndef _ASM_X86_DS_H
 #define _ASM_X86_DS_H
 
-#ifdef CONFIG_X86_DS
-
 #include <linux/types.h>
 #include <linux/init.h>
 
+#ifdef CONFIG_X86_DS
+
 struct task_struct;
 
 /*
@@ -232,7 +233,8 @@ extern void ds_free(struct ds_context *context);
 #else /* CONFIG_X86_DS */
 
-#define ds_init_intel(config) do {} while (0)
+struct cpuinfo_x86;
+static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
 
 #endif /* CONFIG_X86_DS */
 #endif /* _ASM_X86_DS_H */
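Replacing the empty ds_init_intel() macro with a typed static inline keeps callers type-checked even when CONFIG_X86_DS is off, which is why the includes and a struct cpuinfo_x86 forward declaration now sit outside the #ifdef. A generic sketch of the pattern, with made-up CONFIG_FOO/foo_init() names:

    struct foo_dev;

    #ifdef CONFIG_FOO
    extern void foo_init(struct foo_dev *dev);
    #else
    /* typed no-op: a call with wrong arguments still fails to compile */
    static inline void foo_init(struct foo_dev *dev) {}
    #endif

The Makefile hunk below completes the compile-out: obj-$(CONFIG_X86_DS) expands to obj-y when the option is set and to the ignored obj- list otherwise, so ds.o drops out of the build entirely.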
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_X86_TRAMPOLINE)  += trampoline.o
 obj-y                         += process.o
 obj-y                         += i387.o xsave.o
 obj-y                         += ptrace.o
-obj-y                         += ds.o
+obj-$(CONFIG_X86_DS)          += ds.o
 obj-$(CONFIG_X86_32)          += tls.o
 obj-$(CONFIG_IA32_EMULATION)  += tls.o
 obj-y                         += step.o
...
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
@@ -307,12 +307,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
         set_cpu_cap(c, X86_FEATURE_P4);
     if (c->x86 == 6)
         set_cpu_cap(c, X86_FEATURE_P3);
+#endif
 
     if (cpu_has_bts)
         ptrace_bts_init_intel(c);
 
-#endif
-
     detect_extended_topology(c);
 
     if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
         /*
...
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
@@ -21,8 +21,6 @@
  */
 
-#ifdef CONFIG_X86_DS
-
 #include <asm/ds.h>
 
 #include <linux/errno.h>
@@ -211,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
 static inline struct ds_context *ds_get_context(struct task_struct *task)
 {
     struct ds_context *context;
+    unsigned long irq;
 
-    spin_lock(&ds_lock);
+    spin_lock_irqsave(&ds_lock, irq);
 
     context = (task ? task->thread.ds_ctx : this_system_context);
     if (context)
         context->count++;
 
-    spin_unlock(&ds_lock);
+    spin_unlock_irqrestore(&ds_lock, irq);
 
     return context;
 }
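The switch to the _irqsave variants is the usual interrupt-safety pattern: they disable local interrupts and remember their previous state, so a holder of ds_lock cannot be interrupted by a handler that then tries to take the same lock and deadlocks. The pattern in isolation, with demo names:

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_critical_section(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* critical section: local IRQs are off here */
        spin_unlock_irqrestore(&demo_lock, flags);
    }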
@@ -226,55 +225,46 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
 /*
  * Same as ds_get_context, but allocates the context and it's DS
  * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
  */
 static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
     struct ds_context **p_context =
         (task ? &task->thread.ds_ctx : &this_system_context);
     struct ds_context *context = *p_context;
+    unsigned long irq;
 
     if (!context) {
-        spin_unlock(&ds_lock);
-
         context = kzalloc(sizeof(*context), GFP_KERNEL);
-
-        if (!context) {
-            spin_lock(&ds_lock);
+        if (!context)
             return NULL;
-        }
 
         context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
         if (!context->ds) {
             kfree(context);
-            spin_lock(&ds_lock);
             return NULL;
         }
 
-        spin_lock(&ds_lock);
-        /*
-         * Check for race - another CPU could have allocated
-         * it meanwhile:
-         */
+        spin_lock_irqsave(&ds_lock, irq);
+
         if (*p_context) {
             kfree(context->ds);
             kfree(context);
-            return *p_context;
-        }
 
-        *p_context = context;
+            context = *p_context;
+        } else {
+            *p_context = context;
 
             context->this = p_context;
             context->task = task;
 
             if (task)
                 set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
 
             if (!task || (task == current))
-                wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
-
-        get_tracer(task);
+                wrmsrl(MSR_IA32_DS_AREA,
+                       (unsigned long)context->ds);
+        }
+        spin_unlock_irqrestore(&ds_lock, irq);
     }
 
     context->count++;
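The restructuring above fixes sleeping under a spinlock: kzalloc(GFP_KERNEL) may sleep, so the old code dropped and re-took ds_lock around every allocation. The new shape allocates optimistically before taking the lock, then rechecks the slot and frees the loser of the race. A condensed sketch of the idiom, with illustrative names:

    static struct ds_context *get_or_alloc(struct ds_context **slot,
                                           spinlock_t *lock)
    {
        struct ds_context *fresh = kzalloc(sizeof(*fresh), GFP_KERNEL);
        unsigned long flags;

        if (!fresh)
            return NULL;

        spin_lock_irqsave(lock, flags);
        if (*slot) {
            kfree(fresh);   /* lost the race: use the winner's */
            fresh = *slot;
        } else {
            *slot = fresh;
        }
        spin_unlock_irqrestore(lock, flags);
        return fresh;
    }

The wrmsr() to wrmsrl() change is a related fix: wrmsrl() takes a single 64-bit value instead of separate low/high halves, so a 64-bit context->ds address is written whole rather than truncated with a zero upper half.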
@@ -288,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
  */
 static inline void ds_put_context(struct ds_context *context)
 {
+    unsigned long irq;
+
     if (!context)
         return;
 
-    spin_lock(&ds_lock);
+    spin_lock_irqsave(&ds_lock, irq);
 
     if (--context->count)
         goto out;
@@ -313,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
     kfree(context->ds);
     kfree(context);
  out:
-    spin_unlock(&ds_lock);
+    spin_unlock_irqrestore(&ds_lock, irq);
 }
@@ -384,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
     struct ds_context *context;
     unsigned long buffer, adj;
     const unsigned long alignment = (1 << 3);
+    unsigned long irq;
     int error = 0;
 
     if (!ds_cfg.sizeof_ds)
@@ -398,26 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
         return -EOPNOTSUPP;
 
-    spin_lock(&ds_lock);
-
-    error = -ENOMEM;
     context = ds_alloc_context(task);
     if (!context)
-        goto out_unlock;
+        return -ENOMEM;
+
+    spin_lock_irqsave(&ds_lock, irq);
 
     error = -EPERM;
     if (!check_tracer(task))
         goto out_unlock;
+    get_tracer(task);
 
     error = -EALREADY;
     if (context->owner[qual] == current)
-        goto out_unlock;
+        goto out_put_tracer;
     error = -EPERM;
     if (context->owner[qual] != NULL)
-        goto out_unlock;
+        goto out_put_tracer;
     context->owner[qual] = current;
 
-    spin_unlock(&ds_lock);
+    spin_unlock_irqrestore(&ds_lock, irq);
 
     error = -ENOMEM;
@@ -465,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
  out_release:
     context->owner[qual] = NULL;
     ds_put_context(context);
+    put_tracer(task);
+    return error;
+
+ out_put_tracer:
+    spin_unlock_irqrestore(&ds_lock, irq);
+    ds_put_context(context);
+    put_tracer(task);
     return error;
 
  out_unlock:
-    spin_unlock(&ds_lock);
+    spin_unlock_irqrestore(&ds_lock, irq);
     ds_put_context(context);
     return error;
 }
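The reworked error paths follow the kernel's stacked goto-unwind convention: get_tracer() is now taken under the lock, so every failure after it jumps to out_put_tracer (which undoes it), while a failure before it still uses the plain out_unlock path. Keeping labels in reverse order of acquisition makes the pairing mechanical. A stripped-down sketch with hypothetical helpers:

    static int acquire_a(void) { return 0; }
    static void release_a(void) { }
    static int acquire_b(void) { return -EBUSY; }

    static int demo_request(void)
    {
        int error;

        error = acquire_a();
        if (error)
            goto out;

        error = acquire_b();    /* fails: undo a, then report */
        if (error)
            goto out_release_a;

        return 0;

     out_release_a:
        release_a();
     out:
        return error;
    }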
@@ -818,13 +819,21 @@ static const struct ds_configuration ds_cfg_var = {
     .sizeof_ds    = sizeof(long) * 12,
     .sizeof_field = sizeof(long),
     .sizeof_rec[ds_bts]  = sizeof(long) * 3,
+#ifdef __i386__
     .sizeof_rec[ds_pebs] = sizeof(long) * 10
+#else
+    .sizeof_rec[ds_pebs] = sizeof(long) * 18
+#endif
 };
 static const struct ds_configuration ds_cfg_64 = {
     .sizeof_ds    = 8 * 12,
     .sizeof_field = 8,
     .sizeof_rec[ds_bts]  = 8 * 3,
+#ifdef __i386__
     .sizeof_rec[ds_pebs] = 8 * 10
+#else
+    .sizeof_rec[ds_pebs] = 8 * 18
+#endif
 };
 
 static inline void
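The new #else arms size PEBS records for x86-64. A PEBS record holds one field per saved register: flags and IP plus eight general-purpose registers on 32-bit (10 fields), or sixteen on 64-bit (18 fields). A hypothetical layout sketch, one member per field; this is not the hardware-defined struct, just the arithmetic:

    struct pebs_record_sketch {
        unsigned long flags, ip;
        unsigned long ax, bx, cx, dx, si, di, bp, sp;
    #ifndef __i386__
        /* the extra x86-64 GPRs account for 18 vs. 10 fields */
        unsigned long r8, r9, r10, r11, r12, r13, r14, r15;
    #endif
    };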
@@ -838,17 +847,16 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
     switch (c->x86) {
     case 0x6:
         switch (c->x86_model) {
+        case 0 ... 0xC:
+            /* sorry, don't know about them */
+            break;
         case 0xD:
         case 0xE: /* Pentium M */
             ds_configure(&ds_cfg_var);
             break;
-        case 0xF: /* Core2 */
-        case 0x1C: /* Atom */
+        default: /* Core2, Atom, ... */
             ds_configure(&ds_cfg_64);
             break;
-        default:
-            /* sorry, don't know about them */
-            break;
         }
         break;
     case 0xF:
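Both model switches in this merge (here and in ptrace.c further down) invert their policy: the pre-Pentium M models are rejected explicitly with a GCC case-range label, and every newer family 6 model now falls through to the Core2-style configuration instead of being silently unsupported. The control flow in isolation, reusing this file's ds_cfg_64:

    static const struct ds_configuration *pick_cfg(unsigned int model)
    {
        switch (model) {
        case 0 ... 0xC:     /* GCC case-range extension */
            return NULL;    /* too old: no DS support */
        default:            /* Core2, Atom and newer */
            return &ds_cfg_64;
        }
    }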
@@ -878,4 +886,3 @@ void ds_free(struct ds_context *context)
     while (leftovers--)
         ds_put_context(context);
 }
-#endif /* CONFIG_X86_DS */
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
@@ -58,7 +58,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
     stts();
 }
 
-void __init init_thread_xstate(void)
+void __cpuinit init_thread_xstate(void)
 {
     if (!HAVE_HWFP) {
         xstate_size = sizeof(struct i387_soft_struct);
...
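init_thread_xstate() can also be reached when a CPU is brought online, not just at boot. __init code is discarded once boot finishes, so calling it from a hotplug path would jump into freed memory; __cpuinit keeps the function around whenever CPU hotplug is configured. The __ref on xsave_cntxt_init() further down plays a similar role, telling modpost that its reference into init code is intentional. The annotations side by side, on demo functions:

    #include <linux/init.h>

    static int __init demo_boot_only(void)      /* freed after boot */
    {
        return 0;
    }

    static int __cpuinit demo_cpu_online(void)  /* kept for hotplug */
    {
        return 0;
    }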
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
@@ -3608,27 +3608,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 
 int __init probe_nr_irqs(void)
 {
-    int idx;
-    int nr = 0;
-#ifndef CONFIG_XEN
-    int nr_min = 32;
-#else
-    int nr_min = NR_IRQS;
-#endif
-
-    for (idx = 0; idx < nr_ioapics; idx++)
-        nr += io_apic_get_redir_entries(idx) + 1;
-
-    /* double it for hotplug and msi and nmi */
-    nr <<= 1;
-    /* something wrong ? */
-    if (nr < nr_min)
-        nr = nr_min;
-    if (WARN_ON(nr > NR_IRQS))
-        nr = NR_IRQS;
-
-    return nr;
+    return NR_IRQS;
 }
 
 /* --------------------------------------------------------------------------
...
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
@@ -1567,7 +1567,7 @@ static int __init calgary_parse_options(char *p)
             ++p;
             if (*p == '\0')
                 break;
-            bridge = simple_strtol(p, &endp, 0);
+            bridge = simple_strtoul(p, &endp, 0);
             if (p == endp)
                 break;
...
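bridge holds an unsigned bus number, so the signed simple_strtol() was the wrong parser; simple_strtoul() matches the variable's range. Usage sketch with a demo wrapper:

    static unsigned long parse_busnum(const char *p)
    {
        char *endp;

        /* base 0 auto-detects 0x... hex and 0... octal prefixes */
        return simple_strtoul(p, &endp, 0);
    }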
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
@@ -929,17 +929,16 @@ void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
     switch (c->x86) {
     case 0x6:
         switch (c->x86_model) {
+        case 0 ... 0xC:
+            /* sorry, don't know about them */
+            break;
         case 0xD:
         case 0xE: /* Pentium M */
             bts_configure(&bts_cfg_pentium_m);
             break;
-        case 0xF: /* Core2 */
-        case 0x1C: /* Atom */
+        default: /* Core2, Atom, ... */
             bts_configure(&bts_cfg_core2);
             break;
-        default:
-            /* sorry, don't know about them */
-            break;
         }
         break;
     case 0xF:
...
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
@@ -310,7 +310,7 @@ static void __init setup_xstate_init(void)
 /*
  * Enable and initialize the xsave feature.
  */
-void __init xsave_cntxt_init(void)
+void __ref xsave_cntxt_init(void)
 {
     unsigned int eax, ebx, ecx, edx;
...
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
@@ -69,7 +69,7 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
     int i;
 
     if (!reset_value) {
-        reset_value = kmalloc(sizeof(unsigned) * num_counters,
+        reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
                       GFP_ATOMIC);
         if (!reset_value)
             return;
...
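sizeof(reset_value[0]) derives the element size from the array itself, so the allocation stays in sync if the element type of reset_value ever changes; a hard-coded sizeof(unsigned) silently breaks the moment it does. The idiom on a demo allocator:

    static unsigned long long *alloc_counters(int n)
    {
        unsigned long long *vals;

        /* sizeof(vals[0]) tracks the element type automatically */
        vals = kmalloc(sizeof(vals[0]) * n, GFP_ATOMIC);
        return vals;
    }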
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
@@ -661,12 +661,11 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
  * For 64-bit, we must skip the Xen hole in the middle of the address
  * space, just after the big x86-64 virtual hole.
  */
-static int xen_pgd_walk(struct mm_struct *mm,
-            int (*func)(struct mm_struct *mm, struct page *,
-                    enum pt_level),
-            unsigned long limit)
+static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
+              int (*func)(struct mm_struct *mm, struct page *,
+                      enum pt_level),
+              unsigned long limit)
 {
-    pgd_t *pgd = mm->pgd;
     int flush = 0;
     unsigned hole_low, hole_high;
     unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
@@ -753,6 +752,14 @@ static int xen_pgd_walk(struct mm_struct *mm,
     return flush;
 }
 
+static int xen_pgd_walk(struct mm_struct *mm,
+            int (*func)(struct mm_struct *mm, struct page *,
+                    enum pt_level),
+            unsigned long limit)
+{
+    return __xen_pgd_walk(mm, mm->pgd, func, limit);
+}
+
 /* If we're using split pte locks, then take the page's lock and
    return a pointer to it. Otherwise return NULL. */
 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
@@ -854,7 +861,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
     xen_mc_batch();
 
-    if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
+    if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
         /* re-enable interrupts for flushing */
         xen_mc_issue(0);
@@ -998,7 +1005,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
                         PT_PMD);
 #endif
 
-    xen_pgd_walk(mm, xen_unpin_page, USER_LIMIT);
+    __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
 
     xen_mc_issue(0);
 }
...