Commit c4ff10ef authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Misc fixes: a BTS fix, a PT NMI handling fix, a PMU sysfs fix and an
  SRCU annotation"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Add SRCU annotation for pmus list walk
  perf/x86/intel: Fix PT PMI handling
  perf/x86/intel/bts: Fix the use of page_private()
  perf/x86: Fix potential out-of-bounds access
parents 6c1c79a5 9f0bff11
...@@ -376,7 +376,7 @@ int x86_add_exclusive(unsigned int what) ...@@ -376,7 +376,7 @@ int x86_add_exclusive(unsigned int what)
* LBR and BTS are still mutually exclusive. * LBR and BTS are still mutually exclusive.
*/ */
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return 0; goto out;
if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
mutex_lock(&pmc_reserve_mutex); mutex_lock(&pmc_reserve_mutex);
...@@ -388,6 +388,7 @@ int x86_add_exclusive(unsigned int what) ...@@ -388,6 +388,7 @@ int x86_add_exclusive(unsigned int what)
mutex_unlock(&pmc_reserve_mutex); mutex_unlock(&pmc_reserve_mutex);
} }
out:
atomic_inc(&active_events); atomic_inc(&active_events);
return 0; return 0;
...@@ -398,11 +399,15 @@ int x86_add_exclusive(unsigned int what) ...@@ -398,11 +399,15 @@ int x86_add_exclusive(unsigned int what)
void x86_del_exclusive(unsigned int what) void x86_del_exclusive(unsigned int what)
{ {
atomic_dec(&active_events);
/*
* See the comment in x86_add_exclusive().
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return; return;
atomic_dec(&x86_pmu.lbr_exclusive[what]); atomic_dec(&x86_pmu.lbr_exclusive[what]);
atomic_dec(&active_events);
} }
int x86_setup_perfctr(struct perf_event *event) int x86_setup_perfctr(struct perf_event *event)
...@@ -1642,9 +1647,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = { ...@@ -1642,9 +1647,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = {
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page) ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{ {
struct perf_pmu_events_attr *pmu_attr = \ struct perf_pmu_events_attr *pmu_attr =
container_of(attr, struct perf_pmu_events_attr, attr); container_of(attr, struct perf_pmu_events_attr, attr);
u64 config = x86_pmu.event_map(pmu_attr->id); u64 config = 0;
if (pmu_attr->id < x86_pmu.max_events)
config = x86_pmu.event_map(pmu_attr->id);
/* string trumps id */ /* string trumps id */
if (pmu_attr->event_str) if (pmu_attr->event_str)
...@@ -1713,6 +1721,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx) ...@@ -1713,6 +1721,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{ {
struct perf_pmu_events_attr *pmu_attr; struct perf_pmu_events_attr *pmu_attr;
if (idx >= x86_pmu.max_events)
return 0;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
/* str trumps id */ /* str trumps id */
return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
......
...@@ -63,9 +63,17 @@ struct bts_buffer { ...@@ -63,9 +63,17 @@ struct bts_buffer {
static struct pmu bts_pmu; static struct pmu bts_pmu;
/*
 * Number of pages covered by one AUX buffer page entry.
 *
 * For a high-order allocation the order is stashed in page_private();
 * a plain single page carries no private data and counts as one.
 */
static int buf_nr_pages(struct page *page)
{
	return PagePrivate(page) ? 1 << page_private(page) : 1;
}
static size_t buf_size(struct page *page) static size_t buf_size(struct page *page)
{ {
return 1 << (PAGE_SHIFT + page_private(page)); return buf_nr_pages(page) * PAGE_SIZE;
} }
static void * static void *
...@@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, ...@@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
/* count all the high order buffers */ /* count all the high order buffers */
for (pg = 0, nbuf = 0; pg < nr_pages;) { for (pg = 0, nbuf = 0; pg < nr_pages;) {
page = virt_to_page(pages[pg]); page = virt_to_page(pages[pg]);
if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1)) pg += buf_nr_pages(page);
return NULL;
pg += 1 << page_private(page);
nbuf++; nbuf++;
} }
...@@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, ...@@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
unsigned int __nr_pages; unsigned int __nr_pages;
page = virt_to_page(pages[pg]); page = virt_to_page(pages[pg]);
__nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1; __nr_pages = buf_nr_pages(page);
buf->buf[nbuf].page = page; buf->buf[nbuf].page = page;
buf->buf[nbuf].offset = offset; buf->buf[nbuf].offset = offset;
buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0); buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
......
...@@ -10523,7 +10523,7 @@ static struct pmu *perf_init_event(struct perf_event *event) ...@@ -10523,7 +10523,7 @@ static struct pmu *perf_init_event(struct perf_event *event)
goto unlock; goto unlock;
} }
list_for_each_entry_rcu(pmu, &pmus, entry) { list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
ret = perf_try_init_event(pmu, event); ret = perf_try_init_event(pmu, event);
if (!ret) if (!ret)
goto unlock; goto unlock;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment