Commit f48ffef1 authored by Linus Torvalds

Merge tag 'perf_urgent_for_v5.18_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Borislav Petkov:

 - Add Sapphire Rapids CPU support

 - Fix a perf vmalloc-ed buffer mapping error (PERF_USE_VMALLOC in use)

* tag 'perf_urgent_for_v5.18_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/cstate: Add SAPPHIRERAPIDS_X CPU support
  perf/core: Fix perf_mmap fail when CONFIG_PERF_USE_VMALLOC enabled
parents b877ca4d 528c9f1d
arch/x86/events/intel/cstate.c

@@ -51,7 +51,7 @@
  *                        perf code: 0x02
  *                        Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                         SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                         TGL,TNT,RKL,ADL,RPL
+ *                                         TGL,TNT,RKL,ADL,RPL,SPR
  *                        Scope: Core
  * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                        perf code: 0x03
@@ -62,7 +62,7 @@
  *                        perf code: 0x00
  *                        Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
  *                                         KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
- *                                         RPL
+ *                                         RPL,SPR
  *                        Scope: Package (physical package)
  * MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                        perf code: 0x01
@@ -74,7 +74,7 @@
  *                        perf code: 0x02
  *                        Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                         SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                         TGL,TNT,RKL,ADL,RPL
+ *                                         TGL,TNT,RKL,ADL,RPL,SPR
  *                        Scope: Package (physical package)
  * MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                        perf code: 0x03
@@ -675,6 +675,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&icx_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&icx_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&icx_cstates),

 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
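With the new match-table entry, Sapphire Rapids reuses the Ice Lake server (icx) C-state event set, so the core and package residency counters listed above become available through the usual cstate_core and cstate_pkg PMUs. As a hedged usage example (event names as exposed by the cstate driver on other server parts; what is actually present on a given system can be checked under /sys/bus/event_source/devices/):

    perf stat -e cstate_core/c6-residency/,cstate_pkg/c2-residency/ -a sleep 1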
kernel/events/core.c

@@ -6247,7 +6247,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 again:
 	mutex_lock(&event->mmap_mutex);
 	if (event->rb) {
-		if (event->rb->nr_pages != nr_pages) {
+		if (data_page_nr(event->rb) != nr_pages) {
 			ret = -EINVAL;
 			goto unlock;
 		}
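The one-line change above is the whole fix: with CONFIG_PERF_USE_VMALLOC the ring buffer is a single vmalloc area, rb->nr_pages effectively stays at 1 and page_order() carries the log2 of the real data-page count, so comparing the caller's nr_pages against rb->nr_pages rejects a valid second mmap() of the same buffer with -EINVAL. The data_page_nr() helper (moved into the shared header below) recovers the actual page count. A minimal user-space sketch of the arithmetic, assuming that vmalloc layout; the struct and helpers here only mirror the kernel names and are not the kernel code:

/* Sketch only: user-space illustration of the check, not kernel code. */
#include <stdio.h>

struct perf_buffer {
	int nr_pages;	/* 1 in the vmalloc case, real data-page count otherwise */
	int page_order;	/* log2(data pages) in the vmalloc case, else 0 */
};

static int page_order(const struct perf_buffer *rb)
{
	return rb->page_order;
}

/* Same arithmetic as the kernel's data_page_nr() helper shown below. */
static int data_page_nr(const struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

int main(void)
{
	int nr_pages = 8;	/* data pages requested by the second mmap() */
	struct perf_buffer rb = { .nr_pages = 1, .page_order = 3 };	/* 8 vmalloc-ed data pages */

	/* Old check: 1 != 8, so a correctly sized remap got -EINVAL. */
	printf("old check: %s\n", rb.nr_pages != nr_pages ? "-EINVAL" : "ok");

	/* New check: 1 << 3 == 8, so the remap is accepted. */
	printf("new check: %s\n", data_page_nr(&rb) != nr_pages ? "-EINVAL" : "ok");
	return 0;
}

In the non-vmalloc configuration page_order() is 0, so data_page_nr(rb) equals rb->nr_pages and the new check behaves exactly like the old one.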
kernel/events/internal.h

@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
 }
 #endif

+static inline int data_page_nr(struct perf_buffer *rb)
+{
+	return rb->nr_pages << page_order(rb);
+}
+
 static inline unsigned long perf_data_size(struct perf_buffer *rb)
 {
 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
kernel/events/ring_buffer.c

@@ -859,11 +859,6 @@ void rb_free(struct perf_buffer *rb)
 }

 #else
-static int data_page_nr(struct perf_buffer *rb)
-{
-	return rb->nr_pages << page_order(rb);
-}
-
 static struct page *
 __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {