Commit 120c5475 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - support for the nr_cpus= command line argument (maxcpus= was previously
   changed to allow secondary CPUs to be hot-plugged later, so it no longer
   imposes a hard limit on the CPU count)

 - ARM PMU interrupt handling fix

 - fix potential TLB conflict in the hibernate code

 - improved handling of EL1 instruction aborts (better error reporting)

 - removal of useless jprobes code for stack saving/restoring

 - defconfig updates

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: defconfig: enable CONFIG_LOCALVERSION_AUTO
  arm64: defconfig: add options for virtualization and containers
  arm64: hibernate: handle allocation failures
  arm64: hibernate: avoid potential TLB conflict
  arm64: Handle el1 synchronous instruction aborts cleanly
  arm64: Remove stack duplicating code from jprobes
  drivers/perf: arm-pmu: Fix handling of SPI lacking "interrupt-affinity" property
  drivers/perf: arm-pmu: convert arm_pmu_mutex to spinlock
  arm64: Support hard limit of cpu count by nr_cpus
parents 329f4152 53fb45d3

--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_SECCOMP=y
 CONFIG_XEN=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -84,10 +88,37 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_BPF_JIT=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
@@ -103,6 +134,7 @@ CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
 CONFIG_TUN=y
+CONFIG_VETH=m
 CONFIG_VIRTIO_NET=y
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
@@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -22,7 +22,6 @@
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE			1
-#define MAX_STACK_SIZE			128
 
 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0
@@ -47,7 +46,6 @@ struct kprobe_ctlblk {
 	struct prev_kprobe prev_kprobe;
 	struct kprobe_step_ctx ss_ctx;
 	struct pt_regs jprobe_saved_regs;
-	char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -353,6 +353,8 @@ el1_sync:
 	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
 	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
 	b.eq	el1_da
+	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
+	b.eq	el1_ia
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el1_undef
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
 	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
 	b.ge	el1_dbg
 	b	el1_inv
+
+el1_ia:
+	/*
+	 * Fall through to the Data abort case
+	 */
 el1_da:
 	/*
 	 * Data abort handling
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	set_pte(pte, __pte(virt_to_phys((void *)dst) |
 			 pgprot_val(PAGE_KERNEL_EXEC)));
 
-	/* Load our new page tables */
-	asm volatile("msr	ttbr0_el1, %0;"
-		     "isb;"
-		     "tlbi	vmalle1is;"
-		     "dsb	ish;"
-		     "isb" : : "r"(virt_to_phys(pgd)));
+	/*
+	 * Load our new page tables. A strict BBM approach requires that we
+	 * ensure that TLBs are free of any entries that may overlap with the
+	 * global mappings we are about to install.
+	 *
+	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
+	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+	 * runtime services), while for a userspace-driven test_resume cycle it
+	 * points to userspace page tables (and we must point it at a zero page
+	 * ourselves). Elsewhere we only (un)install the idmap with preemption
+	 * disabled, so T0SZ should be as required regardless.
+	 */
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
 
@@ -393,6 +404,38 @@ int swsusp_arch_resume(void)
 	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
 					  void *, phys_addr_t, phys_addr_t);
 
+	/*
+	 * Restoring the memory image will overwrite the ttbr1 page tables.
+	 * Create a second copy of just the linear map, and use this when
+	 * restoring.
+	 */
+	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!tmp_pg_dir) {
+		pr_err("Failed to allocate memory for temporary page tables.");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+	if (rc)
+		goto out;
+
+	/*
+	 * Since we only copied the linear map, we need to find restore_pblist's
+	 * linear map address.
+	 */
+	lm_restore_pblist = LMADDR(restore_pblist);
+
+	/*
+	 * We need a zero page that is zero before & after resume in order to
+	 * to break before make on the ttbr1 page tables.
+	 */
+	zero_page = (void *)get_safe_page(GFP_ATOMIC);
+	if (!zero_page) {
+		pr_err("Failed to allocate zero page.");
+		rc = -ENOMEM;
+		goto out;
+	}
+
 	/*
 	 * Locate the exit code in the bottom-but-one page, so that *NULL
 	 * still has disastrous affects.
@@ -418,27 +461,6 @@ int swsusp_arch_resume(void)
 	 */
 	__flush_dcache_area(hibernate_exit, exit_size);
 
-	/*
-	 * Restoring the memory image will overwrite the ttbr1 page tables.
-	 * Create a second copy of just the linear map, and use this when
-	 * restoring.
-	 */
-	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-	if (!tmp_pg_dir) {
-		pr_err("Failed to allocate memory for temporary page tables.");
-		rc = -ENOMEM;
-		goto out;
-	}
-	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-	if (rc)
-		goto out;
-
-	/*
-	 * Since we only copied the linear map, we need to find restore_pblist's
-	 * linear map address.
-	 */
-	lm_restore_pblist = LMADDR(restore_pblist);
-
 	/*
 	 * KASLR will cause the el2 vectors to be in a different location in
 	 * the resumed kernel. Load hibernate's temporary copy into el2.
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
 		__hyp_set_vectors(el2_vectors);
 	}
 
-	/*
-	 * We need a zero page that is zero before & after resume in order to
-	 * to break before make on the ttbr1 page tables.
-	 */
-	zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
 	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
 		       resume_hdr.reenter_kernel, lm_restore_pblist,
 		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
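The key to the TLB-conflict fix above is that cpu_set_reserved_ttbr0() parks TTBR0_EL1 on a mapping-free table before the local flush, so the walker cannot allocate entries that would conflict with the page tables installed afterwards. As a rough sketch of that helper (paraphrasing the asm/mmu_context.h definition; the exact form is an assumption, not part of this diff):

static inline void cpu_set_reserved_ttbr0(void)
{
	/*
	 * Point TTBR0_EL1 at the empty zero page: it contains no valid
	 * entries, so no new TLB entries can be allocated through it.
	 */
	unsigned long ttbr = virt_to_phys(empty_zero_page);

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}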
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static inline unsigned long min_stack_size(unsigned long addr)
-{
-	unsigned long size;
-
-	if (on_irq_stack(addr, raw_smp_processor_id()))
-		size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
-	else
-		size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
-	return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	long stack_ptr = kernel_stack_pointer(regs);
 
 	kcb->jprobe_saved_regs = *regs;
 	/*
-	 * As Linus pointed out, gcc assumes that the callee
-	 * owns the argument space and could overwrite it, e.g.
-	 * tailcall optimization. So, to be absolutely safe
-	 * we also save and restore enough stack bytes to cover
-	 * the argument area.
+	 * Since we can't be sure where in the stack frame "stacked"
+	 * pass-by-value arguments are stored we just don't try to
+	 * duplicate any of the stack. Do not use jprobes on functions that
+	 * use more than 64 bytes (after padding each to an 8 byte boundary)
+	 * of arguments, or pass individual arguments larger than 16 bytes.
 	 */
-	kasan_disable_current();
-	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-	       min_stack_size(stack_ptr));
-	kasan_enable_current();
 
 	instruction_pointer_set(regs, (unsigned long) jp->entry);
 	preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	unpause_graph_tracing();
 	*regs = kcb->jprobe_saved_regs;
-	kasan_disable_current();
-	memcpy((void *)stack_addr, kcb->jprobes_stack,
-	       min_stack_size(stack_addr));
-	kasan_enable_current();
 	preempt_enable_no_resched();
 	return 1;
 }
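To make the new restriction concrete, here is a hypothetical jprobe that stays within it (an illustrative sketch, not part of this diff; the probed symbol and handler names are invented). On arm64 the first eight register-sized arguments are passed in x0-x7, so a handler like this never depends on stacked arguments:

#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Mirrors the probed function's signature: four register-sized
 * arguments, i.e. 32 bytes after padding, under the 64-byte limit. */
static long my_entry(unsigned long a, unsigned long b,
		     unsigned long c, unsigned long d)
{
	pr_info("jprobe args: %lx %lx %lx %lx\n", a, b, c, d);
	jprobe_return();	/* mandatory: restores the saved register state */
	return 0;		/* never reached */
}

static struct jprobe my_jprobe = {
	.entry		= my_entry,
	.kp.symbol_name	= "my_target_func",	/* placeholder symbol */
};

/* register_jprobe(&my_jprobe) / unregister_jprobe(&my_jprobe) as usual. */

A function taking, say, nine register-sized arguments, or one passing a structure larger than 16 bytes by value, spills onto the stack; since setjmp_pre_handler() no longer duplicates any of the stack, such functions must not be jprobed after this change.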
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -661,9 +661,9 @@ void __init smp_init_cpus(void)
 	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
 			      acpi_parse_gic_cpu_interface, 0);
 
-	if (cpu_count > NR_CPUS)
-		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-			cpu_count, NR_CPUS);
+	if (cpu_count > nr_cpu_ids)
+		pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+			cpu_count, nr_cpu_ids);
 
 	if (!bootcpu_valid) {
 		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +677,7 @@ void __init smp_init_cpus(void)
 	 * with entries in cpu_logical_map while initializing the cpus.
 	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
 	 */
-	for (i = 1; i < NR_CPUS; i++) {
+	for (i = 1; i < nr_cpu_ids; i++) {
 		if (cpu_logical_map(i) != INVALID_HWID) {
 			if (smp_cpu_setup(i))
 				cpu_logical_map(i) = INVALID_HWID;
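For context, the nr_cpus= limit that this patch honours is applied by generic code well before smp_init_cpus() runs. Roughly, a simplified sketch of the generic early_param handler (based on kernel/smp.c; treat the exact form as an assumption):

static int __init nrcpus(char *str)
{
	int nr_cpus;

	/* Clamp nr_cpu_ids, the runtime CPU limit, below NR_CPUS. */
	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}
early_param("nr_cpus", nrcpus);

Booting with nr_cpus=4 therefore caps nr_cpu_ids at 4, and the loop above never sets up logical CPUs beyond that hard limit, unlike maxcpus= which only defers bring-up.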
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
 	/*
 	 * Are we prepared to handle this kernel fault?
+	 * We are almost certainly not prepared to handle instruction faults.
 	 */
-	if (fixup_exception(regs))
+	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
 		return;
 
 	/*
@@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
 	unsigned int ec = ESR_ELx_EC(esr);
 	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+	       (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+		if (is_el1_instruction_abort(esr))
+			die("Attempting to execute userspace memory", regs, esr);
+
 		if (!search_exception_tables(regs->pc))
 			die("Accessing user space memory outside uaccess.h routines", regs, esr);
 	}
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
-static DEFINE_MUTEX(arm_pmu_mutex);
+static DEFINE_SPINLOCK(arm_pmu_lock);
 static LIST_HEAD(arm_pmu_list);
 
 /*
@@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 {
 	struct arm_pmu *pmu;
 
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_for_each_entry(pmu, &arm_pmu_list, entry) {
 		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
@@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 		if (pmu->reset)
 			pmu->reset(pmu);
 	}
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	return 0;
 }
 
@@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 
 out_unregister:
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -869,9 +869,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_pmu->hw_events);
 }
 
@@ -967,11 +967,12 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 	/* If we didn't manage to parse anything, try the interrupt affinity */
 	if (cpumask_weight(&pmu->supported_cpus) == 0) {
-		if (!using_spi) {
+		int irq = platform_get_irq(pdev, 0);
+
+		if (irq_is_percpu(irq)) {
 			/* If using PPIs, check the affinity of the partition */
-			int ret, irq;
+			int ret;
 
-			irq = platform_get_irq(pdev, 0);
 			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
 			if (ret) {
 				kfree(irqs);
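On the mutex-to-spinlock conversion: arm_perf_starting_cpu() runs as a hotplug "starting" callback, i.e. on the incoming CPU with interrupts disabled, where sleeping locks such as mutexes are not allowed. A sketch of the registration assumed here (the hotplug state and call shape follow the 4.8-era API; treat the exact form as an assumption, not part of this diff):

/* "Starting" callbacks execute on the hot-plugged CPU itself, in atomic
 * context, so arm_pmu_lock must be a non-sleeping lock. */
cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
			  "AP_PERF_ARM_STARTING",
			  arm_perf_starting_cpu, NULL);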