Commit 27a513ca, authored Jun 12, 2015 by Russell King
Merge branch 'devel-stable' into for-next
Conflicts: arch/arm/kernel/perf_event_cpu.c
Parents: 970d96f9, bcc8fa83
Showing 15 changed files with 730 additions and 855 deletions.
arch/arm/common/mcpm_entry.c           +126  -155
arch/arm/include/asm/mcpm.h             +28   -45
arch/arm/include/asm/perf_event.h        +7    -0
arch/arm/include/asm/pmu.h               +5   -14
arch/arm/kernel/Makefile                 +3    -1
arch/arm/kernel/perf_event.c           +375   -33
arch/arm/kernel/perf_event_cpu.c         +0  -421
arch/arm/kernel/perf_event_v6.c         +36   -13
arch/arm/kernel/perf_event_v7.c         +62   -67
arch/arm/kernel/perf_event_xscale.c     +27    -5
arch/arm/mach-exynos/suspend.c           +1    -7
arch/arm/mach-hisi/platmcpm.c           +47   -86
drivers/cpuidle/cpuidle-big_little.c     +1    -7
include/linux/perf_event.h               +5    -0
kernel/events/core.c                     +7    -1
arch/arm/common/mcpm_entry.c

@@ -20,6 +20,126 @@
 #include <asm/cputype.h>
 #include <asm/suspend.h>
 
+/*
+ * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
+ * For a comprehensive description of the main algorithm used here, please
+ * see Documentation/arm/cluster-pm-race-avoidance.txt.
+ */
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ *    This must be called at the point of committing to teardown of a CPU.
+ *    The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ *    cluster can be torn down without disrupting this CPU.
+ *    To avoid deadlocks, this must be called before a CPU is powered down.
+ *    The CPU cache (SCTRL.C bit) is expected to be off.
+ *    However L2 cache might or might not be active.
+ */
+static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ *     CLUSTER_UP: no destructive teardown was done and the cluster has been
+ *         restored to the previous state (CPU cache still active); or
+ *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ *         (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cluster = state;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete.  CPU cache expected to be active.
+ *
+ * Returns:
+ *     false: the critical section was not entered because an inbound CPU was
+ *         observed, or the cluster is already being set up;
+ *     true: the critical section was entered: it is now safe to tear down the
+ *         cluster.
+ */
+static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+	unsigned int i;
+	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+	/* Warn inbound CPUs that the cluster is being torn down: */
+	c->cluster = CLUSTER_GOING_DOWN;
+	sync_cache_w(&c->cluster);
+
+	/* Back out if the inbound cluster is already in the critical region: */
+	sync_cache_r(&c->inbound);
+	if (c->inbound == INBOUND_COMING_UP)
+		goto abort;
+
+	/*
+	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+	 * teardown is complete on each CPU before tearing down the cluster.
+	 *
+	 * If any CPU has been woken up again from the DOWN state, then we
+	 * shouldn't be taking the cluster down at all: abort in that case.
+	 */
+	sync_cache_r(&c->cpus);
+	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+		int cpustate;
+
+		if (i == cpu)
+			continue;
+
+		while (1) {
+			cpustate = c->cpus[i].cpu;
+			if (cpustate != CPU_GOING_DOWN)
+				break;
+
+			wfe();
+			sync_cache_r(&c->cpus[i].cpu);
+		}
+
+		switch (cpustate) {
+		case CPU_DOWN:
+			continue;
+
+		default:
+			goto abort;
+		}
+	}
+
+	return true;
+
+abort:
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+	return false;
+}
+
+static int __mcpm_cluster_state(unsigned int cluster)
+{
+	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+	return mcpm_sync.clusters[cluster].cluster;
+}
+
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)

@@ -78,16 +198,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 	bool cpu_is_down, cluster_is_down;
 	int ret = 0;
 
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	if (!platform_ops)
 		return -EUNATCH; /* try not to shadow power_up errors */
 	might_sleep();
 
-	/* backward compatibility callback */
-	if (platform_ops->power_up)
-		return platform_ops->power_up(cpu, cluster);
-
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
 	/*
 	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
 	 * variant exists, we need to disable IRQs manually here.

@@ -128,29 +243,17 @@ void mcpm_cpu_power_down(void)
 	bool cpu_going_down, last_man;
 	phys_reset_t phys_reset;
 
+	mpidr = read_cpuid_mpidr();
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	if (WARN_ON_ONCE(!platform_ops))
 		return;
 	BUG_ON(!irqs_disabled());
 
-	/*
-	 * Do this before calling into the power_down method,
-	 * as it might not always be safe to do afterwards.
-	 */
 	setup_mm_for_reboot();
 
-	/* backward compatibility callback */
-	if (platform_ops->power_down) {
-		platform_ops->power_down();
-		goto not_dead;
-	}
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
 	__mcpm_cpu_going_down(cpu, cluster);
 	arch_spin_lock(&mcpm_lock);
 	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

@@ -187,7 +290,6 @@ void mcpm_cpu_power_down(void)
 	if (cpu_going_down)
 		wfi();
 
-not_dead:
 	/*
 	 * It is possible for a power_up request to happen concurrently
 	 * with a power_down request for the same CPU. In this case the

@@ -219,22 +321,11 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 	return ret;
 }
 
-void mcpm_cpu_suspend(u64 expected_residency)
+void mcpm_cpu_suspend(void)
 {
 	if (WARN_ON_ONCE(!platform_ops))
 		return;
 
-	/* backward compatibility callback */
-	if (platform_ops->suspend) {
-		phys_reset_t phys_reset;
-		BUG_ON(!irqs_disabled());
-		setup_mm_for_reboot();
-		platform_ops->suspend(expected_residency);
-		phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
-		phys_reset(virt_to_phys(mcpm_entry_point));
-		BUG();
-	}
-
 	/* Some platforms might have to enable special resume modes, etc. */
 	if (platform_ops->cpu_suspend_prepare) {
 		unsigned int mpidr = read_cpuid_mpidr();

@@ -256,12 +347,6 @@ int mcpm_cpu_powered_up(void)
 	if (!platform_ops)
 		return -EUNATCH;
 
-	/* backward compatibility callback */
-	if (platform_ops->powered_up) {
-		platform_ops->powered_up();
-		return 0;
-	}
-
 	mpidr = read_cpuid_mpidr();
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

@@ -334,120 +419,6 @@ int __init mcpm_loopback(void (*cache_disable)(void))
 #endif
 
-struct sync_struct mcpm_sync;
-
-	(... the rest of the removed block repeats, without the static keyword, the
-	definitions of __mcpm_cpu_going_down(), __mcpm_cpu_down(),
-	__mcpm_outbound_leave_critical(), __mcpm_outbound_enter_critical() and
-	__mcpm_cluster_state() that now appear at the top of this file ...)
-
 extern unsigned long mcpm_power_up_setup_phys;
 
 int __init mcpm_sync_init(
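The helpers above are now internal to the MCPM core and are driven from mcpm_cpu_power_down() and mcpm_cpu_suspend(). As a reading aid, here is a minimal, hypothetical sketch of the last-man teardown pattern they implement; it is not code from this commit, the example_cpu_teardown() name is a placeholder, and the elided steps stand in for the platform_ops callbacks the real code invokes.

/* Hypothetical illustration of the last-man protocol used by the MCPM core. */
static void example_cpu_teardown(unsigned int cpu, unsigned int cluster)
{
	__mcpm_cpu_going_down(cpu, cluster);	/* commit to tearing this CPU down */

	if (__mcpm_outbound_enter_critical(cpu, cluster)) {
		/* Last man: all other CPUs are DOWN and no inbound CPU was seen. */
		/* ... flush caches and tear the whole cluster down ... */
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/* Another CPU is coming up or still going down: keep the cluster alive. */
		/* ... per-CPU teardown only ... */
	}

	__mcpm_cpu_down(cpu, cluster);		/* now safe to power this CPU off */
}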
arch/arm/include/asm/mcpm.h

@@ -137,17 +137,12 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
 /**
  * mcpm_cpu_suspend - bring the calling CPU in a suspended state
  *
- * @expected_residency: duration in microseconds the CPU is expected
- *	to remain suspended, or 0 if unknown/infinity.
- *
- * The calling CPU is suspended.  The expected residency argument is used
- * as a hint by the platform specific backend to implement the appropriate
- * sleep state level according to the knowledge it has on wake-up latency
- * for the given hardware.
+ * The calling CPU is suspended.  This is similar to mcpm_cpu_power_down()
+ * except for possible extra platform specific configuration steps to allow
+ * an asynchronous wake-up e.g. with a pending interrupt.
  *
  * If this CPU is found to be the "last man standing" in the cluster
- * then the cluster may be prepared for power-down too, if the expected
- * residency makes it worthwhile.
+ * then the cluster may be prepared for power-down too.
  *
  * This must be called with interrupts disabled.
  *

@@ -157,7 +152,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
  * This will return if mcpm_platform_register() has not been called
  * previously in which case the caller should take appropriate action.
  */
-void mcpm_cpu_suspend(u64 expected_residency);
+void mcpm_cpu_suspend(void);
 
 /**
  * mcpm_cpu_powered_up - housekeeping workafter a CPU has been powered up

@@ -234,12 +229,6 @@ struct mcpm_platform_ops {
 	void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
 	void (*cluster_is_up)(unsigned int cluster);
 	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
-
-	/* deprecated callbacks */
-	int (*power_up)(unsigned int cpu, unsigned int cluster);
-	void (*power_down)(void);
-	void (*suspend)(u64);
-	void (*powered_up)(void);
 };
 
 /**

@@ -251,35 +240,6 @@ struct mcpm_platform_ops {
  */
 int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
 
-/* Synchronisation structures for coordinating safe cluster setup/teardown: */
-
-/*
- * When modifying this structure, make sure you update the MCPM_SYNC_ defines
- * to match.
- */
-struct mcpm_sync_struct {
-	/* individual CPU states */
-	struct {
-		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
-	} cpus[MAX_CPUS_PER_CLUSTER];
-
-	/* cluster state */
-	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
-
-	/* inbound-side state */
-	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
-};
-
-struct sync_struct {
-	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
-};
-
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
-bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
-int __mcpm_cluster_state(unsigned int cluster);
-
 /**
  * mcpm_sync_init - Initialize the cluster synchronization support
  *

@@ -318,6 +278,29 @@ int __init mcpm_loopback(void (*cache_disable)(void));
 void __init mcpm_smp_set_ops(void);
 
+/*
+ * Synchronisation structures for coordinating safe cluster setup/teardown.
+ * This is private to the MCPM core code and shared between C and assembly.
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+	/* individual CPU states */
+	struct {
+		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+	} cpus[MAX_CPUS_PER_CLUSTER];
+
+	/* cluster state */
+	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+	/* inbound-side state */
+	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
 #else
 
 /*
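The caller-visible effect of the header change above is the dropped expected_residency argument. A minimal before/after sketch of a call site (illustrative only; the real call-site updates in this merge live in files such as drivers/cpuidle/cpuidle-big_little.c and arch/arm/mach-exynos/suspend.c, which are listed in the change summary but not shown on this page):

/* before this merge: a residency hint was passed (0 = unknown/infinity) */
mcpm_cpu_suspend(0);

/* after this merge: no argument; the platform backend decides via its own callbacks */
mcpm_cpu_suspend();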
arch/arm/include/asm/perf_event.h

@@ -19,4 +19,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs)	perf_misc_flags(regs)
 #endif
 
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+	(regs)->ARM_pc = (__ip); \
+	(regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
+	(regs)->ARM_sp = current_stack_pointer; \
+	(regs)->ARM_cpsr = SVC_MODE; \
+}
+
 #endif /* __ARM_PERF_EVENT_H__ */
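perf_arch_fetch_caller_regs() is the arch hook perf uses to capture a pt_regs snapshot at the current call site when no interrupt registers are available (software events, for instance). A rough sketch of how the generic side is expected to consume it follows; this wrapper is an assumption about the core code, not part of this diff:

/* Sketch: generic-side consumer of the ARM macro added above (assumed shape). */
static inline void example_fetch_caller_regs(struct pt_regs *regs)
{
	/* On ARM this expands to the macro above, filling ARM_pc, ARM_fp,
	 * ARM_sp and ARM_cpsr from the current call site. */
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}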
arch/arm/include/asm/pmu.h

@@ -24,22 +24,10 @@
  * interrupt and passed the address of the low level handler,
  * and can be used to implement any platform specific handling
  * before or after calling it.
- * @runtime_resume: an optional handler which will be called by the
- *	runtime PM framework following a call to pm_runtime_get().
- *	Note that if pm_runtime_get() is called more than once in
- *	succession this handler will only be called once.
- * @runtime_suspend: an optional handler which will be called by the
- *	runtime PM framework following a call to pm_runtime_put().
- *	Note that if pm_runtime_get() is called more than once in
- *	succession this handler will only be called following the
- *	final call to pm_runtime_put() that actually disables the
- *	hardware.
  */
 struct arm_pmu_platdata {
 	irqreturn_t (*handle_irq)(int irq, void *dev,
 				  irq_handler_t pmu_handler);
-	int (*runtime_resume)(struct device *dev);
-	int (*runtime_suspend)(struct device *dev);
 };
 
 #ifdef CONFIG_HW_PERF_EVENTS

@@ -92,6 +80,7 @@ struct pmu_hw_events {
 struct arm_pmu {
 	struct pmu	pmu;
 	cpumask_t	active_irqs;
+	cpumask_t	supported_cpus;
 	int		*irq_affinity;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);

@@ -122,8 +111,6 @@ struct arm_pmu {
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 
-extern const struct dev_pm_ops armpmu_dev_pm_ops;
-
 int armpmu_register(struct arm_pmu *armpmu, int type);
 
 u64 armpmu_event_update(struct perf_event *event);

@@ -158,6 +145,10 @@ struct pmu_probe_info {
 #define XSCALE_PMU_PROBE(_version, _fn) \
 	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
 
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table);
+
 #endif /* CONFIG_HW_PERF_EVENTS */
 
 #endif /* __ARM_PMU_H__ */
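arm_pmu_device_probe(), declared above, becomes the common probe entry point: a CPU-variant perf driver supplies its own devicetree match table and CPUID probe table and forwards its platform device to it. The ARMv6 and ARMv7 files later in this diff follow exactly this shape; the condensed sketch below uses hypothetical example_* names and an EXAMPLE_CPU_PART placeholder rather than any real variant.

/* Hypothetical minimal PMU variant driver built on arm_pmu_device_probe(). */
static int example_pmu_init(struct arm_pmu *pmu);	/* placeholder per-variant init */

static const struct of_device_id example_pmu_of_device_ids[] = {
	{ .compatible = "vendor,example-pmu", .data = example_pmu_init },
	{ /* sentinel */ }
};

static const struct pmu_probe_info example_pmu_probe_table[] = {
	ARM_PMU_PROBE(EXAMPLE_CPU_PART, example_pmu_init),	/* placeholder part id */
	{ /* sentinel */ }
};

static int example_pmu_device_probe(struct platform_device *pdev)
{
	/* hand DT table and CPUID table to the shared core */
	return arm_pmu_device_probe(pdev, example_pmu_of_device_ids,
				    example_pmu_probe_table);
}

static struct platform_driver example_pmu_driver = {
	.driver = {
		.name		= "example-pmu",
		.of_match_table	= example_pmu_of_device_ids,
	},
	.probe = example_pmu_device_probe,
};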
arch/arm/kernel/Makefile

@@ -71,7 +71,9 @@ obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
 obj-$(CONFIG_CPU_PJ4B)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o \
+				   perf_event_xscale.o perf_event_v6.o \
+				   perf_event_v7.o
 CFLAGS_pj4-cp0.o		:= -marm
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
arch/arm/kernel/perf_event.c

@@ -11,12 +11,18 @@
  */
 #define pr_fmt(fmt) "hw perfevents: " fmt
 
+#include <linux/bitmap.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/irq.h>
 #include <linux/irqdesc.h>
 
+#include <asm/cputype.h>
 #include <asm/irq_regs.h>
 #include <asm/pmu.h>

@@ -229,6 +235,10 @@ armpmu_add(struct perf_event *event, int flags)
 	int idx;
 	int err = 0;
 
+	/* An event following a process won't be stopped earlier */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return -ENOENT;
+
 	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */

@@ -344,20 +354,12 @@ static void
 armpmu_release_hardware(struct arm_pmu *armpmu)
 {
 	armpmu->free_irq(armpmu);
-	pm_runtime_put_sync(&armpmu->plat_device->dev);
 }
 
 static int
 armpmu_reserve_hardware(struct arm_pmu *armpmu)
 {
-	int err;
-	struct platform_device *pmu_device = armpmu->plat_device;
-
-	if (!pmu_device)
-		return -ENODEV;
-
-	pm_runtime_get_sync(&pmu_device->dev);
-	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
+	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
 	if (err) {
 		armpmu_release_hardware(armpmu);
 		return err;

@@ -454,6 +456,17 @@ static int armpmu_event_init(struct perf_event *event)
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
+	/*
+	 * Reject CPU-affine events for CPUs that are of a different class to
+	 * that which this PMU handles. Process-following events (where
+	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
+	 * reject them later (in armpmu_add) if they're scheduled on a
+	 * different class of CPU.
+	 */
+	if (event->cpu != -1 &&
+		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+		return -ENOENT;
+
 	/* does not support taken branch sampling */
 	if (has_branch_stack(event))
 		return -EOPNOTSUPP;

@@ -489,6 +502,10 @@ static void armpmu_enable(struct pmu *pmu)
 	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
 	if (enabled)
 		armpmu->start(armpmu);
 }

@@ -496,34 +513,25 @@ static void armpmu_enable(struct pmu *pmu)
 static void armpmu_disable(struct pmu *pmu)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	armpmu->stop(armpmu);
-}
 
-#ifdef CONFIG_PM
-static int armpmu_runtime_resume(struct device *dev)
-{
-	struct arm_pmu_platdata *plat = dev_get_platdata(dev);
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
 
-	if (plat && plat->runtime_resume)
-		return plat->runtime_resume(dev);
-
-	return 0;
+	armpmu->stop(armpmu);
 }
 
-static int armpmu_runtime_suspend(struct device *dev)
+/*
+ * In heterogeneous systems, events are specific to a particular
+ * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
+ * the same microarchitecture.
+ */
+static int armpmu_filter_match(struct perf_event *event)
 {
-	struct arm_pmu_platdata *plat = dev_get_platdata(dev);
-
-	if (plat && plat->runtime_suspend)
-		return plat->runtime_suspend(dev);
-
-	return 0;
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	unsigned int cpu = smp_processor_id();
+	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
 }
 
-#endif
-
-const struct dev_pm_ops armpmu_dev_pm_ops = {
-	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
-};
-
 static void armpmu_init(struct arm_pmu *armpmu)
 {

@@ -539,15 +547,349 @@ static void armpmu_init(struct arm_pmu *armpmu)
 		.start		= armpmu_start,
 		.stop		= armpmu_stop,
 		.read		= armpmu_read,
+		.filter_match	= armpmu_filter_match,
 	};
 }
 
 int armpmu_register(struct arm_pmu *armpmu, int type)
 {
 	armpmu_init(armpmu);
-	pm_runtime_enable(&armpmu->plat_device->dev);
 	pr_info("enabled with %s PMU driver, %d counters available\n",
 			armpmu->name, armpmu->num_events);
 	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
 }
+
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *__oprofile_cpu_pmu;
+
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+const char *perf_pmu_name(void)
+{
+	if (!__oprofile_cpu_pmu)
+		return NULL;
+
+	return __oprofile_cpu_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	int max_events = 0;
+
+	if (__oprofile_cpu_pmu != NULL)
+		max_events = __oprofile_cpu_pmu->num_events;
+
+	return max_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	disable_percpu_irq(irq);
+}
+
+static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+		free_percpu_irq(irq, &hw_events->percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+		}
+	}
+}
+
+static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+{
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+		return 0;
+	}
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu",
+					 &hw_events->percpu_pmu);
+		if (err) {
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				irq);
+			return err;
+		}
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, continue without this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					irq, cpu);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+					irq);
+				return err;
+			}
+
+			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+			  void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return NOTIFY_DONE;
+
+	if (pmu->reset)
+		pmu->reset(pmu);
+	else
+		return NOTIFY_DONE;
+
+	return NOTIFY_OK;
+}
+
+static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	int err;
+	int cpu;
+	struct pmu_hw_events __percpu *cpu_hw_events;
+
+	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
+	if (!cpu_hw_events)
+		return -ENOMEM;
+
+	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+	if (err)
+		goto out_hw_events;
+
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+		events->percpu_pmu = cpu_pmu;
+	}
+
+	cpu_pmu->hw_events	= cpu_hw_events;
+	cpu_pmu->request_irq	= cpu_pmu_request_irq;
+	cpu_pmu->free_irq	= cpu_pmu_free_irq;
+
+	/* Ensure the PMU has sane values out of reset. */
+	if (cpu_pmu->reset)
+		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
+			 cpu_pmu, 1);
+
+	/* If no interrupts available, set the corresponding capability flag */
+	if (!platform_get_irq(cpu_pmu->plat_device, 0))
+		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+	return 0;
+
+out_hw_events:
+	free_percpu(cpu_hw_events);
+	return err;
+}
+
+static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
+{
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	free_percpu(cpu_pmu->hw_events);
+}
+
+/*
+ * CPU PMU identification and probing.
+ */
+static int probe_current_pmu(struct arm_pmu *pmu,
+			     const struct pmu_probe_info *info)
+{
+	int cpu = get_cpu();
+	unsigned int cpuid = read_cpuid_id();
+	int ret = -ENODEV;
+
+	pr_info("probing PMU on CPU %d\n", cpu);
+
+	for (; info->init != NULL; info++) {
+		if ((cpuid & info->mask) != info->cpuid)
+			continue;
+		ret = info->init(pmu);
+		break;
+	}
+
+	put_cpu();
+	return ret;
+}
+
+static int of_pmu_irq_cfg(struct arm_pmu *pmu)
+{
+	int i, irq, *irqs;
+	struct platform_device *pdev = pmu->plat_device;
+
+	/* Don't bother with PPIs; they're already affine */
+	irq = platform_get_irq(pdev, 0);
+	if (irq >= 0 && irq_is_percpu(irq))
+		return 0;
+
+	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < pdev->num_resources; ++i) {
+		struct device_node *dn;
+		int cpu;
+
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+				      i);
+		if (!dn) {
+			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+				of_node_full_name(pdev->dev.of_node), i);
+			break;
+		}
+
+		for_each_possible_cpu(cpu)
+			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+				break;
+
+		of_node_put(dn);
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			break;
+		}
+
+		irqs[i] = cpu;
+		cpumask_set_cpu(cpu, &pmu->supported_cpus);
+	}
+
+	if (i == pdev->num_resources) {
+		pmu->irq_affinity = irqs;
+	} else {
+		kfree(irqs);
+		cpumask_setall(&pmu->supported_cpus);
+	}
+
+	return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table)
+{
+	const struct of_device_id *of_id;
+	const int (*init_fn)(struct arm_pmu *);
+	struct device_node *node = pdev->dev.of_node;
+	struct arm_pmu *pmu;
+	int ret = -ENODEV;
+
+	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+	if (!pmu) {
+		pr_info("failed to allocate PMU device!\n");
+		return -ENOMEM;
+	}
+
+	if (!__oprofile_cpu_pmu)
+		__oprofile_cpu_pmu = pmu;
+
+	pmu->plat_device = pdev;
+
+	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
+		init_fn = of_id->data;
+
+		ret = of_pmu_irq_cfg(pmu);
+		if (!ret)
+			ret = init_fn(pmu);
+	} else {
+		ret = probe_current_pmu(pmu, probe_table);
+		cpumask_setall(&pmu->supported_cpus);
+	}
+
+	if (ret) {
+		pr_info("failed to probe PMU!\n");
+		goto out_free;
+	}
+
+	ret = cpu_pmu_init(pmu);
+	if (ret)
+		goto out_free;
+
+	ret = armpmu_register(pmu, -1);
+	if (ret)
+		goto out_destroy;
+
+	return 0;
+
+out_destroy:
+	cpu_pmu_destroy(pmu);
+out_free:
+	pr_info("failed to register PMU devices!\n");
+	kfree(pmu);
+	return ret;
+}
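The supported_cpus mask and armpmu_filter_match() above only matter because the perf core asks the PMU whether an event may run on the current CPU; the companion changes in include/linux/perf_event.h and kernel/events/core.c listed in this merge add that hook. The sketch below is an assumption about the shape of the core-side check, based on the callback name, not code shown on this page:

/* Sketch of the core-side use of pmu->filter_match (assumed shape). */
static bool pmu_filter_match(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;

	/* PMUs without a filter callback match every CPU. */
	return pmu->filter_match ? pmu->filter_match(event) : true;
}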
arch/arm/kernel/perf_event_cpu.c
deleted file, mode 100644 → 0

-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- * Copyright (C) 2012 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-#define pr_fmt(fmt) "CPU PMU: " fmt
-
-#include <linux/bitmap.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/irq.h>
-#include <linux/irqdesc.h>
-
-#include <asm/cputype.h>
-#include <asm/irq_regs.h>
-#include <asm/pmu.h>
-
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
-
-/*
- * Despite the names, these two functions are CPU-specific and are used
- * by the OProfile/perf code.
- */
-const char *perf_pmu_name(void)
-{
-	if (!cpu_pmu)
-		return NULL;
-
-	return cpu_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-	int max_events = 0;
-
-	if (cpu_pmu != NULL)
-		max_events = cpu_pmu->num_events;
-
-	return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
-/* Include the PMU-specific implementations. */
-#include "perf_event_xscale.c"
-#include "perf_event_v6.c"
-#include "perf_event_v7.c"
-
-	(... the deleted file's middle section contained cpu_pmu_enable_percpu_irq(),
-	cpu_pmu_disable_percpu_irq(), cpu_pmu_free_irq(), cpu_pmu_request_irq(),
-	cpu_pmu_notify(), cpu_pmu_init(), cpu_pmu_destroy(), probe_current_pmu() and
-	of_pmu_irq_cfg(); their bodies match the versions added to
-	arch/arm/kernel/perf_event.c above, except that they used the file-local
-	cpu_pmu pointer, had no supported_cpus handling, reset the PMU with
-	on_each_cpu(), walked the local pmu_probe_table, and of_pmu_irq_cfg() took
-	the platform_device rather than the arm_pmu ...)
-
-static const struct of_device_id cpu_pmu_of_device_ids[] = {
-	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
-	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
-	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
-	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
-	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
-	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
-	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
-	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
-	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
-	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
-	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
-	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
-	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
-	{},
-};
-
-static struct platform_device_id cpu_pmu_plat_device_ids[] = {
-	{.name = "arm-pmu"},
-	{.name = "armv6-pmu"},
-	{.name = "armv7-pmu"},
-	{.name = "xscale-pmu"},
-	{},
-};
-
-static const struct pmu_probe_info pmu_probe_table[] = {
-	ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
-	ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
-	ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
-	ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
-	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
-	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
-	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
-	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
-	{ /* sentinel value */ }
-};
-
-static int cpu_pmu_device_probe(struct platform_device *pdev)
-{
-	const struct of_device_id *of_id;
-	const int (*init_fn)(struct arm_pmu *);
-	struct device_node *node = pdev->dev.of_node;
-	struct arm_pmu *pmu;
-	int ret = -ENODEV;
-
-	if (cpu_pmu) {
-		pr_info("attempt to register multiple PMU devices!\n");
-		return -ENOSPC;
-	}
-
-	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
-	if (!pmu) {
-		pr_info("failed to allocate PMU device!\n");
-		return -ENOMEM;
-	}
-
-	cpu_pmu = pmu;
-	cpu_pmu->plat_device = pdev;
-
-	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
-		init_fn = of_id->data;
-
-		ret = of_pmu_irq_cfg(pdev);
-		if (!ret)
-			ret = init_fn(pmu);
-	} else {
-		ret = probe_current_pmu(pmu);
-	}
-
-	if (ret) {
-		pr_info("failed to probe PMU!\n");
-		goto out_free;
-	}
-
-	ret = cpu_pmu_init(cpu_pmu);
-	if (ret)
-		goto out_free;
-
-	ret = armpmu_register(cpu_pmu, -1);
-	if (ret)
-		goto out_destroy;
-
-	return 0;
-
-out_destroy:
-	cpu_pmu_destroy(cpu_pmu);
-out_free:
-	pr_info("failed to register PMU devices!\n");
-	kfree(pmu);
-	return ret;
-}
-
-static struct platform_driver cpu_pmu_driver = {
-	.driver		= {
-		.name	= "arm-pmu",
-		.pm	= &armpmu_dev_pm_ops,
-		.of_match_table = cpu_pmu_of_device_ids,
-	},
-	.probe		= cpu_pmu_device_probe,
-	.id_table	= cpu_pmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
-	return platform_driver_register(&cpu_pmu_driver);
-}
-device_initcall(register_pmu_driver);
arch/arm/kernel/perf_event_v6.c

@@ -31,6 +31,14 @@
  */
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
 enum armv6_perf_types {
 	ARMV6_PERFCTR_ICACHE_MISS	    = 0x0,
 	ARMV6_PERFCTR_IBUF_STALL	    = 0x1,

@@ -543,24 +551,39 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 }
-#else
-static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
 
-static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
+static struct of_device_id armv6_pmu_of_device_ids[] = {
+	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
+	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
+	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
+	{ /* sentinel value */ }
+};
 
-static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
+static const struct pmu_probe_info armv6_pmu_probe_table[] = {
+	ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
+	ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
+	ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
+	ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
+	{ /* sentinel value */ }
+};
 
-static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
+static int armv6_pmu_device_probe(struct platform_device *pdev)
+{
+	return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
+				    armv6_pmu_probe_table);
+}
+
+static struct platform_driver armv6_pmu_driver = {
+	.driver		= {
+		.name	= "armv6-pmu",
+		.of_match_table = armv6_pmu_of_device_ids,
+	},
+	.probe		= armv6_pmu_device_probe,
+};
+
+static int __init register_armv6_pmu_driver(void)
+{
+	return platform_driver_register(&armv6_pmu_driver);
+}
+device_initcall(register_armv6_pmu_driver);
 #endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
arch/arm/kernel/perf_event_v7.c
View file @
27a513ca
...
@@ -19,9 +19,15 @@
...
@@ -19,9 +19,15 @@
#ifdef CONFIG_CPU_V7
#ifdef CONFIG_CPU_V7
#include <asm/cp15.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/vfp.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"
#include "../vfp/vfpinstr.h"
#include <linux/of.h>
#include <linux/platform_device.h>
/*
/*
* Common ARMv7 event types
* Common ARMv7 event types
*
*
...
@@ -1056,15 +1062,22 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
...
@@ -1056,15 +1062,22 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu
->
max_period
=
(
1LLU
<<
32
)
-
1
;
cpu_pmu
->
max_period
=
(
1LLU
<<
32
)
-
1
;
};
};
static
u32
armv7_read_num_pmnc_events
(
void
)
static
void
armv7_read_num_pmnc_events
(
void
*
info
)
{
{
u32
nb_cnt
;
int
*
nb_cnt
=
info
;
/* Read the nb of CNTx counters supported from PMNC */
/* Read the nb of CNTx counters supported from PMNC */
nb_cnt
=
(
armv7_pmnc_read
()
>>
ARMV7_PMNC_N_SHIFT
)
&
ARMV7_PMNC_N_MASK
;
*
nb_cnt
=
(
armv7_pmnc_read
()
>>
ARMV7_PMNC_N_SHIFT
)
&
ARMV7_PMNC_N_MASK
;
/* Add the CPU cycles counter and return */
/* Add the CPU cycles counter */
return
nb_cnt
+
1
;
*
nb_cnt
+=
1
;
}
static
int
armv7_probe_num_events
(
struct
arm_pmu
*
arm_pmu
)
{
return
smp_call_function_any
(
&
arm_pmu
->
supported_cpus
,
armv7_read_num_pmnc_events
,
&
arm_pmu
->
num_events
,
1
);
}
}
static
int
armv7_a8_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
static
int
armv7_a8_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
...
@@ -1072,8 +1085,7 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
...
@@ -1072,8 +1085,7 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
armv7pmu_init
(
cpu_pmu
);
armv7pmu_init
(
cpu_pmu
);
cpu_pmu
->
name
=
"armv7_cortex_a8"
;
cpu_pmu
->
name
=
"armv7_cortex_a8"
;
cpu_pmu
->
map_event
=
armv7_a8_map_event
;
cpu_pmu
->
map_event
=
armv7_a8_map_event
;
cpu_pmu
->
num_events
=
armv7_read_num_pmnc_events
();
return
armv7_probe_num_events
(
cpu_pmu
);
return
0
;
}
}
static
int
armv7_a9_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
static
int
armv7_a9_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
...
@@ -1081,8 +1093,7 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
...
@@ -1081,8 +1093,7 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
armv7pmu_init
(
cpu_pmu
);
armv7pmu_init
(
cpu_pmu
);
cpu_pmu
->
name
=
"armv7_cortex_a9"
;
cpu_pmu
->
name
=
"armv7_cortex_a9"
;
cpu_pmu
->
map_event
=
armv7_a9_map_event
;
cpu_pmu
->
map_event
=
armv7_a9_map_event
;
cpu_pmu
->
num_events
=
armv7_read_num_pmnc_events
();
return
armv7_probe_num_events
(
cpu_pmu
);
return
0
;
}
}
static
int
armv7_a5_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
static
int
armv7_a5_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
...
@@ -1090,8 +1101,7 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
...
@@ -1090,8 +1101,7 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
armv7pmu_init
(
cpu_pmu
);
armv7pmu_init
(
cpu_pmu
);
cpu_pmu
->
name
=
"armv7_cortex_a5"
;
cpu_pmu
->
name
=
"armv7_cortex_a5"
;
cpu_pmu
->
map_event
=
armv7_a5_map_event
;
cpu_pmu
->
map_event
=
armv7_a5_map_event
;
cpu_pmu
->
num_events
=
armv7_read_num_pmnc_events
();
return
armv7_probe_num_events
(
cpu_pmu
);
return
0
;
}
}
static
int
armv7_a15_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
static
int
armv7_a15_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
...
@@ -1099,9 +1109,8 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
...
@@ -1099,9 +1109,8 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
armv7pmu_init
(
cpu_pmu
);
armv7pmu_init
(
cpu_pmu
);
cpu_pmu
->
name
=
"armv7_cortex_a15"
;
cpu_pmu
->
name
=
"armv7_cortex_a15"
;
cpu_pmu
->
map_event
=
armv7_a15_map_event
;
cpu_pmu
->
map_event
=
armv7_a15_map_event
;
cpu_pmu
->
num_events
=
armv7_read_num_pmnc_events
();
cpu_pmu
->
set_event_filter
=
armv7pmu_set_event_filter
;
cpu_pmu
->
set_event_filter
=
armv7pmu_set_event_filter
;
return
0
;
return
armv7_probe_num_events
(
cpu_pmu
)
;
}
}
static
int
armv7_a7_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
static
int
armv7_a7_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
...
@@ -1109,9 +1118,8 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
...
@@ -1109,9 +1118,8 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
armv7pmu_init
(
cpu_pmu
);
armv7pmu_init
(
cpu_pmu
);
cpu_pmu
->
name
=
"armv7_cortex_a7"
;
cpu_pmu
->
name
=
"armv7_cortex_a7"
;
cpu_pmu
->
map_event
=
armv7_a7_map_event
;
cpu_pmu
->
map_event
=
armv7_a7_map_event
;
cpu_pmu
->
num_events
=
armv7_read_num_pmnc_events
();
cpu_pmu
->
set_event_filter
=
armv7pmu_set_event_filter
;
cpu_pmu
->
set_event_filter
=
armv7pmu_set_event_filter
;
return
0
;
return
armv7_probe_num_events
(
cpu_pmu
)
;
}
}
static
int
armv7_a12_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
static
int
armv7_a12_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
...
@@ -1119,16 +1127,15 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
...
@@ -1119,16 +1127,15 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
armv7pmu_init
(
cpu_pmu
);
armv7pmu_init
(
cpu_pmu
);
cpu_pmu
->
name
=
"armv7_cortex_a12"
;
cpu_pmu
->
name
=
"armv7_cortex_a12"
;
cpu_pmu
->
map_event
=
armv7_a12_map_event
;
cpu_pmu
->
map_event
=
armv7_a12_map_event
;
cpu_pmu
->
num_events
=
armv7_read_num_pmnc_events
();
cpu_pmu
->
set_event_filter
=
armv7pmu_set_event_filter
;
cpu_pmu
->
set_event_filter
=
armv7pmu_set_event_filter
;
return
0
;
return
armv7_probe_num_events
(
cpu_pmu
)
;
}
}
static
int
armv7_a17_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
static
int
armv7_a17_pmu_init
(
struct
arm_pmu
*
cpu_pmu
)
{
{
armv7_a12_pmu_init
(
cpu_pmu
);
int
ret
=
armv7_a12_pmu_init
(
cpu_pmu
);
cpu_pmu
->
name
=
"armv7_cortex_a17"
;
cpu_pmu
->
name
=
"armv7_cortex_a17"
;
return
0
;
return
ret
;
}
}
/*
...
@@ -1508,14 +1515,13 @@ static int krait_pmu_init(struct arm_pmu *cpu_pmu)
 		cpu_pmu->map_event = krait_map_event_no_branch;
 	else
 		cpu_pmu->map_event = krait_map_event;
-	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
 	cpu_pmu->reset		= krait_pmu_reset;
 	cpu_pmu->enable		= krait_pmu_enable_event;
 	cpu_pmu->disable	= krait_pmu_disable_event;
 	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
-	return 0;
+	return armv7_probe_num_events(cpu_pmu);
 }
/*
...
@@ -1833,13 +1839,12 @@ static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
 	armv7pmu_init(cpu_pmu);
 	cpu_pmu->name		= "armv7_scorpion";
 	cpu_pmu->map_event	= scorpion_map_event;
-	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	cpu_pmu->reset		= scorpion_pmu_reset;
 	cpu_pmu->enable		= scorpion_pmu_enable_event;
 	cpu_pmu->disable	= scorpion_pmu_disable_event;
 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
-	return 0;
+	return armv7_probe_num_events(cpu_pmu);
 }
static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
...
@@ -1847,62 +1852,52 @@ static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
 	armv7pmu_init(cpu_pmu);
 	cpu_pmu->name		= "armv7_scorpion_mp";
 	cpu_pmu->map_event	= scorpion_map_event;
-	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	cpu_pmu->reset		= scorpion_pmu_reset;
 	cpu_pmu->enable		= scorpion_pmu_enable_event;
 	cpu_pmu->disable	= scorpion_pmu_disable_event;
 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
-	return 0;
+	return armv7_probe_num_events(cpu_pmu);
 }
-#else
-static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
+static const struct of_device_id armv7_pmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
+	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
+	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
+	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
+	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
+	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
+	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
+	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
+	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
+	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
+	{},
+};
+
+static const struct pmu_probe_info armv7_pmu_probe_table[] = {
+	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
+	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
+	{ /* sentinel value */ }
+};
+
+static int armv7_pmu_device_probe(struct platform_device *pdev)
+{
+	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
+				    armv7_pmu_probe_table);
+}
+
+static struct platform_driver armv7_pmu_driver = {
+	.driver		= {
+		.name	= "armv7-pmu",
+		.of_match_table = armv7_pmu_of_device_ids,
+	},
+	.probe		= armv7_pmu_device_probe,
+};
+
+static int __init register_armv7_pmu_driver(void)
+{
+	return platform_driver_register(&armv7_pmu_driver);
+}
+device_initcall(register_armv7_pmu_driver);
 #endif	/* CONFIG_CPU_V7 */
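The block above is the heart of the perf refactor in this merge: the CONFIG_CPU_V7=n stubs disappear and the ARMv7 PMU becomes an ordinary platform driver, with an of_device_id table whose .data member carries the per-CPU init routine and a CPUID-based probe table as a fallback. As a rough sketch of how another PMU driver could reuse the same pattern (the mypmu_* names and the "vendor,my-pmu" compatible string are illustrative, not part of this commit):

/*
 * Illustrative sketch only: a hypothetical "mypmu" driver following the
 * of_device_id + pmu_probe_info + platform_driver pattern shown above.
 */
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>

static int mypmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "mypmu";
	/* fill in map_event, enable, disable, ... here */
	return 0;
}

static const struct of_device_id mypmu_of_device_ids[] = {
	{ .compatible = "vendor,my-pmu", .data = mypmu_init },
	{},
};

/* No CPUID fallback for this sketch: sentinel-only probe table. */
static const struct pmu_probe_info mypmu_probe_table[] = {
	{ /* sentinel value */ }
};

static int mypmu_device_probe(struct platform_device *pdev)
{
	/* arm_pmu_device_probe() picks the init function from the tables. */
	return arm_pmu_device_probe(pdev, mypmu_of_device_ids, mypmu_probe_table);
}

static struct platform_driver mypmu_driver = {
	.driver = {
		.name		= "mypmu",
		.of_match_table	= mypmu_of_device_ids,
	},
	.probe = mypmu_device_probe,
};

static int __init register_mypmu_driver(void)
{
	return platform_driver_register(&mypmu_driver);
}
device_initcall(register_mypmu_driver);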
arch/arm/kernel/perf_event_xscale.c
View file @
27a513ca
...
@@ -13,6 +13,14 @@
  */

 #ifdef CONFIG_CPU_XSCALE
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
 enum xscale_perf_types {
 	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
 	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
...
@@ -740,14 +748,28 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 }
-#else
-static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
+static const struct pmu_probe_info xscale_pmu_probe_table[] = {
+	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
+	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
+	{ /* sentinel value */ }
+};
+
+static int xscale_pmu_device_probe(struct platform_device *pdev)
+{
+	return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
+}
+
+static struct platform_driver xscale_pmu_driver = {
+	.driver		= {
+		.name	= "xscale-pmu",
+	},
+	.probe		= xscale_pmu_device_probe,
+};
+
+static int __init register_xscale_pmu_driver(void)
+{
+	return platform_driver_register(&xscale_pmu_driver);
+}
+device_initcall(register_xscale_pmu_driver);
 #endif	/* CONFIG_CPU_XSCALE */
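XScale CPUs are usually probed without a device tree, so the driver above passes only a pmu_probe_info table, pairing a CPUID value and mask with an init function. Conceptually, the CPUID fallback amounts to a table walk like the one below (an illustrative sketch; the real logic lives inside arm_pmu_device_probe() in arch/arm/kernel/perf_event.c and may differ in detail):

/*
 * Sketch of CPUID-based matching against a pmu_probe_info table.
 * Not the kernel's exact implementation.
 */
#include <asm/cputype.h>
#include <asm/pmu.h>

static int probe_by_cpuid(struct arm_pmu *pmu,
			  const struct pmu_probe_info *info)
{
	unsigned int cpuid = read_cpuid_id();

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		/* First matching entry wins; run its init function. */
		return info->init(pmu);
	}
	return -ENODEV;
}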
arch/arm/mach-exynos/suspend.c
View file @
27a513ca
...
@@ -311,13 +311,7 @@ static int exynos5420_cpu_suspend(unsigned long arg)
 	if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) {
 		mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
-
-		/*
-		 * Residency value passed to mcpm_cpu_suspend back-end
-		 * has to be given clear semantics. Set to 0 as a
-		 * temporary value.
-		 */
-		mcpm_cpu_suspend(0);
+		mcpm_cpu_suspend();
 	}

 	pr_info("Failed to suspend the system\n");
...
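This hunk reflects an MCPM API change carried by the same merge: mcpm_cpu_suspend() no longer takes the never-well-defined expected-residency argument. A suspend finisher built on MCPM therefore reduces to roughly the following sketch (my_cpu_resume and my_suspend_finisher are placeholders, not symbols from this commit):

/*
 * Sketch of an MCPM-based suspend finisher after this change.
 * my_cpu_resume stands in for the platform's resume entry point.
 */
#include <asm/cputype.h>
#include <asm/mcpm.h>

extern void my_cpu_resume(void);

static int my_suspend_finisher(unsigned long arg)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	mcpm_set_entry_vector(cpu, cluster, my_cpu_resume);
	mcpm_cpu_suspend();	/* no residency argument any more */

	/* mcpm_cpu_suspend() only returns if the suspend was aborted. */
	return 1;
}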
arch/arm/mach-hisi/platmcpm.c
View file @
27a513ca
...
@@ -6,6 +6,8 @@
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
  */
+#include <linux/init.h>
+#include <linux/smp.h>
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/memblock.h>
...
@@ -13,7 +15,9 @@
 #include <asm/cputype.h>
 #include <asm/cp15.h>
-#include <asm/mcpm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <asm/smp_plat.h>

 #include "core.h"
...
@@ -94,11 +98,16 @@ static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on)
 	} while (data != readl_relaxed(fabric + FAB_SF_MODE));
 }

-static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster)
+static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
 {
+	unsigned int mpidr, cpu, cluster;
 	unsigned long data;
 	void __iomem *sys_dreq, *sys_status;

+	mpidr = cpu_logical_map(l_cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
 	if (!sysctrl)
 		return -ENODEV;
 	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
...
@@ -118,6 +127,7 @@ static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster)
 			cpu_relax();
 			data = readl_relaxed(sys_status);
 		} while (data & CLUSTER_DEBUG_RESET_STATUS);
+		hip04_set_snoop_filter(cluster, 1);
 	}

 	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
...
@@ -126,11 +136,15 @@ static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster)
 	do {
 		cpu_relax();
 	} while (data == readl_relaxed(sys_status));
+
 	/*
 	 * We may fail to power up core again without this delay.
 	 * It's not mentioned in document. It's found by test.
 	 */
 	udelay(20);
+
+	arch_send_wakeup_ipi_mask(cpumask_of(l_cpu));
+
 out:
 	hip04_cpu_table[cluster][cpu]++;
 	spin_unlock_irq(&boot_lock);
...
@@ -138,31 +152,30 @@ static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster)
 	return 0;
 }

-static void hip04_mcpm_power_down(void)
+#ifdef CONFIG_HOTPLUG_CPU
+static void hip04_cpu_die(unsigned int l_cpu)
 {
 	unsigned int mpidr, cpu, cluster;
-	bool skip_wfi = false, last_man = false;
+	bool last_man;

-	mpidr = read_cpuid_mpidr();
+	mpidr = cpu_logical_map(l_cpu);
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

-	__mcpm_cpu_going_down(cpu, cluster);
-
 	spin_lock(&boot_lock);
-	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
 	hip04_cpu_table[cluster][cpu]--;
 	if (hip04_cpu_table[cluster][cpu] == 1) {
 		/* A power_up request went ahead of us. */
-		skip_wfi = true;
+		spin_unlock(&boot_lock);
+		return;
 	} else if (hip04_cpu_table[cluster][cpu] > 1) {
 		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
 		BUG();
 	}

 	last_man = hip04_cluster_is_down(cluster);
-	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
-		spin_unlock(&boot_lock);
+	spin_unlock(&boot_lock);
+	if (last_man) {
 		/* Since it's Cortex A15, disable L2 prefetching. */
 		asm volatile(
 		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
...
@@ -170,34 +183,30 @@ static void hip04_mcpm_power_down(void)
 		"dsb	"
 		: : "r" (0x400) );
 		v7_exit_coherency_flush(all);
-		hip04_set_snoop_filter(cluster, 0);
-		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
 	} else {
-		spin_unlock(&boot_lock);
 		v7_exit_coherency_flush(louis);
 	}

-	__mcpm_cpu_down(cpu, cluster);
-
-	if (!skip_wfi)
+	for (;;)
 		wfi();
 }
-static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
+static int hip04_cpu_kill(unsigned int l_cpu)
 {
+	unsigned int mpidr, cpu, cluster;
 	unsigned int data, tries, count;
-	int ret = -ETIMEDOUT;

+	mpidr = cpu_logical_map(l_cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 	BUG_ON(cluster >= HIP04_MAX_CLUSTERS ||
 	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);

 	count = TIMEOUT_MSEC / POLL_MSEC;
 	spin_lock_irq(&boot_lock);
 	for (tries = 0; tries < count; tries++) {
-		if (hip04_cpu_table[cluster][cpu]) {
-			ret = -EBUSY;
+		if (hip04_cpu_table[cluster][cpu])
 			goto err;
-		}
 		cpu_relax();
 		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
 		if (data & CORE_WFI_STATUS(cpu))
...
@@ -220,64 +229,22 @@ static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
 	}
 	if (tries >= count)
 		goto err;
+	if (hip04_cluster_is_down(cluster))
+		hip04_set_snoop_filter(cluster, 0);
 	spin_unlock_irq(&boot_lock);
-	return 0;
+	return 1;
 err:
 	spin_unlock_irq(&boot_lock);
-	return ret;
+	return 0;
 }
-
-static void hip04_mcpm_powered_up(void)
-{
-	unsigned int mpidr, cpu, cluster;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-	spin_lock(&boot_lock);
-	if (!hip04_cpu_table[cluster][cpu])
-		hip04_cpu_table[cluster][cpu] = 1;
-	spin_unlock(&boot_lock);
-}
-
-static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
-{
-	asm volatile ("			\n"
-"	cmp	r0, #0			\n"
-"	bxeq	lr			\n"
-	/* calculate fabric phys address */
-"	adr	r2, 2f			\n"
-"	ldmia	r2, {r1, r3}		\n"
-"	sub	r0, r2, r1		\n"
-"	ldr	r2, [r0, r3]		\n"
	/* get cluster id from MPIDR */
-"	mrc	p15, 0, r0, c0, c0, 5	\n"
-"	ubfx	r1, r0, #8, #8		\n"
	/* 1 << cluster id */
-"	mov	r0, #1			\n"
-"	mov	r3, r0, lsl r1		\n"
-"	ldr	r0, [r2, #" __stringify(FAB_SF_MODE) "]	\n"
-"	tst	r0, r3			\n"
-"	bxne	lr			\n"
-"	orr	r1, r0, r3		\n"
-"	str	r1, [r2, #" __stringify(FAB_SF_MODE) "]	\n"
-"1:	ldr	r0, [r2, #" __stringify(FAB_SF_MODE) "]	\n"
-"	tst	r0, r3			\n"
-"	beq	1b			\n"
-"	bx	lr			\n"
-
-"	.align	2			\n"
-"2:	.word	.			\n"
-"	.word	fabric_phys_addr	\n"
-	);
-}
+#endif
const
struct
mcpm_platform_ops
hip04_mcpm_ops
=
{
.
power_up
=
hip04_mcpm_power_up
,
static
struct
smp_operations
__initdata
hip04_smp_ops
=
{
.
power_down
=
hip04_mcpm_power_down
,
.
smp_boot_secondary
=
hip04_boot_secondary
,
.
wait_for_powerdown
=
hip04_mcpm_wait_for_powerdown
,
#ifdef CONFIG_HOTPLUG_CPU
.
powered_up
=
hip04_mcpm_powered_up
,
.
cpu_die
=
hip04_cpu_die
,
.
cpu_kill
=
hip04_cpu_kill
,
#endif
};
};
static
bool
__init
hip04_cpu_table_init
(
void
)
static
bool
__init
hip04_cpu_table_init
(
void
)
...
@@ -298,7 +265,7 @@ static bool __init hip04_cpu_table_init(void)
...
@@ -298,7 +265,7 @@ static bool __init hip04_cpu_table_init(void)
return
true
;
return
true
;
}
}
static
int
__init
hip04_
mcpm
_init
(
void
)
static
int
__init
hip04_
smp
_init
(
void
)
{
{
struct
device_node
*
np
,
*
np_sctl
,
*
np_fab
;
struct
device_node
*
np
,
*
np_sctl
,
*
np_fab
;
struct
resource
fab_res
;
struct
resource
fab_res
;
...
@@ -353,10 +320,6 @@ static int __init hip04_mcpm_init(void)
...
@@ -353,10 +320,6 @@ static int __init hip04_mcpm_init(void)
ret
=
-
EINVAL
;
ret
=
-
EINVAL
;
goto
err_table
;
goto
err_table
;
}
}
ret
=
mcpm_platform_register
(
&
hip04_mcpm_ops
);
if
(
ret
)
{
goto
err_table
;
}
/*
/*
* Fill the instruction address that is used after secondary core
* Fill the instruction address that is used after secondary core
...
@@ -364,13 +327,11 @@ static int __init hip04_mcpm_init(void)
...
@@ -364,13 +327,11 @@ static int __init hip04_mcpm_init(void)
*/
*/
writel_relaxed
(
hip04_boot_method
[
0
],
relocation
);
writel_relaxed
(
hip04_boot_method
[
0
],
relocation
);
writel_relaxed
(
0xa5a5a5a5
,
relocation
+
4
);
/* magic number */
writel_relaxed
(
0xa5a5a5a5
,
relocation
+
4
);
/* magic number */
writel_relaxed
(
virt_to_phys
(
mcpm_entry_point
),
relocation
+
8
);
writel_relaxed
(
virt_to_phys
(
secondary_startup
),
relocation
+
8
);
writel_relaxed
(
0
,
relocation
+
12
);
writel_relaxed
(
0
,
relocation
+
12
);
iounmap
(
relocation
);
iounmap
(
relocation
);
mcpm_sync_init
(
hip04_mcpm_power_up_setup
);
smp_set_ops
(
&
hip04_smp_ops
);
mcpm_smp_set_ops
();
pr_info
(
"HiP04 MCPM initialized
\n
"
);
return
ret
;
return
ret
;
err_table:
err_table:
iounmap
(
fabric
);
iounmap
(
fabric
);
...
@@ -383,4 +344,4 @@ static int __init hip04_mcpm_init(void)
...
@@ -383,4 +344,4 @@ static int __init hip04_mcpm_init(void)
err:
err:
return
ret
;
return
ret
;
}
}
early_initcall
(
hip04_
mcpm
_init
);
early_initcall
(
hip04_
smp
_init
);
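Taken together, the platmcpm.c changes drop the generic MCPM state machine for HiP04 and wire the platform straight into struct smp_operations via smp_set_ops(). Stripped of the HiP04 register poking, the registration pattern looks roughly like this (the myplat_* names are placeholders; the wfi() loop assumes ARMv7 and the whole thing is only a sketch):

/*
 * Sketch of the smp_operations registration pattern used above.
 * The myplat_* callbacks stand in for platform-specific code.
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>

static int myplat_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* power up / release the core identified by cpu_logical_map(cpu) */
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void myplat_cpu_die(unsigned int cpu)
{
	/* flush caches and leave coherency here, then idle forever */
	for (;;)
		wfi();
}

static int myplat_cpu_kill(unsigned int cpu)
{
	/* wait for the core to reach WFI and cut its power; 1 == success */
	return 1;
}
#endif

static struct smp_operations __initdata myplat_smp_ops = {
	.smp_boot_secondary	= myplat_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= myplat_cpu_die,
	.cpu_kill		= myplat_cpu_kill,
#endif
};

static int __init myplat_smp_init(void)
{
	smp_set_ops(&myplat_smp_ops);
	return 0;
}
early_initcall(myplat_smp_init);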
drivers/cpuidle/cpuidle-big_little.c
View file @
27a513ca
...
@@ -108,13 +108,7 @@ static int notrace bl_powerdown_finisher(unsigned long arg)
 	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

 	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
-
-	/*
-	 * Residency value passed to mcpm_cpu_suspend back-end
-	 * has to be given clear semantics. Set to 0 as a
-	 * temporary value.
-	 */
-	mcpm_cpu_suspend(0);
+	mcpm_cpu_suspend();

 	/* return value != 0 means failure */
 	return 1;
...
include/linux/perf_event.h
View file @
27a513ca
...
@@ -304,6 +304,11 @@ struct pmu {
 	 * Free pmu-private AUX data structures
 	 */
 	void (*free_aux)		(void *aux); /* optional */
+
+	/*
+	 * Filter events for PMU-specific reasons.
+	 */
+	int (*filter_match)		(struct perf_event *event); /* optional */
 };

 /**
...
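The new filter_match hook lets a PMU veto an event at schedule time for PMU-specific reasons; the core (next hunk) treats a missing callback as "always match". A heterogeneous-CPU PMU could use it along these lines (a sketch assuming a driver-private supported_cpus mask; not code lifted from any in-tree driver):

/*
 * Sketch: rejecting events on CPUs a given PMU instance cannot count on.
 * struct my_pmu and its supported_cpus field are assumptions for
 * illustration only.
 */
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/smp.h>

struct my_pmu {
	struct pmu	pmu;
	cpumask_t	supported_cpus;
};

static int my_pmu_filter_match(struct perf_event *event)
{
	struct my_pmu *mypmu = container_of(event->pmu, struct my_pmu, pmu);

	/* Non-zero means "this event may run on the current CPU". */
	return cpumask_test_cpu(smp_processor_id(), &mypmu->supported_cpus);
}

/* Wired up at init time: mypmu->pmu.filter_match = my_pmu_filter_match; */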
kernel/events/core.c
View file @
27a513ca
...
@@ -1506,11 +1506,17 @@ static int __init perf_workqueue_init(void)
 core_initcall(perf_workqueue_init);

+static inline int pmu_filter_match(struct perf_event *event)
+{
+	struct pmu *pmu = event->pmu;
+	return pmu->filter_match ? pmu->filter_match(event) : 1;
+}
+
 static inline int
 event_filter_match(struct perf_event *event)
 {
 	return (event->cpu == -1 || event->cpu == smp_processor_id())
-	    && perf_cgroup_match(event);
+	    && perf_cgroup_match(event) && pmu_filter_match(event);
 }

 static void
...