Commit 6cbab787, authored Jun 21, 2021 by Rafael J. Wysocki
Merge back cpufreq material for v5.14.
Parents: 771fac5e, bcc936c5
Showing 6 changed files, with 243 additions and 34 deletions (+243 / -34):
    Documentation/admin-guide/pm/intel_pstate.rst   +6   -0
    drivers/cpufreq/cpufreq_stats.c                  +2   -3
    drivers/cpufreq/intel_pstate.c                 +234  -29
    drivers/cpufreq/loongson2_cpufreq.c              +0   -1
    drivers/cpufreq/sc520_freq.c                     +1   -0
    drivers/cpufreq/sh-cpufreq.c                     +0   -1
Documentation/admin-guide/pm/intel_pstate.rst

```diff
@@ -365,6 +365,9 @@ argument is passed to the kernel in the command line.
         inclusive) including both turbo and non-turbo P-states (see
         `Turbo P-states Support`_).
 
+        This attribute is present only if the value exposed by it is the same
+        for all of the CPUs in the system.
+
         The value of this attribute is not affected by the ``no_turbo``
         setting described `below <no_turbo_attr_>`_.
@@ -374,6 +377,9 @@ argument is passed to the kernel in the command line.
         Ratio of the `turbo range <turbo_>`_ size to the size of the entire
         range of supported P-states, in percent.
 
+        This attribute is present only if the value exposed by it is the same
+        for all of the CPUs in the system.
+
         This attribute is read-only.
 
 .. _no_turbo_attr:
```
drivers/cpufreq/cpufreq_stats.c

```diff
@@ -211,7 +211,7 @@ void cpufreq_stats_free_table(struct cpufreq_policy *policy)
 
 void cpufreq_stats_create_table(struct cpufreq_policy *policy)
 {
-        unsigned int i = 0, count = 0, ret = -ENOMEM;
+        unsigned int i = 0, count;
         struct cpufreq_stats *stats;
         unsigned int alloc_size;
         struct cpufreq_frequency_table *pos;
@@ -253,8 +253,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
         stats->last_index = freq_table_get_index(stats, policy->cur);
 
         policy->stats = stats;
-        ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
-        if (!ret)
+        if (!sysfs_create_group(&policy->kobj, &stats_attr_group))
                 return;
 
         /* We failed, release resources */
```
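The net effect of the cpufreq_stats change is to drop a variable that was only ever written, never read, and to branch on the sysfs_create_group() return value directly. As a minimal user-space sketch of the same shape (the stub function is invented for illustration):

```c
#include <stdio.h>

/* Illustrative stand-in for sysfs_create_group(): returns 0 on success,
 * a negative errno-style value on failure. */
static int create_group_stub(void)
{
        return 0;
}

static void create_table(void)
{
        /* Branch on the call directly instead of parking the result in a
         * 'ret' variable that nothing reads afterwards. */
        if (!create_group_stub())
                return;

        /* We failed, release resources */
        puts("cleanup path");
}

int main(void)
{
        create_table();
        return 0;
}
```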
drivers/cpufreq/intel_pstate.c
```diff
@@ -121,9 +121,10 @@ struct sample {
  * @max_pstate_physical:This is physical Max P state for a processor
  *                      This can be higher than the max_pstate which can
  *                      be limited by platform thermal design power limits
- * @scaling:            Scaling factor to convert frequency to cpufreq
- *                      frequency units
+ * @perf_ctl_scaling:   PERF_CTL P-state to frequency scaling factor
+ * @scaling:            Scaling factor between performance and frequency
  * @turbo_pstate:       Max Turbo P state possible for this platform
+ * @min_freq:           @min_pstate frequency in cpufreq units
  * @max_freq:           @max_pstate frequency in cpufreq units
  * @turbo_freq:         @turbo_pstate frequency in cpufreq units
  *
@@ -134,8 +135,10 @@ struct pstate_data {
         int     min_pstate;
         int     max_pstate;
         int     max_pstate_physical;
+        int     perf_ctl_scaling;
         int     scaling;
         int     turbo_pstate;
+        unsigned int min_freq;
         unsigned int max_freq;
         unsigned int turbo_freq;
 };
```
```diff
@@ -366,7 +369,7 @@ static void intel_pstate_set_itmt_prio(int cpu)
         }
 }
 
-static int intel_pstate_get_cppc_guranteed(int cpu)
+static int intel_pstate_get_cppc_guaranteed(int cpu)
 {
         struct cppc_perf_caps cppc_perf;
         int ret;
```
```diff
@@ -382,7 +385,7 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
 }
 
 #else /* CONFIG_ACPI_CPPC_LIB */
-static void intel_pstate_set_itmt_prio(int cpu)
+static inline void intel_pstate_set_itmt_prio(int cpu)
 {
 }
 #endif /* CONFIG_ACPI_CPPC_LIB */
```
```diff
@@ -467,6 +470,20 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
         acpi_processor_unregister_performance(policy->cpu);
 }
 
+static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
+{
+        return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
+}
+
+static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
+                                        struct cppc_perf_caps *caps)
+{
+        if (cppc_get_perf_caps(cpu->cpu, caps))
+                return false;
+
+        return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
+}
+
 #else /* CONFIG_ACPI */
 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
```
```diff
@@ -483,12 +500,146 @@ static inline bool intel_pstate_acpi_pm_profile_server(void)
 #endif /* CONFIG_ACPI */
 
 #ifndef CONFIG_ACPI_CPPC_LIB
-static int intel_pstate_get_cppc_guranteed(int cpu)
+static inline int intel_pstate_get_cppc_guaranteed(int cpu)
 {
         return -ENOTSUPP;
 }
 #endif /* CONFIG_ACPI_CPPC_LIB */
 
+static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
+{
+        pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);
+
+        cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling;
+}
+
+/**
+ * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels.
+ * @cpu: Target CPU.
+ *
+ * On hybrid processors, HWP may expose more performance levels than there are
+ * P-states accessible through the PERF_CTL interface. If that happens, the
+ * scaling factor between HWP performance levels and CPU frequency will be less
+ * than the scaling factor between P-state values and CPU frequency.
+ *
+ * In that case, the scaling factor between HWP performance levels and CPU
+ * frequency needs to be determined which can be done with the help of the
+ * observation that certain HWP performance levels should correspond to certain
+ * P-states, like for example the HWP highest performance should correspond
+ * to the maximum turbo P-state of the CPU.
+ */
+static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
+{
+        int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
+        int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+        int perf_ctl_turbo = pstate_funcs.get_turbo();
+        int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
+        int perf_ctl_max = pstate_funcs.get_max();
+        int max_freq = perf_ctl_max * perf_ctl_scaling;
+        int scaling = INT_MAX;
+        int freq;
+
+        pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+        pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max);
+        pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+        pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+        pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
+        pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
+
+#ifdef CONFIG_ACPI
+        if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
+                struct cppc_perf_caps caps;
+
+                if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
+                        if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
+                                pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
+
+                                /*
+                                 * If the CPPC nominal performance is valid, it
+                                 * can be assumed to correspond to cpu_khz.
+                                 */
+                                if (caps.nominal_perf == perf_ctl_max_phys) {
+                                        intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+                                        return;
+                                }
+                                scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
+                        } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
+                                pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
+
+                                /*
+                                 * If the CPPC guaranteed performance is valid,
+                                 * it can be assumed to correspond to max_freq.
+                                 */
+                                if (caps.guaranteed_perf == perf_ctl_max) {
+                                        intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+                                        return;
+                                }
+                                scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
+                        }
+                }
+        }
+#endif
+        /*
+         * If using the CPPC data to compute the HWP-to-frequency scaling factor
+         * doesn't work, use the HWP_CAP gauranteed perf for this purpose with
+         * the assumption that it corresponds to max_freq.
+         */
+        if (scaling > perf_ctl_scaling) {
+                pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu);
+
+                if (cpu->pstate.max_pstate == perf_ctl_max) {
+                        intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+                        return;
+                }
+                scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate);
+                if (scaling > perf_ctl_scaling) {
+                        /*
+                         * This should not happen, because it would mean that
+                         * the number of HWP perf levels was less than the
+                         * number of P-states, so use the PERF_CTL scaling in
+                         * that case.
+                         */
+                        pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu,
+                                 scaling);
+
+                        intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+                        return;
+                }
+        }
+
+        /*
+         * If the product of the HWP performance scaling factor obtained above
+         * and the HWP_CAP highest performance is greater than the maximum turbo
+         * frequency corresponding to the pstate_funcs.get_turbo() return value,
+         * the scaling factor is too high, so recompute it so that the HWP_CAP
+         * highest performance corresponds to the maximum turbo frequency.
+         */
+        if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
+                pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling);
+
+                cpu->pstate.turbo_freq = turbo_freq;
+                scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
+        }
+
+        cpu->pstate.scaling = scaling;
+
+        pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+
+        cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
+                                         perf_ctl_scaling);
+
+        freq = perf_ctl_max_phys * perf_ctl_scaling;
+        cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling);
+
+        cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+        /*
+         * Cast the min P-state value retrieved via pstate_funcs.get_min() to
+         * the effective range of HWP performance levels.
+         */
+        cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
+}
+
 static inline void update_turbo_state(void)
 {
         u64 misc_en;
```
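To make the calibration arithmetic concrete, here is a minimal user-space sketch of the two key steps, DIV_ROUND_UP(cpu_khz, nominal_perf) and the turbo-frequency sanity check, using invented numbers for a hypothetical hybrid CPU (none of these values come from the patch):

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Illustrative only: TSC at 3 GHz, 38 CPPC nominal HWP levels,
         * PERF_CTL steps of 100 MHz, turbo P-state 33, HWP highest 43. */
        int cpu_khz = 3000000;
        int nominal_perf = 38;
        int perf_ctl_scaling = 100000;
        int perf_ctl_turbo = 33;
        int hwp_highest = 43;

        /* CPPC nominal is assumed to correspond to cpu_khz, so each HWP
         * level is worth cpu_khz / nominal_perf kHz, rounded up. */
        int scaling = DIV_ROUND_UP(cpu_khz, nominal_perf);      /* 78948 */
        printf("HWP-to-frequency scaling: %d kHz/level\n", scaling);

        /* Mirror the patch's check: if HWP highest times that factor
         * overshoots the PERF_CTL turbo frequency, recompute the factor so
         * the HWP highest level maps onto the turbo frequency instead. */
        int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;     /* 3300000 */
        if (turbo_freq < hwp_highest * scaling) {
                scaling = DIV_ROUND_UP(turbo_freq, hwp_highest); /* 76745 */
                printf("recomputed scaling: %d kHz/level\n", scaling);
        }
        return 0;
}
```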
```diff
@@ -795,19 +946,22 @@ cpufreq_freq_attr_rw(energy_performance_preference);
 
 static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
 {
-        struct cpudata *cpu;
-        u64 cap;
-        int ratio;
+        struct cpudata *cpu = all_cpu_data[policy->cpu];
+        int ratio, freq;
 
-        ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
+        ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
         if (ratio <= 0) {
+                u64 cap;
+
                 rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
                 ratio = HWP_GUARANTEED_PERF(cap);
         }
 
-        cpu = all_cpu_data[policy->cpu];
+        freq = ratio * cpu->pstate.scaling;
+        if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
+                freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);
 
-        return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
+        return sprintf(buf, "%d\n", freq);
 }
 
 cpufreq_freq_attr_ro(base_frequency);
```
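show_base_frequency() now rounds the HWP-derived frequency down to a multiple of the PERF_CTL scaling factor whenever the two factors differ. A small sketch of that rounding, with rounddown() defined as in the kernel and purely illustrative values:

```c
#include <stdio.h>

/* rounddown() as in the kernel: largest multiple of 'mult' <= 'x'. */
#define rounddown(x, mult) ((x) - ((x) % (mult)))

int main(void)
{
        /* Invented values: 28 HWP guaranteed levels of 76745 kHz each,
         * PERF_CTL steps of 100 MHz. */
        int ratio = 28, scaling = 76745, perf_ctl_scaling = 100000;

        int freq = ratio * scaling;                       /* 2148860 kHz */
        if (scaling != perf_ctl_scaling)
                freq = rounddown(freq, perf_ctl_scaling); /* 2100000 kHz */

        printf("base_frequency: %d kHz\n", freq);
        return 0;
}
```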
```diff
@@ -831,9 +985,20 @@ static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
 
 static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
 {
+        int scaling = cpu->pstate.scaling;
+
         __intel_pstate_get_hwp_cap(cpu);
-        cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
-        cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+        cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
+        cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
+        if (scaling != cpu->pstate.perf_ctl_scaling) {
+                int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+
+                cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
+                                                 perf_ctl_scaling);
+                cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
+                                                   perf_ctl_scaling);
+        }
 }
 
 static void intel_pstate_hwp_set(unsigned int cpu)
```
```diff
@@ -1365,8 +1530,6 @@ define_one_global_rw(energy_efficiency);
 static struct attribute *intel_pstate_attributes[] = {
         &status.attr,
         &no_turbo.attr,
-        &turbo_pct.attr,
-        &num_pstates.attr,
         NULL
 };
```
```diff
@@ -1391,6 +1554,14 @@ static void __init intel_pstate_sysfs_expose_params(void)
         if (WARN_ON(rc))
                 return;
 
+        if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
+                rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
+                WARN_ON(rc);
+
+                rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
+                WARN_ON(rc);
+        }
+
         /*
          * If per cpu limits are enforced there are no global limits, so
          * return without creating max/min_perf_pct attributes
```
```diff
@@ -1417,6 +1588,11 @@ static void __init intel_pstate_sysfs_remove(void)
 
         sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
 
+        if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
+                sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
+                sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
+        }
+
         if (!per_cpu_limits) {
                 sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
                 sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
```
```diff
@@ -1713,19 +1889,33 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
 
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
+        bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU);
+        int perf_ctl_max_phys = pstate_funcs.get_max_physical();
+        int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys :
+                                            pstate_funcs.get_scaling();
+
         cpu->pstate.min_pstate = pstate_funcs.get_min();
-        cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
-        cpu->pstate.scaling = pstate_funcs.get_scaling();
+        cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
+        cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
 
         if (hwp_active && !hwp_mode_bdw) {
                 __intel_pstate_get_hwp_cap(cpu);
+
+                if (hybrid_cpu)
+                        intel_pstate_hybrid_hwp_calibrate(cpu);
+                else
+                        cpu->pstate.scaling = perf_ctl_scaling;
         } else {
+                cpu->pstate.scaling = perf_ctl_scaling;
                 cpu->pstate.max_pstate = pstate_funcs.get_max();
                 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
         }
 
-        cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
-        cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+        if (cpu->pstate.scaling == perf_ctl_scaling) {
+                cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+                cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
+                cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
+        }
 
         if (pstate_funcs.get_aperf_mperf_shift)
                 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
```
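On hybrid parts, the PERF_CTL scaling factor is now derived from the TSC frequency and the maximum physical P-state instead of being taken from pstate_funcs.get_scaling(). A hedged sketch of that derivation with invented numbers:

```c
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        /* Illustrative only: hybrid CPU with TSC at 3 GHz and a maximum
         * physical P-state of 30 as reported through PERF_CTL. */
        bool hybrid_cpu = true;
        int cpu_khz = 3000000;
        int perf_ctl_max_phys = 30;
        int legacy_scaling = 100000; /* what get_scaling() would return */

        int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys
                                          : legacy_scaling;

        /* 3000000 / 30 = 100000 kHz per P-state step. */
        printf("perf_ctl_scaling = %d kHz\n", perf_ctl_scaling);
        return 0;
}
```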
```diff
@@ -2087,6 +2277,8 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
         X86_MATCH(ATOM_GOLDMONT,        core_funcs),
         X86_MATCH(ATOM_GOLDMONT_PLUS,   core_funcs),
         X86_MATCH(SKYLAKE_X,            core_funcs),
+        X86_MATCH(COMETLAKE,            core_funcs),
+        X86_MATCH(ICELAKE_X,            core_funcs),
         {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
```
```diff
@@ -2195,23 +2387,34 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
                                             unsigned int policy_min,
                                             unsigned int policy_max)
 {
-        int scaling = cpu->pstate.scaling;
+        int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
         int32_t max_policy_perf, min_policy_perf;
 
+        max_policy_perf = policy_max / perf_ctl_scaling;
+        if (policy_max == policy_min) {
+                min_policy_perf = max_policy_perf;
+        } else {
+                min_policy_perf = policy_min / perf_ctl_scaling;
+                min_policy_perf = clamp_t(int32_t, min_policy_perf,
+                                          0, max_policy_perf);
+        }
+
         /*
          * HWP needs some special consideration, because HWP_REQUEST uses
          * abstract values to represent performance rather than pure ratios.
          */
-        if (hwp_active)
+        if (hwp_active) {
                 intel_pstate_get_hwp_cap(cpu);
 
-        max_policy_perf = policy_max / scaling;
-        if (policy_max == policy_min) {
-                min_policy_perf = max_policy_perf;
-        } else {
-                min_policy_perf = policy_min / scaling;
-                min_policy_perf = clamp_t(int32_t, min_policy_perf,
-                                          0, max_policy_perf);
+                if (cpu->pstate.scaling != perf_ctl_scaling) {
+                        int scaling = cpu->pstate.scaling;
+                        int freq;
+
+                        freq = max_policy_perf * perf_ctl_scaling;
+                        max_policy_perf = DIV_ROUND_UP(freq, scaling);
+
+                        freq = min_policy_perf * perf_ctl_scaling;
+                        min_policy_perf = DIV_ROUND_UP(freq, scaling);
+                }
         }
 
         pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
```
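When HWP is active and uses a different scaling factor, the policy limits computed in PERF_CTL units are re-expressed as HWP performance levels by converting back through frequency and rounding up. A worked sketch with invented numbers (the scaling value matches the earlier calibration sketch):

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Illustrative: 800 MHz..4.2 GHz policy, PERF_CTL step of 100 MHz,
         * HWP step of 76745 kHz. */
        int policy_min = 800000, policy_max = 4200000;
        int perf_ctl_scaling = 100000, scaling = 76745;

        /* First in PERF_CTL units... */
        int max_policy_perf = policy_max / perf_ctl_scaling;   /* 42 */
        int min_policy_perf = policy_min / perf_ctl_scaling;   /* 8 */

        /* ...then re-expressed as HWP performance levels, rounding up. */
        if (scaling != perf_ctl_scaling) {
                max_policy_perf = DIV_ROUND_UP(max_policy_perf * perf_ctl_scaling,
                                               scaling);       /* 55 */
                min_policy_perf = DIV_ROUND_UP(min_policy_perf * perf_ctl_scaling,
                                               scaling);       /* 11 */
        }

        printf("min:%d max:%d (HWP levels)\n", min_policy_perf, max_policy_perf);
        return 0;
}
```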
```diff
@@ -2405,7 +2608,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
         cpu->min_perf_ratio = 0;
 
         /* cpuinfo and default policy values */
-        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
+        policy->cpuinfo.min_freq = cpu->pstate.min_freq;
 
         update_turbo_state();
         global.turbo_disabled_mf = global.turbo_disabled;
         policy->cpuinfo.max_freq = global.turbo_disabled ?
```
```diff
@@ -3135,6 +3338,8 @@ static int __init intel_pstate_init(void)
                 }
 
                 pr_info("HWP enabled\n");
+        } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
+                pr_warn("Problematic setup: Hybrid processor with disabled HWP\n");
         }
 
         return 0;
```
drivers/cpufreq/loongson2_cpufreq.c

```diff
@@ -16,7 +16,6 @@
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/err.h>
-#include <linux/sched.h>        /* set_cpus_allowed() */
 #include <linux/delay.h>
 #include <linux/platform_device.h>
```
drivers/cpufreq/sc520_freq.c

```diff
@@ -42,6 +42,7 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
         default:
                 pr_err("error: cpuctl register has unexpected value %02x\n",
                        clockspeed_reg);
+                fallthrough;
         case 0x01:
                 return 100000;
         case 0x02:
```
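The sc520_freq change adds an explicit fallthrough; annotation so the deliberate fall from the default: arm into case 0x01: no longer trips -Wimplicit-fallthrough. Outside the kernel, the same construct can be sketched as below; the kernel's fallthrough macro expands to __attribute__((__fallthrough__)) where supported, and the fallback branch here is an assumption for older compilers:

```c
#include <stdio.h>

#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0) /* assumed fallback, no annotation */
#endif

static unsigned int freq_for_reg(unsigned char clockspeed_reg)
{
        switch (clockspeed_reg) {
        default:
                fprintf(stderr, "unexpected value %02x, assuming 100 MHz\n",
                        clockspeed_reg);
                fallthrough;    /* deliberate: treat unknown values as 0x01 */
        case 0x01:
                return 100000;
        case 0x02:
                return 133000;
        }
}

int main(void)
{
        printf("%u kHz\n", freq_for_reg(0x7f));
        return 0;
}
```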
drivers/cpufreq/sh-cpufreq.c

```diff
@@ -23,7 +23,6 @@
 #include <linux/cpumask.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
-#include <linux/sched.h>        /* set_cpus_allowed() */
 #include <linux/clk.h>
 #include <linux/percpu.h>
 #include <linux/sh_clk.h>
```