Commit 5561f25b authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq'

Merge cpufreq updates for 5.17-rc1:

 - Add new P-state driver for AMD processors (Huang Rui).

 - Fix initialization of min and max frequency QoS requests in the
   cpufreq core (Rafael Wysocki).

 - Fix EPP handling on Alder Lake in intel_pstate (Srinivas Pandruvada).

 - Make intel_pstate update cpuinfo.max_freq when notified of HWP
   capabilities changes and drop a redundant function call from that
   driver (Rafael Wysocki).

 - Improve IRQ support in the Qcom cpufreq driver (Ard Biesheuvel,
   Stephen Boyd, Vladimir Zapolskiy).

 - Fix double devm_remap() in the Mediatek cpufreq driver (Hector Yuan).

 - Introduce thermal pressure helpers for cpufreq CPU cooling (Lukasz
   Luba).

 - Make cpufreq use default_groups in kobj_type (Greg Kroah-Hartman).

* pm-cpufreq: (32 commits)
  x86, sched: Fix undefined reference to init_freq_invariance_cppc() build error
  cpufreq: amd-pstate: Fix Kconfig dependencies for AMD P-State
  cpufreq: amd-pstate: Fix struct amd_cpudata kernel-doc comment
  MAINTAINERS: Add AMD P-State driver maintainer entry
  Documentation: amd-pstate: Add AMD P-State driver introduction
  cpufreq: amd-pstate: Add AMD P-State performance attributes
  cpufreq: amd-pstate: Add AMD P-State frequencies attributes
  cpufreq: amd-pstate: Add boost mode support for AMD P-State
  cpufreq: amd-pstate: Add trace for AMD P-State module
  cpufreq: amd-pstate: Introduce the support for the processors with shared memory solution
  cpufreq: amd-pstate: Add fast switch function for AMD P-State
  cpufreq: amd-pstate: Introduce a new AMD P-State driver to support future processors
  ACPI: CPPC: Add CPPC enable register function
  ACPI: CPPC: Check present CPUs for determining _CPC is valid
  ACPI: CPPC: Implement support for SystemIO registers
  x86/msr: Add AMD CPPC MSR definitions
  x86/cpufeatures: Add AMD Collaborative Processor Performance Control feature flag
  cpufreq: use default_groups in kobj_type
  cpufreq: mediatek-hw: Fix double devm_remap in hotplug case
  cpufreq: intel_pstate: Update cpuinfo.max_freq on HWP_CAP changes
  ...
parents 4ecc933b 6c4ab1b8
@@ -4,6 +4,8 @@
 Collaborative Processor Performance Control (CPPC)
 ==================================================

+.. _cppc_sysfs:
+
 CPPC
 ====
......
[collapsed diff not shown]
@@ -11,6 +11,7 @@ Working-State Power Management
    intel_idle
    cpufreq
    intel_pstate
+   amd-pstate
    cpufreq_drivers
    intel_epb
    intel-speed-select
@@ -993,6 +993,13 @@ S: Supported
 T: git https://gitlab.freedesktop.org/agd5f/linux.git
 F: drivers/gpu/drm/amd/pm/

+AMD PSTATE DRIVER
+M: Huang Rui <ray.huang@amd.com>
+L: linux-pm@vger.kernel.org
+S: Supported
+F: Documentation/admin-guide/pm/amd-pstate.rst
+F: drivers/cpufreq/amd-pstate*
+
 AMD PTDMA DRIVER
 M: Sanjay R Mehta <sanju.mehta@amd.com>
 L: dmaengine@vger.kernel.org
......
@@ -23,7 +23,7 @@
 /* Replace task scheduler's default thermal pressure API */
 #define arch_scale_thermal_pressure topology_get_thermal_pressure
-#define arch_set_thermal_pressure topology_set_thermal_pressure
+#define arch_update_thermal_pressure topology_update_thermal_pressure
 #else
......
@@ -32,7 +32,7 @@ void update_freq_counters_refs(void);
 /* Replace task scheduler's default thermal pressure API */
 #define arch_scale_thermal_pressure topology_get_thermal_pressure
-#define arch_set_thermal_pressure topology_set_thermal_pressure
+#define arch_update_thermal_pressure topology_update_thermal_pressure
 #include <asm-generic/topology.h>
......
@@ -315,6 +315,7 @@
 #define X86_FEATURE_AMD_SSBD    (13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD   (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO  (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
+#define X86_FEATURE_CPPC        (13*32+27) /* Collaborative Processor Performance Control */

 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM      (14*32+ 0) /* Digital Thermal Sensor */
......
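As an aside, a flag like X86_FEATURE_CPPC is normally consumed through boot_cpu_has(); a minimal sketch (the probe helper name is hypothetical):

    #include <linux/errno.h>
    #include <asm/cpufeature.h>     /* boot_cpu_has() */

    /* Hypothetical early check: bail out on CPUs without the CPPC MSR interface. */
    static int example_cppc_probe(void)
    {
        if (!boot_cpu_has(X86_FEATURE_CPPC))
            return -ENODEV;
        return 0;
    }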
@@ -486,6 +486,23 @@
 #define MSR_AMD64_VIRT_SPEC_CTRL        0xc001011f

+/* AMD Collaborative Processor Performance Control MSRs */
+#define MSR_AMD_CPPC_CAP1               0xc00102b0
+#define MSR_AMD_CPPC_ENABLE             0xc00102b1
+#define MSR_AMD_CPPC_CAP2               0xc00102b2
+#define MSR_AMD_CPPC_REQ                0xc00102b3
+#define MSR_AMD_CPPC_STATUS             0xc00102b4
+
+#define AMD_CPPC_LOWEST_PERF(x)         (((x) >> 0) & 0xff)
+#define AMD_CPPC_LOWNONLIN_PERF(x)      (((x) >> 8) & 0xff)
+#define AMD_CPPC_NOMINAL_PERF(x)        (((x) >> 16) & 0xff)
+#define AMD_CPPC_HIGHEST_PERF(x)        (((x) >> 24) & 0xff)
+
+#define AMD_CPPC_MAX_PERF(x)            (((x) & 0xff) << 0)
+#define AMD_CPPC_MIN_PERF(x)            (((x) & 0xff) << 8)
+#define AMD_CPPC_DES_PERF(x)            (((x) & 0xff) << 16)
+#define AMD_CPPC_ENERGY_PERF_PREF(x)    (((x) & 0xff) << 24)
+
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF                 0xc00000e9
......
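To see how these bitfield helpers compose, here is a hedged sketch that decodes the CAP1 performance range and requests it back, letting hardware pick the operating point; variable names are illustrative, and a real driver would use the per-CPU wrmsrl_on_cpu() variants and also program EPP:

    u64 cap1, req;

    rdmsrl(MSR_AMD_CPPC_CAP1, cap1);

    /* CAP1 layout: lowest 7:0, lowest-nonlinear 15:8, nominal 23:16, highest 31:24 */
    req = AMD_CPPC_MIN_PERF(AMD_CPPC_LOWEST_PERF(cap1)) |
          AMD_CPPC_MAX_PERF(AMD_CPPC_HIGHEST_PERF(cap1)) |
          AMD_CPPC_DES_PERF(0);         /* 0 = let the platform choose */
    wrmsrl(MSR_AMD_CPPC_REQ, req);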
@@ -221,7 +221,7 @@ static inline void arch_set_max_freq_ratio(bool turbo_disabled)
 }
 #endif

-#ifdef CONFIG_ACPI_CPPC_LIB
+#if defined(CONFIG_ACPI_CPPC_LIB) && defined(CONFIG_SMP)
 void init_freq_invariance_cppc(void);
 #define init_freq_invariance_cppc init_freq_invariance_cppc
 #endif
......
@@ -118,6 +118,8 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
  */
 #define NUM_RETRIES 500ULL

+#define OVER_16BTS_MASK ~0xFFFFULL
+
 #define define_one_cppc_ro(_name)       \
 static struct kobj_attribute _name =    \
 __ATTR(_name, 0444, show_##_name, NULL)
@@ -411,7 +413,7 @@ bool acpi_cpc_valid(void)
    struct cpc_desc *cpc_ptr;
    int cpu;

-   for_each_possible_cpu(cpu) {
+   for_each_present_cpu(cpu) {
        cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
        if (!cpc_ptr)
            return false;
@@ -746,9 +748,26 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                    goto out_free;
                cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
            }
+       } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+           if (gas_t->access_width < 1 || gas_t->access_width > 3) {
+               /*
+                * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
+                * SystemIO doesn't implement 64-bit
+                * registers.
+                */
+               pr_debug("Invalid access width %d for SystemIO register\n",
+                   gas_t->access_width);
+               goto out_free;
+           }
+
+           if (gas_t->address & OVER_16BTS_MASK) {
+               /* SystemIO registers use 16-bit integer addresses */
+               pr_debug("Invalid IO port %llu for SystemIO register\n",
+                   gas_t->address);
+               goto out_free;
+           }
        } else {
            if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
-               /* Support only PCC ,SYS MEM and FFH type regs */
+               /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
                pr_debug("Unsupported register type: %d\n", gas_t->space_id);
                goto out_free;
            }
@@ -923,7 +942,21 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
    }

    *val = 0;
-   if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+
+   if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+       u32 width = 8 << (reg->access_width - 1);
+       acpi_status status;
+
+       status = acpi_os_read_port((acpi_io_address)reg->address,
+                      (u32 *)val, width);
+       if (ACPI_FAILURE(status)) {
+           pr_debug("Error: Failed to read SystemIO port %llx\n",
+                reg->address);
+           return -EFAULT;
+       }
+
+       return 0;
+   } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
        vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
    else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
        vaddr = reg_res->sys_mem_vaddr;
@@ -962,7 +995,20 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
    int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
    struct cpc_reg *reg = &reg_res->cpc_entry.reg;

-   if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+   if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+       u32 width = 8 << (reg->access_width - 1);
+       acpi_status status;
+
+       status = acpi_os_write_port((acpi_io_address)reg->address,
+                       (u32)val, width);
+       if (ACPI_FAILURE(status)) {
+           pr_debug("Error: Failed to write SystemIO port %llx\n",
+                reg->address);
+           return -EFAULT;
+       }
+
+       return 0;
+   } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
        vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
    else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
        vaddr = reg_res->sys_mem_vaddr;
@@ -1229,6 +1275,51 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 }
 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

+/**
+ * cppc_set_enable - Set to enable CPPC on the processor by writing the
+ * Continuous Performance Control package EnableRegister field.
+ * @cpu: CPU for which to enable CPPC register.
+ * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
+ *
+ * Return: 0 for success, -ERRNO or -EIO otherwise.
+ */
+int cppc_set_enable(int cpu, bool enable)
+{
+   int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+   struct cpc_register_resource *enable_reg;
+   struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+   struct cppc_pcc_data *pcc_ss_data = NULL;
+   int ret = -EINVAL;
+
+   if (!cpc_desc) {
+       pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+       return -EINVAL;
+   }
+
+   enable_reg = &cpc_desc->cpc_regs[ENABLE];
+
+   if (CPC_IN_PCC(enable_reg)) {
+
+       if (pcc_ss_id < 0)
+           return -EIO;
+
+       ret = cpc_write(cpu, enable_reg, enable);
+       if (ret)
+           return ret;
+
+       pcc_ss_data = pcc_data[pcc_ss_id];
+
+       down_write(&pcc_ss_data->pcc_lock);
+       /* after writing CPC, transfer the ownership of PCC to platfrom */
+       ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+       up_write(&pcc_ss_data->pcc_lock);
+       return ret;
+   }
+
+   return cpc_write(cpu, enable_reg, enable);
+}
+EXPORT_SYMBOL_GPL(cppc_set_enable);
+
 /**
  * cppc_set_perf - Set a CPU's performance controls.
  * @cpu: CPU for which to set performance controls.
......
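Two details above are easy to miss: cpc_read()/cpc_write() turn the ACPI access_width encoding (1/2/3) into an 8/16/32-bit port access via 8 << (access_width - 1), and cppc_set_enable() is the call a CPPC-based driver makes to flip the _CPC EnableRegister. A hedged usage sketch (the caller name is illustrative):

    #include <linux/cpufreq.h>
    #include <acpi/cppc_acpi.h>

    /* Illustrative: enable CPPC on every CPU of a policy during driver init. */
    static int example_enable_cppc(struct cpufreq_policy *policy)
    {
        unsigned int cpu;
        int ret;

        for_each_cpu(cpu, policy->cpus) {
            ret = cppc_set_enable(cpu, true);
            if (ret)        /* -EINVAL without _CPC, -EIO on PCC failure */
                return ret;
        }
        return 0;
    }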
@@ -22,6 +22,7 @@
 static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
 static struct cpumask scale_freq_counters_mask;
 static bool scale_freq_invariant;
+static DEFINE_PER_CPU(u32, freq_factor) = 1;

 static bool supports_scale_freq_counters(const struct cpumask *cpus)
 {
@@ -155,15 +156,49 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)

 DEFINE_PER_CPU(unsigned long, thermal_pressure);

-void topology_set_thermal_pressure(const struct cpumask *cpus,
-                  unsigned long th_pressure)
+/**
+ * topology_update_thermal_pressure() - Update thermal pressure for CPUs
+ * @cpus        : The related CPUs for which capacity has been reduced
+ * @capped_freq : The maximum allowed frequency that CPUs can run at
+ *
+ * Update the value of thermal pressure for all @cpus in the mask. The
+ * cpumask should include all (online+offline) affected CPUs, to avoid
+ * operating on stale data when hot-plug is used for some CPUs. The
+ * @capped_freq reflects the currently allowed max CPUs frequency due to
+ * thermal capping. It might be also a boost frequency value, which is bigger
+ * than the internal 'freq_factor' max frequency. In such case the pressure
+ * value should simply be removed, since this is an indication that there is
+ * no thermal throttling. The @capped_freq must be provided in kHz.
+ */
+void topology_update_thermal_pressure(const struct cpumask *cpus,
+                     unsigned long capped_freq)
 {
+   unsigned long max_capacity, capacity, th_pressure;
+   u32 max_freq;
    int cpu;

+   cpu = cpumask_first(cpus);
+   max_capacity = arch_scale_cpu_capacity(cpu);
+   max_freq = per_cpu(freq_factor, cpu);
+
+   /* Convert to MHz scale which is used in 'freq_factor' */
+   capped_freq /= 1000;
+
+   /*
+    * Handle properly the boost frequencies, which should simply clean
+    * the thermal pressure value.
+    */
+   if (max_freq <= capped_freq)
+       capacity = max_capacity;
+   else
+       capacity = mult_frac(max_capacity, capped_freq, max_freq);
+
+   th_pressure = max_capacity - capacity;
+
    for_each_cpu(cpu, cpus)
        WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
 }
-EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);
+EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);

 static ssize_t cpu_capacity_show(struct device *dev,
                  struct device_attribute *attr,
@@ -217,7 +252,6 @@ static void update_topology_flags_workfn(struct work_struct *work)
    update_topology = 0;
 }

-static DEFINE_PER_CPU(u32, freq_factor) = 1;
 static u32 *raw_capacity;

 static int free_raw_capacity(void)
......
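To make the arithmetic concrete, assume a CPU with capacity 1024 and a freq_factor of 2000 (MHz) that gets capped at 1500000 kHz; the function computes mult_frac(1024, 1500, 2000) = 768 and stores a pressure of 256, while any cap at or above 2000 MHz (e.g. a boost value) clears the pressure to 0. The same steps, in isolation and with these assumed numbers:

    unsigned long max_capacity = 1024;          /* arch_scale_cpu_capacity(cpu) */
    u32 max_freq = 2000;                        /* per-CPU freq_factor, in MHz */
    unsigned long capped_freq = 1500000 / 1000; /* kHz -> MHz */

    unsigned long capacity = (max_freq <= capped_freq) ? max_capacity :
                             mult_frac(max_capacity, capped_freq, max_freq);
    unsigned long th_pressure = max_capacity - capacity;  /* 1024 - 768 = 256 */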
@@ -34,6 +34,23 @@ config X86_PCC_CPUFREQ

      If in doubt, say N.

+config X86_AMD_PSTATE
+   tristate "AMD Processor P-State driver"
+   depends on X86 && ACPI
+   select ACPI_PROCESSOR
+   select ACPI_CPPC_LIB if X86_64
+   select CPU_FREQ_GOV_SCHEDUTIL if SMP
+   help
+     This driver adds a CPUFreq driver which utilizes a fine grain
+     processor performance frequency control range instead of legacy
+     performance levels. _CPC needs to be present in the ACPI tables
+     of the system.
+
+     For details, take a look at:
+     <file:Documentation/admin-guide/pm/amd-pstate.rst>.
+
+     If in doubt, say N.
+
 config X86_ACPI_CPUFREQ
    tristate "ACPI Processor P-States driver"
    depends on ACPI_PROCESSOR
......
@@ -17,6 +17,10 @@ obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
 obj-$(CONFIG_CPUFREQ_DT)        += cpufreq-dt.o
 obj-$(CONFIG_CPUFREQ_DT_PLATDEV)    += cpufreq-dt-platdev.o

+# Traces
+CFLAGS_amd-pstate-trace.o       := -I$(src)
+amd_pstate-y                    := amd-pstate.o amd-pstate-trace.o
+
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
@@ -25,6 +29,7 @@ obj-$(CONFIG_CPUFREQ_DT_PLATDEV)   += cpufreq-dt-platdev.o
 # speedstep-* is preferred over p4-clockmod.

 obj-$(CONFIG_X86_ACPI_CPUFREQ)  += acpi-cpufreq.o
+obj-$(CONFIG_X86_AMD_PSTATE)    += amd_pstate.o
 obj-$(CONFIG_X86_POWERNOW_K8)   += powernow-k8.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)   += pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)   += powernow-k6.o
......
#define CREATE_TRACE_POINTS
#include "amd-pstate-trace.h"
/* SPDX-License-Identifier: GPL-2.0 */
/*
* amd-pstate-trace.h - AMD Processor P-state Frequency Driver Tracer
*
* Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
*
* Author: Huang Rui <ray.huang@amd.com>
*/
#if !defined(_AMD_PSTATE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _AMD_PSTATE_TRACE_H
#include <linux/cpufreq.h>
#include <linux/tracepoint.h>
#include <linux/trace_events.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM amd_cpu
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE amd-pstate-trace
#define TPS(x) tracepoint_string(x)
TRACE_EVENT(amd_pstate_perf,

    TP_PROTO(unsigned long min_perf,
         unsigned long target_perf,
         unsigned long capacity,
         unsigned int cpu_id,
         bool changed,
         bool fast_switch
         ),

    TP_ARGS(min_perf,
        target_perf,
        capacity,
        cpu_id,
        changed,
        fast_switch
        ),

    TP_STRUCT__entry(
        __field(unsigned long, min_perf)
        __field(unsigned long, target_perf)
        __field(unsigned long, capacity)
        __field(unsigned int, cpu_id)
        __field(bool, changed)
        __field(bool, fast_switch)
        ),

    TP_fast_assign(
        __entry->min_perf = min_perf;
        __entry->target_perf = target_perf;
        __entry->capacity = capacity;
        __entry->cpu_id = cpu_id;
        __entry->changed = changed;
        __entry->fast_switch = fast_switch;
        ),

    TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu cpu_id=%u changed=%s fast_switch=%s",
          (unsigned long)__entry->min_perf,
          (unsigned long)__entry->target_perf,
          (unsigned long)__entry->capacity,
          (unsigned int)__entry->cpu_id,
          (__entry->changed) ? "true" : "false",
          (__entry->fast_switch) ? "true" : "false"
          )
);
#endif /* _AMD_PSTATE_TRACE_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
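With CREATE_TRACE_POINTS defined in amd-pstate-trace.c, this header generates a trace_amd_pstate_perf() call (and a trace_amd_pstate_perf_enabled() predicate) for the driver to invoke; an illustrative call site, with the argument variables assumed from the caller's context:

    /* Emit one amd_pstate_perf event; the guard costs nothing when tracing is off. */
    if (trace_amd_pstate_perf_enabled())
        trace_amd_pstate_perf(min_perf, des_perf, max_perf,
                              cpudata->cpu, changed, fast_switch);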
[collapsed diff not shown]
@@ -924,7 +924,7 @@ cpufreq_freq_attr_rw(scaling_max_freq);
 cpufreq_freq_attr_rw(scaling_governor);
 cpufreq_freq_attr_rw(scaling_setspeed);

-static struct attribute *default_attrs[] = {
+static struct attribute *cpufreq_attrs[] = {
    &cpuinfo_min_freq.attr,
    &cpuinfo_max_freq.attr,
    &cpuinfo_transition_latency.attr,
@@ -938,6 +938,7 @@ static struct attribute *default_attrs[] = {
    &scaling_setspeed.attr,
    NULL
 };
+ATTRIBUTE_GROUPS(cpufreq);

 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 #define to_attr(a) container_of(a, struct freq_attr, attr)
@@ -1000,7 +1001,7 @@ static const struct sysfs_ops sysfs_ops = {

 static struct kobj_type ktype_cpufreq = {
    .sysfs_ops      = &sysfs_ops,
-   .default_attrs  = default_attrs,
+   .default_groups = cpufreq_groups,
    .release        = cpufreq_sysfs_release,
 };

@@ -1403,7 +1404,7 @@ static int cpufreq_online(unsigned int cpu)

    ret = freq_qos_add_request(&policy->constraints,
                   policy->min_freq_req, FREQ_QOS_MIN,
-                  policy->min);
+                  FREQ_QOS_MIN_DEFAULT_VALUE);
    if (ret < 0) {
        /*
         * So we don't call freq_qos_remove_request() for an
@@ -1423,7 +1424,7 @@ static int cpufreq_online(unsigned int cpu)

    ret = freq_qos_add_request(&policy->constraints,
                   policy->max_freq_req, FREQ_QOS_MAX,
-                  policy->max);
+                  FREQ_QOS_MAX_DEFAULT_VALUE);
    if (ret < 0) {
        policy->max_freq_req = NULL;
        goto out_destroy_policy;
......
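The same default_attrs -> default_groups conversion repeats in the conservative and ondemand governors below. The idiom: ATTRIBUTE_GROUPS(foo) requires an array literally named foo_attrs[] and generates foo_group plus a NULL-terminated foo_groups for kobj_type. A self-contained sketch with hypothetical names:

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
                                char *buf)
    {
        return sysfs_emit(buf, "42\n");
    }
    static struct kobj_attribute example_attr = __ATTR_RO(example);

    static struct attribute *demo_attrs[] = {   /* the "_attrs" suffix is required */
        &example_attr.attr,
        NULL
    };
    ATTRIBUTE_GROUPS(demo);                     /* emits demo_group and demo_groups */

    static struct kobj_type demo_ktype = {
        .default_groups = demo_groups,          /* replaces the removed .default_attrs */
    };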
@@ -257,7 +257,7 @@ gov_attr_rw(ignore_nice_load);
 gov_attr_rw(down_threshold);
 gov_attr_rw(freq_step);

-static struct attribute *cs_attributes[] = {
+static struct attribute *cs_attrs[] = {
    &sampling_rate.attr,
    &sampling_down_factor.attr,
    &up_threshold.attr,
@@ -266,6 +266,7 @@ static struct attribute *cs_attributes[] = {
    &freq_step.attr,
    NULL
 };
+ATTRIBUTE_GROUPS(cs);

 /************************** sysfs end ************************/

@@ -315,7 +316,7 @@ static void cs_start(struct cpufreq_policy *policy)

 static struct dbs_governor cs_governor = {
    .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
-   .kobj_type = { .default_attrs = cs_attributes },
+   .kobj_type = { .default_groups = cs_groups },
    .gov_dbs_update = cs_dbs_update,
    .alloc = cs_alloc,
    .free = cs_free,
......
@@ -328,7 +328,7 @@ gov_attr_rw(sampling_down_factor);
 gov_attr_rw(ignore_nice_load);
 gov_attr_rw(powersave_bias);

-static struct attribute *od_attributes[] = {
+static struct attribute *od_attrs[] = {
    &sampling_rate.attr,
    &up_threshold.attr,
    &sampling_down_factor.attr,
@@ -337,6 +337,7 @@ static struct attribute *od_attributes[] = {
    &io_is_busy.attr,
    NULL
 };
+ATTRIBUTE_GROUPS(od);

 /************************** sysfs end ************************/

@@ -401,7 +402,7 @@ static struct od_ops od_ops = {

 static struct dbs_governor od_dbs_gov = {
    .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
-   .kobj_type = { .default_attrs = od_attributes },
+   .kobj_type = { .default_groups = od_groups },
    .gov_dbs_update = od_dbs_update,
    .alloc = od_alloc,
    .free = od_free,
......
@@ -664,19 +664,29 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
  *  3   balance_power
  *  4   power
  */
+enum energy_perf_value_index {
+   EPP_INDEX_DEFAULT = 0,
+   EPP_INDEX_PERFORMANCE,
+   EPP_INDEX_BALANCE_PERFORMANCE,
+   EPP_INDEX_BALANCE_POWERSAVE,
+   EPP_INDEX_POWERSAVE,
+};
+
 static const char * const energy_perf_strings[] = {
-   "default",
-   "performance",
-   "balance_performance",
-   "balance_power",
-   "power",
+   [EPP_INDEX_DEFAULT] = "default",
+   [EPP_INDEX_PERFORMANCE] = "performance",
+   [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+   [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+   [EPP_INDEX_POWERSAVE] = "power",
    NULL
 };
-static const unsigned int epp_values[] = {
-   HWP_EPP_PERFORMANCE,
-   HWP_EPP_BALANCE_PERFORMANCE,
-   HWP_EPP_BALANCE_POWERSAVE,
-   HWP_EPP_POWERSAVE
+
+static unsigned int epp_values[] = {
+   [EPP_INDEX_DEFAULT] = 0, /* Unused index */
+   [EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
+   [EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
+   [EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
+   [EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
 };

 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
@@ -690,14 +700,14 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
        return epp;

    if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
-       if (epp == HWP_EPP_PERFORMANCE)
-           return 1;
-       if (epp == HWP_EPP_BALANCE_PERFORMANCE)
-           return 2;
-       if (epp == HWP_EPP_BALANCE_POWERSAVE)
-           return 3;
-       if (epp == HWP_EPP_POWERSAVE)
-           return 4;
+       if (epp == epp_values[EPP_INDEX_PERFORMANCE])
+           return EPP_INDEX_PERFORMANCE;
+       if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
+           return EPP_INDEX_BALANCE_PERFORMANCE;
+       if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
+           return EPP_INDEX_BALANCE_POWERSAVE;
+       if (epp == epp_values[EPP_INDEX_POWERSAVE])
+           return EPP_INDEX_POWERSAVE;
        *raw_epp = epp;
        return 0;
    } else if (boot_cpu_has(X86_FEATURE_EPB)) {
@@ -757,7 +767,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
        if (use_raw)
            epp = raw_epp;
        else if (epp == -EINVAL)
-           epp = epp_values[pref_index - 1];
+           epp = epp_values[pref_index];

        /*
         * To avoid confusion, refuse to set EPP to any values different
@@ -843,7 +853,7 @@ static ssize_t store_energy_performance_preference(
         * upfront.
         */
        if (!raw)
-           epp = ret ? epp_values[ret - 1] : cpu->epp_default;
+           epp = ret ? epp_values[ret] : cpu->epp_default;

        if (cpu->epp_cached != epp) {
            int err;
@@ -1124,19 +1134,22 @@ static void intel_pstate_update_policies(void)
        cpufreq_update_policy(cpu);
 }

+static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
+                      struct cpufreq_policy *policy)
+{
+   policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+           cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+   refresh_frequency_limits(policy);
+}
+
 static void intel_pstate_update_max_freq(unsigned int cpu)
 {
    struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-   struct cpudata *cpudata;

    if (!policy)
        return;

-   cpudata = all_cpu_data[cpu];
-   policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
-           cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
-
-   refresh_frequency_limits(policy);
+   __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);

    cpufreq_cpu_release(policy);
 }
@@ -1584,8 +1597,15 @@ static void intel_pstate_notify_work(struct work_struct *work)
 {
    struct cpudata *cpudata =
        container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+   struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
+
+   if (policy) {
+       intel_pstate_get_hwp_cap(cpudata);
+       __intel_pstate_update_max_freq(cpudata, policy);
+
+       cpufreq_cpu_release(policy);
+   }

-   cpufreq_update_policy(cpudata->cpu);
    wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }

@@ -1679,10 +1699,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
    wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

    wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
-   if (cpudata->epp_default == -EINVAL)
-       cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);

    intel_pstate_enable_hwp_interrupt(cpudata);
+
+   if (cpudata->epp_default >= 0)
+       return;
+
+   if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) {
+       cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+   } else {
+       cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
+       intel_pstate_set_epp(cpudata, cpudata->epp_default);
+   }
 }

 static int atom_get_min_pstate(void)
@@ -2486,10 +2514,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
     * HWP needs some special consideration, because HWP_REQUEST uses
     * abstract values to represent performance rather than pure ratios.
     */
-   if (hwp_active) {
-       intel_pstate_get_hwp_cap(cpu);
-
-       if (cpu->pstate.scaling != perf_ctl_scaling) {
+   if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
        int scaling = cpu->pstate.scaling;
        int freq;
@@ -2498,7 +2523,6 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
        freq = min_policy_perf * perf_ctl_scaling;
        min_policy_perf = DIV_ROUND_UP(freq, scaling);
    }
-   }

    pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
         cpu->cpu, min_policy_perf, max_policy_perf);
@@ -3349,6 +3373,16 @@ static bool intel_pstate_hwp_is_enabled(void)
    return !!(value & 0x1);
 }

+static const struct x86_cpu_id intel_epp_balance_perf[] = {
+   /*
+    * Set EPP value as 102, this is the max suggested EPP
+    * which can result in one core turbo frequency for
+    * AlderLake Mobile CPUs.
+    */
+   X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
+   {}
+};
+
 static int __init intel_pstate_init(void)
 {
    static struct cpudata **_all_cpu_data;
@@ -3438,6 +3472,13 @@ static int __init intel_pstate_init(void)

    intel_pstate_sysfs_expose_params();

+   if (hwp_active) {
+       const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+
+       if (id)
+           epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+   }
+
    mutex_lock(&intel_pstate_driver_lock);
    rc = intel_pstate_register_driver(default_driver);
    mutex_unlock(&intel_pstate_driver_lock);
......
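The Alder Lake EPP tweak at the end uses the stock x86 match-table idiom: carry a per-model value in driver_data and apply it when the table matches at init time. A generic sketch of the idiom (table contents and names illustrative):

    #include <linux/printk.h>
    #include <asm/cpu_device_id.h>
    #include <asm/intel-family.h>

    static const struct x86_cpu_id example_epp_table[] = {
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),   /* model -> EPP value */
        {}
    };

    static void example_apply_epp_quirk(void)
    {
        const struct x86_cpu_id *id = x86_match_cpu(example_epp_table);

        if (id)         /* driver_data is a kernel_ulong_t */
            pr_info("EPP override for this model: %lu\n",
                    (unsigned long)id->driver_data);
    }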
@@ -36,6 +36,8 @@ enum {
 struct mtk_cpufreq_data {
    struct cpufreq_frequency_table *table;
    void __iomem *reg_bases[REG_ARRAY_SIZE];
+   struct resource *res;
+   void __iomem *base;
    int nr_opp;
 };

@@ -156,6 +158,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
 {
    struct mtk_cpufreq_data *data;
    struct device *dev = &pdev->dev;
+   struct resource *res;
    void __iomem *base;
    int ret, i;
    int index;
@@ -170,9 +173,26 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
    if (index < 0)
        return index;

-   base = devm_platform_ioremap_resource(pdev, index);
-   if (IS_ERR(base))
-       return PTR_ERR(base);
+   res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+   if (!res) {
+       dev_err(dev, "failed to get mem resource %d\n", index);
+       return -ENODEV;
+   }
+
+   if (!request_mem_region(res->start, resource_size(res), res->name)) {
+       dev_err(dev, "failed to request resource %pR\n", res);
+       return -EBUSY;
+   }
+
+   base = ioremap(res->start, resource_size(res));
+   if (!base) {
+       dev_err(dev, "failed to map resource %pR\n", res);
+       ret = -ENOMEM;
+       goto release_region;
+   }
+
+   data->base = base;
+   data->res = res;

    for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
        data->reg_bases[i] = base + offsets[i];
@@ -187,6 +207,9 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
    policy->driver_data = data;

    return 0;
+release_region:
+   release_mem_region(res->start, resource_size(res));
+   return ret;
 }

 static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
@@ -233,9 +256,13 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
 {
    struct mtk_cpufreq_data *data = policy->driver_data;
+   struct resource *res = data->res;
+   void __iomem *base = data->base;

    /* HW should be in paused state now */
    writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
+   iounmap(base);
+   release_mem_region(res->start, resource_size(res));

    return 0;
 }
......
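Why the driver stops using devm here: devm_platform_ioremap_resource() ties the mapping to the platform device's lifetime, but this init/exit pair runs per cpufreq policy, so a CPU offline/online cycle re-enters init while the old mapping is still live. The manual pairing rule, reduced to a sketch (names illustrative, error handling trimmed):

    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static void __iomem *example_map(struct platform_device *pdev, int index,
                                     struct resource **resp)
    {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, index);

        if (!res || !request_mem_region(res->start, resource_size(res), res->name))
            return NULL;

        *resp = res;
        return ioremap(res->start, resource_size(res));
    }

    static void example_unmap(void __iomem *base, struct resource *res)
    {
        iounmap(base);                                      /* pairs with ioremap() */
        release_mem_region(res->start, resource_size(res)); /* pairs with request */
    }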
@@ -46,6 +46,7 @@ struct qcom_cpufreq_data {
     */
    struct mutex throttle_lock;
    int throttle_irq;
+   char irq_name[15];
    bool cancel_throttle;
    struct delayed_work throttle_work;
    struct cpufreq_policy *policy;
@@ -275,10 +276,10 @@ static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)

 static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
 {
-   unsigned long max_capacity, capacity, freq_hz, throttled_freq;
    struct cpufreq_policy *policy = data->policy;
    int cpu = cpumask_first(policy->cpus);
    struct device *dev = get_cpu_device(cpu);
+   unsigned long freq_hz, throttled_freq;
    struct dev_pm_opp *opp;
    unsigned int freq;

@@ -295,16 +296,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)

    throttled_freq = freq_hz / HZ_PER_KHZ;

-   /* Update thermal pressure */
-
-   max_capacity = arch_scale_cpu_capacity(cpu);
-   capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq);
-
-   /* Don't pass boost capacity to scheduler */
-   if (capacity > max_capacity)
-       capacity = max_capacity;
-
-   arch_set_thermal_pressure(policy->cpus, max_capacity - capacity);
+   /* Update thermal pressure (the boost frequencies are accepted) */
+   arch_update_thermal_pressure(policy->related_cpus, throttled_freq);

    /*
     * In the unlikely case policy is unregistered do not enable
@@ -342,9 +335,9 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)

    /* Disable interrupt and enable polling */
    disable_irq_nosync(c_data->throttle_irq);
-   qcom_lmh_dcvs_notify(c_data);
+   schedule_delayed_work(&c_data->throttle_work, 0);

-   return 0;
+   return IRQ_HANDLED;
 }

 static const struct qcom_cpufreq_soc_data qcom_soc_data = {
@@ -375,16 +368,17 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
 {
    struct qcom_cpufreq_data *data = policy->driver_data;
    struct platform_device *pdev = cpufreq_get_driver_data();
-   char irq_name[15];
    int ret;

    /*
     * Look for LMh interrupt. If no interrupt line is specified /
     * if there is an error, allow cpufreq to be enabled as usual.
     */
-   data->throttle_irq = platform_get_irq(pdev, index);
-   if (data->throttle_irq <= 0)
-       return data->throttle_irq == -EPROBE_DEFER ? -EPROBE_DEFER : 0;
+   data->throttle_irq = platform_get_irq_optional(pdev, index);
+   if (data->throttle_irq == -ENXIO)
+       return 0;
+   if (data->throttle_irq < 0)
+       return data->throttle_irq;

    data->cancel_throttle = false;
    data->policy = policy;
@@ -392,14 +386,19 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
    mutex_init(&data->throttle_lock);
    INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

-   snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", policy->cpu);
+   snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
    ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
-                  IRQF_ONESHOT, irq_name, data);
+                  IRQF_ONESHOT, data->irq_name, data);
    if (ret) {
-       dev_err(&pdev->dev, "Error registering %s: %d\n", irq_name, ret);
+       dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
        return 0;
    }

+   ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+   if (ret)
+       dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
+           data->irq_name, data->throttle_irq);
+
    return 0;
 }
......
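Moving irq_name into the per-policy struct is not cosmetic: request_threaded_irq() keeps the name pointer for as long as the IRQ stays requested (it is what /proc/interrupts prints), so a stack buffer would dangle once the init function returns. The safe pattern, reduced to a sketch with illustrative names:

    #include <linux/interrupt.h>

    struct example_ctx {
        int irq;
        char irq_name[15];      /* must outlive the request_threaded_irq() call */
    };

    static int example_request(struct example_ctx *ctx, unsigned int cpu,
                               irq_handler_t thread_fn)
    {
        snprintf(ctx->irq_name, sizeof(ctx->irq_name), "dcvsh-irq-%u", cpu);
        /* The kernel stores this pointer; the string is not copied. */
        return request_threaded_irq(ctx->irq, NULL, thread_fn,
                                    IRQF_ONESHOT, ctx->irq_name, ctx);
    }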
@@ -462,7 +462,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
    struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
    struct cpumask *cpus;
    unsigned int frequency;
-   unsigned long max_capacity, capacity;
    int ret;

    /* Request state should be less than max_level */
@@ -479,10 +478,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
    if (ret >= 0) {
        cpufreq_cdev->cpufreq_state = state;
        cpus = cpufreq_cdev->policy->related_cpus;
-       max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
-       capacity = frequency * max_capacity;
-       capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
-       arch_set_thermal_pressure(cpus, max_capacity - capacity);
+       arch_update_thermal_pressure(cpus, frequency);
        ret = 0;
    }
......
@@ -138,6 +138,7 @@ extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
 extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
 extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
 extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
+extern int cppc_set_enable(int cpu, bool enable);
 extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
 extern bool acpi_cpc_valid(void);
 extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
@@ -162,6 +163,10 @@ static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 {
    return -ENOTSUPP;
 }
+static inline int cppc_set_enable(int cpu, bool enable)
+{
+   return -ENOTSUPP;
+}
 static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
 {
    return -ENOTSUPP;
......
@@ -56,8 +56,8 @@ static inline unsigned long topology_get_thermal_pressure(int cpu)
    return per_cpu(thermal_pressure, cpu);
 }

-void topology_set_thermal_pressure(const struct cpumask *cpus,
-                  unsigned long th_pressure);
+void topology_update_thermal_pressure(const struct cpumask *cpus,
+                     unsigned long capped_freq);

 struct cpu_topology {
    int thread_id;
......
@@ -266,10 +266,10 @@ unsigned long arch_scale_thermal_pressure(int cpu)
 }
 #endif

-#ifndef arch_set_thermal_pressure
+#ifndef arch_update_thermal_pressure
 static __always_inline
-void arch_set_thermal_pressure(const struct cpumask *cpus,
-                  unsigned long th_pressure)
+void arch_update_thermal_pressure(const struct cpumask *cpus,
+                 unsigned long capped_frequency)
 { }
 #endif
......
@@ -550,7 +550,7 @@ config SCHED_THERMAL_PRESSURE
      i.e. put less load on throttled CPUs than on non/less throttled ones.

      This requires the architecture to implement
-     arch_set_thermal_pressure() and arch_scale_thermal_pressure().
+     arch_update_thermal_pressure() and arch_scale_thermal_pressure().

 config BSD_PROCESS_ACCT
    bool "BSD Process Accounting"
......