Commit 6a8ccb1d authored by Rafael J. Wysocki

Merge branches 'acpi-processor' and 'acpi-cppc'

* acpi-processor:
  ACPI / sleep: move acpi_processor_sleep to sleep.c
  ACPI / processor : add support for ACPI0010 processor container
  ACPI / processor_idle: replace PREFIX with pr_fmt

* acpi-cppc:
  ACPI / CPPC: use MRTT/MPAR to decide if/when a req can be sent
  ACPI / CPPC: replace writeX/readX to PCC with relaxed version
  mailbox: pcc: optimized pcc_send_data
  ACPI / CPPC: optimized cpc_read and cpc_write
  ACPI / CPPC: Optimize PCC Read Write operations
@@ -514,7 +514,24 @@ static struct acpi_scan_handler processor_handler = {
 	},
 };
 
+static int acpi_processor_container_attach(struct acpi_device *dev,
+					   const struct acpi_device_id *id)
+{
+	return 1;
+}
+
+static const struct acpi_device_id processor_container_ids[] = {
+	{ ACPI_PROCESSOR_CONTAINER_HID, },
+	{ }
+};
+
+static struct acpi_scan_handler processor_container_handler = {
+	.ids = processor_container_ids,
+	.attach = acpi_processor_container_attach,
+};
+
 void __init acpi_processor_init(void)
 {
 	acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
+	acpi_scan_add_handler(&processor_container_handler);
 }
@@ -39,6 +39,7 @@
 #include <linux/cpufreq.h>
 #include <linux/delay.h>
+#include <linux/ktime.h>
 
 #include <acpi/cppc_acpi.h>
 
 /*
@@ -63,58 +64,140 @@ static struct mbox_chan *pcc_channel;
 static void __iomem *pcc_comm_addr;
 static u64 comm_base_addr;
 static int pcc_subspace_idx = -1;
-static u16 pcc_cmd_delay;
 static bool pcc_channel_acquired;
+static ktime_t deadline;
+static unsigned int pcc_mpar, pcc_mrtt;
+
+/* pcc mapped address + header size + offset within PCC subspace */
+#define GET_PCC_VADDR(offs)	(pcc_comm_addr + 0x8 + (offs))
 
 /*
  * Arbitrary Retries in case the remote processor is slow to respond
- * to PCC commands.
+ * to PCC commands. Keeping it high enough to cover emulators where
+ * the processors run painfully slow.
  */
 #define NUM_RETRIES 500
 
+static int check_pcc_chan(void)
+{
+	int ret = -EIO;
+	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
+	ktime_t next_deadline = ktime_add(ktime_get(), deadline);
+
+	/* Retry in case the remote processor was too slow to catch up. */
+	while (!ktime_after(ktime_get(), next_deadline)) {
+		/*
+		 * Per spec, prior to boot the PCC space will be initialized by
+		 * the platform, which should have set the command completion
+		 * bit when PCC can be used by OSPM.
+		 */
+		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
+			ret = 0;
+			break;
+		}
+		/*
+		 * Reducing the bus traffic in case this loop takes longer than
+		 * a few retries.
+		 */
+		udelay(3);
+	}
+
+	return ret;
+}
+
 static int send_pcc_cmd(u16 cmd)
 {
-	int retries, result = -EIO;
-	struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
+	int ret = -EIO;
 	struct acpi_pcct_shared_memory *generic_comm_base =
 		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
-	u32 cmd_latency = pcct_ss->latency;
+	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
+	static int mpar_count;
+	unsigned int time_delta;
 
-	/* Min time OS should wait before sending next command. */
-	udelay(pcc_cmd_delay);
+	/*
+	 * For CMD_WRITE we know for a fact the caller should have checked
+	 * the channel before writing to PCC space.
+	 */
+	if (cmd == CMD_READ) {
+		ret = check_pcc_chan();
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Handle the Minimum Request Turnaround Time (MRTT):
+	 * "The minimum amount of time that OSPM must wait after the completion
+	 * of a command before issuing the next command, in microseconds"
+	 */
+	if (pcc_mrtt) {
+		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
+		if (pcc_mrtt > time_delta)
+			udelay(pcc_mrtt - time_delta);
+	}
+
+	/*
+	 * Handle a non-zero Maximum Periodic Access Rate (MPAR):
+	 * "The maximum number of periodic requests that the subspace channel can
+	 * support, reported in commands per minute. 0 indicates no limitation."
+	 *
+	 * This parameter should ideally be zero or large enough to handle the
+	 * maximum number of requests that all the cores in the system can
+	 * collectively generate. If it is not, we follow the spec and simply
+	 * do not send the request to the platform after hitting the MPAR limit
+	 * in any 60s window.
+	 */
+	if (pcc_mpar) {
+		if (mpar_count == 0) {
+			time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
+			if (time_delta < 60 * MSEC_PER_SEC) {
+				pr_debug("PCC cmd not sent due to MPAR limit");
+				return -EIO;
+			}
+			last_mpar_reset = ktime_get();
+			mpar_count = pcc_mpar;
+		}
+		mpar_count--;
+	}
 
 	/* Write to the shared comm region. */
-	writew(cmd, &generic_comm_base->command);
+	writew_relaxed(cmd, &generic_comm_base->command);
 
 	/* Flip CMD COMPLETE bit */
-	writew(0, &generic_comm_base->status);
+	writew_relaxed(0, &generic_comm_base->status);
 
 	/* Ring doorbell */
-	result = mbox_send_message(pcc_channel, &cmd);
-	if (result < 0) {
+	ret = mbox_send_message(pcc_channel, &cmd);
+	if (ret < 0) {
 		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
-				cmd, result);
-		return result;
+				cmd, ret);
+		return ret;
 	}
 
-	/* Wait for a nominal time to let platform process command. */
-	udelay(cmd_latency);
-
-	/* Retry in case the remote processor was too slow to catch up. */
-	for (retries = NUM_RETRIES; retries > 0; retries--) {
-		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
-			result = 0;
-			break;
-		}
+	/*
+	 * For READs we need to ensure the cmd completed so that the ensuing
+	 * read()s can proceed. For WRITEs we don't care, because the actual
+	 * write()s are done before coming here and the next READ or WRITE
+	 * will check whether the channel is busy/free on entry to this call.
+	 *
+	 * If the Minimum Request Turnaround Time is non-zero, we need to
+	 * record the completion time of both READ and WRITE commands for
+	 * proper handling of MRTT, so we check for pcc_mrtt in addition to
+	 * CMD_READ.
+	 */
+	if (cmd == CMD_READ || pcc_mrtt) {
+		ret = check_pcc_chan();
+		if (pcc_mrtt)
+			last_cmd_cmpl_time = ktime_get();
 	}
 
-	mbox_client_txdone(pcc_channel, result);
-	return result;
+	mbox_client_txdone(pcc_channel, ret);
+	return ret;
 }
 
 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
 {
-	if (ret)
+	if (ret < 0)
 		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
 				*(u16 *)msg, ret);
 	else
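For readers unfamiliar with MPAR, the accounting added to send_pcc_cmd() above can be summarised as a simple budget over a rolling 60-second window. The sketch below is illustrative only and is not part of the patch: it reimplements the same rule in user space, and the clock source plus the example limit of 30 commands per minute are assumptions rather than values taken from any PCCT.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static unsigned int mpar_limit = 30;    /* assumed: commands per minute (0 = unlimited) */
static unsigned int mpar_count;         /* budget left in the current window */
static struct timespec last_mpar_reset; /* start of the current 60 s window */

static uint64_t ms_since(const struct timespec *t)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (uint64_t)(now.tv_sec - t->tv_sec) * 1000 +
	       (now.tv_nsec - t->tv_nsec) / 1000000;
}

/* Mirror of the MPAR check: true means the command may be sent. */
static bool mpar_allow_cmd(void)
{
	if (!mpar_limit)
		return true;            /* 0 means "no limitation" per the spec */

	if (mpar_count == 0) {
		/* Budget spent: refuse until the 60 s window has elapsed. */
		if (ms_since(&last_mpar_reset) < 60 * 1000)
			return false;
		clock_gettime(CLOCK_MONOTONIC, &last_mpar_reset);
		mpar_count = mpar_limit;
	}

	mpar_count--;
	return true;
}

The kernel code applies the same rule with ktime_ms_delta() and MSEC_PER_SEC instead of clock_gettime().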
@@ -306,6 +389,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
 {
 	struct acpi_pcct_hw_reduced *cppc_ss;
 	unsigned int len;
+	u64 usecs_lat;
 
 	if (pcc_subspace_idx >= 0) {
 		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
@@ -335,7 +419,16 @@ static int register_pcc_channel(int pcc_subspace_idx)
 		 */
 		comm_base_addr = cppc_ss->base_address;
 		len = cppc_ss->length;
-		pcc_cmd_delay = cppc_ss->min_turnaround_time;
+
+		/*
+		 * cppc_ss->latency is just a Nominal value. In reality
+		 * the remote processor could be much slower to reply.
+		 * So add an arbitrary amount of wait on top of Nominal.
+		 */
+		usecs_lat = NUM_RETRIES * cppc_ss->latency;
+		deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+		pcc_mrtt = cppc_ss->min_turnaround_time;
+		pcc_mpar = cppc_ss->max_access_rate;
 
 		pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
 		if (!pcc_comm_addr) {
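To make the deadline arithmetic above concrete: assuming an illustrative PCCT nominal latency of 100 µs (a made-up figure, not one from the patch), usecs_lat = NUM_RETRIES * latency = 500 * 100 µs = 50,000 µs, so deadline works out to 50 ms. check_pcc_chan() will then poll the status word, pausing 3 µs between reads, for at most roughly 50 ms before giving up with -EIO.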
@@ -546,29 +639,74 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 }
 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 
-static u64 get_phys_addr(struct cpc_reg *reg)
-{
-	/* PCC communication addr space begins at byte offset 0x8. */
-	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
-		return (u64)comm_base_addr + 0x8 + reg->address;
-	else
-		return reg->address;
-}
+/*
+ * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
+ * as fast as possible. We have already mapped the PCC subspace during init, so
+ * we can directly write to it.
+ */
 
-static void cpc_read(struct cpc_reg *reg, u64 *val)
+static int cpc_read(struct cpc_reg *reg, u64 *val)
 {
-	u64 addr = get_phys_addr(reg);
+	int ret_val = 0;
 
-	acpi_os_read_memory((acpi_physical_address)addr,
-			val, reg->bit_width);
+	*val = 0;
+	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+		void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+		switch (reg->bit_width) {
+		case 8:
+			*val = readb_relaxed(vaddr);
+			break;
+		case 16:
+			*val = readw_relaxed(vaddr);
+			break;
+		case 32:
+			*val = readl_relaxed(vaddr);
+			break;
+		case 64:
+			*val = readq_relaxed(vaddr);
+			break;
+		default:
+			pr_debug("Error: Cannot read %u bit width from PCC\n",
+				reg->bit_width);
+			ret_val = -EFAULT;
+		}
+	} else
+		ret_val = acpi_os_read_memory((acpi_physical_address)reg->address,
+					val, reg->bit_width);
+	return ret_val;
 }
 
-static void cpc_write(struct cpc_reg *reg, u64 val)
+static int cpc_write(struct cpc_reg *reg, u64 val)
 {
-	u64 addr = get_phys_addr(reg);
+	int ret_val = 0;
 
-	acpi_os_write_memory((acpi_physical_address)addr,
-			val, reg->bit_width);
+	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+		void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+		switch (reg->bit_width) {
+		case 8:
+			writeb_relaxed(val, vaddr);
+			break;
+		case 16:
+			writew_relaxed(val, vaddr);
+			break;
+		case 32:
+			writel_relaxed(val, vaddr);
+			break;
+		case 64:
+			writeq_relaxed(val, vaddr);
+			break;
+		default:
+			pr_debug("Error: Cannot write %u bit width to PCC\n",
+				reg->bit_width);
+			ret_val = -EFAULT;
+			break;
+		}
+	} else
+		ret_val = acpi_os_write_memory((acpi_physical_address)reg->address,
+				val, reg->bit_width);
+	return ret_val;
 }
 
 /**
@@ -604,7 +742,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	    (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
 	    (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
 		/* Ring doorbell once to update PCC subspace */
-		if (send_pcc_cmd(CMD_READ)) {
+		if (send_pcc_cmd(CMD_READ) < 0) {
 			ret = -EIO;
 			goto out_err;
 		}
@@ -662,7 +800,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
 	    (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
 		/* Ring doorbell once to update PCC subspace */
-		if (send_pcc_cmd(CMD_READ)) {
+		if (send_pcc_cmd(CMD_READ) < 0) {
 			ret = -EIO;
 			goto out_err;
 		}
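Both of the read paths above follow the same pattern whenever one of the registers lives in PCC space. Condensed into a sketch, with reg and val standing in for one of the per-CPU CPC entries the real functions read and the error-handling labels trimmed, the flow is:

	spin_lock(&pcc_lock);

	/* Ring the doorbell once so the platform refreshes the whole PCC subspace. */
	if (send_pcc_cmd(CMD_READ) < 0) {
		spin_unlock(&pcc_lock);
		return -EIO;
	}

	/* The relaxed MMIO reads in cpc_read() now see up-to-date values. */
	cpc_read(&reg->cpc_entry.reg, &val);

	spin_unlock(&pcc_lock);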
@@ -713,6 +851,13 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 
 	spin_lock(&pcc_lock);
 
+	/* If this is a PCC reg, check if the channel is free before writing */
+	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+		ret = check_pcc_chan();
+		if (ret)
+			goto busy_channel;
+	}
+
 	/*
 	 * Skip writing MIN/MAX until Linux knows how to come up with
 	 * useful values.
@@ -722,10 +867,10 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 	/* Is this a PCC reg ?*/
 	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
 		/* Ring doorbell so Remote can get our perf request. */
-		if (send_pcc_cmd(CMD_WRITE))
+		if (send_pcc_cmd(CMD_WRITE) < 0)
 			ret = -EIO;
 	}
+busy_channel:
 	spin_unlock(&pcc_lock);
 
 	return ret;
...
@@ -314,7 +314,6 @@ static int __init acpi_processor_driver_init(void)
 	if (result < 0)
 		return result;
 
-	acpi_processor_syscore_init();
 	register_hotcpu_notifier(&acpi_cpu_notifier);
 	acpi_thermal_cpufreq_init();
 	acpi_processor_ppc_init();
@@ -330,7 +329,6 @@ static void __exit acpi_processor_driver_exit(void)
 	acpi_processor_ppc_exit();
 	acpi_thermal_cpufreq_exit();
 	unregister_hotcpu_notifier(&acpi_cpu_notifier);
-	acpi_processor_syscore_exit();
 	driver_unregister(&acpi_processor_driver);
 }
...
@@ -23,6 +23,7 @@
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
+#define pr_fmt(fmt) "ACPI: " fmt
 
 #include <linux/module.h>
 #include <linux/acpi.h>
@@ -30,7 +31,6 @@
 #include <linux/sched.h>       /* need_resched() */
 #include <linux/tick.h>
 #include <linux/cpuidle.h>
-#include <linux/syscore_ops.h>
 #include <acpi/processor.h>
 
 /*
@@ -43,8 +43,6 @@
 #include <asm/apic.h>
 #endif
 
-#define PREFIX "ACPI: "
-
 #define ACPI_PROCESSOR_CLASS            "processor"
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_idle");
@@ -81,7 +79,7 @@ static int set_max_cstate(const struct dmi_system_id *id)
 	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
 		return 0;
 
-	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
+	pr_notice("%s detected - limiting to C%ld max_cstate."
 		" Override with \"processor.max_cstate=%d\"\n", id->ident,
 		(long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
 
@@ -194,42 +192,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 
 #endif
 
-#ifdef CONFIG_PM_SLEEP
-static u32 saved_bm_rld;
-
-static int acpi_processor_suspend(void)
-{
-	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
-	return 0;
-}
-
-static void acpi_processor_resume(void)
-{
-	u32 resumed_bm_rld = 0;
-
-	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
-	if (resumed_bm_rld == saved_bm_rld)
-		return;
-
-	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
-}
-
-static struct syscore_ops acpi_processor_syscore_ops = {
-	.suspend = acpi_processor_suspend,
-	.resume = acpi_processor_resume,
-};
-
-void acpi_processor_syscore_init(void)
-{
-	register_syscore_ops(&acpi_processor_syscore_ops);
-}
-
-void acpi_processor_syscore_exit(void)
-{
-	unregister_syscore_ops(&acpi_processor_syscore_ops);
-}
-#endif /* CONFIG_PM_SLEEP */
-
 #if defined(CONFIG_X86)
 static void tsc_check_state(int state)
 {
@@ -351,7 +313,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 
 	/* There must be at least 2 elements */
 	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
-		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
+		pr_err("not enough elements in _CST\n");
 		ret = -EFAULT;
 		goto end;
 	}
@@ -360,7 +322,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 
 	/* Validate number of power states. */
 	if (count < 1 || count != cst->package.count - 1) {
-		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
+		pr_err("count given by _CST is not valid\n");
 		ret = -EFAULT;
 		goto end;
 	}
@@ -469,11 +431,9 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
 		 */
 		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
-			printk(KERN_WARNING
-			       "Limiting number of power states to max (%d)\n",
+			pr_warn("Limiting number of power states to max (%d)\n",
 				ACPI_PROCESSOR_MAX_POWER);
-			printk(KERN_WARNING
-			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
+			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
 			break;
 		}
 	}
@@ -1097,7 +1057,7 @@ int acpi_processor_power_init(struct acpi_processor *pr)
 		retval = cpuidle_register_driver(&acpi_idle_driver);
 		if (retval)
 			return retval;
-		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
+		pr_debug("%s registered with cpuidle\n",
 			acpi_idle_driver.name);
 	}
...
@@ -19,6 +19,7 @@
 #include <linux/reboot.h>
 #include <linux/acpi.h>
 #include <linux/module.h>
+#include <linux/syscore_ops.h>
 #include <asm/io.h>
 #include <trace/events/power.h>
 
@@ -677,6 +678,39 @@ static void acpi_sleep_suspend_setup(void)
 static inline void acpi_sleep_suspend_setup(void) {}
 #endif /* !CONFIG_SUSPEND */
 
+#ifdef CONFIG_PM_SLEEP
+static u32 saved_bm_rld;
+
+static int acpi_save_bm_rld(void)
+{
+	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+	return 0;
+}
+
+static void acpi_restore_bm_rld(void)
+{
+	u32 resumed_bm_rld = 0;
+
+	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+	if (resumed_bm_rld == saved_bm_rld)
+		return;
+
+	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+}
+
+static struct syscore_ops acpi_sleep_syscore_ops = {
+	.suspend = acpi_save_bm_rld,
+	.resume = acpi_restore_bm_rld,
+};
+
+void acpi_sleep_syscore_init(void)
+{
+	register_syscore_ops(&acpi_sleep_syscore_ops);
+}
+#else
+static inline void acpi_sleep_syscore_init(void) {}
+#endif /* CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_HIBERNATION
 static unsigned long s4_hardware_signature;
 static struct acpi_table_facs *facs;
@@ -839,6 +873,7 @@ int __init acpi_sleep_init(void)
 
 	sleep_states[ACPI_STATE_S0] = 1;
 
+	acpi_sleep_syscore_init();
 	acpi_sleep_suspend_setup();
 	acpi_sleep_hibernate_setup();
...
@@ -63,6 +63,7 @@
 #include <linux/platform_device.h>
 #include <linux/mailbox_controller.h>
 #include <linux/mailbox_client.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 
 #include "mailbox.h"
 
@@ -70,6 +71,9 @@
 
 static struct mbox_chan *pcc_mbox_channels;
 
+/* Array of cached virtual addresses for doorbell registers */
+static void __iomem **pcc_doorbell_vaddr;
+
 static struct mbox_controller pcc_mbox_ctrl = {};
 /**
  * get_pcc_channel - Given a PCC subspace idx, get
@@ -160,6 +164,66 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
 }
 EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
 
+/*
+ * PCC can be used with perf critical drivers such as CPPC, so it makes
+ * sense to locally cache the virtual address and use it to read/write
+ * PCC registers such as the doorbell register.
+ *
+ * read_register and write_register below are used to access perf
+ * critical registers such as the PCC doorbell register.
+ */
+static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+{
+	int ret_val = 0;
+
+	switch (bit_width) {
+	case 8:
+		*val = readb(vaddr);
+		break;
+	case 16:
+		*val = readw(vaddr);
+		break;
+	case 32:
+		*val = readl(vaddr);
+		break;
+	case 64:
+		*val = readq(vaddr);
+		break;
+	default:
+		pr_debug("Error: Cannot read register of %u bit width",
+			bit_width);
+		ret_val = -EFAULT;
+		break;
+	}
+	return ret_val;
+}
+
+static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+{
+	int ret_val = 0;
+
+	switch (bit_width) {
+	case 8:
+		writeb(val, vaddr);
+		break;
+	case 16:
+		writew(val, vaddr);
+		break;
+	case 32:
+		writel(val, vaddr);
+		break;
+	case 64:
+		writeq(val, vaddr);
+		break;
+	default:
+		pr_debug("Error: Cannot write register of %u bit width",
+			bit_width);
+		ret_val = -EFAULT;
+		break;
+	}
+	return ret_val;
+}
+
 /**
  * pcc_send_data - Called from Mailbox Controller code. Used
  *		here only to ring the channel doorbell. The PCC client
@@ -175,21 +239,39 @@ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
 static int pcc_send_data(struct mbox_chan *chan, void *data)
 {
 	struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
-	struct acpi_generic_address doorbell;
+	struct acpi_generic_address *doorbell;
 	u64 doorbell_preserve;
 	u64 doorbell_val;
 	u64 doorbell_write;
+	u32 id = chan - pcc_mbox_channels;
+	int ret = 0;
+
+	if (id >= pcc_mbox_ctrl.num_chans) {
+		pr_debug("pcc_send_data: Invalid mbox_chan passed\n");
+		return -ENOENT;
+	}
 
-	doorbell = pcct_ss->doorbell_register;
+	doorbell = &pcct_ss->doorbell_register;
 	doorbell_preserve = pcct_ss->preserve_mask;
 	doorbell_write = pcct_ss->write_mask;
 
 	/* Sync notification from OS to Platform. */
-	acpi_read(&doorbell_val, &doorbell);
-	acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
-			&doorbell);
-
-	return 0;
+	if (pcc_doorbell_vaddr[id]) {
+		ret = read_register(pcc_doorbell_vaddr[id], &doorbell_val,
+			doorbell->bit_width);
+		if (ret)
+			return ret;
+		ret = write_register(pcc_doorbell_vaddr[id],
+			(doorbell_val & doorbell_preserve) | doorbell_write,
+			doorbell->bit_width);
+	} else {
+		ret = acpi_read(&doorbell_val, doorbell);
+		if (ret)
+			return ret;
+		ret = acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
+			doorbell);
+	}
+	return ret;
 }
 
 static const struct mbox_chan_ops pcc_chan_ops = {
@@ -265,14 +347,29 @@ static int __init acpi_pcc_probe(void)
 		return -ENOMEM;
 	}
 
+	pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
+	if (!pcc_doorbell_vaddr) {
+		kfree(pcc_mbox_channels);
+		return -ENOMEM;
+	}
+
 	/* Point to the first PCC subspace entry */
 	pcct_entry = (struct acpi_subtable_header *) (
		(unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
 
 	for (i = 0; i < count; i++) {
+		struct acpi_generic_address *db_reg;
+		struct acpi_pcct_hw_reduced *pcct_ss;
 		pcc_mbox_channels[i].con_priv = pcct_entry;
+
+		/* If the doorbell is in system memory, cache the virt address */
+		pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
+		db_reg = &pcct_ss->doorbell_register;
+		if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+			pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
+							db_reg->bit_width / 8);
 		pcct_entry = (struct acpi_subtable_header *)
			((unsigned long) pcct_entry + pcct_entry->length);
 	}
 
 	pcc_mbox_ctrl.num_chans = count;
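The doorbell ring in pcc_send_data() above is a read-modify-write governed by the subspace's preserve and write masks. A tiny standalone example with made-up mask values (the real ones come from firmware via the PCCT, not from this patch) shows how the expression combines them:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only. */
	uint64_t doorbell_val      = 0xf3;  /* current register content */
	uint64_t doorbell_preserve = 0xf0;  /* bits the OS must leave untouched */
	uint64_t doorbell_write    = 0x01;  /* bits that actually ring the doorbell */

	uint64_t new_val = (doorbell_val & doorbell_preserve) | doorbell_write;

	printf("0x%llx\n", (unsigned long long)new_val);  /* prints 0xf1 */
	return 0;
}

The preserved nibble (0xf0) survives unchanged and the write mask sets bit 0, which is what notifies the platform.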
...
@@ -9,6 +9,7 @@
 #define ACPI_PROCESSOR_CLASS		"processor"
 #define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
 #define ACPI_PROCESSOR_DEVICE_HID	"ACPI0007"
+#define ACPI_PROCESSOR_CONTAINER_HID	"ACPI0010"
 
 #define ACPI_PROCESSOR_BUSY_METRIC	10
 
@@ -394,14 +395,6 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
 }
 #endif	/* CONFIG_ACPI_PROCESSOR_IDLE */
 
-#if defined(CONFIG_PM_SLEEP) & defined(CONFIG_ACPI_PROCESSOR_IDLE)
-void acpi_processor_syscore_init(void);
-void acpi_processor_syscore_exit(void);
-#else
-static inline void acpi_processor_syscore_init(void) {}
-static inline void acpi_processor_syscore_exit(void) {}
-#endif
-
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
...