Commit 603fadf3 authored by Bjorn Helgaas, committed by Rafael J. Wysocki

ACPI: Fix comment typos

Fix some misspellings in comments.  No functional change intended.
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 8c2ffd91
@@ -390,7 +390,7 @@ static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
 	return size > 0 ? size : ret;
 }
 
-static int acpi_aml_thread(void *unsed)
+static int acpi_aml_thread(void *unused)
 {
 	acpi_osd_exec_callback function = NULL;
 	void *context;
...
@@ -22,7 +22,7 @@
  * LPAT conversion table
  *
  * @lpat_table: the temperature_raw mapping table structure
- * @raw: the raw value, used as a key to get the temerature from the
+ * @raw: the raw value, used as a key to get the temperature from the
  *	  above mapping table
  *
  * A positive converted temperature value will be returned on success,
...
@@ -81,9 +81,9 @@ struct cppc_pcc_data {
 	int refcount;
 };
 
-/* Array to represent the PCC channel per subspace id */
+/* Array to represent the PCC channel per subspace ID */
 static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
 
-/* The cpu_pcc_subspace_idx containsper CPU subspace id */
+/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
 static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
 
 /*
@@ -436,7 +436,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 		return -ENOMEM;
 
 	/*
-	 * Now that we have _PSD data from all CPUs, lets setup P-state
+	 * Now that we have _PSD data from all CPUs, let's setup P-state
 	 * domain info.
 	 */
 	for_each_possible_cpu(i) {
@@ -588,7 +588,7 @@ static int register_pcc_channel(int pcc_ss_idx)
 			return -ENOMEM;
 		}
 
-		/* Set flag so that we dont come here for each CPU. */
+		/* Set flag so that we don't come here for each CPU. */
 		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
 	}
@@ -613,7 +613,7 @@ bool __weak cpc_ffh_supported(void)
  *
  * Check and allocate the cppc_pcc_data memory.
  * In some processor configurations it is possible that same subspace
- * is shared between multiple CPU's. This is seen especially in CPU's
+ * is shared between multiple CPUs. This is seen especially in CPUs
  * with hardware multi-threading support.
  *
  * Return: 0 for success, errno for failure
@@ -711,7 +711,7 @@ static bool is_cppc_supported(int revision, int num_ent)
 
 /**
  * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
  *
  * Return: 0 for success or negative value for err.
  */
@@ -728,7 +728,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 	acpi_status status;
 	int ret = -EFAULT;
 
-	/* Parse the ACPI _CPC table for this cpu. */
+	/* Parse the ACPI _CPC table for this CPU. */
 	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
 			ACPI_TYPE_PACKAGE);
 	if (ACPI_FAILURE(status)) {
@@ -840,7 +840,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 	if (ret)
 		goto out_free;
 
-	/* Register PCC channel once for all PCC subspace id. */
+	/* Register PCC channel once for all PCC subspace ID. */
 	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
 		ret = register_pcc_channel(pcc_subspace_id);
 		if (ret)
@@ -860,7 +860,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 		goto out_free;
 	}
 
-	/* Plug PSD data into this CPUs CPC descriptor. */
+	/* Plug PSD data into this CPU's CPC descriptor. */
 	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
 
 	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
@@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
 
 /**
  * acpi_cppc_processor_exit - Cleanup CPC structs.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
  *
 * Return: Void
 */
@@ -931,7 +931,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 
 /**
  * cpc_read_ffh() - Read FFH register
- * @cpunum: cpu number to read
+ * @cpunum: CPU number to read
  * @reg: cppc register information
  * @val: place holder for return value
  *
@@ -946,7 +946,7 @@ int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 
 /**
  * cpc_write_ffh() - Write FFH register
- * @cpunum: cpu number to write
+ * @cpunum: CPU number to write
  * @reg: cppc register information
 * @val: value to write
 *
@@ -1093,7 +1093,7 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
 EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
 
 /**
- * cppc_get_perf_caps - Get a CPUs performance capabilities.
+ * cppc_get_perf_caps - Get a CPU's performance capabilities.
  * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
@@ -1178,7 +1178,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
 
 /**
- * cppc_get_perf_ctrs - Read a CPUs performance feedback counters.
+ * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
  * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
@@ -1205,7 +1205,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
 
 	/*
-	 * If refernce perf register is not supported then we should
+	 * If reference perf register is not supported then we should
 	 * use the nominal perf value
 	 */
 	if (!CPC_SUPPORTED(ref_perf_reg))
@@ -1258,7 +1258,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
 
 /**
- * cppc_set_perf - Set a CPUs performance controls.
+ * cppc_set_perf - Set a CPU's performance controls.
  * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
@@ -1339,7 +1339,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 	 * executing the Phase-II.
 	 * 2. Some other CPU has beaten this CPU to successfully execute the
 	 * write_trylock and has already acquired the write_lock. We know for a
-	 * fact it(other CPU acquiring the write_lock) couldn't have happened
+	 * fact it (other CPU acquiring the write_lock) couldn't have happened
 	 * before this CPU's Phase-I as we held the read_lock.
 	 * 3. Some other CPU executing pcc CMD_READ has stolen the
 	 * down_write, in which case, send_pcc_cmd will check for pending
...
@@ -535,12 +535,12 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
 	/*
 	 * Try to execute _DSW first.
 	 *
-	 * Three agruments are needed for the _DSW object:
+	 * Three arguments are needed for the _DSW object:
 	 * Argument 0: enable/disable the wake capabilities
 	 * Argument 1: target system state
 	 * Argument 2: target device state
 	 * When _DSW object is called to disable the wake capabilities, maybe
-	 * the first argument is filled. The values of the other two agruments
+	 * the first argument is filled. The values of the other two arguments
 	 * are meaningless.
 	 */
 	in_arg[0].type = ACPI_TYPE_INTEGER;
...
@@ -164,7 +164,7 @@ static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *t
 }
 
 /**
- * acpi_count_levels() - Given a PPTT table, and a cpu node, count the caches
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches
  * @table_hdr: Pointer to the head of the PPTT table
 * @cpu_node: processor node we wish to count caches for
 *
@@ -235,7 +235,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
 /**
  * acpi_find_processor_node() - Given a PPTT table find the requested processor
  * @table_hdr: Pointer to the head of the PPTT table
- * @acpi_cpu_id: cpu we are searching for
+ * @acpi_cpu_id: CPU we are searching for
  *
 * Find the subtable entry describing the provided processor.
 * This is done by iterating the PPTT table looking for processor nodes
@@ -456,21 +456,21 @@ static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_ta
 static void acpi_pptt_warn_missing(void)
 {
-	pr_warn_once("No PPTT table found, cpu and cache topology may be inaccurate\n");
+	pr_warn_once("No PPTT table found, CPU and cache topology may be inaccurate\n");
 }
 
 /**
  * topology_get_acpi_cpu_tag() - Find a unique topology value for a feature
  * @table: Pointer to the head of the PPTT table
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
  * @level: A level that terminates the search
  * @flag: A flag which terminates the search
  *
- * Get a unique value given a cpu, and a topology level, that can be
+ * Get a unique value given a CPU, and a topology level, that can be
  * matched to determine which cpus share common topological features
 * at that level.
 *
- * Return: Unique value, or -ENOENT if unable to locate cpu
+ * Return: Unique value, or -ENOENT if unable to locate CPU
 */
 static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
 				      unsigned int cpu, int level, int flag)
@@ -510,7 +510,7 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
 		return -ENOENT;
 	}
 
 	retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
-	pr_debug("Topology Setup ACPI cpu %d, level %d ret = %d\n",
+	pr_debug("Topology Setup ACPI CPU %d, level %d ret = %d\n",
 		 cpu, level, retval);
 	acpi_put_table(table);
@@ -519,9 +519,9 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
 
 /**
  * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
  *
- * Given a logical cpu number, returns the number of levels of cache represented
+ * Given a logical CPU number, returns the number of levels of cache represented
 * in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0
 * indicating we didn't find any cache levels.
 *
@@ -534,7 +534,7 @@ int acpi_find_last_cache_level(unsigned int cpu)
 	int number_of_levels = 0;
 	acpi_status status;
 
-	pr_debug("Cache Setup find last level cpu=%d\n", cpu);
+	pr_debug("Cache Setup find last level CPU=%d\n", cpu);
 
 	acpi_cpu_id = get_acpi_id_for_cpu(cpu);
 	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
@@ -551,14 +551,14 @@ int acpi_find_last_cache_level(unsigned int cpu)
 
 /**
  * cache_setup_acpi() - Override CPU cache topology with data from the PPTT
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
  *
  * Updates the global cache info provided by cpu_get_cacheinfo()
  * when there are valid properties in the acpi_pptt_cache nodes. A
  * successful parse may not result in any updates if none of the
- * cache levels have any valid flags set. Futher, a unique value is
+ * cache levels have any valid flags set. Further, a unique value is
  * associated with each known CPU cache entry. This unique value
- * can be used to determine whether caches are shared between cpus.
+ * can be used to determine whether caches are shared between CPUs.
  *
 * Return: -ENOENT on failure to find table, or 0 on success
 */
@@ -567,7 +567,7 @@ int cache_setup_acpi(unsigned int cpu)
 	struct acpi_table_header *table;
 	acpi_status status;
 
-	pr_debug("Cache Setup ACPI cpu %d\n", cpu);
+	pr_debug("Cache Setup ACPI CPU %d\n", cpu);
 
 	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
 	if (ACPI_FAILURE(status)) {
@@ -582,8 +582,8 @@ int cache_setup_acpi(unsigned int cpu)
 }
 
 /**
- * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
+ * @cpu: Kernel logical CPU number
  * @level: The topological level for which we would like a unique ID
  *
 * Determine a topology unique ID for each thread/core/cluster/mc_grouping
@@ -596,7 +596,7 @@ int cache_setup_acpi(unsigned int cpu)
 * other levels beyond this use a generated value to uniquely identify
 * a topological feature.
 *
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
 * Otherwise returns a value which represents a unique topological feature.
 */
 int find_acpi_cpu_topology(unsigned int cpu, int level)
@@ -606,12 +606,12 @@ int find_acpi_cpu_topology(unsigned int cpu, int level)
 
 /**
  * find_acpi_cpu_cache_topology() - Determine a unique cache topology value
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
  * @level: The cache level for which we would like a unique ID
  *
  * Determine a unique ID for each unified cache in the system
  *
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
 * Otherwise returns a value which represents a unique topological feature.
 */
 int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
@@ -643,17 +643,17 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
 
 /**
- * find_acpi_cpu_topology_package() - Determine a unique cpu package value
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology_package() - Determine a unique CPU package value
+ * @cpu: Kernel logical CPU number
  *
- * Determine a topology unique package ID for the given cpu.
+ * Determine a topology unique package ID for the given CPU.
  * This ID can then be used to group peers, which will have matching ids.
  *
  * The search terminates when either a level is found with the PHYSICAL_PACKAGE
  * flag set or we reach a root node.
  *
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
- * Otherwise returns a value which represents the package for this cpu.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
+ * Otherwise returns a value which represents the package for this CPU.
 */
 int find_acpi_cpu_topology_package(unsigned int cpu)
 {
...
@@ -895,7 +895,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
 	/*
 	 * Call _PSW/_DSW object to disable its ability to wake the sleeping
 	 * system for the ACPI device with the _PRW object.
-	 * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
+	 * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
 	 * So it is necessary to call _DSW object first. Only when it is not
 	 * present will the _PSW object used.
 	 */
...
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(qdf2400_e44_present);
 
 /*
  * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
- * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
+ * Detect them by examining the OEM fields in the SPCR header, similar to PCI
 * quirk detection in pci_mcfg.c.
 */
 static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
...
@@ -112,7 +112,7 @@ static int video_detect_force_none(const struct dmi_system_id *d)
 static const struct dmi_system_id video_detect_dmi_table[] = {
 	/* On Samsung X360, the BIOS will set a flag (VDRV) if generic
 	 * ACPI backlight device is used. This flag will definitively break
-	 * the backlight interface (even the vendor interface) untill next
+	 * the backlight interface (even the vendor interface) until next
 	 * reboot. It's why we should prevent video.ko from being used here
 	 * and we can't rely on a later call to acpi_video_unregister().
 	 */
...