Commit e4db2813 authored by Rafael J. Wysocki

cpufreq: governor: Avoid atomic operations in hot paths

Rework the handling of work items by dbs_update_util_handler() and
dbs_work_handler() so the former (which is executed in scheduler
paths) only uses atomic operations when absolutely necessary.  That
is, when the policy is shared and dbs_update_util_handler() has
already decided that this is the time to queue up a work item.

In particular, this avoids the atomic ops entirely on platforms where
policy objects are never shared.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
parent f62b9374
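
For readers who want the shape of the change without the kernel context, here is a minimal, self-contained userspace sketch of the pattern this commit adopts. It uses C11 atomics, and all names and simplified types are illustrative only, not the kernel code itself: the hot path checks a plain work_in_progress flag first, and only a shared policy pays for an atomic claim of the work item.

    /* Illustrative userspace sketch of the pattern; not the kernel code. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct policy_dbs_sketch {
        bool is_shared;            /* policy used by more than one CPU */
        bool work_in_progress;     /* plain flag; set by the single claimant */
        atomic_int work_count;     /* only touched when is_shared is true */
        uint64_t last_sample_time;
        uint64_t sample_delay_ns;
    };

    /* Hot path: called very frequently; avoids atomics unless needed. */
    bool try_claim_work(struct policy_dbs_sketch *p, uint64_t now)
    {
        if (p->work_in_progress)
            return false;          /* cheap plain read, no atomic op */

        if (now - p->last_sample_time < p->sample_delay_ns)
            return false;          /* too early for another sample */

        /*
         * Only when several CPUs share the policy can two callers race
         * here, so only then pay for an atomic 0 -> 1 "claim".
         */
        if (p->is_shared) {
            int expected = 0;
            if (!atomic_compare_exchange_strong(&p->work_count, &expected, 1))
                return false;      /* another CPU already claimed the work */
        }

        p->last_sample_time = now;
        p->work_in_progress = true; /* blocks further claims until cleared */
        return true;                /* caller now queues the work item */
    }

    /* Completion path: when the queued work finishes, reopen the hot path. */
    void work_done(struct policy_dbs_sketch *p)
    {
        atomic_store(&p->work_count, 0);
        /* Clear the plain flag last so new claims see a fresh work_count. */
        p->work_in_progress = false;
    }

In the actual patch below, the ordering is enforced with smp_wmb() in dbs_work_handler() paired with smp_rmb() in dbs_update_util_handler() rather than with sequentially consistent C11 atomics, and atomic_add_unless() plays the role of the compare-and-swap.
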
@@ -304,6 +304,7 @@ static void gov_cancel_work(struct cpufreq_policy *policy)
 	irq_work_sync(&policy_dbs->irq_work);
 	cancel_work_sync(&policy_dbs->work);
 	atomic_set(&policy_dbs->work_count, 0);
+	policy_dbs->work_in_progress = false;
 }
 
 static void dbs_work_handler(struct work_struct *work)
@@ -326,13 +327,15 @@ static void dbs_work_handler(struct work_struct *work)
 	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
 	mutex_unlock(&policy_dbs->timer_mutex);
 
+	/* Allow the utilization update handler to queue up more work. */
+	atomic_set(&policy_dbs->work_count, 0);
 	/*
-	 * If the atomic operation below is reordered with respect to the
-	 * sample delay modification, the utilization update handler may end
-	 * up using a stale sample delay value.
+	 * If the update below is reordered with respect to the sample delay
+	 * modification, the utilization update handler may end up using a stale
+	 * sample delay value.
 	 */
-	smp_mb__before_atomic();
-	atomic_dec(&policy_dbs->work_count);
+	smp_wmb();
+	policy_dbs->work_in_progress = false;
 }
 
 static void dbs_irq_work(struct irq_work *irq_work)
@@ -348,6 +351,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
 {
 	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
 	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
+	u64 delta_ns;
 
 	/*
 	 * The work may not be allowed to be queued up right now.
@@ -355,17 +359,30 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
 	 * - Work has already been queued up or is in progress.
 	 * - It is too early (too little time from the previous sample).
 	 */
-	if (atomic_inc_return(&policy_dbs->work_count) == 1) {
-		u64 delta_ns;
+	if (policy_dbs->work_in_progress)
+		return;
 
-		delta_ns = time - policy_dbs->last_sample_time;
-		if ((s64)delta_ns >= policy_dbs->sample_delay_ns) {
-			policy_dbs->last_sample_time = time;
-			irq_work_queue(&policy_dbs->irq_work);
-			return;
-		}
-	}
-	atomic_dec(&policy_dbs->work_count);
+	/*
+	 * If the reads below are reordered before the check above, the value
+	 * of sample_delay_ns used in the computation may be stale.
+	 */
+	smp_rmb();
+	delta_ns = time - policy_dbs->last_sample_time;
+	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
+		return;
+
+	/*
+	 * If the policy is not shared, the irq_work may be queued up right away
+	 * at this point.  Otherwise, we need to ensure that only one of the
+	 * CPUs sharing the policy will do that.
+	 */
+	if (policy_dbs->is_shared &&
+	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
+		return;
+
+	policy_dbs->last_sample_time = time;
+	policy_dbs->work_in_progress = true;
+	irq_work_queue(&policy_dbs->irq_work);
 }
 
 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
@@ -542,6 +559,8 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
 	if (!policy->cur)
 		return -EINVAL;
 
+	policy_dbs->is_shared = policy_is_shared(policy);
+
 	sampling_rate = dbs_data->sampling_rate;
 	ignore_nice = dbs_data->ignore_nice_load;
...
@@ -130,6 +130,9 @@ struct policy_dbs_info {
 	/* dbs_data may be shared between multiple policy objects */
 	struct dbs_data *dbs_data;
 	struct list_head list;
+	/* Status indicators */
+	bool is_shared;		/* This object is used by multiple CPUs */
+	bool work_in_progress;	/* Work is being queued up or in progress */
 };
 
 static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
...