Commit b09d981b authored by Vikas Shivappa's avatar Vikas Shivappa Committed by Thomas Gleixner

x86/intel_rdt: Prepare to add RDT monitor cpus file support

Separate the ctrl cpus file handling from the generic cpus file handling
and convert the per cpu closid from u32 to a struct which will be used
later to add rmid to the same struct. Also cleanup some name space.
Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: peterz@infradead.org
Cc: eranian@google.com
Cc: vikas.shivappa@intel.com
Cc: ak@linux.intel.com
Cc: davidcc@google.com
Cc: reinette.chatre@intel.com
Link: http://lkml.kernel.org/r/1501017287-28083-17-git-send-email-vikas.shivappa@linux.intel.com
parent d6aaba61
...@@ -26,7 +26,7 @@ struct intel_pqr_state { ...@@ -26,7 +26,7 @@ struct intel_pqr_state {
}; };
DECLARE_PER_CPU(struct intel_pqr_state, pqr_state); DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid); DECLARE_PER_CPU_READ_MOSTLY(struct intel_pqr_state, rdt_cpu_default);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
/* /*
...@@ -54,7 +54,7 @@ static inline void intel_rdt_sched_in(void) ...@@ -54,7 +54,7 @@ static inline void intel_rdt_sched_in(void)
*/ */
closid = current->closid; closid = current->closid;
if (closid == 0) if (closid == 0)
closid = this_cpu_read(cpu_closid); closid = this_cpu_read(rdt_cpu_default.closid);
if (closid != state->closid) { if (closid != state->closid) {
state->closid = closid; state->closid = closid;
......
...@@ -39,8 +39,6 @@ ...@@ -39,8 +39,6 @@
/* Mutex to protect rdtgroup access. */ /* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex); DEFINE_MUTEX(rdtgroup_mutex);
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);
/* /*
* The cached intel_pqr_state is strictly per CPU and can never be * The cached intel_pqr_state is strictly per CPU and can never be
* updated from a remote CPU. Functions which modify the state * updated from a remote CPU. Functions which modify the state
...@@ -49,6 +47,8 @@ DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid); ...@@ -49,6 +47,8 @@ DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);
*/ */
DEFINE_PER_CPU(struct intel_pqr_state, pqr_state); DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
DEFINE_PER_CPU_READ_MOSTLY(struct intel_pqr_state, rdt_cpu_default);
/* /*
* Used to store the max resource name width and max resource data width * Used to store the max resource name width and max resource data width
* to display the schemata in a tabular format * to display the schemata in a tabular format
...@@ -500,7 +500,7 @@ static void clear_closid(int cpu) ...@@ -500,7 +500,7 @@ static void clear_closid(int cpu)
{ {
struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
per_cpu(cpu_closid, cpu) = 0; per_cpu(rdt_cpu_default.closid, cpu) = 0;
state->closid = 0; state->closid = 0;
wrmsr(IA32_PQR_ASSOC, state->rmid, 0); wrmsr(IA32_PQR_ASSOC, state->rmid, 0);
} }
......
...@@ -320,8 +320,6 @@ union cpuid_0x10_x_edx { ...@@ -320,8 +320,6 @@ union cpuid_0x10_x_edx {
unsigned int full; unsigned int full;
}; };
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
void rdt_ctrl_update(void *arg); void rdt_ctrl_update(void *arg);
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn); void rdtgroup_kn_unlock(struct kernfs_node *kn);
......
...@@ -181,13 +181,16 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, ...@@ -181,13 +181,16 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
/* /*
* This is safe against intel_rdt_sched_in() called from __switch_to() * This is safe against intel_rdt_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call * because __switch_to() is executed with interrupts disabled. A local call
* from rdt_update_closid() is protected against __switch_to() because * from update_closid() is protected against __switch_to() because
* preemption is disabled. * preemption is disabled.
*/ */
static void rdt_update_cpu_closid(void *closid) static void update_cpu_closid(void *info)
{ {
if (closid) struct rdtgroup *r = info;
this_cpu_write(cpu_closid, *(int *)closid);
if (r)
this_cpu_write(rdt_cpu_default.closid, r->closid);
/* /*
* We cannot unconditionally write the MSR because the current * We cannot unconditionally write the MSR because the current
* executing task might have its own closid selected. Just reuse * executing task might have its own closid selected. Just reuse
...@@ -199,28 +202,62 @@ static void rdt_update_cpu_closid(void *closid) ...@@ -199,28 +202,62 @@ static void rdt_update_cpu_closid(void *closid)
/* /*
* Update the PGR_ASSOC MSR on all cpus in @cpu_mask, * Update the PGR_ASSOC MSR on all cpus in @cpu_mask,
* *
* Per task closids must have been set up before calling this function. * Per task closids/rmids must have been set up before calling this function.
*
* The per cpu closids are updated with the smp function call, when @closid
* is not NULL. If @closid is NULL then all affected percpu closids must
* have been set up before calling this function.
*/ */
static void static void
rdt_update_closid(const struct cpumask *cpu_mask, int *closid) update_closid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{ {
int cpu = get_cpu(); int cpu = get_cpu();
if (cpumask_test_cpu(cpu, cpu_mask)) if (cpumask_test_cpu(cpu, cpu_mask))
rdt_update_cpu_closid(closid); update_cpu_closid(r);
smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1); smp_call_function_many(cpu_mask, update_cpu_closid, r, 1);
put_cpu(); put_cpu();
} }
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
cpumask_var_t tmpmask)
{
struct rdtgroup *r;
/* Check whether cpus are dropped from this group */
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
if (cpumask_weight(tmpmask)) {
/* Can't drop from default group */
if (rdtgrp == &rdtgroup_default)
return -EINVAL;
/* Give any dropped cpus to rdtgroup_default */
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, tmpmask);
update_closid(tmpmask, &rdtgroup_default);
}
/*
* If we added cpus, remove them from previous group that owned them
* and update per-cpu closid
*/
cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
if (cpumask_weight(tmpmask)) {
list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
if (r == rdtgrp)
continue;
cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
}
update_closid(tmpmask, rdtgrp);
}
/* Done pushing/pulling - update this group with new mask */
cpumask_copy(&rdtgrp->cpu_mask, newmask);
return 0;
}
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off) char *buf, size_t nbytes, loff_t off)
{ {
cpumask_var_t tmpmask, newmask; cpumask_var_t tmpmask, newmask;
struct rdtgroup *rdtgrp, *r; struct rdtgroup *rdtgrp;
int ret; int ret;
if (!buf) if (!buf)
...@@ -254,36 +291,10 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, ...@@ -254,36 +291,10 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
goto unlock; goto unlock;
} }
/* Check whether cpus are dropped from this group */ if (rdtgrp->type == RDTCTRL_GROUP)
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask);
if (cpumask_weight(tmpmask)) { else
/* Can't drop from default group */ ret = -EINVAL;
if (rdtgrp == &rdtgroup_default) {
ret = -EINVAL;
goto unlock;
}
/* Give any dropped cpus to rdtgroup_default */
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, tmpmask);
rdt_update_closid(tmpmask, &rdtgroup_default.closid);
}
/*
* If we added cpus, remove them from previous group that owned them
* and update per-cpu closid
*/
cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
if (cpumask_weight(tmpmask)) {
list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
if (r == rdtgrp)
continue;
cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
}
rdt_update_closid(tmpmask, &rdtgrp->closid);
}
/* Done pushing/pulling - update this group with new mask */
cpumask_copy(&rdtgrp->cpu_mask, newmask);
unlock: unlock:
rdtgroup_kn_unlock(of->kn); rdtgroup_kn_unlock(of->kn);
...@@ -1102,7 +1113,7 @@ static void rmdir_all_sub(void) ...@@ -1102,7 +1113,7 @@ static void rmdir_all_sub(void)
} }
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
get_online_cpus(); get_online_cpus();
rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid); update_closid(cpu_online_mask, &rdtgroup_default);
put_online_cpus(); put_online_cpus();
kernfs_remove(kn_info); kernfs_remove(kn_info);
...@@ -1357,13 +1368,13 @@ static int rdtgroup_rmdir(struct kernfs_node *kn) ...@@ -1357,13 +1368,13 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
/* Update per cpu closid of the moved CPUs first */ /* Update per cpu closid of the moved CPUs first */
for_each_cpu(cpu, &rdtgrp->cpu_mask) for_each_cpu(cpu, &rdtgrp->cpu_mask)
per_cpu(cpu_closid, cpu) = closid; per_cpu(rdt_cpu_default.closid, cpu) = closid;
/* /*
* Update the MSR on moved CPUs and CPUs which have moved * Update the MSR on moved CPUs and CPUs which have moved
* task running on them. * task running on them.
*/ */
cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
rdt_update_closid(tmpmask, NULL); update_closid(tmpmask, NULL);
rdtgrp->flags = RDT_DELETED; rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid); closid_free(rdtgrp->closid);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment