Commit 352940ec authored by Babu Moger, committed by Borislav Petkov

x86/resctrl: Rename the RDT functions and definitions

As AMD is starting to support RESCTRL features, rename the RDT functions
and definitions to more generic names.

Replace "intel_rdt" with "resctrl" where applicable.
Signed-off-by: Babu Moger <babu.moger@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Dmitry Safonov <dima@arista.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: <linux-doc@vger.kernel.org>
Cc: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Pu Wen <puwen@hygon.cn>
Cc: <qianyue.zj@alibaba-inc.com>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Reinette Chatre <reinette.chatre@intel.com>
Cc: Rian Hunter <rian@alum.mit.edu>
Cc: Sherry Hurwitz <sherry.hurwitz@amd.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: <xiaochen.shen@intel.com>
Link: https://lkml.kernel.org/r/20181121202811.4492-3-babu.moger@amd.com
parent fa7d9493
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_SCHED_H
-#define _ASM_X86_INTEL_RDT_SCHED_H
+#ifndef _ASM_X86_RESCTRL_SCHED_H
+#define _ASM_X86_RESCTRL_SCHED_H
 #ifdef CONFIG_INTEL_RDT
@@ -10,7 +10,7 @@
 #define IA32_PQR_ASSOC	0x0c8f
 /**
- * struct intel_pqr_state - State cache for the PQR MSR
+ * struct resctrl_pqr_state - State cache for the PQR MSR
  * @cur_rmid:		The cached Resource Monitoring ID
  * @cur_closid:		The cached Class Of Service ID
  * @default_rmid:	The user assigned Resource Monitoring ID
@@ -24,21 +24,21 @@
  * The cache also helps to avoid pointless updates if the value does
  * not change.
  */
-struct intel_pqr_state {
+struct resctrl_pqr_state {
 	u32	cur_rmid;
 	u32	cur_closid;
 	u32	default_rmid;
 	u32	default_closid;
 };
-DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 /*
- * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+ * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
  *
  * Following considerations are made so that this has minimal impact
  * on scheduler hot path:
@@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
  *   simple as possible.
  * Must be called with preemption disabled.
  */
-static void __intel_rdt_sched_in(void)
+static void __resctrl_sched_in(void)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 closid = state->default_closid;
 	u32 rmid = state->default_rmid;
@@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
 	}
 }
-static inline void intel_rdt_sched_in(void)
+static inline void resctrl_sched_in(void)
 {
 	if (static_branch_likely(&rdt_enable_key))
-		__intel_rdt_sched_in();
+		__resctrl_sched_in();
 }
 #else
-static inline void intel_rdt_sched_in(void) {}
+static inline void resctrl_sched_in(void) {}
 #endif /* CONFIG_INTEL_RDT */
-#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
+#endif /* _ASM_X86_RESCTRL_SCHED_H */
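Note: the hunks above elide the body of __resctrl_sched_in(). For context, a minimal sketch of that body follows, assuming the logic is untouched by this patch apart from the renamed identifiers; the point of the per-CPU cache is that IA32_PQR_ASSOC is written only when the CLOSID or RMID actually changes, keeping the scheduler hot path cheap.

/* Sketch of the elided __resctrl_sched_in() body (not part of this diff). */
static void __resctrl_sched_in(void)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 closid = state->default_closid;
	u32 rmid = state->default_rmid;

	/* Prefer the task's own CLOSID/RMID if one has been assigned. */
	if (static_branch_likely(&rdt_alloc_enable_key) && current->closid)
		closid = current->closid;

	if (static_branch_likely(&rdt_mon_enable_key) && current->rmid)
		rmid = current->rmid;

	/* Avoid the expensive MSR write when nothing changed. */
	if (closid != state->cur_closid || rmid != state->cur_rmid) {
		state->cur_closid = closid;
		state->cur_rmid = rmid;
		wrmsr(IA32_PQR_ASSOC, rmid, closid);
	}
}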
@@ -22,7 +22,7 @@
  * Software Developer Manual June 2016, volume 3, section 17.17.
  */
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt)	"resctrl: " fmt
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -40,12 +40,12 @@
 DEFINE_MUTEX(rdtgroup_mutex);
 /*
- * The cached intel_pqr_state is strictly per CPU and can never be
+ * The cached resctrl_pqr_state is strictly per CPU and can never be
  * updated from a remote CPU. Functions which modify the state
  * are called with interrupts disabled and no preemption, which
  * is sufficient for the protection.
  */
-DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 /*
  * Used to store the max resource name width and max resource data width
@@ -639,7 +639,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 static void clear_closid_rmid(int cpu)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 	state->default_closid = 0;
 	state->default_rmid = 0;
@@ -648,7 +648,7 @@ static void clear_closid_rmid(int cpu)
 	wrmsr(IA32_PQR_ASSOC, 0, 0);
 }
-static int intel_rdt_online_cpu(unsigned int cpu)
+static int resctrl_online_cpu(unsigned int cpu)
 {
 	struct rdt_resource *r;
@@ -674,7 +674,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
 	}
 }
-static int intel_rdt_offline_cpu(unsigned int cpu)
+static int resctrl_offline_cpu(unsigned int cpu)
 {
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
@@ -866,7 +866,7 @@ static __init bool get_rdt_resources(void)
 static enum cpuhp_state rdt_online;
-static int __init intel_rdt_late_init(void)
+static int __init resctrl_late_init(void)
 {
 	struct rdt_resource *r;
 	int state, ret;
@@ -877,8 +877,8 @@ static int __init intel_rdt_late_init(void)
 	rdt_init_padding();
 	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
-				  "x86/rdt/cat:online:",
-				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
+				  "x86/resctrl/cat:online:",
+				  resctrl_online_cpu, resctrl_offline_cpu);
 	if (state < 0)
 		return state;
@@ -890,20 +890,20 @@ static int __init intel_rdt_late_init(void)
 	rdt_online = state;
 	for_each_alloc_capable_rdt_resource(r)
-		pr_info("Intel RDT %s allocation detected\n", r->name);
+		pr_info("%s allocation detected\n", r->name);
 	for_each_mon_capable_rdt_resource(r)
-		pr_info("Intel RDT %s monitoring detected\n", r->name);
 	return 0;
+		pr_info("%s monitoring detected\n", r->name);
 }
-late_initcall(intel_rdt_late_init);
+late_initcall(resctrl_late_init);
-static void __exit intel_rdt_exit(void)
+static void __exit resctrl_exit(void)
 {
 	cpuhp_remove_state(rdt_online);
 	rdtgroup_exit();
 }
-__exitcall(intel_rdt_exit);
+__exitcall(resctrl_exit);
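For readers unfamiliar with the hotplug API used in resctrl_late_init() and resctrl_exit() above: cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state, runs the online callback on every CPU that is already up, and returns the allocated state number, which is later handed to cpuhp_remove_state(). A self-contained sketch of the same pattern, using purely illustrative demo_* names that are not part of this patch:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state demo_online_state;

static int demo_online_cpu(unsigned int cpu)
{
	pr_info("cpu %u online\n", cpu);
	return 0;
}

static int demo_offline_cpu(unsigned int cpu)
{
	pr_info("cpu %u going down\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	int state;

	/* Dynamic state: the online callback also runs on already-online CPUs. */
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/demo:online",
				  demo_online_cpu, demo_offline_cpu);
	if (state < 0)
		return state;

	demo_online_state = state;
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(demo_online_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");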
@@ -69,7 +69,7 @@ struct rmid_read {
 	u64	val;
 };
-extern unsigned int intel_cqm_threshold;
+extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
...
@@ -73,7 +73,7 @@ unsigned int rdt_mon_features;
  * This is the threshold cache occupancy at which we will consider an
  * RMID available for re-allocation.
  */
-unsigned int intel_cqm_threshold;
+unsigned int resctrl_cqm_threshold;
 static inline struct rmid_entry *__rmid_entry(u32 rmid)
 {
@@ -107,7 +107,7 @@ static bool rmid_dirty(struct rmid_entry *entry)
 {
 	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-	return val >= intel_cqm_threshold;
+	return val >= resctrl_cqm_threshold;
 }
 /*
@@ -187,7 +187,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
 	list_for_each_entry(d, &r->domains, list) {
 		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
 			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-			if (val <= intel_cqm_threshold)
+			if (val <= resctrl_cqm_threshold)
 				continue;
 		}
@@ -625,6 +625,7 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
+	unsigned int cl_size = boot_cpu_data.x86_cache_size;
 	int ret;
 	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
@@ -637,10 +638,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
 	 *
 	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
 	 */
-	intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
+	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
 	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-	intel_cqm_threshold /= r->mon_scale;
+	resctrl_cqm_threshold /= r->mon_scale;
 	ret = dom_data_init(r);
 	if (ret)
...
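To make the "~1.8% of the LLC" figure in the comment above concrete: with the 35 MB LLC and 56 RMIDs from that comment, the threshold works out to 640 KB, which is about 1.8% of 35 MB. Below is a small user-space sketch of the arithmetic; the occupancy scale of 64 bytes is an assumed typical value for x86_cache_occ_scale, not something stated in the patch.

#include <stdio.h>

int main(void)
{
	unsigned int cache_size_kb = 35 * 1024;	/* x86_cache_size is in KB */
	unsigned int num_rmid = 56;		/* from the comment above */
	unsigned int mon_scale = 64;		/* assumed x86_cache_occ_scale, bytes per count */

	unsigned int threshold = cache_size_kb * 1024 / num_rmid;

	/* 36700160 / 56 = 655360 bytes = 640 KB, i.e. ~1.8% of a 35 MB LLC */
	printf("threshold = %u bytes\n", threshold);

	/* The hardware counts in units of mon_scale, so the stored value is: */
	printf("resctrl_cqm_threshold = %u\n", threshold / mon_scale);

	return 0;
}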
@@ -298,7 +298,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 }
 /*
- * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * This is safe against resctrl_sched_in() called from __switch_to()
  * because __switch_to() is executed with interrupts disabled. A local call
  * from update_closid_rmid() is proteced against __switch_to() because
  * preemption is disabled.
@@ -317,7 +317,7 @@ static void update_cpu_closid_rmid(void *info)
 	 * executing task might have its own closid selected. Just reuse
 	 * the context switch code.
 	 */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 }
 /*
@@ -542,7 +542,7 @@ static void move_myself(struct callback_head *head)
 	preempt_disable();
 	/* update PQR_ASSOC MSR to make resource group go into effect */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 	preempt_enable();
 	kfree(callback);
@@ -926,7 +926,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
 {
 	struct rdt_resource *r = of->kn->parent->priv;
-	seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
+	seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
 	return 0;
 }
@@ -945,7 +945,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
 	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
 		return -EINVAL;
-	intel_cqm_threshold = bytes / r->mon_scale;
+	resctrl_cqm_threshold = bytes / r->mon_scale;
 	return nbytes;
 }
...
@@ -302,7 +302,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	this_cpu_write(current_task, next_p);
 	/* Load the Intel cache allocation PQR MSR. */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 	return prev_p;
 }
...
@@ -664,7 +664,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	}
 	/* Load the Intel cache allocation PQR MSR. */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 	return prev_p;
 }
...