Commit a52fb43a authored by Linus Torvalds

Merge branch 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cache control updates from Borislav Petkov:

 - The generalization of the RDT code to accommodate the addition of
   AMD's very similar implementation of the cache monitoring feature.

   This entails a subsystem move into a separate and generic
   arch/x86/kernel/cpu/resctrl/ directory along with adding
   vendor-specific initialization and feature detection helpers.

   On top of that is the unification of user-visible strings, both in the
   resctrl filesystem error handling and in Kconfig.

   Provided by Babu Moger and Sherry Hurwitz.

 - Code simplifications and error handling improvements by Reinette
   Chatre.

* 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Fix rdt_find_domain() return value and checks
  x86/resctrl: Remove unnecessary check for cbm_validate()
  x86/resctrl: Use rdt_last_cmd_puts() where possible
  MAINTAINERS: Update resctrl filename patterns
  Documentation: Rename and update intel_rdt_ui.txt to resctrl_ui.txt
  x86/resctrl: Introduce AMD QOS feature
  x86/resctrl: Fixup the user-visible strings
  x86/resctrl: Add AMD's X86_FEATURE_MBA to the scattered CPUID features
  x86/resctrl: Rename the config option INTEL_RDT to RESCTRL
  x86/resctrl: Add vendor check for the MBA software controller
  x86/resctrl: Bring cbm_validate() into the resource structure
  x86/resctrl: Initialize the vendor-specific resource functions
  x86/resctrl: Move all the macros to resctrl/internal.h
  x86/resctrl: Re-arrange the RDT init code
  x86/resctrl: Rename the RDT functions and definitions
  x86/resctrl: Rename and move rdt files to a separate directory
parents 42b00f12 52eb7433
User Interface for Resource Allocation in Intel Resource Director Technology
User Interface for Resource Control feature
Intel refers to this feature as Intel Resource Director Technology (Intel(R) RDT).
AMD refers to this feature as AMD Platform Quality of Service (AMD QoS).
Copyright (C) 2016 Intel Corporation
......@@ -6,8 +9,8 @@ Fenghua Yu <fenghua.yu@intel.com>
Tony Luck <tony.luck@intel.com>
Vikas Shivappa <vikas.shivappa@intel.com>
This feature is enabled by the CONFIG_INTEL_RDT Kconfig and the
X86 /proc/cpuinfo flag bits:
This feature is enabled by the CONFIG_RESCTRL Kconfig option and the X86
/proc/cpuinfo flag bits:
RDT (Resource Director Technology) Allocation - "rdt_a"
CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
CDP (Code and Data Prioritization) - "cdp_l3", "cdp_l2"
......
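For quick reference, here is a small standalone C program (illustrative only, not part of this patch set) that reports which of the documented /proc/cpuinfo flag bits are present on the running machine; "mba" is included because the series below adds AMD's X86_FEATURE_MBA to the scattered CPUID features:

#define _GNU_SOURCE		/* for getline() */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *flags[] = { "rdt_a", "cat_l3", "cat_l2",
				"cdp_l3", "cdp_l2", "mba" };
	size_t nflags = sizeof(flags) / sizeof(flags[0]);
	bool found[sizeof(flags) / sizeof(flags[0])] = { false };
	char *line = NULL;
	size_t len = 0;
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f) {
		perror("/proc/cpuinfo");
		return 1;
	}
	while (getline(&line, &len, f) != -1) {
		if (strncmp(line, "flags", 5) != 0)
			continue;
		/* Compare whole tokens so "mba" cannot match inside another flag name. */
		for (char *tok = strtok(line, " \t\n:"); tok;
		     tok = strtok(NULL, " \t\n:"))
			for (size_t i = 0; i < nflags; i++)
				if (!strcmp(tok, flags[i]))
					found[i] = true;
		break;	/* assume every CPU reports the same flag list */
	}
	for (size_t i = 0; i < nflags; i++)
		printf("%-6s : %s\n", flags[i], found[i] ? "present" : "absent");
	free(line);
	fclose(f);
	return 0;
}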
......@@ -12717,9 +12717,9 @@ M: Fenghua Yu <fenghua.yu@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/x86/kernel/cpu/intel_rdt*
F: arch/x86/include/asm/intel_rdt_sched.h
F: Documentation/x86/intel_rdt*
F: arch/x86/kernel/cpu/resctrl/
F: arch/x86/include/asm/resctrl_sched.h
F: Documentation/x86/resctrl*
READ-COPY UPDATE (RCU)
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
......
......@@ -444,15 +444,23 @@ config RETPOLINE
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
config INTEL_RDT
bool "Intel Resource Director Technology support"
depends on X86 && CPU_SUP_INTEL
config RESCTRL
bool "Resource Control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
select KERNFS
help
Select to enable resource allocation and monitoring which are
sub-features of Intel Resource Director Technology(RDT). More
information about RDT can be found in the Intel x86
Architecture Software Developer Manual.
Enable Resource Control support.
Provide support for the allocation and monitoring of system resource
usage by the CPU.
Intel calls this Intel Resource Director Technology
(Intel(R) RDT). More information about RDT can be found in the
Intel x86 Architecture Software Developer Manual.
AMD calls this AMD Platform Quality of Service (AMD QoS).
More information about AMD QoS can be found in the AMD64 Technology
Platform Quality of Service Extensions manual.
Say N if unsure.
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_INTEL_RDT_SCHED_H
#define _ASM_X86_INTEL_RDT_SCHED_H
#ifndef _ASM_X86_RESCTRL_SCHED_H
#define _ASM_X86_RESCTRL_SCHED_H
#ifdef CONFIG_INTEL_RDT
#ifdef CONFIG_RESCTRL
#include <linux/sched.h>
#include <linux/jump_label.h>
......@@ -10,7 +10,7 @@
#define IA32_PQR_ASSOC 0x0c8f
/**
* struct intel_pqr_state - State cache for the PQR MSR
* struct resctrl_pqr_state - State cache for the PQR MSR
* @cur_rmid: The cached Resource Monitoring ID
* @cur_closid: The cached Class Of Service ID
* @default_rmid: The user assigned Resource Monitoring ID
......@@ -24,21 +24,21 @@
* The cache also helps to avoid pointless updates if the value does
* not change.
*/
struct intel_pqr_state {
struct resctrl_pqr_state {
u32 cur_rmid;
u32 cur_closid;
u32 default_rmid;
u32 default_closid;
};
DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
/*
* __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
* __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
*
* Following considerations are made so that this has minimal impact
* on scheduler hot path:
......@@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
* simple as possible.
* Must be called with preemption disabled.
*/
static void __intel_rdt_sched_in(void)
static void __resctrl_sched_in(void)
{
struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
u32 closid = state->default_closid;
u32 rmid = state->default_rmid;
......@@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
}
}
static inline void intel_rdt_sched_in(void)
static inline void resctrl_sched_in(void)
{
if (static_branch_likely(&rdt_enable_key))
__intel_rdt_sched_in();
__resctrl_sched_in();
}
#else
static inline void intel_rdt_sched_in(void) {}
static inline void resctrl_sched_in(void) {}
#endif /* CONFIG_INTEL_RDT */
#endif /* CONFIG_RESCTRL */
#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
#endif /* _ASM_X86_RESCTRL_SCHED_H */
......@@ -36,13 +36,10 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
obj-$(CONFIG_INTEL_RDT) += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
obj-$(CONFIG_RESCTRL) += resctrl/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o
obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o
CFLAGS_pseudo_lock.o = -I$(src)
......@@ -27,7 +27,54 @@
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "intel_rdt.h"
#include "internal.h"
/*
* Check whether MBA bandwidth percentage value is correct. The value is
* checked against the minimum and maximum bandwidth values specified by
* the hardware. The allocated bandwidth percentage is rounded to the next
* control step available on the hardware.
*/
static bool bw_validate_amd(char *buf, unsigned long *data,
struct rdt_resource *r)
{
unsigned long bw;
int ret;
ret = kstrtoul(buf, 10, &bw);
if (ret) {
rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
return false;
}
if (bw < r->membw.min_bw || bw > r->default_ctrl) {
rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
r->membw.min_bw, r->default_ctrl);
return false;
}
*data = roundup(bw, (unsigned long)r->membw.bw_gran);
return true;
}
int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d)
{
unsigned long bw_val;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
return -EINVAL;
}
if (!bw_validate_amd(data->buf, &bw_val, r))
return -EINVAL;
d->new_ctrl = bw_val;
d->have_new_ctrl = true;
return 0;
}
/*
* Check whether MBA bandwidth percentage value is correct. The value is
......@@ -65,13 +112,13 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d)
{
unsigned long bw_val;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
return -EINVAL;
}
......@@ -89,7 +136,7 @@ int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
* are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
* Additionally Haswell requires at least two bits set.
*/
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
{
unsigned long first_bit, zero_bit, val;
unsigned int cbm_len = r->cache.cbm_len;
......@@ -97,12 +144,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
ret = kstrtoul(buf, 16, &val);
if (ret) {
rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
return false;
}
if (val == 0 || val > r->default_ctrl) {
rdt_last_cmd_puts("mask out of range\n");
rdt_last_cmd_puts("Mask out of range\n");
return false;
}
......@@ -110,12 +157,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val);
rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
return false;
}
if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
rdt_last_cmd_printf("Need at least %d bits in mask\n",
rdt_last_cmd_printf("Need at least %d bits in the mask\n",
r->cache.min_cbm_bits);
return false;
}
......@@ -124,6 +171,30 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
return true;
}
/*
* Check whether a cache bit mask is valid. AMD allows non-contiguous
* bitmasks
*/
bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
{
unsigned long val;
int ret;
ret = kstrtoul(buf, 16, &val);
if (ret) {
rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
return false;
}
if (val > r->default_ctrl) {
rdt_last_cmd_puts("Mask out of range\n");
return false;
}
*data = val;
return true;
}
/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
......@@ -135,7 +206,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
u32 cbm_val;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
return -EINVAL;
}
......@@ -145,17 +216,17 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
*/
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
rdtgroup_pseudo_locked_in_hierarchy(d)) {
rdt_last_cmd_printf("pseudo-locked region in hierarchy\n");
rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
return -EINVAL;
}
if (!cbm_validate(data->buf, &cbm_val, r))
if (!r->cbm_validate(data->buf, &cbm_val, r))
return -EINVAL;
if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
rdtgrp->mode == RDT_MODE_SHAREABLE) &&
rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n");
rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
return -EINVAL;
}
......@@ -164,14 +235,14 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
* either is exclusive.
*/
if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
rdt_last_cmd_printf("overlaps with exclusive group\n");
rdt_last_cmd_puts("Overlaps with exclusive group\n");
return -EINVAL;
}
if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
rdt_last_cmd_printf("overlaps with other group\n");
rdt_last_cmd_puts("Overlaps with other group\n");
return -EINVAL;
}
}
......@@ -293,7 +364,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
return parse_line(tok, r, rdtgrp);
}
rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
return -EINVAL;
}
......@@ -326,7 +397,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
*/
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
ret = -EINVAL;
rdt_last_cmd_puts("resource group is pseudo-locked\n");
rdt_last_cmd_puts("Resource group is pseudo-locked\n");
goto out;
}
......@@ -467,7 +538,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
r = &rdt_resources_all[resid];
d = rdt_find_domain(r, domid, NULL);
if (!d) {
if (IS_ERR_OR_NULL(d)) {
ret = -ENOENT;
goto out;
}
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_INTEL_RDT_H
#define _ASM_X86_INTEL_RDT_H
#ifndef _ASM_X86_RESCTRL_INTERNAL_H
#define _ASM_X86_RESCTRL_INTERNAL_H
#include <linux/sched.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#define IA32_L3_QOS_CFG 0xc81
#define IA32_L2_QOS_CFG 0xc82
#define IA32_L3_CBM_BASE 0xc90
#define IA32_L2_CBM_BASE 0xd10
#define IA32_MBA_THRTL_BASE 0xd50
#define MSR_IA32_L3_QOS_CFG 0xc81
#define MSR_IA32_L2_QOS_CFG 0xc82
#define MSR_IA32_L3_CBM_BASE 0xc90
#define MSR_IA32_L2_CBM_BASE 0xd10
#define MSR_IA32_MBA_THRTL_BASE 0xd50
#define MSR_IA32_MBA_BW_BASE 0xc0000200
#define MSR_IA32_QM_CTR 0x0c8e
#define MSR_IA32_QM_EVTSEL 0x0c8d
#define L3_QOS_CDP_ENABLE 0x01ULL
......@@ -29,6 +33,9 @@
#define MBM_CNTR_WIDTH 24
#define MBM_OVERFLOW_INTERVAL 1000
#define MAX_MBA_BW 100u
#define MBA_IS_LINEAR 0x4
#define MBA_MAX_MBPS U32_MAX
#define MAX_MBA_BW_AMD 0x800
#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)
......@@ -69,7 +76,7 @@ struct rmid_read {
u64 val;
};
extern unsigned int intel_cqm_threshold;
extern unsigned int resctrl_cqm_threshold;
extern bool rdt_alloc_capable;
extern bool rdt_mon_capable;
extern unsigned int rdt_mon_features;
......@@ -405,6 +412,7 @@ struct rdt_parse_data {
* @cache: Cache allocation related data
* @format_str: Per resource format string to show domain value
* @parse_ctrlval: Per resource function pointer to parse control values
* @cbm_validate: Cache bitmask validate function
* @evt_list: List of monitoring events
* @num_rmid: Number of RMIDs available
* @mon_scale: cqm counter * mon_scale = occupancy in bytes
......@@ -431,6 +439,7 @@ struct rdt_resource {
int (*parse_ctrlval)(struct rdt_parse_data *data,
struct rdt_resource *r,
struct rdt_domain *d);
bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r);
struct list_head evt_list;
int num_rmid;
unsigned int mon_scale;
......@@ -439,7 +448,9 @@ struct rdt_resource {
int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d);
int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d);
int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
......@@ -463,6 +474,10 @@ enum {
RDT_NUM_RESOURCES,
};
#define for_each_rdt_resource(r) \
for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
r++)
#define for_each_capable_rdt_resource(r) \
for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
r++) \
......@@ -567,5 +582,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
void cqm_handle_limbo(struct work_struct *work);
bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
void __check_limbo(struct rdt_domain *d, bool force_free);
bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
#endif /* _ASM_X86_INTEL_RDT_H */
#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
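For context on the hooks declared above: parse_ctrlval() and cbm_validate() now come in Intel and AMD variants, and the series wires the right one into each struct rdt_resource at init time so the filesystem code stays vendor-neutral. The following standalone sketch (illustrative only, not kernel code; the vendor enum stands in for the kernel's boot_cpu_data.x86_vendor check and the validators are heavily simplified) shows the dispatch pattern:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rdt_resource;
typedef bool (*cbm_validate_fn)(const char *buf, unsigned long *data,
				struct rdt_resource *r);

/* Cut-down stand-in for the kernel's struct rdt_resource. */
struct rdt_resource {
	const char *name;
	unsigned long default_ctrl;	/* all-ones capacity bitmask */
	cbm_validate_fn cbm_validate;	/* vendor-specific validator */
};

/* Intel: the mask must be a single contiguous run of set bits. */
static bool cbm_validate_intel(const char *buf, unsigned long *data,
			       struct rdt_resource *r)
{
	unsigned long val = strtoul(buf, NULL, 16);

	if (val == 0 || val > r->default_ctrl)
		return false;
	/* Adding the lowest set bit carries past the run only if it is contiguous. */
	if (val & (val + (val & -val)))
		return false;
	*data = val;
	return true;
}

/* AMD: any mask within range is accepted, including non-contiguous ones. */
static bool cbm_validate_amd(const char *buf, unsigned long *data,
			     struct rdt_resource *r)
{
	unsigned long val = strtoul(buf, NULL, 16);

	if (val > r->default_ctrl)
		return false;
	*data = val;
	return true;
}

enum vendor { VENDOR_INTEL, VENDOR_AMD };

/* Stand-in for the vendor-specific resource init the series adds. */
static void resctrl_init_res(struct rdt_resource *r, enum vendor v)
{
	r->cbm_validate = (v == VENDOR_AMD) ? cbm_validate_amd
					    : cbm_validate_intel;
}

int main(void)
{
	struct rdt_resource l3 = { .name = "L3", .default_ctrl = 0xfffff };
	unsigned long cbm;

	resctrl_init_res(&l3, VENDOR_AMD);
	printf("0xf0f on AMD:   %s\n",
	       l3.cbm_validate("f0f", &cbm, &l3) ? "valid" : "invalid");

	resctrl_init_res(&l3, VENDOR_INTEL);
	printf("0xf0f on Intel: %s\n",
	       l3.cbm_validate("f0f", &cbm, &l3) ? "valid" : "invalid");
	return 0;
}

The non-contiguous mask 0xf0f is rejected by the Intel path and accepted by the AMD path, which mirrors the difference between cbm_validate_intel() and cbm_validate_amd() in the diff; callers only ever go through r->cbm_validate().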
......@@ -26,10 +26,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include "intel_rdt.h"
#define MSR_IA32_QM_CTR 0x0c8e
#define MSR_IA32_QM_EVTSEL 0x0c8d
#include "internal.h"
struct rmid_entry {
u32 rmid;
......@@ -73,7 +70,7 @@ unsigned int rdt_mon_features;
* This is the threshold cache occupancy at which we will consider an
* RMID available for re-allocation.
*/
unsigned int intel_cqm_threshold;
unsigned int resctrl_cqm_threshold;
static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
......@@ -107,7 +104,7 @@ static bool rmid_dirty(struct rmid_entry *entry)
{
u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
return val >= intel_cqm_threshold;
return val >= resctrl_cqm_threshold;
}
/*
......@@ -187,7 +184,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
list_for_each_entry(d, &r->domains, list) {
if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
if (val <= intel_cqm_threshold)
if (val <= resctrl_cqm_threshold)
continue;
}
......@@ -625,6 +622,7 @@ static void l3_mon_evt_init(struct rdt_resource *r)
int rdt_get_mon_l3_config(struct rdt_resource *r)
{
unsigned int cl_size = boot_cpu_data.x86_cache_size;
int ret;
r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
......@@ -637,10 +635,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
*
* For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
*/
intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
intel_cqm_threshold /= r->mon_scale;
resctrl_cqm_threshold /= r->mon_scale;
ret = dom_data_init(r);
if (ret)
......
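To make the threshold heuristic above concrete, here is a worked version of the example from the comment (illustrative only; the mon_scale value is an assumption, the real one comes from boot_cpu_data.x86_cache_occ_scale):

#include <stdio.h>

int main(void)
{
	unsigned int cache_size_kb = 35 * 1024;	/* 35 MB LLC, as in the comment */
	unsigned int num_rmid = 56;
	unsigned int mon_scale = 64;		/* assumed x86_cache_occ_scale */
	unsigned int threshold = cache_size_kb * 1024 / num_rmid;

	printf("threshold: %u bytes, %.1f%% of the LLC\n", threshold,
	       100.0 * threshold / (cache_size_kb * 1024.0));
	printf("in hardware units: %u\n", threshold / mon_scale);
	return 0;
}

This prints 655360 bytes, roughly 1.8% of the LLC, matching the figure in the comment; resctrl_cqm_threshold stores the value already divided by mon_scale.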
......@@ -24,14 +24,14 @@
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/intel_rdt_sched.h>
#include <asm/resctrl_sched.h>
#include <asm/perf_event.h>
#include "../../events/perf_event.h" /* For X86_CONFIG() */
#include "intel_rdt.h"
#include "internal.h"
#define CREATE_TRACE_POINTS
#include "intel_rdt_pseudo_lock_event.h"
#include "pseudo_lock_event.h"
/*
* MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
......@@ -213,7 +213,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
for_each_cpu(cpu, &plr->d->cpu_mask) {
pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
if (!pm_req) {
rdt_last_cmd_puts("fail allocating mem for PM QoS\n");
rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
ret = -ENOMEM;
goto out_err;
}
......@@ -222,7 +222,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
DEV_PM_QOS_RESUME_LATENCY,
30);
if (ret < 0) {
rdt_last_cmd_printf("fail to add latency req cpu%d\n",
rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
cpu);
kfree(pm_req);
ret = -1;
......@@ -289,7 +289,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
plr->cpu = cpumask_first(&plr->d->cpu_mask);
if (!cpu_online(plr->cpu)) {
rdt_last_cmd_printf("cpu %u associated with cache not online\n",
rdt_last_cmd_printf("CPU %u associated with cache not online\n",
plr->cpu);
ret = -ENODEV;
goto out_region;
......@@ -307,7 +307,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
}
ret = -1;
rdt_last_cmd_puts("unable to determine cache line size\n");
rdt_last_cmd_puts("Unable to determine cache line size\n");
out_region:
pseudo_lock_region_clear(plr);
return ret;
......@@ -361,14 +361,14 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
* KMALLOC_MAX_SIZE.
*/
if (plr->size > KMALLOC_MAX_SIZE) {
rdt_last_cmd_puts("requested region exceeds maximum size\n");
rdt_last_cmd_puts("Requested region exceeds maximum size\n");
ret = -E2BIG;
goto out_region;
}
plr->kmem = kzalloc(plr->size, GFP_KERNEL);
if (!plr->kmem) {
rdt_last_cmd_puts("unable to allocate memory\n");
rdt_last_cmd_puts("Unable to allocate memory\n");
ret = -ENOMEM;
goto out_region;
}
......@@ -665,7 +665,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
* default closid associated with it.
*/
if (rdtgrp == &rdtgroup_default) {
rdt_last_cmd_puts("cannot pseudo-lock default group\n");
rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
return -EINVAL;
}
......@@ -707,17 +707,17 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
*/
prefetch_disable_bits = get_prefetch_disable_bits();
if (prefetch_disable_bits == 0) {
rdt_last_cmd_puts("pseudo-locking not supported\n");
rdt_last_cmd_puts("Pseudo-locking not supported\n");
return -EINVAL;
}
if (rdtgroup_monitor_in_progress(rdtgrp)) {
rdt_last_cmd_puts("monitoring in progress\n");
rdt_last_cmd_puts("Monitoring in progress\n");
return -EINVAL;
}
if (rdtgroup_tasks_assigned(rdtgrp)) {
rdt_last_cmd_puts("tasks assigned to resource group\n");
rdt_last_cmd_puts("Tasks assigned to resource group\n");
return -EINVAL;
}
......@@ -727,13 +727,13 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
}
if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
rdt_last_cmd_puts("unable to modify resctrl permissions\n");
rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
return -EIO;
}
ret = pseudo_lock_init(rdtgrp);
if (ret) {
rdt_last_cmd_puts("unable to init pseudo-lock region\n");
rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
goto out_release;
}
......@@ -770,7 +770,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
if (rdt_mon_capable) {
ret = alloc_rmid();
if (ret < 0) {
rdt_last_cmd_puts("out of RMIDs\n");
rdt_last_cmd_puts("Out of RMIDs\n");
return ret;
}
rdtgrp->mon.rmid = ret;
......@@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
"pseudo_lock/%u", plr->cpu);
if (IS_ERR(thread)) {
ret = PTR_ERR(thread);
rdt_last_cmd_printf("locking thread returned error %d\n", ret);
rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
goto out_cstates;
}
......@@ -1322,13 +1322,13 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
* the cleared, but not freed, plr struct resulting in an
* empty pseudo-locking loop.
*/
rdt_last_cmd_puts("locking thread interrupted\n");
rdt_last_cmd_puts("Locking thread interrupted\n");
goto out_cstates;
}
ret = pseudo_lock_minor_get(&new_minor);
if (ret < 0) {
rdt_last_cmd_puts("unable to obtain a new minor number\n");
rdt_last_cmd_puts("Unable to obtain a new minor number\n");
goto out_cstates;
}
......@@ -1360,7 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
if (IS_ERR(dev)) {
ret = PTR_ERR(dev);
rdt_last_cmd_printf("failed to create character device: %d\n",
rdt_last_cmd_printf("Failed to create character device: %d\n",
ret);
goto out_debugfs;
}
......
......@@ -39,5 +39,5 @@ TRACE_EVENT(pseudo_lock_l3,
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event
#define TRACE_INCLUDE_FILE pseudo_lock_event
#include <trace/define_trace.h>
......@@ -35,8 +35,8 @@
#include <uapi/linux/magic.h>
#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"
#include <asm/resctrl_sched.h>
#include "internal.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
......@@ -298,7 +298,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
}
/*
* This is safe against intel_rdt_sched_in() called from __switch_to()
* This is safe against resctrl_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call
from update_closid_rmid() is protected against __switch_to() because
* preemption is disabled.
......@@ -317,7 +317,7 @@ static void update_cpu_closid_rmid(void *info)
* executing task might have its own closid selected. Just reuse
* the context switch code.
*/
intel_rdt_sched_in();
resctrl_sched_in();
}
/*
......@@ -345,7 +345,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
/* Check whether cpus belong to parent ctrl group */
cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
if (cpumask_weight(tmpmask)) {
rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
return -EINVAL;
}
......@@ -470,14 +470,14 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
rdt_last_cmd_clear();
if (!rdtgrp) {
ret = -ENOENT;
rdt_last_cmd_puts("directory was removed\n");
rdt_last_cmd_puts("Directory was removed\n");
goto unlock;
}
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
ret = -EINVAL;
rdt_last_cmd_puts("pseudo-locking in progress\n");
rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto unlock;
}
......@@ -487,7 +487,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
ret = cpumask_parse(buf, newmask);
if (ret) {
rdt_last_cmd_puts("bad cpu list/mask\n");
rdt_last_cmd_puts("Bad CPU list/mask\n");
goto unlock;
}
......@@ -495,7 +495,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
cpumask_andnot(tmpmask, newmask, cpu_online_mask);
if (cpumask_weight(tmpmask)) {
ret = -EINVAL;
rdt_last_cmd_puts("can only assign online cpus\n");
rdt_last_cmd_puts("Can only assign online CPUs\n");
goto unlock;
}
......@@ -542,7 +542,7 @@ static void move_myself(struct callback_head *head)
preempt_disable();
/* update PQR_ASSOC MSR to make resource group go into effect */
intel_rdt_sched_in();
resctrl_sched_in();
preempt_enable();
kfree(callback);
......@@ -574,7 +574,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
*/
atomic_dec(&rdtgrp->waitcount);
kfree(callback);
rdt_last_cmd_puts("task exited\n");
rdt_last_cmd_puts("Task exited\n");
} else {
/*
* For ctrl_mon groups move both closid and rmid.
......@@ -692,7 +692,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
ret = -EINVAL;
rdt_last_cmd_puts("pseudo-locking in progress\n");
rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto unlock;
}
......@@ -926,7 +926,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
{
struct rdt_resource *r = of->kn->parent->priv;
seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
return 0;
}
......@@ -945,7 +945,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
if (bytes > (boot_cpu_data.x86_cache_size * 1024))
return -EINVAL;
intel_cqm_threshold = bytes / r->mon_scale;
resctrl_cqm_threshold = bytes / r->mon_scale;
return nbytes;
}
......@@ -1029,7 +1029,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
* peer RDT CDP resource. Hence the WARN.
*/
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
if (WARN_ON(!_d_cdp)) {
if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
_r_cdp = NULL;
ret = -EINVAL;
}
......@@ -1158,14 +1158,14 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
list_for_each_entry(d, &r->domains, list) {
if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
rdtgrp->closid, false)) {
rdt_last_cmd_puts("schemata overlaps\n");
rdt_last_cmd_puts("Schemata overlaps\n");
return false;
}
}
}
if (!has_cache) {
rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
return false;
}
......@@ -1206,7 +1206,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
goto out;
if (mode == RDT_MODE_PSEUDO_LOCKED) {
rdt_last_cmd_printf("cannot change pseudo-locked group\n");
rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
ret = -EINVAL;
goto out;
}
......@@ -1235,7 +1235,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
goto out;
rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
} else {
rdt_last_cmd_printf("unknown/unsupported mode\n");
rdt_last_cmd_puts("Unknown or unsupported mode\n");
ret = -EINVAL;
}
......@@ -1722,14 +1722,14 @@ static void l3_qos_cfg_update(void *arg)
{
bool *enable = arg;
wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}
static void l2_qos_cfg_update(void *arg)
{
bool *enable = arg;
wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}
static inline bool is_mba_linear(void)
......@@ -1878,7 +1878,10 @@ static int parse_rdtgroupfs_options(char *data)
if (ret)
goto out;
} else if (!strcmp(token, "mba_MBps")) {
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
ret = set_mba_sc(true);
else
ret = -EINVAL;
if (ret)
goto out;
} else {
......@@ -2540,7 +2543,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
tmp_cbm = d->new_ctrl;
if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
r->cache.min_cbm_bits) {
rdt_last_cmd_printf("no space on %s:%d\n",
rdt_last_cmd_printf("No space on %s:%d\n",
r->name, d->id);
return -ENOSPC;
}
......@@ -2557,7 +2560,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
continue;
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("failed to initialize allocations\n");
rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret;
}
rdtgrp->mode = RDT_MODE_SHAREABLE;
......@@ -2580,7 +2583,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
rdt_last_cmd_clear();
if (!prdtgrp) {
ret = -ENODEV;
rdt_last_cmd_puts("directory was removed\n");
rdt_last_cmd_puts("Directory was removed\n");
goto out_unlock;
}
......@@ -2588,7 +2591,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
(prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
ret = -EINVAL;
rdt_last_cmd_puts("pseudo-locking in progress\n");
rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto out_unlock;
}
......@@ -2596,7 +2599,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
if (!rdtgrp) {
ret = -ENOSPC;
rdt_last_cmd_puts("kernel out of memory\n");
rdt_last_cmd_puts("Kernel out of memory\n");
goto out_unlock;
}
*r = rdtgrp;
......@@ -2637,7 +2640,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
if (rdt_mon_capable) {
ret = alloc_rmid();
if (ret < 0) {
rdt_last_cmd_puts("out of RMIDs\n");
rdt_last_cmd_puts("Out of RMIDs\n");
goto out_destroy;
}
rdtgrp->mon.rmid = ret;
......@@ -2725,7 +2728,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
kn = rdtgrp->kn;
ret = closid_alloc();
if (ret < 0) {
rdt_last_cmd_puts("out of CLOSIDs\n");
rdt_last_cmd_puts("Out of CLOSIDs\n");
goto out_common_fail;
}
closid = ret;
......
......@@ -17,7 +17,11 @@ struct cpuid_bit {
u32 sub_leaf;
};
/* Please keep the leaf sorted by cpuid_bit.level for faster search. */
/*
* Please keep the leaf sorted by cpuid_bit.level for faster search.
* X86_FEATURE_MBA is supported by both Intel and AMD. But the CPUID
* levels are different and there is a separate entry for each.
*/
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
......@@ -29,6 +33,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
{ X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
......
......@@ -56,7 +56,7 @@
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
#include <asm/intel_rdt_sched.h>
#include <asm/resctrl_sched.h>
#include <asm/proto.h>
#include "process.h"
......@@ -298,7 +298,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
this_cpu_write(current_task, next_p);
/* Load the Intel cache allocation PQR MSR. */
intel_rdt_sched_in();
resctrl_sched_in();
return prev_p;
}
......
......@@ -52,7 +52,7 @@
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/resctrl_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
......@@ -622,7 +622,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
}
/* Load the Intel cache allocation PQR MSR. */
intel_rdt_sched_in();
resctrl_sched_in();
return prev_p;
}
......
......@@ -993,7 +993,7 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT
#ifdef CONFIG_RESCTRL
u32 closid;
u32 rmid;
#endif
......