Commit 193e2268 authored by Linus Torvalds

Merge tag 'x86_cache_for_v6.1_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cache resource control updates from Borislav Petkov:

 - More work by James Morse to disentangle the generic resctrl
   filesystem code from the architecture-specific code, with the end
   goal of also plugging ARM's MPAM implementation into it so that the
   user interface remains the same

 - Properly restore the MSR_MISC_FEATURE_CONTROL value instead of
   blindly overwriting it to 0
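The pseudo-locking hunks below implement the second item by reading the
prefetcher-control MSR before disabling prefetch and writing the saved value
back afterwards. In compressed form, the pattern looks like this (a sketch
using the generic rdmsrl()/wrmsrl() helpers; the actual diff uses the
__rdmsr()/wrmsr() variants in some paths):

	u64 saved_msr;

	/* Save the current prefetcher settings rather than assuming 0. */
	rdmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
	wrmsrl(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);

	/* ... populate or measure the pseudo-locked region ... */

	/* Restore whatever was there before, instead of writing 0. */
	wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);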

* tag 'x86_cache_for_v6.1_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86/resctrl: Make resctrl_arch_rmid_read() return values in bytes
  x86/resctrl: Add resctrl_rmid_realloc_limit to abstract x86's boot_cpu_data
  x86/resctrl: Rename and change the units of resctrl_cqm_threshold
  x86/resctrl: Move get_corrected_mbm_count() into resctrl_arch_rmid_read()
  x86/resctrl: Move mbm_overflow_count() into resctrl_arch_rmid_read()
  x86/resctrl: Pass the required parameters into resctrl_arch_rmid_read()
  x86/resctrl: Abstract __rmid_read()
  x86/resctrl: Allow per-rmid arch private storage to be reset
  x86/resctrl: Add per-rmid arch private storage for overflow and chunks
  x86/resctrl: Calculate bandwidth from the previous __mon_event_count() chunks
  x86/resctrl: Allow update_mba_bw() to update controls directly
  x86/resctrl: Remove architecture copy of mbps_val
  x86/resctrl: Switch over to the resctrl mbps_val list
  x86/resctrl: Create mba_sc configuration in the rdt_domain
  x86/resctrl: Abstract and use supports_mba_mbps()
  x86/resctrl: Remove set_mba_sc()s control array re-initialisation
  x86/resctrl: Add domain offline callback for resctrl work
  x86/resctrl: Group struct rdt_hw_domain cleanup
  x86/resctrl: Add domain online callback for resctrl work
  x86/resctrl: Merge mon_capable and mon_enabled
  ...
parents b5f0b113 f7b1843e
@@ -81,6 +81,15 @@ static void __resctrl_sched_in(void)
 	}
 }
 
+static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
+{
+	unsigned int scale = boot_cpu_data.x86_cache_occ_scale;
+
+	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
+	val /= scale;
+	return val * scale;
+}
+
 static inline void resctrl_sched_in(void)
 {
 	if (static_branch_likely(&rdt_enable_key))
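Because resctrl_arch_round_mon_val() truncates before scaling back up, values
round down to a whole number of hardware units. A standalone sketch of the
same arithmetic (the 64-byte scale is illustrative; real hardware reports
x86_cache_occ_scale via CPUID):

	#include <assert.h>

	/* Same truncate-then-rescale arithmetic as resctrl_arch_round_mon_val(). */
	static unsigned int round_mon_val(unsigned int val, unsigned int scale)
	{
		val /= scale;		/* whole hardware units, fraction discarded */
		return val * scale;	/* back to bytes */
	}

	int main(void)
	{
		/* 100000 bytes at a hypothetical 64-byte scale rounds down to 99968. */
		assert(round_mon_val(100000, 64) == 99968);
		return 0;
	}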
@@ -147,7 +147,6 @@ static inline void cache_alloc_hsw_probe(void)
 	r->cache.shareable_bits = 0xc0000;
 	r->cache.min_cbm_bits = 2;
 	r->alloc_capable = true;
-	r->alloc_enabled = true;
 	rdt_alloc_capable = true;
 }
@@ -211,7 +210,6 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
 		thread_throttle_mode_init();
 
 	r->alloc_capable = true;
-	r->alloc_enabled = true;
 
 	return true;
 }
@@ -242,7 +240,6 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 	r->data_width = 4;
 
 	r->alloc_capable = true;
-	r->alloc_enabled = true;
 
 	return true;
 }
@@ -261,7 +258,6 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 	r->cache.shareable_bits = ebx & r->default_ctrl;
 	r->data_width = (r->cache.cbm_len + 3) / 4;
 	r->alloc_capable = true;
-	r->alloc_enabled = true;
 }
 
 static void rdt_get_cdp_config(int level)
@@ -300,7 +296,7 @@ mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
  * that can be written to QOS_MSRs.
  * There are currently no SKUs which support non linear delay values.
  */
-u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
+static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
 {
 	if (r->membw.delay_linear)
 		return MAX_MBA_BW - bw;
@@ -401,7 +397,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 	return NULL;
 }
 
-void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
+static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
 {
 	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 	int i;
@@ -410,12 +406,17 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 	 * Initialize the Control MSRs to having no control.
 	 * For Cache Allocation: Set all bits in cbm
 	 * For Memory Allocation: Set b/w requested to 100%
-	 * and the bandwidth in MBps to U32_MAX
 	 */
-	for (i = 0; i < hw_res->num_closid; i++, dc++, dm++) {
+	for (i = 0; i < hw_res->num_closid; i++, dc++)
 		*dc = r->default_ctrl;
-		*dm = MBA_MAX_MBPS;
-	}
+}
+
+static void domain_free(struct rdt_hw_domain *hw_dom)
+{
+	kfree(hw_dom->arch_mbm_total);
+	kfree(hw_dom->arch_mbm_local);
+	kfree(hw_dom->ctrl_val);
+	kfree(hw_dom);
 }
 
 static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
@@ -423,23 +424,15 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
 	struct msr_param m;
-	u32 *dc, *dm;
+	u32 *dc;
 
 	dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
 			   GFP_KERNEL);
 	if (!dc)
 		return -ENOMEM;
 
-	dm = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->mbps_val),
-			   GFP_KERNEL);
-	if (!dm) {
-		kfree(dc);
-		return -ENOMEM;
-	}
-
 	hw_dom->ctrl_val = dc;
-	hw_dom->mbps_val = dm;
-	setup_default_ctrlval(r, dc, dm);
+	setup_default_ctrlval(r, dc);
 
 	m.low = 0;
 	m.high = hw_res->num_closid;
@@ -447,39 +440,31 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 	return 0;
 }
 
-static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
+/**
+ * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
+ * @num_rmid:	The size of the MBM counter array
+ * @hw_dom:	The domain that owns the allocated arrays
+ */
+static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
 {
 	size_t tsize;
 
-	if (is_llc_occupancy_enabled()) {
-		d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
-		if (!d->rmid_busy_llc)
-			return -ENOMEM;
-		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
-	}
 	if (is_mbm_total_enabled()) {
-		tsize = sizeof(*d->mbm_total);
-		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
-		if (!d->mbm_total) {
-			bitmap_free(d->rmid_busy_llc);
+		tsize = sizeof(*hw_dom->arch_mbm_total);
+		hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
+		if (!hw_dom->arch_mbm_total)
 			return -ENOMEM;
-		}
 	}
 	if (is_mbm_local_enabled()) {
-		tsize = sizeof(*d->mbm_local);
-		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
-		if (!d->mbm_local) {
-			bitmap_free(d->rmid_busy_llc);
-			kfree(d->mbm_total);
+		tsize = sizeof(*hw_dom->arch_mbm_local);
+		hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
+		if (!hw_dom->arch_mbm_local) {
+			kfree(hw_dom->arch_mbm_total);
+			hw_dom->arch_mbm_total = NULL;
 			return -ENOMEM;
 		}
 	}
-	if (is_mbm_enabled()) {
-		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
-		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
-	}
 
 	return 0;
 }
@@ -502,6 +487,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 	struct list_head *add_pos = NULL;
 	struct rdt_hw_domain *hw_dom;
 	struct rdt_domain *d;
+	int err;
 
 	d = rdt_find_domain(r, id, &add_pos);
 	if (IS_ERR(d)) {
@@ -527,25 +513,22 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 	rdt_domain_reconfigure_cdp(r);
 
 	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
-		kfree(hw_dom);
+		domain_free(hw_dom);
 		return;
 	}
 
-	if (r->mon_capable && domain_setup_mon_state(r, d)) {
-		kfree(hw_dom->ctrl_val);
-		kfree(hw_dom->mbps_val);
-		kfree(hw_dom);
+	if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
+		domain_free(hw_dom);
 		return;
 	}
 
 	list_add_tail(&d->list, add_pos);
 
-	/*
-	 * If resctrl is mounted, add
-	 * per domain monitor data directories.
-	 */
-	if (static_branch_unlikely(&rdt_mon_enable_key))
-		mkdir_mondata_subdir_allrdtgrp(r, d);
+	err = resctrl_online_domain(r, d);
+	if (err) {
+		list_del(&d->list);
+		domain_free(hw_dom);
+	}
 }
 
 static void domain_remove_cpu(int cpu, struct rdt_resource *r)
@@ -563,27 +546,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 	cpumask_clear_cpu(cpu, &d->cpu_mask);
 	if (cpumask_empty(&d->cpu_mask)) {
-		/*
-		 * If resctrl is mounted, remove all the
-		 * per domain monitor data directories.
-		 */
-		if (static_branch_unlikely(&rdt_mon_enable_key))
-			rmdir_mondata_subdir_allrdtgrp(r, d->id);
+		resctrl_offline_domain(r, d);
 		list_del(&d->list);
-		if (r->mon_capable && is_mbm_enabled())
-			cancel_delayed_work(&d->mbm_over);
-		if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
-			/*
-			 * When a package is going down, forcefully
-			 * decrement rmid->ebusy. There is no way to know
-			 * that the L3 was flushed and hence may lead to
-			 * incorrect counts in rare scenarios, but leaving
-			 * the RMID as busy creates RMID leaks if the
-			 * package never comes back.
-			 */
-			__check_limbo(d, true);
-			cancel_delayed_work(&d->cqm_limbo);
-		}
 
 		/*
 		 * rdt_domain "d" is going to be freed below, so clear
@@ -591,13 +555,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 		 * its pointer from pseudo_lock_region struct.
 		 */
 		if (d->plr)
			d->plr->d = NULL;
+		domain_free(hw_dom);
 
-		kfree(hw_dom->ctrl_val);
-		kfree(hw_dom->mbps_val);
-		bitmap_free(d->rmid_busy_llc);
-		kfree(d->mbm_total);
-		kfree(d->mbm_local);
-		kfree(hw_dom);
 		return;
 	}
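Note the unwind in arch_domain_mbm_alloc() above: if the second array fails to
allocate, the first is freed and its pointer set back to NULL, so a later
domain_free() stays safe (kfree(NULL) is a no-op). The same pattern in
portable C, with illustrative names that are not from the kernel:

	#include <stdlib.h>

	struct mbm_arrays {
		void *total;
		void *local;
	};

	/* Allocate both arrays, or neither: unwind the first on failure. */
	static int mbm_alloc(struct mbm_arrays *a, size_t n, size_t size)
	{
		a->total = calloc(n, size);
		if (!a->total)
			return -1;

		a->local = calloc(n, size);
		if (!a->local) {
			free(a->total);
			a->total = NULL;	/* leave a state that is safe to free again */
			return -1;
		}
		return 0;
	}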
@@ -61,6 +61,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
 	     struct rdt_domain *d)
 {
 	struct resctrl_staged_config *cfg;
+	u32 closid = data->rdtgrp->closid;
 	struct rdt_resource *r = s->res;
 	unsigned long bw_val;
 
@@ -72,6 +73,12 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
 	if (!bw_validate(data->buf, &bw_val, r))
 		return -EINVAL;
 
+	if (is_mba_sc(r)) {
+		d->mbps_val[closid] = bw_val;
+		return 0;
+	}
+
 	cfg->new_ctrl = bw_val;
 	cfg->have_new_ctrl = true;
 
@@ -261,14 +268,13 @@ static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
 
 static bool apply_config(struct rdt_hw_domain *hw_dom,
 			 struct resctrl_staged_config *cfg, u32 idx,
-			 cpumask_var_t cpu_mask, bool mba_sc)
+			 cpumask_var_t cpu_mask)
 {
 	struct rdt_domain *dom = &hw_dom->d_resctrl;
-	u32 *dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;
 
-	if (cfg->new_ctrl != dc[idx]) {
+	if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
 		cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
-		dc[idx] = cfg->new_ctrl;
+		hw_dom->ctrl_val[idx] = cfg->new_ctrl;
 
 		return true;
 	}
@@ -276,6 +282,27 @@ static bool apply_config(struct rdt_hw_domain *hw_dom,
 	return false;
 }
 
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
+{
+	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+	u32 idx = get_config_index(closid, t);
+	struct msr_param msr_param;
+
+	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
+		return -EINVAL;
+
+	hw_dom->ctrl_val[idx] = cfg_val;
+
+	msr_param.res = r;
+	msr_param.low = idx;
+	msr_param.high = idx + 1;
+	hw_res->msr_update(d, &msr_param, r);
+
+	return 0;
+}
+
 int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 {
 	struct resctrl_staged_config *cfg;
@@ -284,14 +311,12 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 	enum resctrl_conf_type t;
 	cpumask_var_t cpu_mask;
 	struct rdt_domain *d;
-	bool mba_sc;
 	int cpu;
 	u32 idx;
 
 	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
 		return -ENOMEM;
 
-	mba_sc = is_mba_sc(r);
 	msr_param.res = NULL;
 	list_for_each_entry(d, &r->domains, list) {
 		hw_dom = resctrl_to_arch_dom(d);
@@ -301,7 +326,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 			continue;
 
 		idx = get_config_index(closid, t);
-		if (!apply_config(hw_dom, cfg, idx, cpu_mask, mba_sc))
+		if (!apply_config(hw_dom, cfg, idx, cpu_mask))
 			continue;
 
 		if (!msr_param.res) {
@@ -315,11 +340,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 		}
 	}
 
-	/*
-	 * Avoid writing the control msr with control values when
-	 * MBA software controller is enabled
-	 */
-	if (cpumask_empty(cpu_mask) || mba_sc)
+	if (cpumask_empty(cpu_mask))
 		goto done;
 	cpu = get_cpu();
 	/* Update resource control msr on this CPU if it's in cpu_mask. */
@@ -406,6 +427,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 	list_for_each_entry(s, &resctrl_schema_all, list) {
 		r = s->res;
 
+		/*
+		 * Writes to mba_sc resources update the software controller,
+		 * not the control MSR.
+		 */
+		if (is_mba_sc(r))
+			continue;
+
 		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
 		if (ret)
 			goto out;
@@ -433,9 +462,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
 	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
 	u32 idx = get_config_index(closid, type);
 
-	if (!is_mba_sc(r))
-		return hw_dom->ctrl_val[idx];
-	return hw_dom->mbps_val[idx];
+	return hw_dom->ctrl_val[idx];
 }
 
 static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
@@ -450,8 +477,12 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
 		if (sep)
 			seq_puts(s, ";");
-		ctrl_val = resctrl_arch_get_config(r, dom, closid,
-						   schema->conf_type);
+
+		if (is_mba_sc(r))
+			ctrl_val = dom->mbps_val[closid];
+		else
+			ctrl_val = resctrl_arch_get_config(r, dom, closid,
+							   schema->conf_type);
+
 		seq_printf(s, r->format_str, dom->id, max_data_width,
 			   ctrl_val);
 		sep = true;
@@ -518,7 +549,6 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 {
 	struct kernfs_open_file *of = m->private;
-	struct rdt_hw_resource *hw_res;
 	u32 resid, evtid, domid;
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
@@ -538,8 +568,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 	domid = md.u.domid;
 	evtid = md.u.evtid;
 
-	hw_res = &rdt_resources_all[resid];
-	r = &hw_res->r_resctrl;
+	r = &rdt_resources_all[resid].r_resctrl;
 	d = rdt_find_domain(r, domid, NULL);
 	if (IS_ERR_OR_NULL(d)) {
 		ret = -ENOENT;
@@ -548,12 +577,12 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 
 	mon_event_read(&rr, r, d, rdtgrp, evtid, false);
 
-	if (rr.val & RMID_VAL_ERROR)
+	if (rr.err == -EIO)
 		seq_puts(m, "Error\n");
-	else if (rr.val & RMID_VAL_UNAVAIL)
+	else if (rr.err == -EINVAL)
 		seq_puts(m, "Unavailable\n");
 	else
-		seq_printf(m, "%llu\n", rr.val * hw_res->mon_scale);
+		seq_printf(m, "%llu\n", rr.val);
 
 out:
 	rdtgroup_kn_unlock(of->kn);
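resctrl_arch_update_one() deliberately does no cross-CPU work: it returns
-EINVAL unless the caller is already on a CPU in the domain, then writes the
MSR directly. A hedged sketch of how a caller might satisfy that requirement
with smp_call_function_any(); the context struct and wrapper are illustrative,
not kernel API:

	struct update_one_ctx {
		struct rdt_resource *r;
		struct rdt_domain *d;
		u32 closid;
		u32 cfg_val;
		int err;
	};

	static void update_one_fn(void *info)
	{
		struct update_one_ctx *ctx = info;

		ctx->err = resctrl_arch_update_one(ctx->r, ctx->d, ctx->closid,
						   CDP_NONE, ctx->cfg_val);
	}

	/* Run the update on some CPU belonging to the domain, and wait for it. */
	static int update_one_on_domain(struct update_one_ctx *ctx)
	{
		return smp_call_function_any(&ctx->d->cpu_mask, update_one_fn,
					     ctx, true) ?: ctx->err;
	}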
@@ -22,21 +22,12 @@
 #define L2_QOS_CDP_ENABLE		0x01ULL
 
-/*
- * Event IDs are used to program IA32_QM_EVTSEL before reading event
- * counter from IA32_QM_CTR
- */
-#define QOS_L3_OCCUP_EVENT_ID		0x01
-#define QOS_L3_MBM_TOTAL_EVENT_ID	0x02
-#define QOS_L3_MBM_LOCAL_EVENT_ID	0x03
-
 #define CQM_LIMBOCHECK_INTERVAL	1000
 #define MBM_CNTR_WIDTH_BASE		24
 #define MBM_OVERFLOW_INTERVAL		1000
 #define MAX_MBA_BW			100u
 #define MBA_IS_LINEAR			0x4
-#define MBA_MAX_MBPS			U32_MAX
 #define MAX_MBA_BW_AMD			0x800
 #define MBM_CNTR_WIDTH_OFFSET_AMD	20
@@ -74,7 +65,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
  * @list:	entry in &rdt_resource->evt_list
  */
 struct mon_evt {
-	u32			evtid;
+	enum resctrl_event_id	evtid;
 	char			*name;
 	struct list_head	list;
 };
@@ -91,9 +82,9 @@ struct mon_evt {
 union mon_data_bits {
 	void *priv;
 	struct {
-		unsigned int rid	: 10;
-		unsigned int evtid	: 8;
-		unsigned int domid	: 14;
+		unsigned int rid		: 10;
+		enum resctrl_event_id evtid	: 8;
+		unsigned int domid		: 14;
 	} u;
 };
@@ -101,12 +92,12 @@ struct rmid_read {
 	struct rdtgroup		*rgrp;
 	struct rdt_resource	*r;
 	struct rdt_domain	*d;
-	int			evtid;
+	enum resctrl_event_id	evtid;
 	bool			first;
+	int			err;
 	u64			val;
 };
 
-extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
@@ -288,35 +279,45 @@ struct rftype {
 /**
  * struct mbm_state - status for each MBM counter in each domain
- * @chunks:	Total data moved (multiply by rdt_group.mon_scale to get bytes)
- * @prev_msr:	Value of IA32_QM_CTR for this RMID last time we read it
- * @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting
+ * @prev_bw_bytes: Previous bytes value read for bandwidth calculation
  * @prev_bw:	The most recent bandwidth in MBps
  * @delta_bw:	Difference between the current and previous bandwidth
  * @delta_comp:	Indicates whether to compute the delta_bw
 */
 struct mbm_state {
-	u64	chunks;
-	u64	prev_msr;
-	u64	prev_bw_msr;
+	u64	prev_bw_bytes;
 	u32	prev_bw;
 	u32	delta_bw;
 	bool	delta_comp;
 };
 
+/**
+ * struct arch_mbm_state - values used to compute resctrl_arch_rmid_read()s
+ *			   return value.
+ * @chunks:	Total data moved (multiply by rdt_group.mon_scale to get bytes)
+ * @prev_msr:	Value of IA32_QM_CTR last time it was read for the RMID used to
+ *		find this struct.
+ */
+struct arch_mbm_state {
+	u64	chunks;
+	u64	prev_msr;
+};
+
 /**
  * struct rdt_hw_domain - Arch private attributes of a set of CPUs that share
  *			  a resource
  * @d_resctrl:	Properties exposed to the resctrl file system
  * @ctrl_val:	array of cache or mem ctrl values (indexed by CLOSID)
- * @mbps_val:	When mba_sc is enabled, this holds the bandwidth in MBps
+ * @arch_mbm_total:	arch private state for MBM total bandwidth
+ * @arch_mbm_local:	arch private state for MBM local bandwidth
  *
  * Members of this structure are accessed via helpers that provide abstraction.
  */
 struct rdt_hw_domain {
 	struct rdt_domain		d_resctrl;
 	u32				*ctrl_val;
-	u32				*mbps_val;
+	struct arch_mbm_state		*arch_mbm_total;
+	struct arch_mbm_state		*arch_mbm_local;
 };
 
 static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
@@ -459,14 +460,6 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
 	for_each_rdt_resource(r)					      \
 		if (r->mon_capable)
 
-#define for_each_alloc_enabled_rdt_resource(r)			      \
-	for_each_rdt_resource(r)					      \
-		if (r->alloc_enabled)
-
-#define for_each_mon_enabled_rdt_resource(r)			      \
-	for_each_rdt_resource(r)					      \
-		if (r->mon_enabled)
-
 /* CPUID.(EAX=10H, ECX=ResID=1).EAX */
 union cpuid_0x10_1_eax {
 	struct {
@@ -530,10 +523,6 @@ void free_rmid(u32 rmid);
 int rdt_get_mon_l3_config(struct rdt_resource *r);
 void mon_event_count(void *info);
 int rdtgroup_mondata_show(struct seq_file *m, void *arg);
-void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
-				    unsigned int dom_id);
-void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
-				    struct rdt_domain *d);
 void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
 		    int evtid, int first);
@@ -542,8 +531,6 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom,
 void mbm_handle_overflow(struct work_struct *work);
 void __init intel_rdt_mbm_apply_quirk(void);
 bool is_mba_sc(struct rdt_resource *r);
-void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm);
-u32 delay_bw_map(unsigned long bw, struct rdt_resource *r);
 void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
 void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
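With resctrl_arch_rmid_read() now returning bytes, the generic code can derive
bandwidth purely from successive byte counts (the prev_bw_bytes member above),
with no knowledge of MSR chunk sizes. A standalone sketch of that calculation,
assuming a fixed one-second sampling interval:

	#include <stdint.h>

	struct bw_state {
		uint64_t prev_bw_bytes;	/* last byte count, as in struct mbm_state */
		uint32_t prev_bw;	/* most recent bandwidth in MBps */
	};

	/* Called once per sampling interval (assumed 1s here) with the new count. */
	static void update_bw(struct bw_state *s, uint64_t cur_bytes)
	{
		uint64_t bytes = cur_bytes - s->prev_bw_bytes;

		s->prev_bw_bytes = cur_bytes;
		s->prev_bw = bytes / (1024 * 1024);	/* bytes/second -> MBps */
	}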
This diff is collapsed.
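One of the collapsed diffs covers the monitor code, where (per the shortlog
above) mbm_overflow_count() and get_corrected_mbm_count() move into
resctrl_arch_rmid_read(). A sketch of the usual correction for a hardware
counter narrower than 64 bits (MBM_CNTR_WIDTH_BASE is 24, plus a
CPUID-reported offset); this mirrors the long-standing helper, though the
post-merge code may differ in detail:

	#include <stdint.h>

	/*
	 * The hardware counter wraps at 2^width. Shifting both readings up so
	 * the counter's top bit becomes bit 63 makes the 64-bit subtraction
	 * wrap the same way the hardware does.
	 */
	static uint64_t mbm_overflow_count(uint64_t prev_msr, uint64_t cur_msr,
					   unsigned int width)
	{
		uint64_t shift = 64 - width;

		return ((cur_msr << shift) - (prev_msr << shift)) >> shift;
	}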
@@ -420,6 +420,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
 	struct pseudo_lock_region *plr = rdtgrp->plr;
 	u32 rmid_p, closid_p;
 	unsigned long i;
+	u64 saved_msr;
 #ifdef CONFIG_KASAN
 	/*
 	 * The registers used for local register variables are also used
@@ -463,6 +464,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
 	 * the buffer and evict pseudo-locked memory read earlier from the
 	 * cache.
 	 */
+	saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
 	__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
 	closid_p = this_cpu_read(pqr_state.cur_closid);
 	rmid_p = this_cpu_read(pqr_state.cur_rmid);
@@ -514,7 +516,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
 	__wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
 
 	/* Re-enable the hardware prefetcher(s) */
-	wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+	wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
 	local_irq_enable();
 
 	plr->thread_done = 1;
@@ -835,7 +837,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
 	 * First determine which cpus have pseudo-locked regions
 	 * associated with them.
 	 */
-	for_each_alloc_enabled_rdt_resource(r) {
+	for_each_alloc_capable_rdt_resource(r) {
 		list_for_each_entry(d_i, &r->domains, list) {
 			if (d_i->plr)
 				cpumask_or(cpu_with_psl, cpu_with_psl,
@@ -871,6 +873,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
 static int measure_cycles_lat_fn(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
+	u32 saved_low, saved_high;
 	unsigned long i;
 	u64 start, end;
 	void *mem_r;
@@ -879,6 +882,7 @@ static int measure_cycles_lat_fn(void *_plr)
 	/*
 	 * Disable hardware prefetchers.
 	 */
+	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
 	wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
 	mem_r = READ_ONCE(plr->kmem);
 	/*
@@ -895,7 +899,7 @@ static int measure_cycles_lat_fn(void *_plr)
 		end = rdtsc_ordered();
 		trace_pseudo_lock_mem_latency((u32)(end - start));
 	}
-	wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
 	local_irq_enable();
 	plr->thread_done = 1;
 	wake_up_interruptible(&plr->lock_thread_wq);
@@ -940,6 +944,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
 	u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
 	struct perf_event *miss_event, *hit_event;
 	int hit_pmcnum, miss_pmcnum;
+	u32 saved_low, saved_high;
 	unsigned int line_size;
 	unsigned int size;
 	unsigned long i;
@@ -973,6 +978,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
 	/*
 	 * Disable hardware prefetchers.
 	 */
+	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
 	wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
 
 	/* Initialize rest of local variables */
@@ -1031,7 +1037,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
 	 */
 	rmb();
 	/* Re-enable hardware prefetchers */
-	wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
 	local_irq_enable();
 out_hit:
 	perf_event_release_kernel(hit_event);
This diff is collapsed.
@@ -15,6 +15,9 @@ int proc_resctrl_show(struct seq_file *m,
 
 #endif
 
+/* max value for struct rdt_domain's mbps_val */
+#define MBA_MAX_MBPS		U32_MAX
+
 /**
  * enum resctrl_conf_type - The type of configuration.
  * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
@@ -29,6 +32,16 @@ enum resctrl_conf_type {
 
 #define CDP_NUM_TYPES		(CDP_DATA + 1)
 
+/*
+ * Event IDs, the values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
+enum resctrl_event_id {
+	QOS_L3_OCCUP_EVENT_ID		= 0x01,
+	QOS_L3_MBM_TOTAL_EVENT_ID	= 0x02,
+	QOS_L3_MBM_LOCAL_EVENT_ID	= 0x03,
+};
+
 /**
  * struct resctrl_staged_config - parsed configuration to be applied
  * @new_ctrl:		new ctrl value to be loaded
@@ -53,6 +66,9 @@ struct resctrl_staged_config {
  * @cqm_work_cpu:	worker CPU for CQM h/w counters
  * @plr:		pseudo-locked region (if any) associated with domain
  * @staged_config:	parsed configuration to be applied
+ * @mbps_val:		When mba_sc is enabled, this holds the array of user
+ *			specified control values for mba_sc in MBps, indexed
+ *			by closid
 */
 struct rdt_domain {
 	struct list_head		list;
@@ -67,6 +83,7 @@ struct rdt_domain {
 	int				cqm_work_cpu;
 	struct pseudo_lock_region	*plr;
 	struct resctrl_staged_config	staged_config[CDP_NUM_TYPES];
+	u32				*mbps_val;
 };
 
 /**
@@ -130,8 +147,6 @@ struct resctrl_schema;
 /**
  * struct rdt_resource - attributes of a resctrl resource
  * @rid:		The index of the resource
- * @alloc_enabled:	Is allocation enabled on this machine
- * @mon_enabled:	Is monitoring enabled for this feature
 * @alloc_capable:	Is allocation available on this machine
 * @mon_capable:	Is monitor feature available on this machine
 * @num_rmid:		Number of RMIDs available
@@ -150,8 +165,6 @@ struct resctrl_schema;
 struct rdt_resource {
 	int			rid;
-	bool			alloc_enabled;
-	bool			mon_enabled;
 	bool			alloc_capable;
 	bool			mon_capable;
 	int			num_rmid;
@@ -194,7 +207,50 @@ struct resctrl_schema {
 /* The number of closid supported by this resource regardless of CDP */
 u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
 int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+
+/*
+ * Update the ctrl_val and apply this config right now.
+ * Must be called on one of the domain's CPUs.
+ */
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+			    u32 closid, enum resctrl_conf_type t, u32 cfg_val);
+
 u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
 			    u32 closid, enum resctrl_conf_type type);
+int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
+void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
+
+/**
+ * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
+ *			      for this resource and domain.
+ * @r:		resource that the counter should be read from.
+ * @d:		domain that the counter should be read from.
+ * @rmid:	rmid of the counter to read.
+ * @eventid:	eventid to read, e.g. L3 occupancy.
+ * @val:	result of the counter read in bytes.
+ *
+ * Call from process context on a CPU that belongs to domain @d.
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+			   u32 rmid, enum resctrl_event_id eventid, u64 *val);
+
+/**
+ * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
+ *			       and eventid.
+ * @r:		The domain's resource.
+ * @d:		The rmid's domain.
+ * @rmid:	The rmid whose counter values should be reset.
+ * @eventid:	The eventid whose counter values should be reset.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+			     u32 rmid, enum resctrl_event_id eventid);
+
+extern unsigned int resctrl_rmid_realloc_threshold;
+extern unsigned int resctrl_rmid_realloc_limit;
+
 #endif /* _RESCTRL_H */
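A sketch of how the filesystem side might consume this interface, loosely
modelled on the rmid_read flow in the hunks above (the helper name and the
accumulation are illustrative, not the exact kernel code):

	/* Read one event counter into an rmid_read, reporting errors via rr->err. */
	static void mon_event_count_one(struct rmid_read *rr, u32 rmid)
	{
		u64 bytes;

		rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid,
						 &bytes);
		if (rr->err)
			return;		/* rdtgroup_mondata_show() reports rr->err */

		rr->val += bytes;	/* already in bytes: no mon_scale multiply */
	}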