Commit 9bf9511e authored by Linus Torvalds

Merge tag 'x86_cache_updates_for_5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cache resource control updates from Borislav Petkov:
 "Add support for wider Memory Bandwidth Monitoring counters by querying
  their width from CPUID.

  As a prerequisite for that, streamline and unify the CPUID detection of
  the respective resource control attributes.

  By Reinette Chatre"

* tag 'x86_cache_updates_for_5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Support wider MBM counters
  x86/resctrl: Support CPUID enumeration of MBM counter width
  x86/resctrl: Maintain MBM counter width per resource
  x86/resctrl: Query LLC monitoring properties once during boot
  x86/resctrl: Remove unnecessary RMID checks
  x86/cpu: Move resctrl CPUID code to resctrl/
  x86/resctrl: Rename asm/resctrl_sched.h to asm/resctrl.h
parents ef34ba6d 0c4d5ba1
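For context on what "querying their width from CPUID" means: CPUID leaf 0xF, sub-leaf 1 reports the MBM counter width in EAX[7:0] as an offset from the architectural 24-bit minimum. Below is a minimal userspace sketch of that probe, not kernel code: it uses the GCC/clang <cpuid.h> helper, the name probe_mbm_width() is hypothetical, and the kernel's resctrl_cpu_detect() (in the diff below) additionally gates the query on the CQM feature flags.

#include <cpuid.h>	/* GCC/clang __get_cpuid_count() */
#include <stdio.h>

#define MBM_CNTR_WIDTH_BASE	24	/* architectural minimum counter width */

/* Hypothetical helper mirroring the CPUID.0xF.1 query in resctrl_cpu_detect() */
static unsigned int probe_mbm_width(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* QoS monitoring sub-leaf: EAX=0Fh, ECX=1 */
	if (!__get_cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx))
		return MBM_CNTR_WIDTH_BASE;	/* leaf not supported */

	/* EAX[7:0] holds the width offset from the 24-bit base */
	return MBM_CNTR_WIDTH_BASE + (eax & 0xff);
}

int main(void)
{
	printf("MBM counter width: %u bits\n", probe_mbm_width());
	return 0;
}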
MAINTAINERS
@@ -14269,7 +14269,7 @@ M:	Reinette Chatre <reinette.chatre@intel.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	Documentation/x86/resctrl*
-F:	arch/x86/include/asm/resctrl_sched.h
+F:	arch/x86/include/asm/resctrl.h
 F:	arch/x86/kernel/cpu/resctrl/
 F:	tools/testing/selftests/resctrl/
arch/x86/include/asm/processor.h
@@ -113,9 +113,10 @@ struct cpuinfo_x86 {
 	/* in KB - valid for CPUS which support this call: */
 	unsigned int		x86_cache_size;
 	int			x86_cache_alignment;	/* In bytes */
-	/* Cache QoS architectural values: */
+	/* Cache QoS architectural values, valid only on the BSP: */
 	int			x86_cache_max_rmid;	/* max index */
 	int			x86_cache_occ_scale;	/* scale to bytes */
+	int			x86_cache_mbm_width_offset;
 	int			x86_power;
 	unsigned long		loops_per_jiffy;
 	/* cpuid returned max cores value: */
arch/x86/include/asm/resctrl.h (renamed from arch/x86/include/asm/resctrl_sched.h)
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_RESCTRL_SCHED_H
-#define _ASM_X86_RESCTRL_SCHED_H
+#ifndef _ASM_X86_RESCTRL_H
+#define _ASM_X86_RESCTRL_H

 #ifdef CONFIG_X86_CPU_RESCTRL
@@ -84,10 +84,13 @@ static inline void resctrl_sched_in(void)
 		__resctrl_sched_in();
 }

+void resctrl_cpu_detect(struct cpuinfo_x86 *c);
+
 #else

 static inline void resctrl_sched_in(void) {}
+static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}

 #endif	/* CONFIG_X86_CPU_RESCTRL */

-#endif	/* _ASM_X86_RESCTRL_SCHED_H */
+#endif	/* _ASM_X86_RESCTRL_H */
arch/x86/kernel/cpu/amd.c
@@ -18,6 +18,7 @@
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
 #include <asm/debugreg.h>
+#include <asm/resctrl.h>

 #ifdef CONFIG_X86_64
 # include <asm/mmconfig.h>
@@ -597,6 +598,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 		}
 	}
+
+	resctrl_cpu_detect(c);
 }

 static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
arch/x86/kernel/cpu/common.c
@@ -854,30 +854,6 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 	}
 }

-static void init_cqm(struct cpuinfo_x86 *c)
-{
-	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
-		c->x86_cache_max_rmid  = -1;
-		c->x86_cache_occ_scale = -1;
-		return;
-	}
-
-	/* will be overridden if occupancy monitoring exists */
-	c->x86_cache_max_rmid = cpuid_ebx(0xf);
-
-	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
-	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
-	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
-		u32 eax, ebx, ecx, edx;
-
-		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
-		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
-
-		c->x86_cache_max_rmid  = ecx;
-		c->x86_cache_occ_scale = ebx;
-	}
-}
-
 void get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
@@ -945,7 +921,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)

 	init_scattered_cpuid_features(c);
 	init_speculation_control(c);
-	init_cqm(c);

 	/*
 	 * Clear/Set all flags overridden by options, after probe.
@@ -1377,20 +1352,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
 #endif
 }

-static void x86_init_cache_qos(struct cpuinfo_x86 *c)
-{
-	/*
-	 * The heavy lifting of max_rmid and cache_occ_scale are handled
-	 * in get_cpu_cap(). Here we just set the max_rmid for the boot_cpu
-	 * in case CQM bits really aren't there in this CPU.
-	 */
-	if (c != &boot_cpu_data) {
-		boot_cpu_data.x86_cache_max_rmid =
-			min(boot_cpu_data.x86_cache_max_rmid,
-			    c->x86_cache_max_rmid);
-	}
-}
-
 /*
  * Validate that ACPI/mptables have the same information about the
  * effective APIC id and update the package map.
@@ -1503,7 +1464,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 #endif

 	x86_init_rdrand(c);
-	x86_init_cache_qos(c);
 	setup_pku(c);

 	/*
arch/x86/kernel/cpu/intel.c
@@ -22,6 +22,7 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cmdline.h>
 #include <asm/traps.h>
+#include <asm/resctrl.h>

 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -322,6 +323,11 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		detect_ht_early(c);
 }

+static void bsp_init_intel(struct cpuinfo_x86 *c)
+{
+	resctrl_cpu_detect(c);
+}
+
 #ifdef CONFIG_X86_32
 /*
  * Early probe support logic for ppro memory erratum #50
@@ -961,6 +967,7 @@ static const struct cpu_dev intel_cpu_dev = {
 #endif
 	.c_detect_tlb	= intel_detect_tlb,
 	.c_early_init	= early_init_intel,
+	.c_bsp_init	= bsp_init_intel,
 	.c_init		= init_intel,
 	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
arch/x86/kernel/cpu/resctrl/core.c
@@ -22,7 +22,7 @@
 #include <linux/cpuhotplug.h>

 #include <asm/intel-family.h>
-#include <asm/resctrl_sched.h>
+#include <asm/resctrl.h>
 #include "internal.h"

 /* Mutex to protect rdtgroup access. */
@@ -958,6 +958,36 @@ static __init void rdt_init_res_defs(void)

 static enum cpuhp_state rdt_online;

+/* Runs once on the BSP during boot. */
+void resctrl_cpu_detect(struct cpuinfo_x86 *c)
+{
+	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
+		c->x86_cache_max_rmid  = -1;
+		c->x86_cache_occ_scale = -1;
+		c->x86_cache_mbm_width_offset = -1;
+		return;
+	}
+
+	/* will be overridden if occupancy monitoring exists */
+	c->x86_cache_max_rmid = cpuid_ebx(0xf);
+
+	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
+	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
+	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
+		u32 eax, ebx, ecx, edx;
+
+		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
+		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
+
+		c->x86_cache_max_rmid  = ecx;
+		c->x86_cache_occ_scale = ebx;
+		if (c->x86_vendor == X86_VENDOR_INTEL)
+			c->x86_cache_mbm_width_offset = eax & 0xff;
+		else
+			c->x86_cache_mbm_width_offset = -1;
+	}
+}
+
 static int __init resctrl_late_init(void)
 {
 	struct rdt_resource *r;
arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -495,14 +495,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 	return ret;
 }

-void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
-		    struct rdtgroup *rdtgrp, int evtid, int first)
+void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
+		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
+		    int evtid, int first)
 {
 	/*
 	 * setup the parameters to send to the IPI to read the data.
 	 */
 	rr->rgrp = rdtgrp;
 	rr->evtid = evtid;
+	rr->r = r;
 	rr->d = d;
 	rr->val = 0;
 	rr->first = first;
@@ -539,7 +541,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 		goto out;
 	}

-	mon_event_read(&rr, d, rdtgrp, evtid, false);
+	mon_event_read(&rr, r, d, rdtgrp, evtid, false);

 	if (rr.val & RMID_VAL_ERROR)
 		seq_puts(m, "Error\n");
arch/x86/kernel/cpu/resctrl/internal.h
@@ -31,7 +31,7 @@

 #define CQM_LIMBOCHECK_INTERVAL	1000

-#define MBM_CNTR_WIDTH			24
+#define MBM_CNTR_WIDTH_BASE		24
 #define MBM_OVERFLOW_INTERVAL		1000
 #define MAX_MBA_BW			100u
 #define MBA_IS_LINEAR			0x4
@@ -40,6 +40,12 @@
 #define RMID_VAL_ERROR			BIT_ULL(63)
 #define RMID_VAL_UNAVAIL		BIT_ULL(62)
+/*
+ * With the above fields in use 62 bits remain in MSR_IA32_QM_CTR for
+ * data to be returned. The counter width is discovered from the hardware
+ * as an offset from MBM_CNTR_WIDTH_BASE.
+ */
+#define MBM_CNTR_WIDTH_OFFSET_MAX (62 - MBM_CNTR_WIDTH_BASE)

 struct rdt_fs_context {
@@ -87,6 +93,7 @@ union mon_data_bits {
 struct rmid_read {
 	struct rdtgroup		*rgrp;
+	struct rdt_resource	*r;
 	struct rdt_domain	*d;
 	int			evtid;
 	bool			first;
@@ -460,6 +467,7 @@ struct rdt_resource {
 	struct list_head	evt_list;
 	int			num_rmid;
 	unsigned int		mon_scale;
+	unsigned int		mbm_width;
 	unsigned long		fflags;
 };
@@ -587,8 +595,9 @@ void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
 				    unsigned int dom_id);
 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
 				    struct rdt_domain *d);
-void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
-		    struct rdtgroup *rdtgrp, int evtid, int first);
+void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
+		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
+		    int evtid, int first);
 void mbm_setup_overflow_handler(struct rdt_domain *dom,
 				unsigned long delay_ms);
 void mbm_handle_overflow(struct work_struct *work);
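Concretely: MSR_IA32_QM_CTR dedicates bits 63 and 62 to the Error and Unavailable flags, so at most 62 bits of counter data can be returned, and MBM_CNTR_WIDTH_OFFSET_MAX works out to 62 - 24 = 38. A reported offset above 38 would imply a counter wider than the MSR can deliver, which is why rdt_get_mon_l3_config() below rejects it with a warning.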
arch/x86/kernel/cpu/resctrl/monitor.c
@@ -214,9 +214,9 @@ void free_rmid(u32 rmid)
 		list_add_tail(&entry->list, &rmid_free_lru);
 }

-static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr)
+static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
 {
-	u64 shift = 64 - MBM_CNTR_WIDTH, chunks;
+	u64 shift = 64 - width, chunks;

 	chunks = (cur_msr << shift) - (prev_msr << shift);
 	return chunks >>= shift;
@@ -256,7 +256,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
 		return 0;
 	}

-	chunks = mbm_overflow_count(m->prev_msr, tval);
+	chunks = mbm_overflow_count(m->prev_msr, tval, rr->r->mbm_width);
 	m->chunks += chunks;
 	m->prev_msr = tval;

@@ -278,7 +278,7 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
 	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
 		return;

-	chunks = mbm_overflow_count(m->prev_bw_msr, tval);
+	chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);
 	m->chunks_bw += chunks;
 	m->chunks = m->chunks_bw;
 	cur_bw = (chunks * r->mon_scale) >> 20;
@@ -433,11 +433,12 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 	}
 }

-static void mbm_update(struct rdt_domain *d, int rmid)
+static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
 {
 	struct rmid_read rr;

 	rr.first = false;
+	rr.r = r;
 	rr.d = d;

 	/*
@@ -510,6 +511,7 @@ void mbm_handle_overflow(struct work_struct *work)
 	struct rdtgroup *prgrp, *crgrp;
 	int cpu = smp_processor_id();
 	struct list_head *head;
+	struct rdt_resource *r;
 	struct rdt_domain *d;

 	mutex_lock(&rdtgroup_mutex);
@@ -517,16 +519,18 @@ void mbm_handle_overflow(struct work_struct *work)
 	if (!static_branch_likely(&rdt_mon_enable_key))
 		goto out_unlock;

-	d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
+	r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+	d = get_domain_from_cpu(cpu, r);
 	if (!d)
 		goto out_unlock;

 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
-		mbm_update(d, prgrp->mon.rmid);
+		mbm_update(r, d, prgrp->mon.rmid);

 		head = &prgrp->mon.crdtgrp_list;
 		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
-			mbm_update(d, crgrp->mon.rmid);
+			mbm_update(r, d, crgrp->mon.rmid);

 		if (is_mba_sc(NULL))
 			update_mba_bw(prgrp, d);
@@ -614,11 +618,18 @@ static void l3_mon_evt_init(struct rdt_resource *r)

 int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
+	unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
 	unsigned int cl_size = boot_cpu_data.x86_cache_size;
 	int ret;

 	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
 	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
+	r->mbm_width = MBM_CNTR_WIDTH_BASE;
+
+	if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
+		r->mbm_width += mbm_offset;
+	else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
+		pr_warn("Ignoring impossible MBM counter offset\n");

 	/*
 	 * A reasonable upper limit on the max threshold is the number
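The per-resource width feeds the wraparound arithmetic in mbm_overflow_count() above: shifting both MSR snapshots left by (64 - width) truncates them to the counter width, so the unsigned subtraction yields the delta modulo 2^width even when the hardware counter rolled over between reads. A standalone sketch of the same arithmetic (overflow_count() is a hypothetical stand-in and the values are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same trick as mbm_overflow_count(): the left shift discards bits above
 * the counter width, so the unsigned subtraction wraps modulo 2^width. */
static uint64_t overflow_count(uint64_t prev, uint64_t cur, unsigned int width)
{
	uint64_t shift = 64 - width;

	return ((cur << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	unsigned int width = 24;	/* MBM_CNTR_WIDTH_BASE, zero offset */
	uint64_t prev = 0xfffff0;	/* counter just before wrapping ... */
	uint64_t cur  = 0x000010;	/* ... and just after */

	/* Prints 32: (0x1000000 - 0xfffff0) + 0x10 */
	printf("chunks = %" PRIu64 "\n", overflow_count(prev, cur, width));
	return 0;
}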
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -24,7 +24,7 @@
 #include <asm/cacheflush.h>
 #include <asm/intel-family.h>
-#include <asm/resctrl_sched.h>
+#include <asm/resctrl.h>
 #include <asm/perf_event.h>

 #include "../../events/perf_event.h" /* For X86_CONFIG() */
arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -29,7 +29,7 @@
 #include <uapi/linux/magic.h>

-#include <asm/resctrl_sched.h>
+#include <asm/resctrl.h>
 #include "internal.h"

 DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
@@ -2472,7 +2472,7 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
 			goto out_destroy;

 		if (is_mbm_event(mevt->evtid))
-			mon_event_read(&rr, d, prgrp, mevt->evtid, true);
+			mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
 	}
 	kernfs_activate(kn);
 	return 0;
arch/x86/kernel/process_32.c
@@ -52,7 +52,7 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
-#include <asm/resctrl_sched.h>
+#include <asm/resctrl.h>
 #include <asm/proto.h>

 #include "process.h"
arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
 #include <asm/switch_to.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/vdso.h>
-#include <asm/resctrl_sched.h>
+#include <asm/resctrl.h>
 #include <asm/unistd.h>
 #include <asm/fsgsbase.h>
 #ifdef CONFIG_IA32_EMULATION