Commit 64f996f6 authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: cpu_init(): fix memory leak when using CPU hotplug
  x86: pda_init(): fix memory leak when using CPU hotplug
  x86, xen: Use native_pte_flags instead of native_pte_val for .pte_flags
  x86: move mtrr cpu cap setting early in early_init_xxxx
  x86: delay early cpu initialization until cpuid is done
  x86: use X86_FEATURE_NOPL in alternatives
  x86: add NOPL as a synthetic CPU feature bit
  x86: boot: stub out unimplemented CPU feature words
parents f5325225 23952a96
@@ -38,12 +38,12 @@ static const u32 req_flags[NCAPINTS] =
 {
 	REQUIRED_MASK0,
 	REQUIRED_MASK1,
-	REQUIRED_MASK2,
-	REQUIRED_MASK3,
+	0, /* REQUIRED_MASK2 not implemented in this file */
+	0, /* REQUIRED_MASK3 not implemented in this file */
 	REQUIRED_MASK4,
-	REQUIRED_MASK5,
+	0, /* REQUIRED_MASK5 not implemented in this file */
 	REQUIRED_MASK6,
-	REQUIRED_MASK7,
+	0, /* REQUIRED_MASK7 not implemented in this file */
 };

 #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
@@ -145,35 +145,25 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
 extern char __vsyscall_0;
 const unsigned char *const *find_nop_table(void)
 {
-	return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-	       boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_has(X86_FEATURE_NOPL))
+		return p6_nops;
+	else
+		return k8_nops;
 }

 #else /* CONFIG_X86_64 */

-static const struct nop {
-	int cpuid;
-	const unsigned char *const *noptable;
-} noptypes[] = {
-	{ X86_FEATURE_K8, k8_nops },
-	{ X86_FEATURE_K7, k7_nops },
-	{ X86_FEATURE_P4, p6_nops },
-	{ X86_FEATURE_P3, p6_nops },
-	{ -1, NULL }
-};
-
 const unsigned char *const *find_nop_table(void)
 {
-	const unsigned char *const *noptable = intel_nops;
-	int i;
-
-	for (i = 0; noptypes[i].cpuid >= 0; i++) {
-		if (boot_cpu_has(noptypes[i].cpuid)) {
-			noptable = noptypes[i].noptable;
-			break;
-		}
-	}
-	return noptable;
+	if (boot_cpu_has(X86_FEATURE_K8))
+		return k8_nops;
+	else if (boot_cpu_has(X86_FEATURE_K7))
+		return k7_nops;
+	else if (boot_cpu_has(X86_FEATURE_NOPL))
+		return p6_nops;
+	else
+		return intel_nops;
 }

 #endif /* CONFIG_X86_64 */
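
Note (illustrative, not part of this commit): the selection above chooses between two padding styles. p6-style NOPs are built on the multi-byte 0F 1F "NOPL" encoding, while K8-style NOPs string together 66-prefixed one-byte NOPs. A 5-byte example of each, written from general x86 encoding knowledge rather than copied from this diff:

/* Illustrative 5-byte padding sequences (not taken from this patch). */
static const unsigned char p6_nop5_example[] =
	{ 0x0f, 0x1f, 0x44, 0x00, 0x00 };	/* nopl 0x0(%eax,%eax,1) */
static const unsigned char k8_nop5_example[] =
	{ 0x66, 0x66, 0x90, 0x66, 0x90 };	/* 66 66 nop; 66 nop */
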
@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		if (c->x86_power & (1<<8))
 			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+
+	/* Set MTRR capability flag if appropriate */
+	if (c->x86_model == 13 || c->x86_model == 9 ||
+	    (c->x86_model == 8 && c->x86_mask >= 8))
+		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }

 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 			mbytes);
 		}

-		/* Set MTRR capability flag if appropriate */
-		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
-			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
-
 		break;
 	}
@@ -314,6 +314,16 @@ enum {
 	EAMD3D		= 1<<20,
 };

+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	switch (c->x86) {
+	case 5:
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		break;
+	}
+}
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
@@ -462,6 +472,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
+	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
 	.c_size_cache	= centaur_size_cache,
 };
@@ -13,6 +13,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -334,11 +335,40 @@ static void __init early_cpu_detect(void)

 	get_cpu_vendor(c, 1);

+	early_get_cap(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
+}

-	early_get_cap(c);
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
 }

 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
@@ -395,8 +425,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 	}

 		init_scattered_cpuid_features(c);
+		detect_nopl(c);
 	}
-
 }

 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
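
Note (illustrative, not part of this commit): detect_nopl() above executes the NOPL opcode and relies on the kernel's exception-table fixup to recover if the CPU raises #UD. A minimal user-space sketch of the same first-principles probe, catching SIGILL instead of using an exception table:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf nopl_jmp;

static void sigill_handler(int sig)
{
	(void)sig;
	siglongjmp(nopl_jmp, 1);	/* #UD was raised: NOPL not supported */
}

int main(void)
{
	int has_nopl = 0;

	signal(SIGILL, sigill_handler);
	if (sigsetjmp(nopl_jmp, 1) == 0) {
		__asm__ __volatile__(".byte 0x0f, 0x1f, 0xc0");	/* nopl %eax */
		has_nopl = 1;
	}
	printf("NOPL %ssupported\n", has_nopl ? "" : "not ");
	return 0;
}
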
@@ -18,6 +18,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #include <asm/numa.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void)
 	}
 }

+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
+}
+
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

 void __init early_cpu_init(void)
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_phys_bits = eax & 0xff;
 	}

+	detect_nopl(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
@@ -493,17 +529,20 @@ void pda_init(int cpu)
 		/* others are initialized in smpboot.c */
 		pda->pcurrent = &init_task;
 		pda->irqstackptr = boot_cpu_stack;
+		pda->irqstackptr += IRQSTACKSIZE - 64;
 	} else {
-		pda->irqstackptr = (char *)
-			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-		if (!pda->irqstackptr)
-			panic("cannot allocate irqstack for cpu %d", cpu);
+		if (!pda->irqstackptr) {
+			pda->irqstackptr = (char *)
+				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+			if (!pda->irqstackptr)
+				panic("cannot allocate irqstack for cpu %d",
+				      cpu);
+			pda->irqstackptr += IRQSTACKSIZE - 64;
+		}

 		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
 			pda->nodenumber = cpu_to_node(cpu);
 	}
-
-	pda->irqstackptr += IRQSTACKSIZE-64;
 }

 char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
@@ -601,19 +640,22 @@ void __cpuinit cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+	if (!orig_ist->ist[0]) {
 		static const unsigned int order[N_EXCEPTION_STACKS] = {
 		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
 		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
 		};
-		if (cpu) {
-			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-			if (!estacks)
-				panic("Cannot allocate exception stack %ld %d\n",
-				      v, cpu);
+		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+			if (cpu) {
+				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+				if (!estacks)
+					panic("Cannot allocate exception "
+					      "stack %ld %d\n", v, cpu);
+			}
+			estacks += PAGE_SIZE << order[v];
+			orig_ist->ist[v] = t->x86_tss.ist[v] =
+				(unsigned long)estacks;
 		}
-		estacks += PAGE_SIZE << order[v];
-		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
 	}

 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
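
Note (illustrative, not part of this commit): the pda_init() and cpu_init() hunks above fix the hotplug memory leaks by allocating per-CPU memory only the first time a CPU comes online and reusing it on later online events. A simplified sketch of that allocate-once pattern, using a hypothetical helper and storage array:

#include <linux/kernel.h>
#include <linux/slab.h>

static char *percpu_buf[NR_CPUS];	/* hypothetical per-CPU storage */

static char *get_cpu_buffer(int cpu, size_t size)
{
	/* Allocate only on the first online; later onlines reuse the buffer. */
	if (!percpu_buf[cpu]) {
		percpu_buf[cpu] = kmalloc(size, GFP_ATOMIC);
		if (!percpu_buf[cpu])
			panic("cannot allocate buffer for cpu %d", cpu);
	}
	return percpu_buf[cpu];
}
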
@@ -15,13 +15,11 @@
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
-	unsigned long flags;

 	/* we test for DEVID by checking whether CCR3 is writable */
-	local_irq_save(flags);
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, ccr3 ^ 0x80);
 	getCx86(0xc0);   /* dummy to change bus */
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 		*dir0 = getCx86(CX86_DIR0);
 		*dir1 = getCx86(CX86_DIR1);
 	}
-	local_irq_restore(flags);
 }

+static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__do_cyrix_devid(dir0, dir1);
+	local_irq_restore(flags);
+}
 /*
  * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
  * order to identify the Cyrix CPU model after we're out of setup.c
@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void)
 	local_irq_restore(flags);
 }

+static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+{
+	unsigned char dir0, dir0_msn, dir1 = 0;
+
+	__do_cyrix_devid(&dir0, &dir1);
+
+	dir0_msn = dir0 >> 4; /* identifies CPU "family" */
+
+	switch (dir0_msn) {
+	case 3: /* 6x86/6x86L */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	case 5: /* 6x86MX/M II */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	}
+}
+
 static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 {
@@ -416,6 +439,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Cyrix",
 	.c_ident	= { "CyrixInstead" },
+	.c_early_init	= early_init_cyrix,
 	.c_init		= init_cyrix,
 	.c_identify	= cyrix_identify,
 };
@@ -39,7 +39,8 @@ const char * const x86_cap_flags[NCAPINTS*32] = {
 	NULL, NULL, NULL, NULL,
 	"constant_tsc", "up", NULL, "arch_perfmon",
 	"pebs", "bts", NULL, NULL,
-	"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"rep_good", NULL, NULL, NULL,
+	"nopl", NULL, NULL, NULL,
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

 	/* Intel-defined (#2) */
@@ -1324,7 +1324,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

 	.pte_val = xen_pte_val,
-	.pte_flags = native_pte_val,
+	.pte_flags = native_pte_flags,
 	.pgd_val = xen_pgd_val,

 	.make_pte = xen_make_pte,
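
Note (illustrative, not part of this commit): the one-line Xen fix above matters because .pte_flags is expected to return only the flag bits of a PTE, whereas native_pte_val returns the whole value, page-frame number included. Roughly, assuming a PTE_PFN_MASK-style constant for the frame bits:

/* Illustrative only: "flags" are the PTE value with the page-frame bits masked off. */
static inline unsigned long sketch_pte_flags(unsigned long pteval)
{
	return pteval & ~PTE_PFN_MASK;	/* assumed name for the PFN bit mask */
}
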
@@ -72,14 +72,15 @@
 #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
 #define X86_FEATURE_PEBS	(3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
 #define X86_FEATURE_SYSCALL32	(3*32+14) /* syscall in ia32 userspace */
 #define X86_FEATURE_SYSENTER32	(3*32+15) /* sysenter in ia32 userspace */
 #define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well on this CPU */
 #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP	(3*32+19) /* Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */

 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
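
Note (illustrative, not part of this commit): the new synthetic bit is encoded as (3*32+20), i.e. capability word 3 (the Linux-defined word), bit 20. A minimal sketch of how such an encoding is looked up in a per-CPU array of capability words (simplified; boot_cpu_has() is the real accessor):

/* Simplified lookup: word = feature / 32, bit = feature % 32 (word 3, bit 20 for NOPL). */
static inline int sketch_cpu_has(const unsigned int *caps, unsigned int feature)
{
	return (caps[feature / 32] >> (feature % 32)) & 1;
}
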
@@ -41,6 +41,12 @@
 # define NEED_3DNOW	0
 #endif

+#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
+# define NEED_NOPL	(1<<(X86_FEATURE_NOPL & 31))
+#else
+# define NEED_NOPL	0
+#endif
+
 #ifdef CONFIG_X86_64
 #define NEED_PSE	0
 #define NEED_MSR	(1<<(X86_FEATURE_MSR & 31))
@@ -67,7 +73,7 @@
 #define REQUIRED_MASK1	(NEED_LM|NEED_3DNOW)

 #define REQUIRED_MASK2	0
-#define REQUIRED_MASK3	0
+#define REQUIRED_MASK3	(NEED_NOPL)
 #define REQUIRED_MASK4	0
 #define REQUIRED_MASK5	0
 #define REQUIRED_MASK6	0
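
Note (illustrative, not part of this commit): the REQUIRED_MASK* words declare features the kernel is built to assume are present, so feature checks against them can be folded away at build time; note that the boot-time checker stubs word 3 out, as the first hunk of this merge shows. A simplified sketch of a mask-based required-feature comparison (not the kernel's exact implementation):

/* Return nonzero if any required feature bit is missing from the CPU's capability words. */
static int sketch_check_required(const unsigned int *cpu_caps,
				 const unsigned int *required, int nwords)
{
	int w, missing = 0;

	for (w = 0; w < nwords; w++)
		missing |= required[w] & ~cpu_caps[w];
	return missing;
}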