Commit b3727c24 authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Print the hypervisor returned tsc_khz during boot
  x86: Correct segment permission flags in 64-bit linker script
  x86: cpuinit-annotate SMP boot trampolines properly
  x86: Increase timeout for EHCI debug port reset completion in early printk
  x86: Fix uaccess_32.h typo
  x86: Trivial whitespace cleanups
  x86, apic: Fix missed handling of discrete apics
  x86/i386: Remove duplicated #include
  x86, mtrr: Convert loop to a while based construct, avoid naked semicolon
  Revert 'x86: Fix system crash when loading with "reservetop" parameter'
  x86, mce: Fix compile warning in case of CONFIG_SMP=n
  x86, apic: Use logical flat on intel with <= 8 logical cpus
  x86: SGI UV: Map MMIO-High memory range
  x86: SGI UV: Add volatile semantics to macros that access chipset registers
  x86: SGI UV: Fix IPI macros
  x86: apic: Convert BUG() to BUG_ON()
  x86: Remove final bits of CONFIG_X86_OLD_MCE
parents 58e75a09 6399c087
@@ -65,6 +65,19 @@ static inline void default_inquire_remote_apic(int apicid)
 		__inquire_remote_apic(apicid);
 }
 
+/*
+ * With 82489DX we can't rely on apic feature bit
+ * retrieved via cpuid but still have to deal with
+ * such an apic chip so we assume that SMP configuration
+ * is found from MP table (64bit case uses ACPI mostly
+ * which set smp presence flag as well so we are safe
+ * to use this helper too).
+ */
+static inline bool apic_from_smp_config(void)
+{
+	return smp_found_config && !disable_apic;
+}
+
 /*
  * Basic functions accessing APICs.
  */
...
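Note: the new apic_from_smp_config() helper added above is used by the apic.c and io_apic.c hunks further down (lapic_shutdown(), print_all_ICs(), disable_IO_APIC()) so that the APIC setup and shutdown paths still run when the MP table reported an SMP configuration but the CPUID APIC feature bit is absent, as with a discrete 82489DX.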
@@ -65,7 +65,6 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
 	case 4:
 		*(int *)to = *(int *)from;
 		return to;
-
 	case 3:
 		*(short *)to = *(short *)from;
 		*((char *)to + 2) = *((char *)from + 2);
...
@@ -33,7 +33,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * Copy data from kernel space to user space. Caller must check
  * the specified block with access_ok() before calling this function.
  * The caller should also make sure he pins the user space address
- * so that the we don't result in page fault and sleep.
+ * so that we don't result in page fault and sleep.
  *
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
  * we return the initial request size (1, 2 or 4), as copy_*_user should do.
...
@@ -15,6 +15,7 @@
 #include <linux/numa.h>
 #include <linux/percpu.h>
 #include <linux/timer.h>
+#include <linux/io.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
 #include <asm/uv/uv_mmrs.h>
@@ -258,13 +259,13 @@ static inline unsigned long *uv_global_mmr32_address(int pnode,
 static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
 				 unsigned long val)
 {
-	*uv_global_mmr32_address(pnode, offset) = val;
+	writeq(val, uv_global_mmr32_address(pnode, offset));
 }
 
 static inline unsigned long uv_read_global_mmr32(int pnode,
 						 unsigned long offset)
 {
-	return *uv_global_mmr32_address(pnode, offset);
+	return readq(uv_global_mmr32_address(pnode, offset));
 }
 
 /*
@@ -281,13 +282,13 @@ static inline unsigned long *uv_global_mmr64_address(int pnode,
 static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
 				unsigned long val)
 {
-	*uv_global_mmr64_address(pnode, offset) = val;
+	writeq(val, uv_global_mmr64_address(pnode, offset));
 }
 
 static inline unsigned long uv_read_global_mmr64(int pnode,
 						 unsigned long offset)
 {
-	return *uv_global_mmr64_address(pnode, offset);
+	return readq(uv_global_mmr64_address(pnode, offset));
 }
 
 /*
@@ -301,22 +302,22 @@ static inline unsigned long *uv_local_mmr_address(unsigned long offset)
 
 static inline unsigned long uv_read_local_mmr(unsigned long offset)
 {
-	return *uv_local_mmr_address(offset);
+	return readq(uv_local_mmr_address(offset));
 }
 
 static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
 {
-	*uv_local_mmr_address(offset) = val;
+	writeq(val, uv_local_mmr_address(offset));
 }
 
 static inline unsigned char uv_read_local_mmr8(unsigned long offset)
 {
-	return *((unsigned char *)uv_local_mmr_address(offset));
+	return readb(uv_local_mmr_address(offset));
 }
 
 static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
 {
-	*((unsigned char *)uv_local_mmr_address(offset)) = val;
+	writeb(val, uv_local_mmr_address(offset));
 }
 
 /*
@@ -422,7 +423,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
 	unsigned long val;
 
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
-			((apicid & 0x3f) << UVH_IPI_INT_APIC_ID_SHFT) |
+			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
 			(vector << UVH_IPI_INT_VECTOR_SHFT);
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
...
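Note: the point of converting the UV accessors above from raw pointer dereferences to readq()/writeq()/readb()/writeb() is volatile semantics ("Add volatile semantics to macros that access chipset registers" in the shortlog): a plain dereference may be cached, reordered, or elided by the compiler, while the MMIO accessors guarantee each chipset register access is actually issued. A minimal sketch of the underlying idea, using hypothetical my_readq()/my_writeq() names rather than the kernel's real <asm/io.h> implementations:

	/*
	 * Illustration only: the volatile qualification is what prevents
	 * the compiler from caching or dropping memory-mapped register
	 * accesses; the kernel's accessors may additionally order them.
	 */
	static inline unsigned long my_readq(const volatile void *addr)
	{
		return *(const volatile unsigned long *)addr;
	}

	static inline void my_writeq(unsigned long val, volatile void *addr)
	{
		*(volatile unsigned long *)addr = val;
	}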
@@ -62,7 +62,7 @@ unsigned int boot_cpu_physical_apicid = -1U;
 /*
  * The highest APIC ID seen during enumeration.
  *
- * This determines the messaging protocol we can use: if all APIC IDs
+ * On AMD, this determines the messaging protocol we can use: if all APIC IDs
  * are in the 0 ... 7 range, then we can use logical addressing which
  * has some performance advantages (better broadcasting).
  *
@@ -979,7 +979,7 @@ void lapic_shutdown(void)
 {
 	unsigned long flags;
 
-	if (!cpu_has_apic)
+	if (!cpu_has_apic && !apic_from_smp_config())
 		return;
 
 	local_irq_save(flags);
@@ -1197,8 +1197,7 @@ void __cpuinit setup_local_APIC(void)
 	 * Double-check whether this APIC is really registered.
 	 * This is meaningless in clustered apic mode, so we skip it.
 	 */
-	if (!apic->apic_id_registered())
-		BUG();
+	BUG_ON(!apic->apic_id_registered());
 
 	/*
 	 * Intel recommends to set DFR, LDR and TPR before enabling
@@ -1917,25 +1916,15 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		max_physical_apicid = apicid;
 
 #ifdef CONFIG_X86_32
-	/*
-	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
-	 * but we need to work other dependencies like SMP_SUSPEND etc
-	 * before this can be done without some confusion.
-	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
-	 *       - Ashok Raj <ashok.raj@intel.com>
-	 */
-	if (max_physical_apicid >= 8) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
-			if (!APIC_XAPIC(version)) {
-				def_to_bigsmp = 0;
-				break;
-			}
-			/* If P4 and above fall through */
-		case X86_VENDOR_AMD:
-			def_to_bigsmp = 1;
-		}
-	}
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		if (num_processors > 8)
+			def_to_bigsmp = 1;
+		break;
+	case X86_VENDOR_AMD:
+		if (max_physical_apicid >= 8)
+			def_to_bigsmp = 1;
+	}
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
...
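Note: the generic_processor_info() hunk above implements the "Use logical flat on intel with <= 8 logical cpus" change from the shortlog: Intel boxes now fall back to bigsmp based on the logical CPU count (num_processors > 8), while AMD keeps keying off max_physical_apicid, which is why the comment at the top of the file now scopes the APIC-ID-based rule to AMD.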
@@ -1874,7 +1874,7 @@ __apicdebuginit(int) print_all_ICs(void)
 	print_PIC();
 
 	/* don't print out if apic is not there */
-	if (!cpu_has_apic || disable_apic)
+	if (!cpu_has_apic && !apic_from_smp_config())
 		return 0;
 
 	print_all_local_APICs();
@@ -1999,7 +1999,7 @@ void disable_IO_APIC(void)
 	/*
 	 * Use virtual wire A mode when interrupt remapping is enabled.
 	 */
-	if (cpu_has_apic)
+	if (cpu_has_apic || apic_from_smp_config())
 		disconnect_bsp_APIC(!intr_remapping_enabled &&
 				ioapic_i8259.pin != -1);
 }
...
@@ -64,15 +64,22 @@ void __init default_setup_apic_routing(void)
 			apic = &apic_x2apic_phys;
 		else
 			apic = &apic_x2apic_cluster;
-		printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 	}
 #endif
 
 	if (apic == &apic_flat) {
-		if (max_physical_apicid >= 8)
-			apic = &apic_physflat;
-		printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			if (num_processors > 8)
+				apic = &apic_physflat;
+			break;
+		case X86_VENDOR_AMD:
+			if (max_physical_apicid >= 8)
+				apic = &apic_physflat;
+		}
 	}
 
+	printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
+
 	if (is_vsmp_box()) {
 		/* need to update phys_pkg_id */
...
@@ -389,6 +389,16 @@ static __init void map_gru_high(int max_pnode)
 		map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
 }
 
+static __init void map_mmr_high(int max_pnode)
+{
+	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
+	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
+
+	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
+	if (mmr.s.enable)
+		map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
+}
+
 static __init void map_mmioh_high(int max_pnode)
 {
 	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
@@ -643,6 +653,7 @@ void __init uv_system_init(void)
 	}
 
 	map_gru_high(max_pnode);
+	map_mmr_high(max_pnode);
 	map_mmioh_high(max_pnode);
 
 	uv_cpu_init();
...
@@ -489,8 +489,9 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	int i, err = 0;
 	struct threshold_bank *b = NULL;
 	char name[32];
+#ifdef CONFIG_SMP
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+#endif
 
 	sprintf(name, "threshold_bank%i", bank);
...
@@ -126,8 +126,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 		return -EINVAL;
 
 	base = simple_strtoull(line + 5, &ptr, 0);
-	for (; isspace(*ptr); ++ptr)
-		;
+	while (isspace(*ptr))
+		ptr++;
 
 	if (strncmp(ptr, "size=", 5))
 		return -EINVAL;
@@ -135,14 +135,14 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 	size = simple_strtoull(ptr + 5, &ptr, 0);
 	if ((base & 0xfff) || (size & 0xfff))
 		return -EINVAL;
-	for (; isspace(*ptr); ++ptr)
-		;
+	while (isspace(*ptr))
+		ptr++;
 
 	if (strncmp(ptr, "type=", 5))
 		return -EINVAL;
 	ptr += 5;
-	for (; isspace(*ptr); ++ptr)
-		;
+	while (isspace(*ptr))
+		ptr++;
 
 	for (i = 0; i < MTRR_NUM_TYPES; ++i) {
 		if (strcmp(ptr, mtrr_strings[i]))
...
@@ -58,6 +58,9 @@ static unsigned long vmware_get_tsc_khz(void)
 	tsc_hz = eax | (((uint64_t)ebx) << 32);
 	do_div(tsc_hz, 1000);
 	BUG_ON(tsc_hz >> 32);
+	printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
+			 (unsigned long) tsc_hz / 1000,
+			 (unsigned long) tsc_hz % 1000);
 	return tsc_hz;
 }
 
@@ -69,6 +72,9 @@ void __init vmware_platform_setup(void)
 
 	if (ebx != UINT_MAX)
 		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
+	else
+		printk(KERN_WARNING
+		       "Failed to get TSC freq from the hypervisor\n");
 }
 
 /*
...
@@ -624,7 +624,7 @@ static int __init ehci_setup(void)
 		return -1;
 	}
 
-	loop = 10;
+	loop = 100000;
 	/* Reset the EHCI controller */
 	cmd = readl(&ehci_regs->command);
 	cmd |= CMD_RESET;
...
@@ -697,21 +697,6 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
-	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-	*cmdline_p = command_line;
-
-#ifdef CONFIG_X86_64
-	/*
-	 * Must call this twice: Once just to detect whether hardware doesn't
-	 * support NX (so that the early EHCI debug console setup can safely
-	 * call set_fixmap(), and then again after parsing early parameters to
-	 * honor the respective command line option.
-	 */
-	check_efer();
-#endif
-
-	parse_early_param();
-
 	/* VMI may relocate the fixmap; do this before touching ioremap area */
 	vmi_init();
 
@@ -794,6 +779,21 @@ void __init setup_arch(char **cmdline_p)
 #endif
 #endif
 
+	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+	*cmdline_p = command_line;
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Must call this twice: Once just to detect whether hardware doesn't
+	 * support NX (so that the early EHCI debug console setup can safely
+	 * call set_fixmap(), and then again after parsing early parameters to
+	 * honor the respective command line option.
+	 */
+	check_efer();
+#endif
+
+	parse_early_param();
+
 #ifdef CONFIG_X86_64
 	check_efer();
 #endif
...
@@ -4,7 +4,7 @@
 #include <asm/e820.h>
 
 /* ready for x86_64 and x86 */
-unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
 
 void __init reserve_trampoline_memory(void)
 {
@@ -26,7 +26,7 @@ void __init reserve_trampoline_memory(void)
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
-unsigned long setup_trampoline(void)
+unsigned long __cpuinit setup_trampoline(void)
 {
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
...
@@ -28,16 +28,12 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
 
 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
-#ifndef CONFIG_HOTPLUG_CPU
-.section ".cpuinit.data","aw",@progbits
-#else
-.section .rodata,"a",@progbits
-#endif
-
+__CPUINITRODATA
 .code16
 
 ENTRY(trampoline_data)
...
@@ -25,14 +25,15 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
 #include <asm/pgtable_types.h>
 #include <asm/page_types.h>
 #include <asm/msr.h>
 #include <asm/segment.h>
 #include <asm/processor-flags.h>
 
-.section .rodata, "a", @progbits
-
+/* We can free up the trampoline after bootup if cpu hotplug is not supported. */
+__CPUINITRODATA
 .code16
 
 ENTRY(trampoline_data)
...
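Note: __CPUINITRODATA is provided by <linux/init.h>, hence the #include <linux/init.h> added to both trampoline files; it places the trampoline in a cpuinit rodata section that the kernel can discard after boot when CONFIG_HOTPLUG_CPU is not set, replacing the open-coded #ifdef CONFIG_HOTPLUG_CPU section selection that trampoline_32.S used to carry.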
@@ -45,9 +45,9 @@ PHDRS {
 	text PT_LOAD FLAGS(5);          /* R_E */
 	data PT_LOAD FLAGS(7);          /* RWE */
 #ifdef CONFIG_X86_64
-	user PT_LOAD FLAGS(7);          /* RWE */
+	user PT_LOAD FLAGS(5);          /* R_E */
 #ifdef CONFIG_SMP
-	percpu PT_LOAD FLAGS(7);        /* RWE */
+	percpu PT_LOAD FLAGS(6);        /* RW_ */
 #endif
 	init PT_LOAD FLAGS(7);          /* RWE */
 #endif
...
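Note: the FLAGS() values in the linker-script hunk above are ELF program-header permission bits: read = 4, write = 2, execute = 1. The fix therefore changes the user segment from RWE (7) to R_E (5) and the percpu segment to RW_ (6), matching the corrected comments. A stand-alone sanity check against the standard ELF constants:

	#include <elf.h>        /* PF_R = 0x4, PF_W = 0x2, PF_X = 0x1 */
	#include <stdio.h>

	int main(void)
	{
		printf("R_E = %d\n", PF_R | PF_X);              /* 5: user segment   */
		printf("RW_ = %d\n", PF_R | PF_W);              /* 6: percpu segment */
		printf("RWE = %d\n", PF_R | PF_W | PF_X);       /* 7: init segment   */
		return 0;
	}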
@@ -243,10 +243,6 @@ static void __restore_processor_state(struct saved_context *ctxt)
 
 	do_fpu_end();
 	mtrr_bp_restore();
-
-#ifdef CONFIG_X86_OLD_MCE
-	mcheck_init(&boot_cpu_data);
-#endif
 }
 
 /* Needed by apm.c */
...
@@ -275,7 +275,6 @@ void acpi_tb_parse_fadt(u32 table_index)
 void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
 {
-
 	/*
 	 * Check if the FADT is larger than the largest table that we expect
 	 * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
...