Commit b95a8a27 authored by Thomas Gleixner

x86/vdso: Use generic VDSO clock mode storage

Switch to the generic VDSO clock mode storage.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com> (VDSO parts)
Acked-by: Juergen Gross <jgross@suse.com> (Xen parts)
Acked-by: Paolo Bonzini <pbonzini@redhat.com> (KVM parts)
Link: https://lkml.kernel.org/r/20200207124403.152039903@linutronix.de
parent 5d51bee7
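
The generic storage replaces the x86-private archdata.vclock_mode with a
vdso_clock_mode field in struct clocksource plus a bitmask recording which
clock modes were ever enabled. A minimal userspace sketch of that
bookkeeping, assuming the vclocks_set_used()/vclock_was_used() helpers used
throughout the diff below keep the semantics of the pre-existing x86
implementation (not a verbatim copy of the kernel source):

/*
 * Sketch of the "was this clock mode ever used?" bookkeeping that the
 * vvar fault handler and pvclock code below rely on.
 */
#include <stdio.h>

enum vdso_clock_mode {
	VDSO_CLOCKMODE_NONE,	/* no vDSO clock available */
	VDSO_CLOCKMODE_TSC,
	VDSO_CLOCKMODE_PVCLOCK,
	VDSO_CLOCKMODE_HVCLOCK,
	VDSO_CLOCKMODE_MAX,	/* must stay < 32 so one u32 bitmask suffices */
};

static unsigned int vclocks_used;	/* one bit per clock mode */

static void vclocks_set_used(unsigned int which)
{
	/* the kernel wraps this in READ_ONCE()/WRITE_ONCE() */
	vclocks_used |= 1U << which;
}

static int vclock_was_used(int vclock)
{
	return !!(vclocks_used & (1U << vclock));
}

int main(void)
{
	/* a clocksource's ->enable() callback marks its mode as used */
	vclocks_set_used(VDSO_CLOCKMODE_TSC);

	/* the vvar fault handler only maps pages for modes actually used */
	printf("tsc used: %d, pvclock used: %d\n",
	       vclock_was_used(VDSO_CLOCKMODE_TSC),
	       vclock_was_used(VDSO_CLOCKMODE_PVCLOCK));
	return 0;
}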
@@ -57,7 +57,6 @@ config X86
 	select ACPI_LEGACY_TABLES_LOOKUP	if ACPI
 	select ACPI_SYSTEM_POWER_STATES_SUPPORT	if ACPI
 	select ARCH_32BIT_OFF_T			if X86_32
-	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_CLOCKSOURCE_INIT
 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
 	select ARCH_HAS_DEBUG_VIRTUAL
@@ -126,6 +125,7 @@ config X86
 	select GENERIC_STRNLEN_USER
 	select GENERIC_TIME_VSYSCALL
 	select GENERIC_GETTIMEOFDAY
+	select GENERIC_VDSO_CLOCK_MODE
 	select GENERIC_VDSO_TIME_NS
 	select GUP_GET_PTE_LOW_HIGH		if X86_PAE
 	select HARDLOCKUP_CHECK_TIMESTAMP	if X86_64
...
@@ -221,7 +221,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 	} else if (sym_offset == image->sym_pvclock_page) {
 		struct pvclock_vsyscall_time_info *pvti =
 			pvclock_get_pvti_cpu0_va();
-		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
+		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
 			return vmf_insert_pfn_prot(vma, vmf->address,
 					__pa(pvti) >> PAGE_SHIFT,
 					pgprot_decrypted(vma->vm_page_prot));
@@ -229,7 +229,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 	} else if (sym_offset == image->sym_hvclock_page) {
 		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();
-		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
+		if (tsc_pg && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
 			return vmf_insert_pfn(vma, vmf->address,
 					virt_to_phys(tsc_pg) >> PAGE_SHIFT);
 	} else if (sym_offset == image->sym_timens_page) {
@@ -447,7 +447,7 @@ __setup("vdso=", vdso_setup);
 static int __init init_vdso(void)
 {
-	BUILD_BUG_ON(VCLOCK_MAX >= 32);
+	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
 	init_vdso_image(&vdso_image_64);
...
@@ -4,15 +4,10 @@
 #ifndef _ASM_X86_CLOCKSOURCE_H
 #define _ASM_X86_CLOCKSOURCE_H
 
-#define VCLOCK_NONE	0	/* No vDSO clock available.	*/
-#define VCLOCK_TSC	1	/* vDSO should use vread_tsc.	*/
-#define VCLOCK_PVCLOCK	2	/* vDSO should use vread_pvclock. */
-#define VCLOCK_HVCLOCK	3	/* vDSO should use vread_hvclock. */
-#define VCLOCK_MAX	3
-
-struct arch_clocksource_data {
-	int vclock_mode;
-};
+#define VDSO_ARCH_CLOCKMODES	\
+	VDSO_CLOCKMODE_TSC,	\
+	VDSO_CLOCKMODE_PVCLOCK,	\
+	VDSO_CLOCKMODE_HVCLOCK
 
 extern unsigned int vclocks_used;
...
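
VDSO_ARCH_CLOCKMODES is consumed by the generic header introduced in the
parent commit (5d51bee7), which splices the architecture's modes into a
single enum shared by all users of the generic vDSO. A sketch of that
construction — the exact generic header layout is an assumption here, not
part of this diff:

/*
 * Sketch modelled on the generic vdso clocksource header from the
 * parent commit; treat the exact layout as an assumption.
 */
#define VDSO_ARCH_CLOCKMODES	\
	VDSO_CLOCKMODE_TSC,	\
	VDSO_CLOCKMODE_PVCLOCK,	\
	VDSO_CLOCKMODE_HVCLOCK

enum vdso_clock_mode {
	VDSO_CLOCKMODE_NONE,
	/* the arch-provided modes expand right here */
	VDSO_ARCH_CLOCKMODES,
	VDSO_CLOCKMODE_MAX,
};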
@@ -46,9 +46,9 @@ typedef int (*hyperv_fill_flush_list_func)(
 #define hv_set_reference_tsc(val) \
 	wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
 #define hv_set_clocksource_vdso(val) \
-	((val).archdata.vclock_mode = VCLOCK_HVCLOCK)
+	((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
 #define hv_enable_vdso_clocksource() \
-	vclocks_set_used(VCLOCK_HVCLOCK);
+	vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
 #define hv_get_raw_timer() rdtsc_ordered()
 
 void hyperv_callback_vector(void);
...
@@ -243,7 +243,7 @@ static u64 vread_hvclock(void)
 
 static inline u64 __arch_get_hw_counter(s32 clock_mode)
 {
-	if (likely(clock_mode == VCLOCK_TSC))
+	if (likely(clock_mode == VDSO_CLOCKMODE_TSC))
 		return (u64)rdtsc_ordered();
 	/*
 	 * For any memory-mapped vclock type, we need to make sure that gcc
@@ -252,13 +252,13 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode)
 	 * question isn't enabled, which will segfault. Hence the barriers.
 	 */
 #ifdef CONFIG_PARAVIRT_CLOCK
-	if (clock_mode == VCLOCK_PVCLOCK) {
+	if (clock_mode == VDSO_CLOCKMODE_PVCLOCK) {
 		barrier();
 		return vread_pvclock();
 	}
 #endif
 #ifdef CONFIG_HYPERV_TIMER
-	if (clock_mode == VCLOCK_HVCLOCK) {
+	if (clock_mode == VDSO_CLOCKMODE_HVCLOCK) {
 		barrier();
 		return vread_hvclock();
 	}
...
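
The barrier comment in __arch_get_hw_counter() above matters because the
function is called from the generic vDSO read loop with a clock_mode loaded
from the shared vvar data: if the compiler hoisted the read of a
memory-mapped counter page above the mode check, a process whose vclock is
disabled could fault on an unmapped pvclock/hvclock page. A rough sketch of
the generic caller, under the assumption that it matches the generic vDSO
gettimeofday code of this era (field and helper names simplified):

/*
 * Rough sketch of the generic caller in lib/vdso/gettimeofday.c;
 * details are assumptions, not a verbatim copy.
 */
static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, ns;
	u32 seq;

	do {
		seq = vdso_read_begin(vd);
		/*
		 * clock_mode comes from the vvar page; only after this
		 * call may a memory-mapped counter page be touched,
		 * hence the barrier()s in __arch_get_hw_counter().
		 */
		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		if (unlikely((s64)cycles < 0))
			return -1;	/* fall back to the syscall */
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns >>= vd->shift;
	} while (unlikely(vdso_read_retry(vd, seq)));

	ts->tv_sec = vdso_ts->sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;
	return 0;
}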
@@ -21,13 +21,6 @@ struct vdso_data *__x86_get_k_vdso_data(void)
 }
 #define __arch_get_k_vdso_data __x86_get_k_vdso_data
 
-static __always_inline
-int __x86_get_clock_mode(struct timekeeper *tk)
-{
-	return tk->tkr_mono.clock->archdata.vclock_mode;
-}
-#define __arch_get_clock_mode __x86_get_clock_mode
-
 /* The asm-generic header needs to be included after the definitions above */
 #include <asm-generic/vdso/vsyscall.h>
...
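
With GENERIC_VDSO_CLOCK_MODE selected, the timekeeping core can read the
mode straight out of struct clocksource, so the per-arch
__arch_get_clock_mode() hook deleted above becomes dead weight. A simplified
sketch of what the generic update path boils down to after this series
(treat the details as assumptions):

/*
 * Simplified sketch of the generic update path in kernel/time/vsyscall.c;
 * not a verbatim copy.
 */
void update_vsyscall(struct timekeeper *tk)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	s32 clock_mode;

	/*
	 * Previously: clock_mode = __arch_get_clock_mode(tk), an x86-only
	 * hook dereferencing tk->tkr_mono.clock->archdata.vclock_mode.
	 * Now the mode lives directly in struct clocksource:
	 */
	clock_mode = tk->tkr_mono.clock->vdso_clock_mode;

	vdso_write_begin(vdata);
	vdata[CS_HRES_COARSE].clock_mode = clock_mode;
	vdata[CS_RAW].clock_mode = clock_mode;
	/* ... copy cycle_last, mult, shift and the base timestamps ... */
	vdso_write_end(vdata);
}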
@@ -161,7 +161,7 @@ bool kvm_check_and_clear_guest_paused(void)
 
 static int kvm_cs_enable(struct clocksource *cs)
 {
-	vclocks_set_used(VCLOCK_PVCLOCK);
+	vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
 	return 0;
 }
 
@@ -279,7 +279,7 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
 	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
 		return 0;
 
-	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+	kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
 #endif
 
 	kvmclock_init_mem();
...
@@ -145,7 +145,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
 
 void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti)
 {
-	WARN_ON(vclock_was_used(VCLOCK_PVCLOCK));
+	WARN_ON(vclock_was_used(VDSO_CLOCKMODE_PVCLOCK));
 	pvti_cpu0_va = pvti;
 }
...
@@ -122,18 +122,12 @@ void __init time_init(void)
  */
 void clocksource_arch_init(struct clocksource *cs)
 {
-	if (cs->archdata.vclock_mode == VCLOCK_NONE)
+	if (cs->vdso_clock_mode == VDSO_CLOCKMODE_NONE)
 		return;
 
-	if (cs->archdata.vclock_mode > VCLOCK_MAX) {
-		pr_warn("clocksource %s registered with invalid vclock_mode %d. Disabling vclock.\n",
-			cs->name, cs->archdata.vclock_mode);
-		cs->archdata.vclock_mode = VCLOCK_NONE;
-	}
-
 	if (cs->mask != CLOCKSOURCE_MASK(64)) {
-		pr_warn("clocksource %s registered with invalid mask %016llx. Disabling vclock.\n",
+		pr_warn("clocksource %s registered with invalid mask %016llx for VDSO. Disabling VDSO support.\n",
 			cs->name, cs->mask);
-		cs->archdata.vclock_mode = VCLOCK_NONE;
+		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
 	}
 }
...
@@ -1110,7 +1110,7 @@ static void tsc_cs_tick_stable(struct clocksource *cs)
 
 static int tsc_cs_enable(struct clocksource *cs)
 {
-	vclocks_set_used(VCLOCK_TSC);
+	vclocks_set_used(VDSO_CLOCKMODE_TSC);
 	return 0;
 }
 
@@ -1124,7 +1124,7 @@ static struct clocksource clocksource_tsc_early = {
 	.mask			= CLOCKSOURCE_MASK(64),
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
-	.archdata		= { .vclock_mode = VCLOCK_TSC },
+	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
 	.enable			= tsc_cs_enable,
 	.resume			= tsc_resume,
 	.mark_unstable		= tsc_cs_mark_unstable,
@@ -1145,7 +1145,7 @@ static struct clocksource clocksource_tsc = {
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_VALID_FOR_HRES |
 				  CLOCK_SOURCE_MUST_VERIFY,
-	.archdata		= { .vclock_mode = VCLOCK_TSC },
+	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
 	.enable			= tsc_cs_enable,
 	.resume			= tsc_resume,
 	.mark_unstable		= tsc_cs_mark_unstable,
...
@@ -815,8 +815,8 @@ TRACE_EVENT(kvm_write_tsc_offset,
 #ifdef CONFIG_X86_64
 
 #define host_clocks				\
-	{VCLOCK_NONE, "none"},			\
-	{VCLOCK_TSC, "tsc"}			\
+	{VDSO_CLOCKMODE_NONE, "none"},		\
+	{VDSO_CLOCKMODE_TSC, "tsc"}		\
 
 TRACE_EVENT(kvm_update_master_clock,
 	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
...
@@ -1631,7 +1631,7 @@ static void update_pvclock_gtod(struct timekeeper *tk)
 	write_seqcount_begin(&vdata->seq);
 
 	/* copy pvclock gtod data */
-	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
+	vdata->clock.vclock_mode	= tk->tkr_mono.clock->vdso_clock_mode;
 	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
 	vdata->clock.mask		= tk->tkr_mono.mask;
 	vdata->clock.mult		= tk->tkr_mono.mult;
@@ -1639,7 +1639,7 @@ static void update_pvclock_gtod(struct timekeeper *tk)
 	vdata->clock.base_cycles	= tk->tkr_mono.xtime_nsec;
 	vdata->clock.offset		= tk->tkr_mono.base;
 
-	vdata->raw_clock.vclock_mode	= tk->tkr_raw.clock->archdata.vclock_mode;
+	vdata->raw_clock.vclock_mode	= tk->tkr_raw.clock->vdso_clock_mode;
 	vdata->raw_clock.cycle_last	= tk->tkr_raw.cycle_last;
 	vdata->raw_clock.mask		= tk->tkr_raw.mask;
 	vdata->raw_clock.mult		= tk->tkr_raw.mult;
@@ -1840,7 +1840,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 
 static inline int gtod_is_based_on_tsc(int mode)
 {
-	return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK;
+	return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
 }
 
 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
@@ -1933,7 +1933,7 @@ static inline bool kvm_check_tsc_unstable(void)
 	 * TSC is marked unstable when we're running on Hyper-V,
 	 * 'TSC page' clocksource is good.
 	 */
-	if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK)
+	if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
 		return false;
 #endif
 	return check_tsc_unstable();
@@ -2088,30 +2088,30 @@ static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
 	u64 tsc_pg_val;
 
 	switch (clock->vclock_mode) {
-	case VCLOCK_HVCLOCK:
+	case VDSO_CLOCKMODE_HVCLOCK:
 		tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
 						  tsc_timestamp);
 		if (tsc_pg_val != U64_MAX) {
 			/* TSC page valid */
-			*mode = VCLOCK_HVCLOCK;
+			*mode = VDSO_CLOCKMODE_HVCLOCK;
 			v = (tsc_pg_val - clock->cycle_last) &
 				clock->mask;
 		} else {
 			/* TSC page invalid */
-			*mode = VCLOCK_NONE;
+			*mode = VDSO_CLOCKMODE_NONE;
 		}
 		break;
-	case VCLOCK_TSC:
-		*mode = VCLOCK_TSC;
+	case VDSO_CLOCKMODE_TSC:
+		*mode = VDSO_CLOCKMODE_TSC;
 		*tsc_timestamp = read_tsc();
 		v = (*tsc_timestamp - clock->cycle_last) &
 			clock->mask;
 		break;
 	default:
-		*mode = VCLOCK_NONE;
+		*mode = VDSO_CLOCKMODE_NONE;
 	}
 
-	if (*mode == VCLOCK_NONE)
+	if (*mode == VDSO_CLOCKMODE_NONE)
 		*tsc_timestamp = v = 0;
 
 	return v * clock->mult;
...
@@ -147,7 +147,7 @@ static struct notifier_block xen_pvclock_gtod_notifier = {
 
 static int xen_cs_enable(struct clocksource *cs)
 {
-	vclocks_set_used(VCLOCK_PVCLOCK);
+	vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
 	return 0;
 }
 
@@ -419,12 +419,13 @@ void xen_restore_time_memory_area(void)
 	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
 
 	/*
-	 * We don't disable VCLOCK_PVCLOCK entirely if it fails to register the
-	 * secondary time info with Xen or if we migrated to a host without the
-	 * necessary flags. On both of these cases what happens is either
-	 * process seeing a zeroed out pvti or seeing no PVCLOCK_TSC_STABLE_BIT
-	 * bit set. Userspace checks the latter and if 0, it discards the data
-	 * in pvti and fallbacks to a system call for a reliable timestamp.
+	 * We don't disable VDSO_CLOCKMODE_PVCLOCK entirely if it fails to
+	 * register the secondary time info with Xen or if we migrated to a
+	 * host without the necessary flags. On both of these cases what
+	 * happens is either process seeing a zeroed out pvti or seeing no
+	 * PVCLOCK_TSC_STABLE_BIT bit set. Userspace checks the latter and
+	 * if 0, it discards the data in pvti and fallbacks to a system
+	 * call for a reliable timestamp.
 	 */
 	if (ret != 0)
 		pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
@@ -450,7 +451,7 @@ static void xen_setup_vsyscall_time_info(void)
 	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
 	if (ret) {
-		pr_notice("xen: VCLOCK_PVCLOCK not supported (err %d)\n", ret);
+		pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (err %d)\n", ret);
 		free_page((unsigned long)ti);
 		return;
 	}
@@ -467,14 +468,14 @@ static void xen_setup_vsyscall_time_info(void)
 		if (!ret)
 			free_page((unsigned long)ti);
-		pr_notice("xen: VCLOCK_PVCLOCK not supported (tsc unstable)\n");
+		pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (tsc unstable)\n");
 		return;
 	}
 
 	xen_clock = ti;
 	pvclock_set_pvti_cpu0_va(xen_clock);
 
-	xen_clocksource.archdata.vclock_mode = VCLOCK_PVCLOCK;
+	xen_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
 }
 
 static void __init xen_time_init(void)
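
The comment in xen_restore_time_memory_area() above leans on the pvclock
read protocol: on every read, the vDSO inspects PVCLOCK_TSC_STABLE_BIT and
bails out to a real system call when it is clear (a zeroed-out pvti has the
bit clear too). A sketch of that check as the vDSO side performs it,
modelled on x86's vread_pvclock(); the helper names and the U64_MAX return
convention are assumptions:

/*
 * Sketch of the vDSO-side pvclock read; not a verbatim copy.
 */
static u64 vread_pvclock(void)
{
	const struct pvclock_vcpu_time_info *pvti = &pvclock_page.pvti;
	u32 version;
	u64 ret;

	do {
		version = pvclock_read_begin(pvti);

		/*
		 * A zeroed pvti and a cleared PVCLOCK_TSC_STABLE_BIT both
		 * land here: return U64_MAX so the generic code falls
		 * back to a system call for a reliable timestamp.
		 */
		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
			return U64_MAX;

		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
	} while (pvclock_read_retry(pvti, version));

	return ret;
}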