Commit 908091c8 authored by Tvrtko Ursulin, committed by Chris Wilson

drm/i915/pmu: Make more struct i915_pmu centric

Just tidy the code a bit by removing a sea of overly verbose i915->pmu.*.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190801162330.2729-1-tvrtko.ursulin@linux.intel.com
parent 2b92a82f
...@@ -74,8 +74,9 @@ static unsigned int event_enabled_bit(struct perf_event *event) ...@@ -74,8 +74,9 @@ static unsigned int event_enabled_bit(struct perf_event *event)
return config_enabled_bit(event->attr.config); return config_enabled_bit(event->attr.config);
} }
static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{ {
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
u64 enable; u64 enable;
/* /*
...@@ -83,7 +84,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) ...@@ -83,7 +84,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
* *
* We start with a bitmask of all currently enabled events. * We start with a bitmask of all currently enabled events.
*/ */
enable = i915->pmu.enable; enable = pmu->enable;
/* /*
* Mask out all the ones which do not need the timer, or in * Mask out all the ones which do not need the timer, or in
...@@ -114,24 +115,26 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) ...@@ -114,24 +115,26 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
void i915_pmu_gt_parked(struct drm_i915_private *i915) void i915_pmu_gt_parked(struct drm_i915_private *i915)
{ {
if (!i915->pmu.base.event_init) struct i915_pmu *pmu = &i915->pmu;
if (!pmu->base.event_init)
return; return;
spin_lock_irq(&i915->pmu.lock); spin_lock_irq(&pmu->lock);
/* /*
* Signal sampling timer to stop if only engine events are enabled and * Signal sampling timer to stop if only engine events are enabled and
* GPU went idle. * GPU went idle.
*/ */
i915->pmu.timer_enabled = pmu_needs_timer(i915, false); pmu->timer_enabled = pmu_needs_timer(pmu, false);
spin_unlock_irq(&i915->pmu.lock); spin_unlock_irq(&pmu->lock);
} }
static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915) static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{ {
if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) { if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
i915->pmu.timer_enabled = true; pmu->timer_enabled = true;
i915->pmu.timer_last = ktime_get(); pmu->timer_last = ktime_get();
hrtimer_start_range_ns(&i915->pmu.timer, hrtimer_start_range_ns(&pmu->timer,
ns_to_ktime(PERIOD), 0, ns_to_ktime(PERIOD), 0,
HRTIMER_MODE_REL_PINNED); HRTIMER_MODE_REL_PINNED);
} }
...@@ -139,15 +142,17 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915) ...@@ -139,15 +142,17 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
void i915_pmu_gt_unparked(struct drm_i915_private *i915) void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{ {
if (!i915->pmu.base.event_init) struct i915_pmu *pmu = &i915->pmu;
if (!pmu->base.event_init)
return; return;
spin_lock_irq(&i915->pmu.lock); spin_lock_irq(&pmu->lock);
/* /*
* Re-enable sampling timer when GPU goes active. * Re-enable sampling timer when GPU goes active.
*/ */
__i915_pmu_maybe_start_timer(i915); __i915_pmu_maybe_start_timer(pmu);
spin_unlock_irq(&i915->pmu.lock); spin_unlock_irq(&pmu->lock);
} }
static void static void
...@@ -251,15 +256,16 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) ...@@ -251,15 +256,16 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{ {
struct drm_i915_private *i915 = struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer); container_of(hrtimer, struct drm_i915_private, pmu.timer);
struct i915_pmu *pmu = &i915->pmu;
unsigned int period_ns; unsigned int period_ns;
ktime_t now; ktime_t now;
if (!READ_ONCE(i915->pmu.timer_enabled)) if (!READ_ONCE(pmu->timer_enabled))
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
now = ktime_get(); now = ktime_get();
period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last)); period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
i915->pmu.timer_last = now; pmu->timer_last = now;
/* /*
* Strictly speaking the passed in period may not be 100% accurate for * Strictly speaking the passed in period may not be 100% accurate for
...@@ -443,6 +449,7 @@ static u64 get_rc6(struct drm_i915_private *i915) ...@@ -443,6 +449,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
{ {
#if IS_ENABLED(CONFIG_PM) #if IS_ENABLED(CONFIG_PM)
struct intel_runtime_pm *rpm = &i915->runtime_pm; struct intel_runtime_pm *rpm = &i915->runtime_pm;
struct i915_pmu *pmu = &i915->pmu;
intel_wakeref_t wakeref; intel_wakeref_t wakeref;
unsigned long flags; unsigned long flags;
u64 val; u64 val;
...@@ -458,16 +465,16 @@ static u64 get_rc6(struct drm_i915_private *i915) ...@@ -458,16 +465,16 @@ static u64 get_rc6(struct drm_i915_private *i915)
* previously. * previously.
*/ */
spin_lock_irqsave(&i915->pmu.lock, flags); spin_lock_irqsave(&pmu->lock, flags);
if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
i915->pmu.sample[__I915_SAMPLE_RC6].cur = val; pmu->sample[__I915_SAMPLE_RC6].cur = val;
} else { } else {
val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
} }
spin_unlock_irqrestore(&i915->pmu.lock, flags); spin_unlock_irqrestore(&pmu->lock, flags);
} else { } else {
struct device *kdev = rpm->kdev; struct device *kdev = rpm->kdev;
...@@ -478,7 +485,7 @@ static u64 get_rc6(struct drm_i915_private *i915) ...@@ -478,7 +485,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
* on top of the last known real value, as the approximated RC6 * on top of the last known real value, as the approximated RC6
* counter value. * counter value.
*/ */
spin_lock_irqsave(&i915->pmu.lock, flags); spin_lock_irqsave(&pmu->lock, flags);
/* /*
* After the above branch intel_runtime_pm_get_if_in_use failed * After the above branch intel_runtime_pm_get_if_in_use failed
...@@ -494,20 +501,20 @@ static u64 get_rc6(struct drm_i915_private *i915) ...@@ -494,20 +501,20 @@ static u64 get_rc6(struct drm_i915_private *i915)
if (pm_runtime_status_suspended(kdev)) { if (pm_runtime_status_suspended(kdev)) {
val = pm_runtime_suspended_time(kdev); val = pm_runtime_suspended_time(kdev);
if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
i915->pmu.suspended_time_last = val; pmu->suspended_time_last = val;
val -= i915->pmu.suspended_time_last; val -= pmu->suspended_time_last;
val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; val += pmu->sample[__I915_SAMPLE_RC6].cur;
i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
} else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
} else { } else {
val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; val = pmu->sample[__I915_SAMPLE_RC6].cur;
} }
spin_unlock_irqrestore(&i915->pmu.lock, flags); spin_unlock_irqrestore(&pmu->lock, flags);
} }
return val; return val;
...@@ -520,6 +527,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event) ...@@ -520,6 +527,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
{ {
struct drm_i915_private *i915 = struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base); container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = &i915->pmu;
u64 val = 0; u64 val = 0;
if (is_engine_event(event)) { if (is_engine_event(event)) {
...@@ -542,12 +550,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event) ...@@ -542,12 +550,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
switch (event->attr.config) { switch (event->attr.config) {
case I915_PMU_ACTUAL_FREQUENCY: case I915_PMU_ACTUAL_FREQUENCY:
val = val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur, div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
USEC_PER_SEC /* to MHz */); USEC_PER_SEC /* to MHz */);
break; break;
case I915_PMU_REQUESTED_FREQUENCY: case I915_PMU_REQUESTED_FREQUENCY:
val = val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur, div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
USEC_PER_SEC /* to MHz */); USEC_PER_SEC /* to MHz */);
break; break;
case I915_PMU_INTERRUPTS: case I915_PMU_INTERRUPTS:
...@@ -582,24 +590,25 @@ static void i915_pmu_enable(struct perf_event *event) ...@@ -582,24 +590,25 @@ static void i915_pmu_enable(struct perf_event *event)
struct drm_i915_private *i915 = struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base); container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event); unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&i915->pmu.lock, flags); spin_lock_irqsave(&pmu->lock, flags);
/* /*
* Update the bitmask of enabled events and increment * Update the bitmask of enabled events and increment
* the event reference counter. * the event reference counter.
*/ */
BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); GEM_BUG_ON(pmu->enable_count[bit] == ~0);
i915->pmu.enable |= BIT_ULL(bit); pmu->enable |= BIT_ULL(bit);
i915->pmu.enable_count[bit]++; pmu->enable_count[bit]++;
/* /*
* Start the sampling timer if needed and not already enabled. * Start the sampling timer if needed and not already enabled.
*/ */
__i915_pmu_maybe_start_timer(i915); __i915_pmu_maybe_start_timer(pmu);
/* /*
* For per-engine events the bitmask and reference counting * For per-engine events the bitmask and reference counting
...@@ -625,7 +634,7 @@ static void i915_pmu_enable(struct perf_event *event) ...@@ -625,7 +634,7 @@ static void i915_pmu_enable(struct perf_event *event)
engine->pmu.enable_count[sample]++; engine->pmu.enable_count[sample]++;
} }
spin_unlock_irqrestore(&i915->pmu.lock, flags); spin_unlock_irqrestore(&pmu->lock, flags);
/* /*
* Store the current counter value so we can report the correct delta * Store the current counter value so we can report the correct delta
...@@ -640,9 +649,10 @@ static void i915_pmu_disable(struct perf_event *event) ...@@ -640,9 +649,10 @@ static void i915_pmu_disable(struct perf_event *event)
struct drm_i915_private *i915 = struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base); container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event); unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&i915->pmu.lock, flags); spin_lock_irqsave(&pmu->lock, flags);
if (is_engine_event(event)) { if (is_engine_event(event)) {
u8 sample = engine_event_sample(event); u8 sample = engine_event_sample(event);
...@@ -664,18 +674,18 @@ static void i915_pmu_disable(struct perf_event *event) ...@@ -664,18 +674,18 @@ static void i915_pmu_disable(struct perf_event *event)
engine->pmu.enable &= ~BIT(sample); engine->pmu.enable &= ~BIT(sample);
} }
GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); GEM_BUG_ON(pmu->enable_count[bit] == 0);
/* /*
* Decrement the reference count and clear the enabled * Decrement the reference count and clear the enabled
* bitmask when the last listener on an event goes away. * bitmask when the last listener on an event goes away.
*/ */
if (--i915->pmu.enable_count[bit] == 0) { if (--pmu->enable_count[bit] == 0) {
i915->pmu.enable &= ~BIT_ULL(bit); pmu->enable &= ~BIT_ULL(bit);
i915->pmu.timer_enabled &= pmu_needs_timer(i915, true); pmu->timer_enabled &= pmu_needs_timer(pmu, true);
} }
spin_unlock_irqrestore(&i915->pmu.lock, flags); spin_unlock_irqrestore(&pmu->lock, flags);
} }
static void i915_pmu_event_start(struct perf_event *event, int flags) static void i915_pmu_event_start(struct perf_event *event, int flags)
...@@ -824,8 +834,9 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name, ...@@ -824,8 +834,9 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
} }
static struct attribute ** static struct attribute **
create_event_attributes(struct drm_i915_private *i915) create_event_attributes(struct i915_pmu *pmu)
{ {
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
static const struct { static const struct {
u64 config; u64 config;
const char *name; const char *name;
...@@ -939,8 +950,8 @@ create_event_attributes(struct drm_i915_private *i915) ...@@ -939,8 +950,8 @@ create_event_attributes(struct drm_i915_private *i915)
} }
} }
i915->pmu.i915_attr = i915_attr; pmu->i915_attr = i915_attr;
i915->pmu.pmu_attr = pmu_attr; pmu->pmu_attr = pmu_attr;
return attr; return attr;
...@@ -956,7 +967,7 @@ err:; ...@@ -956,7 +967,7 @@ err:;
return NULL; return NULL;
} }
static void free_event_attributes(struct drm_i915_private *i915) static void free_event_attributes(struct i915_pmu *pmu)
{ {
struct attribute **attr_iter = i915_pmu_events_attr_group.attrs; struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
...@@ -964,12 +975,12 @@ static void free_event_attributes(struct drm_i915_private *i915) ...@@ -964,12 +975,12 @@ static void free_event_attributes(struct drm_i915_private *i915)
kfree((*attr_iter)->name); kfree((*attr_iter)->name);
kfree(i915_pmu_events_attr_group.attrs); kfree(i915_pmu_events_attr_group.attrs);
kfree(i915->pmu.i915_attr); kfree(pmu->i915_attr);
kfree(i915->pmu.pmu_attr); kfree(pmu->pmu_attr);
i915_pmu_events_attr_group.attrs = NULL; i915_pmu_events_attr_group.attrs = NULL;
i915->pmu.i915_attr = NULL; pmu->i915_attr = NULL;
i915->pmu.pmu_attr = NULL; pmu->pmu_attr = NULL;
} }
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
...@@ -1006,7 +1017,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) ...@@ -1006,7 +1017,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID; static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915) static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{ {
enum cpuhp_state slot; enum cpuhp_state slot;
int ret; int ret;
...@@ -1019,7 +1030,7 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915) ...@@ -1019,7 +1030,7 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
return ret; return ret;
slot = ret; slot = ret;
ret = cpuhp_state_add_instance(slot, &i915->pmu.node); ret = cpuhp_state_add_instance(slot, &pmu->node);
if (ret) { if (ret) {
cpuhp_remove_multi_state(slot); cpuhp_remove_multi_state(slot);
return ret; return ret;
...@@ -1029,15 +1040,16 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915) ...@@ -1029,15 +1040,16 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
return 0; return 0;
} }
static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915) static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{ {
WARN_ON(cpuhp_slot == CPUHP_INVALID); WARN_ON(cpuhp_slot == CPUHP_INVALID);
WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node)); WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
cpuhp_remove_multi_state(cpuhp_slot); cpuhp_remove_multi_state(cpuhp_slot);
} }
void i915_pmu_register(struct drm_i915_private *i915) void i915_pmu_register(struct drm_i915_private *i915)
{ {
struct i915_pmu *pmu = &i915->pmu;
int ret; int ret;
if (INTEL_GEN(i915) <= 2) { if (INTEL_GEN(i915) <= 2) {
...@@ -1045,56 +1057,58 @@ void i915_pmu_register(struct drm_i915_private *i915) ...@@ -1045,56 +1057,58 @@ void i915_pmu_register(struct drm_i915_private *i915)
return; return;
} }
i915_pmu_events_attr_group.attrs = create_event_attributes(i915); i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
if (!i915_pmu_events_attr_group.attrs) { if (!i915_pmu_events_attr_group.attrs) {
ret = -ENOMEM; ret = -ENOMEM;
goto err; goto err;
} }
i915->pmu.base.attr_groups = i915_pmu_attr_groups; pmu->base.attr_groups = i915_pmu_attr_groups;
i915->pmu.base.task_ctx_nr = perf_invalid_context; pmu->base.task_ctx_nr = perf_invalid_context;
i915->pmu.base.event_init = i915_pmu_event_init; pmu->base.event_init = i915_pmu_event_init;
i915->pmu.base.add = i915_pmu_event_add; pmu->base.add = i915_pmu_event_add;
i915->pmu.base.del = i915_pmu_event_del; pmu->base.del = i915_pmu_event_del;
i915->pmu.base.start = i915_pmu_event_start; pmu->base.start = i915_pmu_event_start;
i915->pmu.base.stop = i915_pmu_event_stop; pmu->base.stop = i915_pmu_event_stop;
i915->pmu.base.read = i915_pmu_event_read; pmu->base.read = i915_pmu_event_read;
i915->pmu.base.event_idx = i915_pmu_event_event_idx; pmu->base.event_idx = i915_pmu_event_event_idx;
spin_lock_init(&i915->pmu.lock); spin_lock_init(&pmu->lock);
hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
i915->pmu.timer.function = i915_sample; pmu->timer.function = i915_sample;
ret = perf_pmu_register(&i915->pmu.base, "i915", -1); ret = perf_pmu_register(&pmu->base, "i915", -1);
if (ret) if (ret)
goto err; goto err;
ret = i915_pmu_register_cpuhp_state(i915); ret = i915_pmu_register_cpuhp_state(pmu);
if (ret) if (ret)
goto err_unreg; goto err_unreg;
return; return;
err_unreg: err_unreg:
perf_pmu_unregister(&i915->pmu.base); perf_pmu_unregister(&pmu->base);
err: err:
i915->pmu.base.event_init = NULL; pmu->base.event_init = NULL;
free_event_attributes(i915); free_event_attributes(pmu);
DRM_NOTE("Failed to register PMU! (err=%d)\n", ret); DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
} }
void i915_pmu_unregister(struct drm_i915_private *i915) void i915_pmu_unregister(struct drm_i915_private *i915)
{ {
if (!i915->pmu.base.event_init) struct i915_pmu *pmu = &i915->pmu;
if (!pmu->base.event_init)
return; return;
WARN_ON(i915->pmu.enable); WARN_ON(pmu->enable);
hrtimer_cancel(&i915->pmu.timer); hrtimer_cancel(&pmu->timer);
i915_pmu_unregister_cpuhp_state(i915); i915_pmu_unregister_cpuhp_state(pmu);
perf_pmu_unregister(&i915->pmu.base); perf_pmu_unregister(&pmu->base);
i915->pmu.base.event_init = NULL; pmu->base.event_init = NULL;
free_event_attributes(i915); free_event_attributes(pmu);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment