Commit 9d5b5a63 authored by Kevin Modzelewski

Add support for rewriting wrapperdescriptors

Involves a couple changes:
- have the rewriter treat certain callsites as non-mutations
- add special cases for wrapperdescr objects
parent f8b16dbd
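
To make the first bullet point concrete before diving into the diff: below is a minimal, self-contained sketch (a hypothetical MiniRewriter, not code from this commit) of the ordering rule the rewriter enforces. Guard actions must all be recorded before any MUTATION action, so a call recorded as NORMAL (no side effects) can still be followed by more guards, while a side-effecting call cannot.

```cpp
#include <cassert>
#include <functional>
#include <utility>
#include <vector>

enum class ActionType { NORMAL, GUARD, MUTATION };

struct MiniRewriter {
    struct Action {
        std::function<void()> emit;
        ActionType type;
    };
    std::vector<Action> actions;
    int last_guard_action = -1;
    bool added_changing_action = false;

    void addAction(std::function<void()> emit, ActionType type) {
        if (type == ActionType::GUARD) {
            // Guards must all be recorded before anything that mutates state.
            assert(!added_changing_action);
            last_guard_action = (int)actions.size();
        } else if (type == ActionType::MUTATION) {
            added_changing_action = true;
        }
        actions.push_back({ std::move(emit), type });
    }

    // A call with no side effects can be recorded as NORMAL, so it does not
    // block guards that are added later; a side-effecting call would.
    void call(bool has_side_effects, std::function<void()> emit) {
        addAction(std::move(emit),
                  has_side_effects ? ActionType::MUTATION : ActionType::NORMAL);
    }
};

int main() {
    MiniRewriter r;
    r.addAction([] {}, ActionType::GUARD);
    r.call(/* has_side_effects= */ false, [] {}); // fine between guards
    r.addAction([] {}, ActionType::GUARD);        // still allowed
    r.call(/* has_side_effects= */ true, [] {});  // after this, no more guards
    return 0;
}
```
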
@@ -232,6 +232,30 @@ assembler::Register Rewriter::ConstLoader::loadConst(uint64_t val, Location othe
     return reg;
 }
 
+void Rewriter::restoreArgs() {
+    ASSERT(!done_guarding, "this will probably work but why are we calling this at this time");
+
+    for (int i = 0; i < args.size(); i++) {
+        args[i]->bumpUse();
+
+        Location l = Location::forArg(i);
+        if (l.type == Location::Stack)
+            continue;
+
+        assert(l.type == Location::Register);
+        assembler::Register r = l.asRegister();
+
+        if (!args[i]->isInLocation(l)) {
+            allocReg(r);
+            args[i]->getInReg(r);
+        }
+    }
+
+    for (int i = 0; i < args.size(); i++) {
+        assert(args[i]->isInLocation(args[i]->arg_loc));
+    }
+}
+
 void RewriterVar::addGuard(uint64_t val) {
     RewriterVar* val_var = rewriter->loadConst(val);
     rewriter->addAction([=]() { rewriter->_addGuard(this, val_var); }, { this, val_var }, ActionType::GUARD);
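
The new restoreArgs() above is what keeps guards safe after one of these non-mutating calls: before a guard's compare-and-jump, every incoming argument is moved back to the location the calling convention originally gave it, because the slow-path call site the guard jumps to expects to find the arguments there. A toy, self-contained sketch of that idea (hypothetical names, not the real RewriterVar/Location machinery):

```cpp
#include <cassert>
#include <string>
#include <vector>

// Each argument knows where it currently lives and where the calling
// convention originally placed it (a register or a stack slot).
struct ToyArg {
    std::string current_loc;
    std::string arg_loc;
};

// Before emitting a guard that may jump back to the slow path, put every
// argument back into its original location; the slow path reloads it
// from there.
void restoreArgsSketch(std::vector<ToyArg>& args) {
    for (ToyArg& a : args) {
        if (a.current_loc != a.arg_loc) {
            // The real code would emit a mov here (and possibly spill
            // whatever currently occupies the target register).
            a.current_loc = a.arg_loc;
        }
    }
    for (const ToyArg& a : args)
        assert(a.current_loc == a.arg_loc);
}

int main() {
    std::vector<ToyArg> args = { { "rax", "rdi" }, { "stack+16", "rsi" } };
    restoreArgsSketch(args); // both args end up back in rdi / rsi
    assert(args[0].current_loc == "rdi");
    return 0;
}
```
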
@@ -241,6 +265,8 @@ void Rewriter::_addGuard(RewriterVar* var, RewriterVar* val_constant) {
     assert(val_constant->is_constant);
     uint64_t val = val_constant->constant_value;
 
+    restoreArgs();
+
     assembler::Register var_reg = var->getInReg();
     if (isLargeConstant(val)) {
         assembler::Register reg = val_constant->getInReg(Location::any(), true, /* otherThan */ var_reg);

@@ -265,6 +291,8 @@ void Rewriter::_addGuardNotEq(RewriterVar* var, RewriterVar* val_constant) {
     assert(val_constant->is_constant);
     uint64_t val = val_constant->constant_value;
 
+    restoreArgs();
+
     assembler::Register var_reg = var->getInReg();
     if (isLargeConstant(val)) {
         assembler::Register reg = val_constant->getInReg(Location::any(), true, /* otherThan */ var_reg);

@@ -292,6 +320,8 @@ void Rewriter::_addAttrGuard(RewriterVar* var, int offset, RewriterVar* val_cons
     assert(val_constant->is_constant);
     uint64_t val = val_constant->constant_value;
 
+    restoreArgs();
+
     // TODO if var is a constant, we will end up emitting something like
     //   mov $0x123, %rax
     //   cmp $0x10(%rax), %rdi
@@ -621,35 +651,35 @@ RewriterVar* Rewriter::loadConst(int64_t val, Location dest) {
     return const_loader_var;
 }
 
-RewriterVar* Rewriter::call(bool can_call_into_python, void* func_addr) {
+RewriterVar* Rewriter::call(bool has_side_effects, void* func_addr) {
     RewriterVar::SmallVector args;
     RewriterVar::SmallVector args_xmm;
-    return call(can_call_into_python, func_addr, args, args_xmm);
+    return call(has_side_effects, func_addr, args, args_xmm);
 }
 
-RewriterVar* Rewriter::call(bool can_call_into_python, void* func_addr, RewriterVar* arg0) {
+RewriterVar* Rewriter::call(bool has_side_effects, void* func_addr, RewriterVar* arg0) {
     RewriterVar::SmallVector args;
     RewriterVar::SmallVector args_xmm;
     args.push_back(arg0);
-    return call(can_call_into_python, func_addr, args, args_xmm);
+    return call(has_side_effects, func_addr, args, args_xmm);
 }
 
-RewriterVar* Rewriter::call(bool can_call_into_python, void* func_addr, RewriterVar* arg0, RewriterVar* arg1) {
+RewriterVar* Rewriter::call(bool has_side_effects, void* func_addr, RewriterVar* arg0, RewriterVar* arg1) {
     RewriterVar::SmallVector args;
     RewriterVar::SmallVector args_xmm;
     args.push_back(arg0);
     args.push_back(arg1);
-    return call(can_call_into_python, func_addr, args, args_xmm);
+    return call(has_side_effects, func_addr, args, args_xmm);
 }
 
-RewriterVar* Rewriter::call(bool can_call_into_python, void* func_addr, RewriterVar* arg0, RewriterVar* arg1,
+RewriterVar* Rewriter::call(bool has_side_effects, void* func_addr, RewriterVar* arg0, RewriterVar* arg1,
                             RewriterVar* arg2) {
     RewriterVar::SmallVector args;
     RewriterVar::SmallVector args_xmm;
     args.push_back(arg0);
     args.push_back(arg1);
     args.push_back(arg2);
-    return call(can_call_into_python, func_addr, args, args_xmm);
+    return call(has_side_effects, func_addr, args, args_xmm);
 }
 
 static const Location caller_save_registers[]{

@@ -660,7 +690,7 @@ static const Location caller_save_registers[]{
     assembler::XMM11, assembler::XMM12, assembler::XMM13, assembler::XMM14, assembler::XMM15,
 };
 
-RewriterVar* Rewriter::call(bool can_call_into_python, void* func_addr, const RewriterVar::SmallVector& args,
+RewriterVar* Rewriter::call(bool has_side_effects, void* func_addr, const RewriterVar::SmallVector& args,
                             const RewriterVar::SmallVector& args_xmm) {
     RewriterVar* result = createNewVar();
     std::vector<RewriterVar*> uses;

@@ -672,19 +702,22 @@ RewriterVar* Rewriter::call(bool can_call_into_python, void* func_addr, const Re
         assert(v != NULL);
         uses.push_back(v);
     }
-    addAction([=]() { this->_call(result, can_call_into_python, func_addr, args, args_xmm); }, uses,
-              ActionType::MUTATION);
+
+    ActionType type;
+    if (has_side_effects)
+        type = ActionType::MUTATION;
+    else
+        type = ActionType::NORMAL;
+
+    addAction([=]() { this->_call(result, has_side_effects, func_addr, args, args_xmm); }, uses, type);
+
     return result;
 }

-void Rewriter::_call(RewriterVar* result, bool can_call_into_python, void* func_addr,
-                     const RewriterVar::SmallVector& args, const RewriterVar::SmallVector& args_xmm) {
-    // TODO figure out why this is here -- what needs to be done differently
-    // if can_call_into_python is true?
-    // assert(!can_call_into_python);
-    assert(done_guarding);
+void Rewriter::_call(RewriterVar* result, bool has_side_effects, void* func_addr, const RewriterVar::SmallVector& args,
+                     const RewriterVar::SmallVector& args_xmm) {
+    if (has_side_effects)
+        assert(done_guarding);
 
-    if (can_call_into_python) {
+    if (has_side_effects) {
         // We need some fixed amount of space at the beginning of the IC that we can use to invalidate
         // it by writing a jmp.
         // FIXME this check is conservative, since actually we just have to verify that the return

@@ -694,7 +727,7 @@ void Rewriter::_call(RewriterVar* result, bool can_call_into_python, void* func_
         assert(assembler->bytesWritten() >= IC_INVALDITION_HEADER_SIZE);
     }
 
-    if (can_call_into_python) {
+    if (has_side_effects) {
         if (!marked_inside_ic) {
             // assembler->trap();

@@ -922,7 +955,7 @@ void Rewriter::commit() {
     // Emit assembly for each action, and set done_guarding when
     // we reach the last guard.
-    // Note: If an arg finishes its uses before we're done gurading, we don't release it at that point;
+    // Note: If an arg finishes its uses before we're done guarding, we don't release it at that point;
     // instead, we release it here, at the point when we set done_guarding.
     // An alternate, maybe cleaner, way to accomplish this would be to add a use for each arg
     // at each guard in the var's `uses` list.

@@ -1290,12 +1323,6 @@ assembler::Indirect Rewriter::indirectFor(Location l) {
 void Rewriter::spillRegister(assembler::Register reg, Location preserve) {
     assert(preserve.type == Location::Register || preserve.type == Location::AnyReg);
 
-    if (!done_guarding) {
-        for (int i = 0; i < args.size(); i++) {
-            assert(args[i]->arg_loc != Location(reg));
-        }
-    }
-
     RewriterVar* var = vars_by_location[reg];
     assert(var);

@@ -1335,12 +1362,6 @@ void Rewriter::spillRegister(assembler::Register reg, Location preserve) {
 void Rewriter::spillRegister(assembler::XMMRegister reg) {
     assertPhaseEmitting();
 
-    if (!done_guarding) {
-        for (int i = 0; i < args.size(); i++) {
-            assert(!args[i]->isInLocation(Location(reg)));
-        }
-    }
-
     RewriterVar* var = vars_by_location[reg];
     assert(var);
...
@@ -371,6 +371,9 @@ private:
             failed = true;
             return;
         }
 
+        for (RewriterVar* arg : args) {
+            arg->uses.push_back(actions.size());
+        }
+
         assert(!added_changing_action);
         last_guard_action = (int)actions.size();
     }
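
The loop added above records an extra use of every argument at each guard action. Since a variable is only released once its last recorded use has been emitted, this keeps the args alive until guarding is done, which is what lets restoreArgs() find them and move them back. A simplified, self-contained sketch of the use-counting idea behind bumpUse() (toy names; the real RewriterVar also tracks locations):

```cpp
#include <cassert>
#include <vector>

// Toy version of a rewriter variable: it records which actions use it, and
// it can only be released once all of those uses have been emitted.
struct ToyVar {
    std::vector<int> uses;  // indices of the actions that read this var
    int next_use = 0;       // how many of those uses have been emitted so far
    bool live = true;

    void bumpUse() {
        next_use++;
        if (next_use == (int)uses.size())
            live = false;   // last use emitted: its register can be freed
    }
};

int main() {
    ToyVar arg;
    // The arg is read by action 0, and the guard recorded as action 3 also
    // registers a use (mirroring the uses.push_back() in the hunk above).
    arg.uses = { 0, 3 };

    arg.bumpUse();          // action 0 emitted: still live because of the guard
    assert(arg.live);
    arg.bumpUse();          // guard emitted: now the arg can be released
    assert(!arg.live);
    return 0;
}
```
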
@@ -388,6 +391,10 @@ private:
         return done_guarding;
     }
 
+    // Make sure our original args are currently in their original positions.
+    // ie if we are about to guard and then branch to the slowpath callsite.
+    void restoreArgs();
+
     // Allocates a register. dest must be of type Register or AnyReg
     // If otherThan is a register, guaranteed to not use that register.
     assembler::Register allocReg(Location dest, Location otherThan = Location::any());

@@ -414,7 +421,7 @@ private:
     void _trap();
     void _loadConst(RewriterVar* result, int64_t val);
-    void _call(RewriterVar* result, bool can_call_into_python, void* func_addr, const RewriterVar::SmallVector& args,
+    void _call(RewriterVar* result, bool has_side_effects, void* func_addr, const RewriterVar::SmallVector& args,
                const RewriterVar::SmallVector& args_xmm);
     void _add(RewriterVar* result, RewriterVar* a, int64_t b, Location dest);
     int _allocate(RewriterVar* result, int n);

@@ -448,6 +455,11 @@ private:
             assert(p.second->locations.count(p.first) == 1);
         }
 
+        if (!done_guarding) {
+            for (RewriterVar* arg : args) {
+                assert(!arg->locations.empty());
+            }
+        }
 #endif
     }

@@ -478,17 +490,19 @@ public:
     void trap();
     RewriterVar* loadConst(int64_t val, Location loc = Location::any());
-    // can_call_into_python: whether this call could result in arbitrary Python code being called.
-    // This causes some extra bookkeeping to prevent, ex this patchpoint to be rewritten when
-    // entered recursively. Setting to false disables this for slightly better performance, but
-    // it's not huge so if in doubt just pass "true".
-    RewriterVar* call(bool can_call_into_python, void* func_addr, const RewriterVar::SmallVector& args,
+    // has_side_effects: whether this call could have "side effects". the exact side effects we've
+    // been concerned about have changed over time, so it's better to err on the side of saying "true",
+    // but currently you can only set it to false if 1) you will not call into Python code, which basically
+    // can have any sorts of side effects, but in particular could result in the IC being reentrant, and
+    // 2) does not have any side-effects that would be user-visible if we bailed out from the middle of the
+    // inline cache. (Extra allocations don't count even though they're potentially visible if you look
+    // hard enough.)
+    RewriterVar* call(bool has_side_effects, void* func_addr, const RewriterVar::SmallVector& args,
                       const RewriterVar::SmallVector& args_xmm = RewriterVar::SmallVector());
-    RewriterVar* call(bool can_call_into_python, void* func_addr);
-    RewriterVar* call(bool can_call_into_python, void* func_addr, RewriterVar* arg0);
-    RewriterVar* call(bool can_call_into_python, void* func_addr, RewriterVar* arg0, RewriterVar* arg1);
-    RewriterVar* call(bool can_call_into_python, void* func_addr, RewriterVar* arg0, RewriterVar* arg1,
-                      RewriterVar* arg2);
+    RewriterVar* call(bool has_side_effects, void* func_addr);
+    RewriterVar* call(bool has_side_effects, void* func_addr, RewriterVar* arg0);
+    RewriterVar* call(bool has_side_effects, void* func_addr, RewriterVar* arg0, RewriterVar* arg1);
+    RewriterVar* call(bool has_side_effects, void* func_addr, RewriterVar* arg0, RewriterVar* arg1, RewriterVar* arg2);
     RewriterVar* add(RewriterVar* a, int64_t b, Location dest);
     // Allocates n pointer-sized stack slots:
     RewriterVar* allocate(int n);
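
The new header comment is the contract for the flag. Written out as a decision rule (purely illustrative, not code from the commit), it amounts to:

```cpp
#include <cassert>

// Per the header comment: pass false only when the callee (1) cannot call
// back into Python code (which could make the IC reentrant) and (2) has no
// effect a user could observe if we bailed out of the inline cache halfway
// through. When in doubt, err on the side of true.
bool hasSideEffectsFlag(bool may_call_python, bool user_visible_on_bailout) {
    return may_call_python || user_visible_on_bailout;
}

int main() {
    // An allocation-only helper like boxInstanceMethod: the flag can be false.
    assert(hasSideEffectsFlag(false, false) == false);
    // A getset getter/setter may run arbitrary code: the flag must stay true.
    assert(hasSideEffectsFlag(true, false) == true);
    return 0;
}
```
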
...

@@ -984,7 +984,7 @@ Box* typeLookup(BoxedClass* cls, llvm::StringRef attr, GetattrRewriteArgs* rewri
 bool isNondataDescriptorInstanceSpecialCase(Box* descr) {
     return descr->cls == function_cls || descr->cls == instancemethod_cls || descr->cls == staticmethod_cls
-           || descr->cls == classmethod_cls;
+           || descr->cls == classmethod_cls || descr->cls == wrapperdescr_cls;
 }
 
 Box* nondataDescriptorInstanceSpecialCases(GetattrRewriteArgs* rewrite_args, Box* obj, Box* descr, RewriterVar* r_descr,

@@ -1058,7 +1058,7 @@ Box* nondataDescriptorInstanceSpecialCases(GetattrRewriteArgs* rewrite_args, Box
         if (!for_call) {
             if (rewrite_args) {
                 rewrite_args->out_rtn
-                    = rewrite_args->rewriter->call(true, (void*)boxInstanceMethod, r_im_self, r_im_func, r_im_class);
+                    = rewrite_args->rewriter->call(false, (void*)boxInstanceMethod, r_im_self, r_im_func, r_im_class);
                 rewrite_args->out_success = true;
             }
             return boxInstanceMethod(im_self, im_func, im_class);

@@ -1071,12 +1071,7 @@ Box* nondataDescriptorInstanceSpecialCases(GetattrRewriteArgs* rewrite_args, Box
             }
             return im_func;
         }
-    }
-
-    else if (descr->cls == staticmethod_cls) {
-        static StatCounter slowpath("slowpath_staticmethod_get");
-        slowpath.log();
-
+    } else if (descr->cls == staticmethod_cls) {
         BoxedStaticmethod* sm = static_cast<BoxedStaticmethod*>(descr);
         if (sm->sm_callable == NULL) {
             raiseExcHelper(RuntimeError, "uninitialized staticmethod object");

@@ -1090,6 +1085,23 @@ Box* nondataDescriptorInstanceSpecialCases(GetattrRewriteArgs* rewrite_args, Box
         }
         return sm->sm_callable;
+    } else if (descr->cls == wrapperdescr_cls) {
+        BoxedWrapperDescriptor* self = static_cast<BoxedWrapperDescriptor*>(descr);
+        Box* inst = obj;
+        Box* owner = obj->cls;
+        Box* r = BoxedWrapperDescriptor::__get__(self, inst, owner);
+
+        if (rewrite_args) {
+            // TODO: inline this?
+            RewriterVar* r_rtn = rewrite_args->rewriter->call(
+                /* has_side_effects= */ false, (void*)&BoxedWrapperDescriptor::__get__, r_descr, rewrite_args->obj,
+                r_descr->getAttr(offsetof(Box, cls), Location::forArg(2)));
+
+            rewrite_args->out_success = true;
+            rewrite_args->out_rtn = r_rtn;
+        }
+        return r;
     }
 
     return NULL;
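
For context on the second bullet of the commit message: a wrapper descriptor (wrapperdescr_cls here; type 'wrapper_descriptor' in CPython) wraps a C-level slot function, and its __get__ binds it to an instance, producing a callable method-wrapper. The new branch above computes that binding and, when rewriting, emits a side-effect-free call to BoxedWrapperDescriptor::__get__ so the inline cache can reproduce it. A toy, self-contained model of the binding step (hypothetical types, not Pyston's):

```cpp
#include <cstdio>
#include <functional>

struct Obj {                           // stand-in for a Python object
    const char* name;
};
typedef const char* (*SlotFunc)(Obj*); // stand-in for a C-level slot like tp_repr

// The "wrapper descriptor" lives on the class and wraps a slot function.
struct ToyWrapperDescr {
    SlotFunc slot;

    // __get__(inst, owner): with an instance, return a callable that
    // remembers both the slot and the instance (a "method-wrapper").
    std::function<const char*()> get(Obj* inst) const {
        SlotFunc f = slot;
        return [f, inst] { return f(inst); };
    }
};

static const char* toy_repr(Obj* o) {
    return o->name;
}

int main() {
    Obj o{ "<toy object>" };
    ToyWrapperDescr descr{ toy_repr };
    auto bound = descr.get(&o);        // analogous step to the __get__ call above
    std::printf("%s\n", bound());      // calling the bound wrapper invokes the slot
    return 0;
}
```
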
@@ -1120,14 +1132,13 @@ Box* descriptorClsSpecialCases(GetattrRewriteArgs* rewrite_args, BoxedClass* cls
         return descr;
     }
 
-    // Special case: member descriptor
-    if (descr->cls == member_descriptor_cls) {
+    // These classes are descriptors, but only have special behavior when involved
+    // in instance lookups
+    if (descr->cls == member_descriptor_cls || descr->cls == wrapperdescr_cls) {
         if (rewrite_args)
             r_descr->addAttrGuard(BOX_CLS_OFFSET, (uint64_t)descr->cls);
 
         if (rewrite_args) {
-            // Actually just return val (it's a descriptor but only
-            // has special behaviour for instance lookups - see below)
             rewrite_args->out_rtn = r_descr;
             rewrite_args->out_success = true;
         }

@@ -1332,7 +1343,7 @@ Box* dataDescriptorInstanceSpecialCases(GetattrRewriteArgs* rewrite_args, llvm::
             RewriterVar* r_closure = r_descr->getAttr(offsetof(BoxedGetsetDescriptor, closure));
             rewrite_args->out_rtn = rewrite_args->rewriter->call(
-                /* can_call_into_python */ true, (void*)getset_descr->get, rewrite_args->obj, r_closure);
+                /* has_side_effects */ true, (void*)getset_descr->get, rewrite_args->obj, r_closure);
 
             if (descr->cls == capi_getset_cls)
                 // TODO I think we are supposed to check the return value?

@@ -1896,7 +1907,7 @@ bool dataDescriptorSetSpecialCases(Box* obj, Box* val, Box* descr, SetattrRewrit
             args.push_back(r_val);
             args.push_back(r_closure);
             rewrite_args->rewriter->call(
-                /* can_call_into_python */ true, (void*)getset_descr->set, args);
+                /* has_side_effects */ true, (void*)getset_descr->set, args);
 
             if (descr->cls == capi_getset_cls)
                 // TODO I think we are supposed to check the return value?

@@ -3584,6 +3595,13 @@ extern "C" Box* runtimeCall(Box* obj, ArgPassSpec argspec, Box* arg1, Box* arg2,
         __builtin_extract_return_addr(__builtin_return_address(0)), num_orig_args, "runtimeCall"));
     Box* rtn;
 
+#if 0 && STAT_TIMERS
+    static uint64_t* st_id = Stats::getStatCounter("us_timer_slowpath_runtimecall_patchable");
+    static uint64_t* st_id_nopatch = Stats::getStatCounter("us_timer_slowpath_runtimecall_nopatch");
+    bool havepatch = (bool)getICInfo(__builtin_extract_return_addr(__builtin_return_address(0)));
+    ScopedStatTimer st(havepatch ? st_id : st_id_nopatch, 10);
+#endif
+
     if (rewriter.get()) {
         // TODO feel weird about doing this; it either isn't necessary
         // or this kind of thing is necessary in a lot more places
...