Commit 24f44628 authored by Kevin Modzelewski's avatar Kevin Modzelewski

Merge pull request #1192 from kmod/refcounting_merge

Some cleanup for the refcounting merge
parents c429f19d b5d22ec7
language: cpp
branches:
except:
# - refcounting
compiler:
- clang
- gcc
......
......@@ -111,9 +111,9 @@ if(ENABLE_PGO)
endif()
if(ENABLE_REF_DEBUG)
add_definitions(-DPy_REF_DEBUG)
add_definitions(-DPYMALLOC_DEBUG)
add_definitions(-DPy_TRACE_REFS)
add_definitions(-DPy_REF_DEBUG)
add_definitions(-DPYMALLOC_DEBUG)
add_definitions(-DPy_TRACE_REFS)
endif()
macro(ADD_PROFILE_FLAGS)
......@@ -342,7 +342,6 @@ endmacro()
# tests testname directory arguments
add_pyston_test(defaults tests --order-by-mtime -t50)
# XXX: reenable
add_pyston_test(force_llvm tests -a=-n -a=-X -t50)
if(${CMAKE_BUILD_TYPE} STREQUAL "Release")
add_pyston_test(max_compilation_tier tests -a=-O -a=-X -t50)
......
......@@ -61,6 +61,8 @@ Refcounting in the JIT tiers is handled differently from normal code. In this c
- `refConsumed()` says that a reference was handed out of the JIT's set of objects. This could be handling `return` statements, calling functions that steal a ref from their arguments, or storing the reference in a data structure (which thus inherits the reference).
- `refUsed()`. This shouldn't be needed that often, but tells the refcounting system that there was a use of a reference that the refcounter was unable to see. For example, we have some calling conventions where objects are passed in an array; the uses of the array are thus uses of the individual variables. You probably don't have to worry about this.
The automatic refcounter will then look at the IR we generated (either the LLVM IR or our Rewriter IR) and determine where to insert incref and decref operations. Note that it will insert decrefs at the earliest point it considers acceptable, so if you have a use of your variable that is not visible to the refcounter, you will need to call refUsed(). But for the most part things should just work.
Here's an example of what it can look like. The case is storing a variable in an array.
```C++
......@@ -76,12 +78,12 @@ Note that we get the previous value and then set the type to OWNED. The refcoun
## Updating C extensions
C extensions don't require any updates, but they ofter leak some references (usually a constant number) which the refcounting system
C extensions don't require any updates, but they often leak some references (usually a constant number) which the refcounting system
can't distinguish from steady-state leaks. For standard libraries that are implemented as C extensions, it's usually nice to go through and fix these so that the system can get down to "0 refs remaining" at finalization.
Usually this is just a matter of calling PyGC_RegisterStaticConstant() around any static constants. This will usually be things like static strings, or sometimes singletons or interned values.
Similarly, this can happen in our code. If you store sometihng in a `static` variable, you will typically have to call PyGC_RegisterStaticConstant. There is a helper function for the common case -- getStaticString() is equivalent to `PyGC_RegisterStaticConstant(PyString_InternFromString())`, which happens a decent amount.
Similarly, this can happen in our code. If you store something in a `static` C/C++ variable, you will typically have to call PyGC_RegisterStaticConstant. There is a helper function for the common case -- getStaticString() is equivalent to `PyGC_RegisterStaticConstant(PyString_InternFromString())`, which happens a decent amount.
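A rough sketch of the common case (the function and attribute names here are hypothetical, not taken from this change):
```C++
void someRuntimeHelper() {
    // Interned once and registered as an intentional static constant, so the
    // refcount checker doesn't report it as a leak at finalization. Equivalent to
    // PyGC_RegisterStaticConstant(PyString_InternFromString("__name__")).
    static BoxedString* name_str = getStaticString("__name__");
    // ... use name_str as a borrowed reference ...
}
```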
## Testing
......
......@@ -10,7 +10,6 @@ extern "C" {
// Pyston addition: refcounting annotations:
#define BORROWED(...) __VA_ARGS__
#define STOLEN(...) __VA_ARGS__
#define NOREFCHECK ;
/* Object and type object interface */
......
......@@ -19,7 +19,7 @@
*/
#include "Python.h"
//#include "frameobject.h" /* for PyFrame_ClearFreeList */
#include "frameobject.h" /* for PyFrame_ClearFreeList */
/* Get an object's GC head */
#define AS_GC(o) ((PyGC_Head *)(o)-1)
......@@ -382,13 +382,6 @@ subtract_refs(PyGC_Head *containers)
PyGC_Head *gc = containers->gc.gc_next;
for (; gc != containers; gc=gc->gc.gc_next) {
traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
// Pyston addition: some extra checking for our transition
#ifndef NDEBUG
if (!traverse) {
fprintf(stderr, "%s needs a tp_traverse\n", Py_TYPE(FROM_GC(gc))->tp_name);
assert(0);
}
#endif
(void) traverse(FROM_GC(gc),
(visitproc)visit_decref,
NULL);
......@@ -552,8 +545,6 @@ move_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
PyObject *op = FROM_GC(gc);
// Pyston addition: for now assert that the gc isn't freeing anything.
assert(IS_TENTATIVELY_UNREACHABLE(op));
next = gc->gc.gc_next;
......
# TODO: we will have to figure out a better way of generating this file
# TODO: there are many things missing here
build_time_vars = {
"CC": "gcc -pthread",
......
......@@ -163,7 +163,6 @@ bool Rewriter::ConstLoader::tryRegRegMove(uint64_t val, assembler::Register dst_
bool Rewriter::ConstLoader::tryLea(uint64_t val, assembler::Register dst_reg) {
assert(rewriter->phase_emitting);
return false;
// for large constants it maybe beneficial to create the value with a LEA from a known const value
if (isLargeConstant(val)) {
......@@ -1241,7 +1240,12 @@ std::vector<Location> Rewriter::getDecrefLocations() {
for (auto&& p : owned_attrs) {
RewriterVar* var = p.first;
assert(var->locations.size() == 1);
// If you forget to call deregisterOwnedAttr(), and then later do something that needs to emit decref info,
// we will try to emit the info for that owned attr even though the rewriter has decided that it doesn't need
// to keep it alive any more.
ASSERT(var->locations.size() > 0,
"owned variable not accessible any more -- maybe forgot to call deregisterOwnedAttr?");
ASSERT(var->locations.size() == 1, "this code only looks at one location");
Location l = *var->locations.begin();
assert(l.type == Location::Scratch);
......
......@@ -130,6 +130,7 @@ class RewriterAction;
// This might make more sense as an inner class of Rewriter, but
// you can't forward-declare that :/
class RewriterVar {
private:
// Fields for automatic refcounting:
int num_refs_consumed = 0; // The number of "refConsumed()" calls on this RewriterVar
int last_refconsumed_numuses
......@@ -150,10 +151,18 @@ public:
// getAttrFloat casts to double (maybe I should make that separate?)
RewriterVar* getAttrFloat(int offset, Location loc = Location::any());
RewriterVar* getAttrDouble(int offset, Location loc = Location::any());
// SetattrType: a guardrail against the easy-to-make mistake of having a reference
// stored in a memory location. The refcount-tracker can't see this type of usage, so it will
// end up decrefing the object after the store, even if the memory location is later used.
//
// For refcount-tracked objects, you need to specify one of two subsequent calls you will make:
// either HANDED_OFF if the ref got handed off and you will call refConsumed(), or REF_USED if the stored ref
// is borrowed, in which case you need to call refUsed() after the usage of the memory location.
enum class SetattrType {
UNKNOWN,
HANDED_OFF,
REFUSED,
REF_USED,
};
void setAttr(int offset, RewriterVar* other, SetattrType type = SetattrType::UNKNOWN);
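// A rough usage sketch (hypothetical variables, not part of this change):
//   array->setAttr(0, val, RewriterVar::SetattrType::HANDED_OFF);
//   val->refConsumed();   // the store took ownership of the reference
// versus the borrowed case:
//   array->setAttr(0, val, RewriterVar::SetattrType::REF_USED);
//   ... emit the code that reads the memory location ...
//   val->refUsed();       // tell the refcounter about the use it can't see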
......@@ -263,7 +272,7 @@ public:
}
#ifndef NDEBUG
// XXX: for testing, reset these on deallocation so that we will see the next time they get set.
// For testing, reset these on deallocation so that we will see the next time they get set.
~RewriterVar() {
rewriter = (Rewriter*)-1;
reftype = (RefType)-1;
......
......@@ -643,8 +643,6 @@ Value ASTInterpreter::visit_branch(AST_Branch* node) {
AUTO_DECREF(v.o);
if (jit) {
jit->emitEndBlock();
// Special note: emitSideExit decrefs v for us.
// TODO: since the value is always True or False, maybe could optimize by putting the decref
// before the conditional instead of after.
......@@ -676,15 +674,13 @@ Value ASTInterpreter::visit_jump(AST_Jump* node) {
if (jit) {
if (backedge && ENABLE_OSR && !FORCE_INTERPRETER)
jit->emitOSRPoint(node);
jit->emitEndBlock();
jit->emitJump(node->target);
finishJITing(node->target);
// we may have started JITing because the OSR thresholds got triggered; in this case we don't want to jit
// additional blocks outside of the loop if the function is cold.
// XXX reenable this
// if (getMD()->times_interpreted < REOPT_THRESHOLD_INTERPRETER)
// should_jit = false;
if (getMD()->times_interpreted < REOPT_THRESHOLD_INTERPRETER)
should_jit = false;
}
if (backedge)
......@@ -1069,7 +1065,6 @@ Value ASTInterpreter::visit_return(AST_Return* node) {
Value s = node->value ? visit_expr(node->value) : getNone();
if (jit) {
jit->emitEndBlock();
jit->emitReturn(s);
finishJITing();
}
......@@ -1114,7 +1109,7 @@ Value ASTInterpreter::createFunction(AST* node, AST_arguments* args, const std::
Value v = visit_expr(d);
defaults.push_back(v.o);
if (jit) {
defaults_var->setAttr(i++ * sizeof(void*), v, RewriterVar::SetattrType::REFUSED);
defaults_var->setAttr(i++ * sizeof(void*), v, RewriterVar::SetattrType::REF_USED);
defaults_vars.push_back(v.var);
}
}
......
......@@ -179,8 +179,7 @@ RewriterVar* JitFragmentWriter::emitAugbinop(AST_expr* node, RewriterVar* lhs, R
}
RewriterVar* JitFragmentWriter::emitBinop(AST_expr* node, RewriterVar* lhs, RewriterVar* rhs, int op_type) {
/// XXX increase this too much for testing
return emitPPCall((void*)binop, { lhs, rhs, imm(op_type) }, 2, 640, node).first->setType(RefType::OWNED);
return emitPPCall((void*)binop, { lhs, rhs, imm(op_type) }, 2, 240, node).first->setType(RefType::OWNED);
}
RewriterVar* JitFragmentWriter::emitCallattr(AST_expr* node, RewriterVar* obj, BoxedString* attr, CallattrFlags flags,
......@@ -202,7 +201,7 @@ RewriterVar* JitFragmentWriter::emitCallattr(AST_expr* node, RewriterVar* obj, B
if (args.size() > 3) {
RewriterVar* scratch = allocate(args.size() - 3);
for (int i = 0; i < args.size() - 3; ++i)
scratch->setAttr(i * sizeof(void*), args[i + 3], RewriterVar::SetattrType::REFUSED);
scratch->setAttr(i * sizeof(void*), args[i + 3], RewriterVar::SetattrType::REF_USED);
call_args.push_back(scratch);
} else if (keyword_names) {
call_args.push_back(imm(0ul));
......@@ -303,7 +302,7 @@ RewriterVar* JitFragmentWriter::emitCreateList(const llvm::ArrayRef<RewriterVar*
return call(false, (void*)createList)->setType(RefType::OWNED);
auto rtn = emitCallWithAllocatedArgs((void*)createListHelper,
{ imm(num), allocArgs(values, RewriterVar::SetattrType::REFUSED) },
{ imm(num), allocArgs(values, RewriterVar::SetattrType::REF_USED) },
values)->setType(RefType::OWNED);
for (RewriterVar* v : values) {
v->refConsumed();
......@@ -316,7 +315,7 @@ RewriterVar* JitFragmentWriter::emitCreateSet(const llvm::ArrayRef<RewriterVar*>
if (num == 0)
return call(false, (void*)createSet)->setType(RefType::OWNED);
auto rtn = emitCallWithAllocatedArgs((void*)createSetHelper,
{ imm(num), allocArgs(values, RewriterVar::SetattrType::REFUSED) },
{ imm(num), allocArgs(values, RewriterVar::SetattrType::REF_USED) },
values)->setType(RefType::OWNED);
for (RewriterVar* v : values) {
v->refConsumed();
......@@ -341,7 +340,7 @@ RewriterVar* JitFragmentWriter::emitCreateTuple(const llvm::ArrayRef<RewriterVar
r = call(false, (void*)BoxedTuple::create3, values[0], values[1], values[2])->setType(RefType::OWNED);
else {
r = emitCallWithAllocatedArgs((void*)createTupleHelper,
{ imm(num), allocArgs(values, RewriterVar::SetattrType::REFUSED) },
{ imm(num), allocArgs(values, RewriterVar::SetattrType::REF_USED) },
values)->setType(RefType::OWNED);
}
return r;
......@@ -482,7 +481,7 @@ RewriterVar* JitFragmentWriter::emitRuntimeCall(AST_expr* node, RewriterVar* obj
if (args.size() > 3) {
RewriterVar* scratch = allocate(args.size() - 3);
for (int i = 0; i < args.size() - 3; ++i)
scratch->setAttr(i * sizeof(void*), args[i + 3], RewriterVar::SetattrType::REFUSED);
scratch->setAttr(i * sizeof(void*), args[i + 3], RewriterVar::SetattrType::REF_USED);
call_args.push_back(scratch);
} else
call_args.push_back(imm(0ul));
......@@ -500,7 +499,7 @@ RewriterVar* JitFragmentWriter::emitRuntimeCall(AST_expr* node, RewriterVar* obj
RewriterVar* args_array = nullptr;
if (args.size()) {
args_array = allocArgs(args, RewriterVar::SetattrType::REFUSED);
args_array = allocArgs(args, RewriterVar::SetattrType::REF_USED);
} else
RELEASE_ASSERT(!keyword_names_var, "0 args but keyword names are set");
......@@ -618,10 +617,6 @@ void JitFragmentWriter::emitRaise3(RewriterVar* arg0, RewriterVar* arg1, Rewrite
arg2->refConsumed();
}
void JitFragmentWriter::emitEndBlock() {
// XXX remove
}
void JitFragmentWriter::emitReturn(RewriterVar* v) {
addAction([=]() { _emitReturn(v); }, { v }, ActionType::NORMAL);
v->refConsumed();
......
......@@ -275,7 +275,6 @@ public:
// emitSideExit steals a full ref from v, not just a vref
void emitSideExit(STOLEN(RewriterVar*) v, Box* cmp_value, CFGBlock* next_block);
void emitUncacheExcInfo();
void emitEndBlock();
void abortCompilation();
int finishCompilation();
......
......@@ -353,7 +353,9 @@ llvm::Value* IRGenState::getGlobalsIfCustom() {
return getGlobals();
}
// XXX This is pretty hacky, but I think I can get rid of it once I merge in Marius's new frame introspection work
// This is a hacky little constant that should only be used by the underlying exception-propagation code.
// It means that we are calling a function that 1) throws a C++ exception, and 2) for which we explicitly want to
// disable generation of a C++ fixup. This is really just for propagating an exception after it has been caught.
#define NO_CXX_INTERCEPTION ((llvm::BasicBlock*)-1)
llvm::Value* IREmitter::ALWAYS_THROWS = ((llvm::Value*)1);
......@@ -770,7 +772,6 @@ private:
llvm::SmallVector<ExceptionState, 2> incoming_exc_state;
// These are the values that are outgoing of an invoke block:
llvm::SmallVector<ExceptionState, 2> outgoing_exc_state;
// llvm::DenseMap<llvm::BasicBlock*, llvm::BasicBlock*> cxx_exc_dests;
llvm::DenseMap<llvm::BasicBlock*, llvm::BasicBlock*> capi_exc_dests;
llvm::DenseMap<llvm::BasicBlock*, llvm::PHINode*> capi_phis;
......@@ -923,6 +924,8 @@ private:
// local symbols will not throw.
emitter.getBuilder()->CreateUnreachable();
exc_type = exc_value = exc_tb = undefVariable();
// TODO: should we not emit the rest of the block? I (kmod) think I tried that and
// ran into some sort of issue, but I don't remember what it was.
// endBlock(DEAD);
}
......@@ -3091,10 +3094,6 @@ public:
}
llvm::BasicBlock* getCXXExcDest(const UnwindInfo& unw_info) override {
// llvm::BasicBlock*& cxx_exc_dest = cxx_exc_dests[final_dest];
// if (cxx_exc_dest)
// return cxx_exc_dest;
llvm::BasicBlock* final_dest;
if (unw_info.hasHandler()) {
final_dest = unw_info.exc_dest;
......
......@@ -593,6 +593,12 @@ public:
Box* operator*() { return impl->getValue(); }
};
// Similar to std::unique_ptr<>, but allocates its data on the stack.
// This means that it should only be used with types that can be relocated trivially.
// TODO add assertions for that, similar to SmallFunction.
// Also, if you copy the SmallUniquePtr, the address that it represents changes (since you
// copy the data as well). In debug mode, this class will enforce that once you get the
// pointer value, it does not get copied again.
template <typename T, int N> class SmallUniquePtr {
private:
char _data[N];
......@@ -645,8 +651,6 @@ public:
// should complain).
class BoxIteratorRange {
private:
// std::unique_ptr<BoxIteratorImpl> begin_impl;
// char _data[32];
typedef SmallUniquePtr<BoxIteratorImpl, 32> UniquePtr;
UniquePtr begin_impl;
BoxIteratorImpl* end_impl;
......@@ -831,9 +835,9 @@ public:
void operator delete(void* ptr) __attribute__((visibility("default"))) { abort(); }
_PyObject_HEAD_EXTRA
_PyObject_HEAD_EXTRA;
Py_ssize_t ob_refcnt;
Py_ssize_t ob_refcnt;
// Note: cls gets initialized in the new() function.
BoxedClass* cls;
......
......@@ -70,8 +70,6 @@ Box* BoxedCode::argcount(Box* b, void*) {
}
Box* BoxedCode::varnames(Box* b, void*) {
NOREFCHECK;
RELEASE_ASSERT(b->cls == code_cls, "");
BoxedCode* code = static_cast<BoxedCode*>(b);
......
......@@ -897,6 +897,11 @@ public:
};
void _sortArray(Box** elts, long num_elts, Box* cmp, Box* key) {
// TODO(kmod): maybe we should just switch to CPython's sort. not sure how the algorithms compare,
// but they specifically try to support cases where __lt__ or the cmp function might end up inspecting
// the current list being sorted.
// I also don't know if std::stable_sort is exception-safe.
if (cmp) {
assert(!key);
std::stable_sort<Box**, PyCmpComparer>(elts, elts + num_elts, PyCmpComparer(cmp));
......@@ -956,11 +961,6 @@ void listSort(BoxedList* self, Box* cmp, Box* key, Box* reverse) {
RELEASE_ASSERT(!cmp || !key, "Specifying both the 'cmp' and 'key' keywords is currently not supported");
// TODO(kmod): maybe we should just switch to CPython's sort. not sure how the algorithms compare,
// but they specifically try to support cases where __lt__ or the cmp function might end up inspecting
// the current list being sorted.
// I also don't know if std::stable_sort is exception-safe.
auto orig_size = self->size;
auto orig_elts = self->elts;
......
......@@ -4870,13 +4870,13 @@ Box* callCLFunc(FunctionMetadata* md, CallRewriteArgs* rewrite_args, int num_out
RewriterVar* arg_array = rewrite_args->rewriter->allocate(4);
arg_vec.push_back(arg_array);
if (num_output_args >= 1)
arg_array->setAttr(0, rewrite_args->arg1, RewriterVar::SetattrType::REFUSED);
arg_array->setAttr(0, rewrite_args->arg1, RewriterVar::SetattrType::REF_USED);
if (num_output_args >= 2)
arg_array->setAttr(8, rewrite_args->arg2, RewriterVar::SetattrType::REFUSED);
arg_array->setAttr(8, rewrite_args->arg2, RewriterVar::SetattrType::REF_USED);
if (num_output_args >= 3)
arg_array->setAttr(16, rewrite_args->arg3, RewriterVar::SetattrType::REFUSED);
arg_array->setAttr(16, rewrite_args->arg3, RewriterVar::SetattrType::REF_USED);
if (num_output_args >= 4)
arg_array->setAttr(24, rewrite_args->args, RewriterVar::SetattrType::REFUSED);
arg_array->setAttr(24, rewrite_args->args, RewriterVar::SetattrType::REF_USED);
if (S == CXX)
rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)astInterpretHelper, arg_vec)
......@@ -5391,11 +5391,6 @@ static Box* runtimeCallEntry(Box* obj, ArgPassSpec argspec, Box* arg1, Box* arg2
}
assert(rtn || (S == CAPI && PyErr_Occurred()));
// XXX
#ifndef NDEBUG
rewriter.release();
#endif
return rtn;
}
......@@ -5587,11 +5582,6 @@ extern "C" Box* binop(Box* lhs, Box* rhs, int op_type) {
rtn = binopInternal<NOT_REWRITABLE>(lhs, rhs, op_type, false, NULL);
}
// XXX
#ifndef NDEBUG
rewriter.release();
#endif
return rtn;
}
......@@ -7239,11 +7229,6 @@ extern "C" Box* getGlobal(Box* globals, BoxedString* name) {
rtn = builtins_module->getattr(name);
}
// XXX
#ifndef NDEBUG
rewriter.release();
#endif
if (rtn) {
assert(rtn->ob_refcnt > 0);
Py_INCREF(rtn);
......
......@@ -41,6 +41,10 @@ extern "C" void rawReraise(Box*, Box*, Box*) __attribute__((__noreturn__));
void raiseExc(STOLEN(Box*) exc_obj) __attribute__((__noreturn__));
void _printStacktrace();
// Note -- most of these functions are marked 'noinline' because they inspect the return-address
// to see if they are getting called from jitted code. If we inline them into a function that
// got called from jitted code, they might incorrectly think that they are a rewritable entrypoint.
extern "C" Box* deopt(AST_expr* expr, Box* value) __attribute__((noinline));
// helper function for raising from the runtime:
......@@ -54,8 +58,6 @@ extern "C" void my_assert(bool b);
extern "C" Box* getattr(Box* obj, BoxedString* attr) __attribute__((noinline));
extern "C" Box* getattr_capi(Box* obj, BoxedString* attr) noexcept __attribute__((noinline));
extern "C" Box* getattrMaybeNonstring(Box* obj, Box* attr);
// XXX: testing. this tail-calls in optimized builds so force it to inline for unoptimized as well to get the same
// behavior.
extern "C" void setattr(Box* obj, BoxedString* attr, STOLEN(Box*) attr_val) __attribute__((noinline));
extern "C" void delattr(Box* obj, BoxedString* attr) __attribute__((noinline));
extern "C" void delattrGeneric(Box* obj, BoxedString* attr, DelattrRewriteArgs* rewrite_args);
......
......@@ -3515,14 +3515,6 @@ extern "C" PyObject* PyObject_Init(PyObject* op, PyTypeObject* tp) noexcept {
*PyObject_GET_WEAKREFS_LISTPTR(op) = NULL;
}
// I think CPython defers the dict creation (equivalent of our initUserAttrs) to the
// first time that an attribute gets set.
// Our HCAttrs object already includes this optimization of no-allocation-if-empty,
// but it's nice to initialize the hcls here so we don't have to check it on every getattr/setattr.
// TODO It does mean that anything not defering to this function will have to call
// initUserAttrs themselves, though.
// initUserAttrs(op, tp);
#ifndef NDEBUG
if (tp->tp_flags & Py_TPFLAGS_HEAPTYPE) {
BoxedHeapClass* heap_cls = static_cast<BoxedHeapClass*>(tp);
......@@ -4623,46 +4615,6 @@ void setupRuntime() {
setupSysEnd();
TRACK_ALLOCATIONS = true;
#if 0
Box* l = NULL;
for (int i = 0; i < 1000000000; i++) {
//if (i % 10000 == 0) {
//Py_XDECREF(l);
//l = PyList_New(0);
//}
//PyList_Append(l, autoDecref(boxInt(i)));
autoDecref(boxInt(i));
}
Py_XDECREF(l);
// XXX
PyGC_Collect(); // To make sure it creates any static objects
IN_SHUTDOWN = true;
PyType_ClearCache();
PyOS_FiniInterrupts();
_PyUnicode_Fini();
for (auto b : constants) {
Py_DECREF(b);
}
// May need to run multiple collections to collect everything:
while (PyGC_Collect())
;
_Py_ReleaseInternedStrings();
for (auto b : classes) {
if (!PyObject_IS_GC(b)) {
b->clearAttrs();
Py_CLEAR(b->tp_mro);
}
Py_DECREF(b);
}
// May need to run multiple collections to collect everything:
while (PyGC_Collect())
;
PRINT_TOTAL_REFS();
exit(0);
// XXX
#endif
}
BORROWED(BoxedModule*) createModule(BoxedString* name, const char* fn, const char* doc) noexcept {
......@@ -4763,7 +4715,7 @@ extern "C" void Py_Finalize() noexcept {
PyType_ClearCache();
PyOS_FiniInterrupts();
_PyCodecRegistry_Deinit();
// TODO: we might have to do this in a loop:
_PyUnicode_Fini();
PyInterpreterState_Clear(PyThreadState_GET()->interp);
......@@ -4805,16 +4757,13 @@ extern "C" void Py_Finalize() noexcept {
#endif
// PyGC_Collect());
// PyImport_Cleanup();
// _PyImport_Fini();
// _PyExc_Fini();
// CPython's implementation:
#if 0
_PyExc_Fini();
// _PyGILState_Fini();
_PyGILState_Fini();
#if 0
/* Delete current thread */
PyThreadState_Swap(NULL);
PyInterpreterState_Delete(interp);
......
......@@ -365,6 +365,8 @@ static_assert(offsetof(pyston::BoxedHeapClass, as_sequence) == offsetof(PyHeapTy
static_assert(offsetof(pyston::BoxedHeapClass, as_buffer) == offsetof(PyHeapTypeObject, as_buffer), "");
static_assert(sizeof(pyston::BoxedHeapClass) == sizeof(PyHeapTypeObject), "");
// DecrefHandle: a simple RAII-style class that [x]decref's its argument when it is destructed.
// Usually shouldn't be used on its own, it is the base of autoDecref() and AUTO_DECREF.
template <typename B, bool Nullable = false> struct DecrefHandle {
private:
B* b;
......@@ -397,12 +399,17 @@ public:
Py_DECREF(old_b);
}
};
// autoDecref(): use this on a subexpression that needs to have a ref removed. For example:
// unboxInt(autoDecref(boxInt(5)))
template <typename B, bool Nullable = false> DecrefHandle<B, Nullable> autoDecref(B* b) {
return DecrefHandle<B, Nullable>(b);
}
template <typename B> DecrefHandle<B, true> autoXDecref(B* b) {
return DecrefHandle<B, true>(b);
}
// AUTO_DECREF: A block-scoped decref handle, that will decref its argument when the block is done.
// It captures the argument by value.
#define AUTO_DECREF(x) DecrefHandle<Box, false> CAT(_autodecref_, __LINE__)((x))
#define AUTO_XDECREF(x) DecrefHandle<Box, true> CAT(_autodecref_, __LINE__)((x))
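// A minimal usage sketch (hypothetical values, not part of this change):
//   Box* b = boxInt(5);
//   AUTO_DECREF(b); // b gets decref'd automatically when this scope ends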
......@@ -484,6 +491,10 @@ template <ExceptionStyle S, typename Functor> Box* callCXXFromStyle(Functor f) {
return f();
}
// Uncomment this to disable the int freelist, which can make debugging easier.
// The freelist complicates things since it overwrites class pointers, doesn't release memory when
// objects are freed by the application, etc.
// TODO we should have a flag like this to disable all freelists, not just the int one.
//#define DISABLE_INT_FREELIST
extern "C" int PyInt_ClearFreeList() noexcept;
......@@ -1482,16 +1493,9 @@ inline BORROWED(BoxedString*) getStaticString(llvm::StringRef s) {
extern "C" volatile int _pendingcalls_to_do;
inline BORROWED(Box*) Box::getattrString(const char* attr) {
// XXX need to auto-decref
BoxedString* s = internStringMortal(attr);
try {
Box* r = getattr<NOT_REWRITABLE>(s, NULL);
Py_DECREF(s);
return r;
} catch (ExcInfo e) {
Py_DECREF(s);
throw e;
}
AUTO_DECREF(s);
return getattr<NOT_REWRITABLE>(s, NULL);
}
inline void ExcInfo::clear() {
......
......@@ -235,3 +235,14 @@ for x in data:
arg2=y)))
except Exception as e:
print(e.message)
for b in range(26):
try:
print int('123', b)
except ValueError as e:
print e
try:
print int(u'123', b)
except ValueError as e:
print e
# TODO: move this back to intmethods.py once this is working
for b in range(26):
try:
print int('123', b)
except ValueError as e:
print e
try:
print int(u'123', b)
except ValueError as e:
print e
# skip-if: '-n' not in EXTRA_JIT_ARGS and '-O' not in EXTRA_JIT_ARGS
# This test currently fails in the interpreter because we keep dead vreg entries around until the frame exits,
# which causes D() to get destroyed too late.
......
......@@ -215,9 +215,6 @@ def get_test_options(fn, check_stats, run_memcheck):
elif l.startswith("# no-collect-stats"):
opts.collect_stats = False
if opts.expected == "reffail":
opts.expected = "fail"
if not opts.skip:
# consider other reasons for skipping file
if SKIP_FAILING_TESTS and opts.expected == 'fail':
......