Commit 183aa5f7 authored by Kevin Modzelewski

Merge pull request #1166 from kmod/undo_changes

Undo a bunch of Pyston changes to cpython code
parents d36b99b0 865898f5
......@@ -23,9 +23,7 @@ __all__ = ["Hashable", "Iterable", "Iterator",
def _hasattr(C, attr):
try:
# Pyston temporary workaround: make this a list comprehension instead of a generator expression,
# since any() can exit without exhausting the iterable.
return any([attr in B.__dict__ for B in C.__mro__])
return any(attr in B.__dict__ for B in C.__mro__)
except AttributeError:
# Old-style class
return hasattr(C, attr)
......
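A minimal standalone sketch (not part of the diff; illustrative names) of the behavior the removed workaround sidestepped: any() stops at the first truthy value, so a generator expression can be left partially consumed, while a list comprehension always runs the iterable to completion.

    def noisy(values):
        for v in values:
            print "yielding", v
            yield v

    any(noisy([0, 1, 2]))               # prints 0 and 1 only, then stops early
    any([v for v in noisy([0, 1, 2])])  # the list is built first; all three print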
......@@ -87,10 +87,6 @@ def bisect_left(a, x, lo=0, hi=None):
# Overwrite above definitions with a fast C implementation
try:
# Pyston FIXME: somehow sys.modules['_bisect'] is being bound to 0, which is tripping an assert in importStar.
# import sys
# print '_bisect' in sys.modules
# print sys.modules['_bisect']
from _bisect import *
except ImportError:
pass
......@@ -14,11 +14,7 @@ import __builtin__, sys
try:
from _codecs import *
except ImportError, why:
# Pyston change: for now, let the ImportError propagate instead of
# converting to a SystemError. This leads to better handling by
# the importer
raise
# raise SystemError('Failed to load the builtin codecs: %s' % why)
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
......
__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
# Pyston change: disable using the _abcoll module for now.
from _abcoll import *
import _abcoll
# __all__ += _abcoll.__all__
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter, eq as _eq
......@@ -124,34 +123,7 @@ class OrderedDict(dict):
for k in self:
yield (k, self[k])
# Pyston change: copied the code in from _abcoll rather than calling "update = MutableMapping.update"
def update(*args, **kwds):
''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k, v in F.items(): D[k] = v
'''
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
# Pyston change: changed this from "Mapping" to "dict"
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
update = MutableMapping.update
__update = update # let subclasses override update without breaking __init__
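For reference, a small Python 2.7 sketch of the three update paths the docstring above describes; the restored update = MutableMapping.update supplies exactly this generic behavior:

    from collections import OrderedDict

    d = OrderedDict()
    d.update({'a': 1})      # E has .keys(): for k in E: D[k] = E[k]
    d.update([('b', 2)])    # E lacks .keys(): for (k, v) in E: D[k] = v
    d.update(c=3)           # keywords: for k, v in F.items(): D[k] = v
    print d.items()         # [('a', 1), ('b', 2), ('c', 3)]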
......@@ -407,6 +379,7 @@ def namedtuple(typename, field_names, verbose=False, rename=False):
return result
########################################################################
### Counter
########################################################################
......
......@@ -223,8 +223,7 @@ class TestABC(unittest.TestCase):
C().f()
del C
test_support.gc_collect()
# Pyston change: disable it for now.
# self.assertEqual(r(), None)
self.assertEqual(r(), None)
def test_main():
test_support.run_unittest(TestABC)
......
......@@ -765,8 +765,6 @@ class BaseTest(unittest.TestCase):
b = buffer(a)
self.assertEqual(b[0], a.tostring()[0])
# Pyston change: disable this test because of our GC
@unittest.skip("Pyston" in sys.version)
def test_weakref(self):
s = array.array(self.typecode, self.example)
p = proxy(s)
......
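The weakref tests re-enabled in this commit all rely on the same property, sketched here with illustrative names; prompt collection is guaranteed by CPython's refcounting but not by a tracing or conservative GC, which is why Pyston had skipped them.

    import weakref

    class Thing(object):
        pass

    t = Thing()
    p = weakref.proxy(t)
    del t                   # refcount hits zero; Thing is freed immediately
    try:
        str(p)              # the proxy's referent is gone
    except ReferenceError:
        print "referent collected"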
......@@ -433,15 +433,12 @@ class ComplexTest(unittest.TestCase):
test_values = (1, 123.0, 10-19j, xcomplex(1+2j),
xcomplex(1+87j), xcomplex(10+90j))
# Pyston change: if the rhs is a subclass of the lhs, Python should try
# the reversed operation first. Pyston doesn't support this yet; its
# binop handling needs improvement.
for op in infix_binops:
for x in xcomplex_values:
for y in test_values:
a = 'x %s y' % op
b = 'y %s x' % op
# self.assertTrue(type(eval(a)) is type(eval(b)) is xcomplex)
self.assertTrue(type(eval(a)) is type(eval(b)) is xcomplex)
def test_hash(self):
for x in xrange(-30, 30):
......
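A short sketch of the dispatch rule the removed comment describes: when the right operand's type is a proper subclass of the left operand's type and overrides the reflected method, Python tries the reflected method first.

    class Base(object):
        def __add__(self, other):
            return "Base.__add__"

    class Sub(Base):
        def __radd__(self, other):
            return "Sub.__radd__"

    print Base() + Sub()    # prints "Sub.__radd__": the subclass's method wins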
......@@ -29,8 +29,6 @@ class AutoFileTests(unittest.TestCase):
self.f.close()
os.remove(TESTFN)
# Pyston change: disabled
@unittest.skip("this depends on refcounting")
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
......
......@@ -28,8 +28,6 @@ class AutoFileTests(unittest.TestCase):
self.f.close()
os.remove(TESTFN)
# Pyston change: disable this test because of GC
@unittest.skip("only works with refcounting")
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
......
......@@ -119,10 +119,7 @@ class MmapTests(unittest.TestCase):
def test_access_parameter(self):
# Test for "access" keyword parameter
mapsize = 10
# Pyston change: use a with statement to not rely on the destructor being called:
# open(TESTFN, "wb").write("a"*mapsize)
with open(TESTFN, "wb") as f:
f.write("a"*mapsize)
open(TESTFN, "wb").write("a"*mapsize)
f = open(TESTFN, "rb")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ)
self.assertEqual(m[:], 'a'*mapsize, "Readonly memory map data incorrect.")
......@@ -541,10 +538,7 @@ class MmapTests(unittest.TestCase):
@unittest.skipUnless(hasattr(mmap, 'PROT_READ'), "needs mmap.PROT_READ")
def test_prot_readonly(self):
mapsize = 10
# Pyston change: use a with statement to not rely on the destructor being called:
# open(TESTFN, "wb").write("a"*mapsize)
with open(TESTFN, "wb") as f:
f.write("a"*mapsize)
open(TESTFN, "wb").write("a"*mapsize)
f = open(TESTFN, "rb")
m = mmap.mmap(f.fileno(), mapsize, prot=mmap.PROT_READ)
self.assertRaises(TypeError, m.write, "foo")
......@@ -556,10 +550,7 @@ class MmapTests(unittest.TestCase):
def test_io_methods(self):
data = "0123456789"
# Pyston change: use a with statement to not rely on the destructor being called:
# open(TESTFN, "wb").write("x"*len(data))
with open(TESTFN, "wb") as f:
f.write("x"*len(data))
open(TESTFN, "wb").write("x"*len(data))
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), len(data))
f.close()
......@@ -626,10 +617,7 @@ class MmapTests(unittest.TestCase):
m.close()
# Should not crash (Issue 5385)
# Pyston change: use a with statement to not rely on the destructor being called:
# open(TESTFN, "wb").write("x"*10)
with open(TESTFN, "wb") as f:
f.write("x"*10)
open(TESTFN, "wb").write("x"*10)
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), 0)
f.close()
......
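All four mmap hunks above revert the same idiom. A sketch of the trade-off, using a hypothetical path:

    # Relies on refcounting: the temporary file object is closed (and its
    # buffer flushed) as soon as the expression's result is discarded.
    open("/tmp/scratch.bin", "wb").write("a" * 10)

    # Deterministic under any memory manager: close happens at block exit.
    with open("/tmp/scratch.bin", "wb") as f:
        f.write("a" * 10)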
......@@ -337,9 +337,7 @@ class TestJointOps(unittest.TestCase):
obj.x = iter(container)
del obj, container
gc.collect()
# Pyston change: with conservative scanning it is
# hard to guarantee that finalizers run
# self.assertTrue(ref() is None, "Cycle was not collected")
self.assertTrue(ref() is None, "Cycle was not collected")
class TestSet(TestJointOps):
thetype = set
......@@ -560,9 +558,7 @@ class TestSet(TestJointOps):
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
# Pyston change: with conservative scanning it is
# hard to guarantee that finalizers run
# self.assertRaises(ReferenceError, str, p)
self.assertRaises(ReferenceError, str, p)
@unittest.skipUnless(hasattr(set, "test_c_api"),
'C API test only available in a debug build')
......
......@@ -79,8 +79,7 @@ class InterProcessSignalTests(unittest.TestCase):
# don't worry about re-setting the default handlers.
signal.signal(signal.SIGHUP, self.handlerA)
signal.signal(signal.SIGUSR1, self.handlerB)
# Pyston change: pyston uses SIGUSR2 internally
# signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGALRM, signal.default_int_handler)
# Variables the signals will modify:
......@@ -118,10 +117,9 @@ class InterProcessSignalTests(unittest.TestCase):
print "HandlerBCalled exception caught"
# Pyston change: pyston uses SIGUSR2 internally
# child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
# if child:
# self.wait(child) # Nothing should happen.
child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
if child:
self.wait(child) # Nothing should happen.
try:
signal.alarm(1)
......
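A short Unix-only sketch of what the restored SIGUSR2 lines exercise; Pyston had to skip them because it reserved SIGUSR2 for internal use, as the PyOS_setsig hunk at the end of this diff shows.

    import os, signal

    signal.signal(signal.SIGUSR2, signal.SIG_IGN)   # ignore the signal
    os.kill(os.getpid(), signal.SIGUSR2)            # delivered and discarded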
......@@ -71,16 +71,13 @@ try:
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
# Pyston change (we don't support tb_frame yet):
FrameType = type(sys._getframe(0))
# FrameType = type(tb.tb_frame)
FrameType = type(tb.tb_frame)
del tb
SliceType = slice
EllipsisType = type(Ellipsis)
# Pyston change: don't support this yet
# DictProxyType = type(TypeType.__dict__)
DictProxyType = type(TypeType.__dict__)
NotImplementedType = type(NotImplemented)
# Pyston change:
......
......@@ -139,10 +139,7 @@ class TestLoader(object):
hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
# Pyston change:
# TODO(rntz): needs builtin `cmp` to work
#testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
testFnNames.sort()
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
......
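The restored sort depends on unittest's private _CmpToKey adapter and Python 2's builtin cmp. A sketch of the usual shape of such an adapter, under a hypothetical name:

    def cmp_to_key(mycmp):              # hypothetical stand-in for _CmpToKey
        class K(object):
            def __init__(self, obj, *args):
                self.obj = obj
            def __lt__(self, other):
                return mycmp(self.obj, other.obj) < 0
        return K

    names = ['test_b', 'test_a', 'test_c']
    names.sort(key=cmp_to_key(cmp))     # cmp is a builtin in Python 2
    print names                         # ['test_a', 'test_b', 'test_c']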
......@@ -153,18 +153,15 @@ class TestResult(object):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
# Pyston change: I've commented this out for now. - rntz
# TODO(rntz): needs traceback stuff to work
# while tb and self._is_relevant_tb_level(tb):
# tb = tb.tb_next
# if exctype is test.failureException:
# # Skip assert*() traceback levels
# length = self._count_relevant_tb_levels(tb)
# msgLines = traceback.format_exception(exctype, value, tb, length)
# else:
# msgLines = traceback.format_exception(exctype, value, tb)
msgLines = traceback.format_exception(exctype, value, tb)
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
......
......@@ -183,12 +183,6 @@ def warn(message, category=None, stacklevel=1):
assert issubclass(category, Warning)
# Get context information
try:
# Pyston change: manually skip the call to _getframe.
# A ValueError() is supposed to mean that the "depth" argument is greater
# than the stack depth, so it doesn't seem appropriate for us to throw one
# as a signal that _getframe is unimplemented.
raise ValueError()
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
......
......@@ -49,18 +49,9 @@ class WeakValueDictionary(UserDict.UserDict):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr)
self._pending_removals.append(wr.key)
else:
# Pyston change: adopted this PyPy fix:
#
# Changed this for PyPy: made more resistant. The
# issue is that in some corner cases, self.data
# might already be changed or removed by the time
# this weakref's callback is called. If that is
# the case, we don't want to randomly kill an
# unrelated entry.
if self.data.get(wr.key) is wr:
del self.data[wr.key]
del self.data[wr.key]
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
......@@ -73,9 +64,7 @@ class WeakValueDictionary(UserDict.UserDict):
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
wr = l.pop()
if d.get(wr.key) is wr:
del d[wr.key]
del d[l.pop()]
def __getitem__(self, key):
o = self.data[key]()
......@@ -291,32 +280,14 @@ class WeakKeyDictionary(UserDict.UserDict):
"""
def __init__(self, dict=None):
# Pyston change:
# This implementation of WeakKeyDictionary originally relied on quick destruction
# of the weakref key objects and the immediate calling of their callback. With a gc,
# there can be multiple key removals before a collection happens, at which point we
# call remove() with keys that are not the most recent version.
#
# The approach here is to check the key in the dict to make sure it is still the same.
# This is a little bit complicated since 1) if the weakref.ref's referent gets freed,
# the ref object is no longer usable as a hash key, and 2) setting a value in a dict
# when the key already exists will not update the key.
#
# So in __setitem__, remove the existing key and replace it with the new one.
# Since there's no way to query for the current key inside a dict, given a lookup key,
# we keep a separate "refs" dict to look it up.
self.data = {}
self.refs = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
assert len(self.data) == len(self.refs)
if self._iterating:
self._pending_removals.append(k)
else:
if self.refs.get(k) is k:
del self.data[k]
del self.refs[k]
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
......@@ -331,20 +302,14 @@ class WeakKeyDictionary(UserDict.UserDict):
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
r = self.refs
while l:
try:
k = l.pop()
if self.refs.get(k) is k:
del d[k]
del r[k]
del d[l.pop()]
except KeyError:
pass
def __delitem__(self, key):
r = ref(key)
del self.data[r]
del self.refs[r]
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
......@@ -353,11 +318,7 @@ class WeakKeyDictionary(UserDict.UserDict):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
r = ref(key, self._remove)
self.data.pop(r, None)
self.refs.pop(r, None)
self.data[r] = value
self.refs[r] = r
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
......@@ -460,28 +421,23 @@ class WeakKeyDictionary(UserDict.UserDict):
def popitem(self):
while 1:
_, key = self.refs.popitem()
value = self.data.pop(key)
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
r = ref(key)
self.refs.pop(r, None)
return self.data.pop(r, *args)
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return default
return self[key]
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
self[key] = value
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
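The restored WeakValueDictionary and WeakKeyDictionary code builds on the callback protocol sketched below (illustrative names); the Pyston variants had to re-check each entry because under its GC the callback could fire late, after the slot had been reused for a new object.

    import weakref

    class Obj(object):
        pass

    data = {}

    def remove(wr, data=data):          # wr is the dead reference itself
        del data[wr]

    o = Obj()
    r = weakref.ref(o, remove)          # remove() runs when o dies
    data[r] = "value"
    del o                               # prompt under refcounting
    print len(data)                     # 0 under CPython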
......@@ -55,9 +55,7 @@ typedef struct BLOCK {
struct BLOCK *leftlink;
} block;
// Pyston change: disable free block cache
// #define MAXFREEBLOCKS 10
#define MAXFREEBLOCKS 0
#define MAXFREEBLOCKS 10
static Py_ssize_t numfreeblocks = 0;
static block *freeblocks[MAXFREEBLOCKS];
......
......@@ -1291,9 +1291,7 @@ s_get(void *ptr, Py_ssize_t size)
*/
slen = strlen(PyString_AS_STRING(result));
size = min(size, (Py_ssize_t)slen);
// Pyston change: no ob_refcnt
if (0 /*result->ob_refcnt == 1*/) {
if (result->ob_refcnt == 1) {
/* shorten the result */
_PyString_Resize(&result, size);
return result;
......
......@@ -1791,9 +1791,8 @@ treebuilder_handle_data(TreeBuilderObject* self, PyObject* data)
Py_INCREF(data); self->data = data;
} else {
/* more than one item; use a list to collect items */
// Pyston change: Py_REFCNT(self->data) -> 2
if (PyString_CheckExact(self->data) && /*Py_REFCNT(self->data)*/2 == 1 &&
PyString_CheckExact(data) && PyString_GET_SIZE(data) == 1) {
if (PyString_CheckExact(self->data) && Py_REFCNT(self->data) == 1 &&
PyString_CheckExact(data) && PyString_GET_SIZE(data) == 1) {
/* expat often generates single character data sections; handle
the most common case by resizing the existing string... */
Py_ssize_t size = PyString_GET_SIZE(self->data);
......
......@@ -35,8 +35,7 @@ functools_reduce(PyObject *self, PyObject *args)
for (;;) {
PyObject *op2;
// Pyston change: can't do these sorts of refcount optimizations:
if (1 /*args->ob_refcnt > 1*/) {
if (args->ob_refcnt > 1) {
Py_DECREF(args);
if ((args = PyTuple_New(2)) == NULL)
goto Fail;
......
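This hunk and several below restore the same CPython idiom: when Py_REFCNT(obj) == 1 the caller holds the only reference, so the object can be mutated or resized in place. Pyston, lacking refcounts, had to pessimize these branches. A rough Python-level view of the counting involved, for orientation only:

    import sys

    t = tuple([1, 2, 3])        # built fresh, so uniquely referenced
    # getrefcount reports one extra reference for its own argument,
    # so a uniquely-held object reports 2.
    print sys.getrefcount(t)    # 2 under CPython
    u = t
    print sys.getrefcount(t)    # 3: u now also refers to the tuple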
......@@ -257,9 +257,7 @@ static void
data_stack_dealloc(SRE_STATE* state)
{
if (state->data_stack) {
// Pyston change: use malloc
// PyMem_FREE(state->data_stack);
free(state->data_stack);
PyMem_FREE(state->data_stack);
state->data_stack = NULL;
}
state->data_stack_size = state->data_stack_base = 0;
......@@ -275,9 +273,7 @@ data_stack_grow(SRE_STATE* state, Py_ssize_t size)
void* stack;
cursize = minsize+minsize/4+1024;
TRACE(("allocate/grow stack %" PY_FORMAT_SIZE_T "d\n", cursize));
// Pyston change: use malloc
// stack = PyMem_REALLOC(state->data_stack, cursize);
stack = realloc(state->data_stack, cursize);
stack = PyMem_REALLOC(state->data_stack, cursize);
if (!stack) {
data_stack_dealloc(state);
return SRE_ERROR_MEMORY;
......@@ -1180,9 +1176,7 @@ entrance:
ctx->pattern[1], ctx->pattern[2]));
/* install new repeat context */
// Pyston change: use malloc
// ctx->u.rep = (SRE_REPEAT*) PyObject_MALLOC(sizeof(*ctx->u.rep));
ctx->u.rep = (SRE_REPEAT*) malloc(sizeof(*ctx->u.rep));
ctx->u.rep = (SRE_REPEAT*) PyObject_MALLOC(sizeof(*ctx->u.rep));
if (!ctx->u.rep) {
PyErr_NoMemory();
RETURN_FAILURE;
......@@ -1196,9 +1190,7 @@ entrance:
state->ptr = ctx->ptr;
DO_JUMP(JUMP_REPEAT, jump_repeat, ctx->pattern+ctx->pattern[0]);
state->repeat = ctx->u.rep->prev;
// Pyston change: use malloc
// PyObject_FREE(ctx->u.rep);
free(ctx->u.rep);
PyObject_FREE(ctx->u.rep);
if (ret) {
RETURN_ON_ERROR(ret);
......
......@@ -5955,6 +5955,7 @@ init_stuff(PyObject *module_dict)
if (!( t=PyImport_ImportModule("__builtin__"))) return -1;
if (PyDict_SetItemString(module_dict, "__builtins__", t) < 0)
return -1;
// Pyston change: I think you need this
Py_DECREF(t);
if (!( t=PyDict_New())) return -1;
......
......@@ -405,7 +405,7 @@ static void
teedataobject_safe_decref(PyObject *obj)
{
while (obj && Py_TYPE(obj) == &teedataobject_type &&
2 /*Pyston change, was: Py_REFCNT(obj)*/ == 1) {
Py_REFCNT(obj) == 1) {
PyObject *nextlink = ((teedataobject *)obj)->nextlink;
((teedataobject *)obj)->nextlink = NULL;
Py_DECREF(obj);
......@@ -1942,7 +1942,7 @@ product_next(productobject *lz)
Py_ssize_t *indices = lz->indices;
/* Copy the previous result tuple or re-use it if available */
if (2 /*Pyston change, was: Py_REFCNT(result)*/ > 1) {
if (Py_REFCNT(result) > 1) {
PyObject *old_result = result;
result = PyTuple_New(npools);
if (result == NULL)
......@@ -1956,8 +1956,7 @@ product_next(productobject *lz)
Py_DECREF(old_result);
}
/* Now, we've got the only copy so we can update it in-place */
// Pyston change: safe to comment this out since we will always create a new tuple:
// assert (npools==0 || Py_REFCNT(result) == 1);
assert (npools==0 || Py_REFCNT(result) == 1);
/* Update the pool indices right-to-left. Only advance to the
next pool when the previous one rolls-over */
......@@ -2171,7 +2170,7 @@ combinations_next(combinationsobject *co)
}
} else {
/* Copy the previous result tuple or re-use it if available */
if (2 /*Pyston change, was: Py_REFCNT(result)*/ > 1) {
if (Py_REFCNT(result) > 1) {
PyObject *old_result = result;
result = PyTuple_New(r);
if (result == NULL)
......@@ -2188,7 +2187,7 @@ combinations_next(combinationsobject *co)
* CPython's empty tuple is a singleton and cached in
* PyTuple's freelist.
*/
assert(r == 0 || 2 /*Pyston change, was: Py_REFCNT(result)*/ == 1);
assert(r == 0 || Py_REFCNT(result) == 1);
/* Scan indices right-to-left until finding one that is not
at its maximum (i + n - r). */
......@@ -2420,7 +2419,7 @@ cwr_next(cwrobject *co)
}
} else {
/* Copy the previous result tuple or re-use it if available */
if (2 /*Pyston change, was: Py_REFCNT(result)*/ > 1) {
if (Py_REFCNT(result) > 1) {
PyObject *old_result = result;
result = PyTuple_New(r);
if (result == NULL)
......@@ -2435,7 +2434,7 @@ cwr_next(cwrobject *co)
}
/* Now, we've got the only copy so we can update it in-place CPython's
empty tuple is a singleton and cached in PyTuple's freelist. */
assert(r == 0 || 2 /*Pyston change, was: Py_REFCNT(result)*/ == 1);
assert(r == 0 || Py_REFCNT(result) == 1);
/* Scan indices right-to-left until finding one that is not
* at its maximum (n-1). */
......@@ -2683,7 +2682,7 @@ permutations_next(permutationsobject *po)
goto empty;
/* Copy the previous result tuple or re-use it if available */
if (2 /*Pyston change, was: Py_REFCNT(result)*/ > 1) {
if (Py_REFCNT(result) > 1) {
PyObject *old_result = result;
result = PyTuple_New(r);
if (result == NULL)
......@@ -2697,7 +2696,7 @@ permutations_next(permutationsobject *po)
Py_DECREF(old_result);
}
/* Now, we've got the only copy so we can update it in-place */
assert(r == 0 || 2 /*Pyston change, was: Py_REFCNT(result)*/ == 1);
assert(r == 0 || Py_REFCNT(result) == 1);
/* Decrement rightmost cycle, moving leftward upon zero rollover */
for (i=r-1 ; i>=0 ; i--) {
......@@ -3584,7 +3583,7 @@ izip_next(izipobject *lz)
if (tuplesize == 0)
return NULL;
if (2 /*Pyston change, was: Py_REFCNT(result)*/ == 1) {
if (Py_REFCNT(result) == 1) {
Py_INCREF(result);
for (i=0 ; i < tuplesize ; i++) {
it = PyTuple_GET_ITEM(lz->ittuple, i);
......@@ -3929,7 +3928,7 @@ izip_longest_next(iziplongestobject *lz)
return NULL;
if (lz->numactive == 0)
return NULL;
if (2 /*Pyston change, was: Py_REFCNT(result)*/ == 1) {
if (Py_REFCNT(result) == 1) {
Py_INCREF(result);
for (i=0 ; i < tuplesize ; i++) {
it = PyTuple_GET_ITEM(lz->ittuple, i);
......@@ -4081,34 +4080,6 @@ static PyMethodDef module_methods[] = {
{NULL, NULL} /* sentinel */
};
// Pyston change: These are types defined in this file that I manually
// checked. The ones that aren't commented out have a `tp_dealloc` that
// does nothing in Pyston, since we switched to garbage collection;
// Pyston's finalizer logic wants to know that for optimization purposes.
PyTypeObject* Itertool_SafeDealloc_Types[] = {
// &combinations_type,
// &cwr_type,
&cycle_type,
&dropwhile_type,
&takewhile_type,
&islice_type,
&starmap_type,
&imap_type,
&chain_type,
&compress_type,
&ifilter_type,
&ifilterfalse_type,
&count_type,
&izip_type,
&iziplongest_type,
// &permutations_type,
// &product_type,
&repeat_type,
&groupby_type,
NULL
};
PyMODINIT_FUNC
inititertools(void)
{
......
......@@ -429,9 +429,7 @@ close_the_file(PyFileObject *f)
if (local_fp != NULL) {
local_close = f->f_close;
if (local_close != NULL && f->unlocked_count > 0) {
// Pyston change:
// if (f->ob_refcnt > 0) {
if (/*f->ob_refcnt*/ 2 > 0) {
if (f->ob_refcnt > 0) {
PyErr_SetString(PyExc_IOError,
"close() called during concurrent "
"operation on the same file object.");
......@@ -1600,9 +1598,7 @@ PyFile_GetLine(PyObject *f, int n)
"EOF when reading a line");
}
else if (s[len-1] == '\n') {
// Pyston change:
// if (result->ob_refcnt == 1) {
if (/*result->ob_refcnt*/ 2 == 1) {
if (result->ob_refcnt == 1) {
if (_PyString_Resize(&result, len-1))
return NULL;
}
......@@ -1625,9 +1621,7 @@ PyFile_GetLine(PyObject *f, int n)
"EOF when reading a line");
}
else if (s[len-1] == '\n') {
// Pyston change:
// if (result->ob_refcnt == 1)
if (/*result->ob_refcnt*/ 2 == 1)
if (result->ob_refcnt == 1)
PyUnicode_Resize(&result, len-1);
else {
PyObject *v;
......
......@@ -53,8 +53,7 @@ OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
/* Limit for the Unicode object free list */
// Pyston change: set this to 0 (was 1024) to disable the free list since we can't track that through our GC.
#define PyUnicode_MAXFREELIST 0
#define PyUnicode_MAXFREELIST 1024
/* Limit for the Unicode object free list stay alive optimization.
......@@ -356,7 +355,7 @@ int _PyUnicode_Resize(PyUnicodeObject **unicode, Py_ssize_t length)
return -1;
}
v = *unicode;
if (v == NULL || !PyUnicode_Check(v) || /* Pyston change, can't check this: Py_REFCNT(v) != 1 || */ length < 0) {
if (v == NULL || !PyUnicode_Check(v) || Py_REFCNT(v) != 1 || length < 0) {
PyErr_BadInternalCall();
return -1;
}
......
......@@ -124,12 +124,8 @@
#include "float.h"
// Pyston change: use the normal malloc allocator: it's faster, and we can't use the GC
// because the custom memory management functions inside this file are not compatible with it.
// #define MALLOC PyMem_Malloc
// #define FREE PyMem_Free
#define MALLOC malloc
#define FREE free
#define MALLOC PyMem_Malloc
#define FREE PyMem_Free
/* This code should also work for ARM mixed-endian format on little-endian
machines, where doubles have byte order 45670123 (in increasing address
......
......@@ -440,7 +440,6 @@ static char* sys_files[] = {
/* Un-initialize things, as good as we can */
// Pyston change: we don't support calling cleanup currently
void
PyImport_Cleanup(void)
{
......
......@@ -130,8 +130,7 @@ block_alloc(block *b, size_t size)
PyArena *
PyArena_New()
{
// Pyston change: conservatively allocate the PyArena metadata object
PyArena* arena = (PyArena *)PyMem_Malloc(sizeof(PyArena));
PyArena* arena = (PyArena *)malloc(sizeof(PyArena));
if (!arena)
return (PyArena*)PyErr_NoMemory();
......@@ -177,8 +176,7 @@ PyArena_Free(PyArena *arena)
*/
Py_DECREF(arena->a_objects);
// Pyston change:
PyMem_Free(arena);
free(arena);
}
void *
......
......@@ -1359,10 +1359,6 @@ extern "C" PyOS_sighandler_t PyOS_getsig(int sig) noexcept {
}
extern "C" PyOS_sighandler_t PyOS_setsig(int sig, PyOS_sighandler_t handler) noexcept {
if (sig == SIGUSR2) {
Py_FatalError("SIGUSR2 is reserved for Pyston internal use");
}
#ifdef HAVE_SIGACTION
/* Some code in Modules/signalmodule.c depends on sigaction() being
* used here if HAVE_SIGACTION is defined. Fix that if this code
......