Commit a8ed2766 authored by Tim Peters

Convert some XXXs. More to come.

parent cf6937ca
...@@ -813,8 +813,7 @@ earlier names: ...@@ -813,8 +813,7 @@ earlier names:
The in-memory, per-connection object cache (pickle cache) was changed The in-memory, per-connection object cache (pickle cache) was changed
to participate in garbage collection. This should reduce the number to participate in garbage collection. This should reduce the number
of memory leaks, although we are still tracking a few problems. [XXX of memory leaks, although we are still tracking a few problems.
might say more here]
Multi-version concurrency control Multi-version concurrency control
--------------------------------- ---------------------------------
...@@ -1362,7 +1361,7 @@ The ZEO cache verification protocol was revised to require many fewer ...@@ -1362,7 +1361,7 @@ The ZEO cache verification protocol was revised to require many fewer
messages in cases where a client or server restarts quickly. messages in cases where a client or server restarts quickly.
The performance of full cache verification has improved dramatically. The performance of full cache verification has improved dramatically.
XXX Get measurements from Jim -- somewhere in 2x-5x recall. The Measurements from Jim showed an improvement somewhere in the 2x-5x range. The
implementation was fixed to use the very-fast getSerial() method on implementation was fixed to use the very-fast getSerial() method on
the storage instead of the comparatively slow load(). the storage instead of the comparatively slow load().
...@@ -1720,7 +1719,7 @@ The ZEO version number was bumped to 2.0.2 on account of the following ...@@ -1720,7 +1719,7 @@ The ZEO version number was bumped to 2.0.2 on account of the following
minor feature additions. minor feature additions.
The performance of full cache verification has improved dramatically. The performance of full cache verification has improved dramatically.
XXX Get measurements from Jim -- somewhere in 2x-5x recall. The Measurements from Jim showed an improvement somewhere in the 2x-5x range. The
implementation was fixed to use the very-fast getSerial() method on implementation was fixed to use the very-fast getSerial() method on
the storage instead of the comparatively slow load(). the storage instead of the comparatively slow load().
......
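As an aside on why that change helps, here is a minimal hypothetical sketch (not the actual ZEO verification code): deciding whether a cached entry is still current only needs the object's serial, so getSerial() can answer from the storage's index, whereas load() would also read and return the full data record.

    def verify_cache_entry(storage, oid, cached_serial):
        # getSerial() consults only the index; load() would also fetch the
        # (possibly large) pickle, which is wasted work during verification.
        try:
            current_serial = storage.getSerial(oid)
        except KeyError:
            return "invalidate"      # object no longer exists in the storage
        if current_serial == cached_serial:
            return "ok"              # cache entry is still current
        return "invalidate"          # stale entry; client must refetch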
...@@ -96,7 +96,7 @@ type VALUE_TYPE. The macro returns an int, with value ...@@ -96,7 +96,7 @@ type VALUE_TYPE. The macro returns an int, with value
< 0 if X < Y < 0 if X < Y
== 0 if X == Y == 0 if X == Y
> 0 if X > Y > 0 if X > Y
XXX There is no provision for determining whether the comparison Bug: There is no provision for determining whether the comparison
attempt failed (set a Python exception). attempt failed (set a Python exception).
DECREF_VALUE(K) DECREF_VALUE(K)
......
...@@ -79,9 +79,9 @@ merge_error(int p1, int p2, int p3, int reason) ...@@ -79,9 +79,9 @@ merge_error(int p1, int p2, int p3, int reason)
* Mapping value modification: s2 or s3 can modify the value associated * Mapping value modification: s2 or s3 can modify the value associated
* with a key in s1, provided the other transaction doesn't make a * with a key in s1, provided the other transaction doesn't make a
* modification of the same key to a different value. It's OK if s2 and s3 * modification of the same key to a different value. It's OK if s2 and s3
* both give the same new value to the key (XXX while it's hard to be * both give the same new value to the key (while it's hard to be precise about
* precise about why, this doesn't seem consistent with that it's *not* OK * why, this doesn't seem consistent with the fact that it's *not* OK for both
* for both to add a new key mapping to the same value). * to add a new key mapping to the same value).
*/ */
static PyObject * static PyObject *
bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3) bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3)
...@@ -118,7 +118,7 @@ bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3) ...@@ -118,7 +118,7 @@ bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3)
/* Consult zodb/btrees/interfaces.py for the meaning of the last /* Consult zodb/btrees/interfaces.py for the meaning of the last
* argument passed to merge_error(). * argument passed to merge_error().
*/ */
/* XXX This isn't passing on errors raised by value comparisons. */ /* TODO: This isn't passing on errors raised by value comparisons. */
while (i1.position >= 0 && i2.position >= 0 && i3.position >= 0) while (i1.position >= 0 && i2.position >= 0 && i3.position >= 0)
{ {
TEST_KEY_SET_OR(cmp12, i1.key, i2.key) goto err; TEST_KEY_SET_OR(cmp12, i1.key, i2.key) goto err;
......
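To illustrate the rule that comment describes, a plain-Python sketch of the per-key decision (hypothetical helper, not the real C merge): given the original value o from s1 and the candidate values a and b from the two later transactions, the merge succeeds when at most one side changed the value, or both sides agree on the new value.

    def merge_one_value(o, a, b):
        if a == b:
            return a          # both agree (possibly both left it unchanged)
        if a == o:
            return b          # only the second transaction changed it
        if b == o:
            return a          # only the first transaction changed it
        raise ValueError("conflicting modifications of the same key")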
...@@ -39,12 +39,12 @@ class Base(TestCase): ...@@ -39,12 +39,12 @@ class Base(TestCase):
def _getRoot(self): def _getRoot(self):
if self.db is None: if self.db is None:
# XXX On the next line, the ZODB4 flavor of this routine # Unclear: On the next line, the ZODB4 flavor of this routine
# XXX passes a cache_size argument: # passes a cache_size argument:
# self.db = DB(MappingStorage(), cache_size=1) # self.db = DB(MappingStorage(), cache_size=1)
# XXX If that's done here, though, testLoadAndStore() and # If that's done here, though, testLoadAndStore() and
# XXX testGhostUnghost() both nail the CPU and seemingly # testGhostUnghost() both nail the CPU and seemingly
# XXX never finish. # never finish.
self.db = DB(MappingStorage()) self.db = DB(MappingStorage())
return self.db.open().root() return self.db.open().root()
...@@ -138,9 +138,9 @@ class MappingBase(Base): ...@@ -138,9 +138,9 @@ class MappingBase(Base):
for i in range(1000): for i in range(1000):
self.t[i] = i self.t[i] = i
r = repr(self.t) r = repr(self.t)
# make sure the repr is 10000 bytes long for a bucket # Make sure the repr is 10000 bytes long for a bucket.
# XXX since we the test is also run for btrees, skip the length # But since the test is also run for btrees, skip the length
# XXX check if the repr starts with '<' # check if the repr starts with '<'
if not r.startswith('<'): if not r.startswith('<'):
self.assert_(len(r) > 10000) self.assert_(len(r) > 10000)
......
...@@ -28,10 +28,10 @@ class SubclassTest(unittest.TestCase): ...@@ -28,10 +28,10 @@ class SubclassTest(unittest.TestCase):
# of that type # of that type
t = T() t = T()
# XXX there's no good way to get a bucket at the moment. # There's no good way to get a bucket at the moment.
# XXX __getstate__() is as good as it gets, but the default # __getstate__() is as good as it gets, but the default
# XXX getstate explicitly includes the pickle of the bucket # getstate explicitly includes the pickle of the bucket
# XXX for small trees, so we have to be clever :-( # for small trees, so we have to be clever :-(
# make sure there is more than one bucket in the tree # make sure there is more than one bucket in the tree
for i in range(1000): for i in range(1000):
......
...@@ -216,7 +216,7 @@ class PersistenceTest(unittest.TestCase): ...@@ -216,7 +216,7 @@ class PersistenceTest(unittest.TestCase):
self.assertEqual(obj.curly, 2) self.assertEqual(obj.curly, 2)
self.assertEqual(obj.moe, 3) self.assertEqual(obj.moe, 3)
# XXX Need to decide how __setattr__ and __delattr__ should work, # TODO: Need to decide how __setattr__ and __delattr__ should work,
# then write tests. # then write tests.
......
...@@ -189,5 +189,4 @@ def loop(timeout=30.0, use_poll=0, map=None): ...@@ -189,5 +189,4 @@ def loop(timeout=30.0, use_poll=0, map=None):
##asyncore.loop = deprecated_loop ##asyncore.loop = deprecated_loop
# XXX Remove this once we've updated ZODB4 since they share this package
asyncore.loop = loop asyncore.loop = loop
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
This server acts as a front-end for one or more real storages, like This server acts as a front-end for one or more real storages, like
file storage or Berkeley storage. file storage or Berkeley storage.
XXX Need some basic access control-- a declaration of the methods TODO: Need some basic access control-- a declaration of the methods
exported for invocation by the server. exported for invocation by the server.
""" """
...@@ -47,8 +47,8 @@ from ZODB.loglevels import BLATHER ...@@ -47,8 +47,8 @@ from ZODB.loglevels import BLATHER
logger = logging.getLogger('ZEO.StorageServer') logger = logging.getLogger('ZEO.StorageServer')
# XXX This used to say "ZSS", which is now implied in the logger name, can this # TODO: This used to say "ZSS", which is now implied in the logger name.
# be either set to str(os.getpid()) (if that makes sense) or removed? # Can this be either set to str(os.getpid()) (if that makes sense) or removed?
_label = "" # default label used for logging. _label = "" # default label used for logging.
def set_label(): def set_label():
...@@ -298,7 +298,7 @@ class ZEOStorage: ...@@ -298,7 +298,7 @@ class ZEOStorage:
os = self.storage.getTid(oid) os = self.storage.getTid(oid)
except KeyError: except KeyError:
self.client.invalidateVerify((oid, '')) self.client.invalidateVerify((oid, ''))
# XXX It's not clear what we should do now. The KeyError # It's not clear what we should do now. The KeyError
# could be caused by an object uncreation, in which case # could be caused by an object uncreation, in which case
# invalidation is right. It could be an application bug # invalidation is right. It could be an application bug
# that left a dangling reference, in which case it's bad. # that left a dangling reference, in which case it's bad.
...@@ -564,7 +564,7 @@ class ZEOStorage: ...@@ -564,7 +564,7 @@ class ZEOStorage:
# finishes as a transaction and finds another instance is in the # finishes as a transaction and finds another instance is in the
# _waiting list. # _waiting list.
# XXX It might be better to have a mechanism to explicitly send # It might be better to have a mechanism to explicitly send
# the finishing transaction's reply before restarting the waiting # the finishing transaction's reply before restarting the waiting
# transaction. If the restart takes a long time, the previous # transaction. If the restart takes a long time, the previous
# client will be blocked until it finishes. # client will be blocked until it finishes.
......
...@@ -103,7 +103,6 @@ class StatsClient(asyncore.dispatcher): ...@@ -103,7 +103,6 @@ class StatsClient(asyncore.dispatcher):
return len(self.buf) return len(self.buf)
def readable(self): def readable(self):
# XXX what goes here?
return 0 return 0
def handle_write(self): def handle_write(self):
......
...@@ -52,7 +52,7 @@ def log(msg, level=logging.INFO, exc_info=False): ...@@ -52,7 +52,7 @@ def log(msg, level=logging.INFO, exc_info=False):
def parse_address(arg): def parse_address(arg):
# XXX Not part of the official ZConfig API # Caution: Not part of the official ZConfig API.
obj = ZConfig.datatypes.SocketAddress(arg) obj = ZConfig.datatypes.SocketAddress(arg)
return obj.family, obj.address return obj.family, obj.address
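A hedged usage sketch of parse_address(); the exact return values below are assumptions based on how ZConfig's SocketAddress datatype handles host:port strings versus filesystem paths.

    import socket

    family, address = parse_address("localhost:8100")
    # expected: family == socket.AF_INET, address == ("localhost", 8100)

    family, address = parse_address("/var/run/zeo.sock")
    # expected: family == socket.AF_UNIX, address == "/var/run/zeo.sock"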
...@@ -203,7 +203,7 @@ class ZEOServer: ...@@ -203,7 +203,7 @@ class ZEOServer:
transaction_timeout=self.options.transaction_timeout, transaction_timeout=self.options.transaction_timeout,
monitor_address=self.options.monitor_address, monitor_address=self.options.monitor_address,
auth_protocol=self.options.auth_protocol, auth_protocol=self.options.auth_protocol,
auth_database=self.options.auth_database, # XXX option spelling auth_database=self.options.auth_database,
auth_realm=self.options.auth_realm) auth_realm=self.options.auth_realm)
def loop_forever(self): def loop_forever(self):
...@@ -223,9 +223,9 @@ class ZEOServer: ...@@ -223,9 +223,9 @@ class ZEOServer:
sys.exit(1) sys.exit(1)
def handle_sigusr2(self): def handle_sigusr2(self):
# XXX this used to reinitialize zLOG. How do I achieve # TODO: this used to reinitialize zLOG. How do I achieve
# the same effect with Python's logging package? # the same effect with Python's logging package?
# Should we restart as with SIGHUP? # Should we restart as with SIGHUP?
log("received SIGUSR2, but it was not handled!", level=logging.WARNING) log("received SIGUSR2, but it was not handled!", level=logging.WARNING)
def close_storages(self): def close_storages(self):
......
...@@ -524,8 +524,8 @@ class InvalidationTests: ...@@ -524,8 +524,8 @@ class InvalidationTests:
db1.close() db1.close()
db2.close() db2.close()
# XXX Temporarily disabled. I know it fails, and there's no point # TODO: Temporarily disabled. I know it fails, and there's no point
# XXX getting an endless number of reports about that. # getting an endless number of reports about that.
def xxxcheckConcurrentUpdatesInVersions(self): def xxxcheckConcurrentUpdatesInVersions(self):
self._storage = storage1 = self.openClientStorage() self._storage = storage1 = self.openClientStorage()
db1 = DB(storage1) db1 = DB(storage1)
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
# #
############################################################################## ##############################################################################
"""A multi-client test of the ZEO storage server""" """A multi-client test of the ZEO storage server"""
# XXX This code is currently broken. # TODO: This code is currently broken.
import ZODB, ZODB.DB, ZODB.FileStorage, ZODB.POSException import ZODB, ZODB.DB, ZODB.FileStorage, ZODB.POSException
import persistent import persistent
...@@ -148,7 +148,7 @@ def main(client_func=None): ...@@ -148,7 +148,7 @@ def main(client_func=None):
server.close() server.close()
os.waitpid(server_pid, 0) os.waitpid(server_pid, 0)
# XXX Should check that the results are consistent! # TODO: Should check that the results are consistent!
print "Total time:", t2 - t0 print "Total time:", t2 - t0
print "Server start time", t1 - t0 print "Server start time", t1 - t0
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
The stress test should run in an infinite loop and should involve The stress test should run in an infinite loop and should involve
multiple connections. multiple connections.
""" """
# XXX This code is currently broken. # TODO: This code is currently broken.
import transaction import transaction
import ZODB import ZODB
......
...@@ -74,8 +74,8 @@ class MonitorTests(CommonSetupTearDown): ...@@ -74,8 +74,8 @@ class MonitorTests(CommonSetupTearDown):
return """<mappingstorage 1/>""" return """<mappingstorage 1/>"""
def testMonitor(self): def testMonitor(self):
# just open a client to know that the server is up and running # Just open a client to know that the server is up and running
# XXX should put this in setUp # TODO: should put this in setUp.
self.storage = self.openClientStorage() self.storage = self.openClientStorage()
s = self.get_monitor_output() s = self.get_monitor_output()
self.storage.close() self.storage.close()
......
...@@ -87,7 +87,7 @@ def get_port(): ...@@ -87,7 +87,7 @@ def get_port():
try: try:
s.connect(('localhost', port)) s.connect(('localhost', port))
except socket.error: except socket.error:
# XXX check value of error? # Perhaps we should also check the value of the error.
return port return port
finally: finally:
s.close() s.close()
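A sketch of the stricter check that comment suggests (hypothetical helper, not part of this module): treat only a "connection refused" error as evidence that the port is free, and re-raise anything unexpected.

    import errno
    import socket

    def port_looks_free(port):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            try:
                s.connect(('localhost', port))
            except socket.error, e:
                # Only ECONNREFUSED clearly means nothing is listening.
                return e.args[0] == errno.ECONNREFUSED
            return False             # something accepted the connection
        finally:
            s.close()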
...@@ -112,7 +112,7 @@ class GenericTests( ...@@ -112,7 +112,7 @@ class GenericTests(
"""Combine tests from various origins in one class.""" """Combine tests from various origins in one class."""
def setUp(self): def setUp(self):
logger.info("setUp() %s", self.id()) # XXX is this really needed? logger.info("setUp() %s", self.id())
port = get_port() port = get_port()
zconf = forker.ZEOConfig(('', port)) zconf = forker.ZEOConfig(('', port))
zport, adminaddr, pid, path = forker.start_zeo_server(self.getConfig(), zport, adminaddr, pid, path = forker.start_zeo_server(self.getConfig(),
...@@ -136,7 +136,7 @@ class GenericTests( ...@@ -136,7 +136,7 @@ class GenericTests(
os.waitpid(pid, 0) os.waitpid(pid, 0)
def open(self, read_only=0): def open(self, read_only=0):
# XXX Needed to support ReadOnlyStorage tests. Ought to be a # Needed to support ReadOnlyStorage tests. Ought to be a
# cleaner way. # cleaner way.
addr = self._storage._addr addr = self._storage._addr
self._storage.close() self._storage.close()
...@@ -201,10 +201,6 @@ class MappingStorageTests(GenericTests): ...@@ -201,10 +201,6 @@ class MappingStorageTests(GenericTests):
def getConfig(self): def getConfig(self):
return """<mappingstorage 1/>""" return """<mappingstorage 1/>"""
# XXX There are still a bunch of tests that fail. Are there
# still test classes in GenericTests that shouldn't be there?
# XXX Is the above comment still relevant?
test_classes = [FileStorageTests, MappingStorageTests] test_classes = [FileStorageTests, MappingStorageTests]
def test_suite(): def test_suite():
......
...@@ -123,7 +123,7 @@ class CacheTests(unittest.TestCase): ...@@ -123,7 +123,7 @@ class CacheTests(unittest.TestCase):
self.cache.store(n, "", n, None, data[51]) self.cache.store(n, "", n, None, data[51])
self.assert_(len(self.cache) < 51) self.assert_(len(self.cache) < 51)
# XXX Need to make sure eviction of non-current data # TODO: Need to make sure eviction of non-current data
# and of version data are handled correctly. # and of version data are handled correctly.
def testSerialization(self): def testSerialization(self):
......
...@@ -72,7 +72,7 @@ def find_global(module, name): ...@@ -72,7 +72,7 @@ def find_global(module, name):
if safe: if safe:
return r return r
# XXX what's a better way to do this? esp w/ 2.1 & 2.2 # TODO: is there a better way to do this?
if type(r) == types.ClassType and issubclass(r, Exception): if type(r) == types.ClassType and issubclass(r, Exception):
return r return r
......
...@@ -133,7 +133,7 @@ class SizedMessageAsyncConnection(asyncore.dispatcher): ...@@ -133,7 +133,7 @@ class SizedMessageAsyncConnection(asyncore.dispatcher):
def get_addr(self): def get_addr(self):
return self.addr return self.addr
# XXX avoid expensive getattr calls? Can't remember exactly what # TODO: avoid expensive getattr calls? Can't remember exactly what
# this comment was supposed to mean, but it has something to do # this comment was supposed to mean, but it has something to do
# with the way asyncore uses getattr and uses if sock: # with the way asyncore uses getattr and uses if sock:
def __nonzero__(self): def __nonzero__(self):
...@@ -193,7 +193,7 @@ class SizedMessageAsyncConnection(asyncore.dispatcher): ...@@ -193,7 +193,7 @@ class SizedMessageAsyncConnection(asyncore.dispatcher):
else: else:
msg_size = 4 msg_size = 4
state = 0 state = 0
# XXX We call message_input() with __input_lock # Obscure: We call message_input() with __input_lock
# held!!! And message_input() may end up calling # held!!! And message_input() may end up calling
# message_output(), which has its own lock. But # message_output(), which has its own lock. But
# message_output() cannot call message_input(), so # message_output() cannot call message_input(), so
......
...@@ -292,7 +292,7 @@ class InvalidObjectReference(POSError): ...@@ -292,7 +292,7 @@ class InvalidObjectReference(POSError):
o A reference to an object in a different database connection. o A reference to an object in a different database connection.
XXX The exception ought to have a member that is the invalid object. TODO: The exception ought to have a member that is the invalid object.
""" """
class ConnectionStateError(POSError): class ConnectionStateError(POSError):
......
...@@ -21,7 +21,7 @@ import zope.interface ...@@ -21,7 +21,7 @@ import zope.interface
class IConnection(zope.interface.Interface): class IConnection(zope.interface.Interface):
"""ZODB connection. """ZODB connection.
XXX: This interface is incomplete. TODO: This interface is incomplete.
""" """
def add(ob): def add(ob):
......
...@@ -454,7 +454,7 @@ class ConnectionObjectReader(BaseObjectReader): ...@@ -454,7 +454,7 @@ class ConnectionObjectReader(BaseObjectReader):
# to create the instance w/o hitting the db, so go for it! # to create the instance w/o hitting the db, so go for it!
oid, klass = oid oid, klass = oid
obj = self._cache.get(oid, None) # XXX it's not a dict obj = self._cache.get(oid, None)
if obj is not None: if obj is not None:
return obj return obj
...@@ -474,7 +474,7 @@ class ConnectionObjectReader(BaseObjectReader): ...@@ -474,7 +474,7 @@ class ConnectionObjectReader(BaseObjectReader):
# current data in the object's actual record! # current data in the object's actual record!
return self._conn.get(oid) return self._conn.get(oid)
# XXX should be done by connection # TODO: should be done by connection
obj._p_oid = oid obj._p_oid = oid
obj._p_jar = self._conn obj._p_jar = self._conn
# When an object is created, it is put in the UPTODATE # When an object is created, it is put in the UPTODATE
......
...@@ -127,12 +127,14 @@ class IteratorStorage(IteratorCompare): ...@@ -127,12 +127,14 @@ class IteratorStorage(IteratorCompare):
self.assertEqual(count, 1) self.assertEqual(count, 1)
def checkIterationIntraTransaction(self): def checkIterationIntraTransaction(self):
# XXX try this test with logging enabled. If you see something like # TODO: Try this test with logging enabled. If you see something
# like
# #
# ZODB FS FS21 warn: FileStorageTests.fs truncated, possibly due to # ZODB FS FS21 warn: FileStorageTests.fs truncated, possibly due to
# damaged records at 4 # damaged records at 4
# #
# Then the code in FileIterator.next() hasn't yet been fixed. # Then the code in FileIterator.next() hasn't yet been fixed.
# Should automate that check.
oid = self._storage.new_oid() oid = self._storage.new_oid()
t = Transaction() t = Transaction()
data = zodb_pickle(MinPO(0)) data = zodb_pickle(MinPO(0))
......
...@@ -188,7 +188,7 @@ class ExtStorageClientThread(StorageClientThread): ...@@ -188,7 +188,7 @@ class ExtStorageClientThread(StorageClientThread):
try: try:
iter = self.storage.iterator() iter = self.storage.iterator()
except AttributeError: except AttributeError:
# XXX It's hard to detect that a ZEO ClientStorage # It's hard to detect that a ZEO ClientStorage
# doesn't have this method, but does have all the others. # doesn't have this method, but does have all the others.
return return
for obj in iter: for obj in iter:
......
...@@ -32,7 +32,7 @@ class ReadOnlyStorage: ...@@ -32,7 +32,7 @@ class ReadOnlyStorage:
def checkReadMethods(self): def checkReadMethods(self):
self._create_data() self._create_data()
self._make_readonly() self._make_readonly()
# XXX not going to bother checking all read methods # Note that this doesn't check _all_ read methods.
for oid in self.oids.keys(): for oid in self.oids.keys():
data, revid = self._storage.load(oid, '') data, revid = self._storage.load(oid, '')
self.assertEqual(revid, self.oids[oid]) self.assertEqual(revid, self.oids[oid])
......
...@@ -107,7 +107,7 @@ class RevisionStorage: ...@@ -107,7 +107,7 @@ class RevisionStorage:
self.assertEqual(end, revs[12]) self.assertEqual(end, revs[12])
# XXX Is it okay to assume everyone testing against RevisionStorage # Unsure: Is it okay to assume everyone testing against RevisionStorage
# implements undo? # implements undo?
def checkLoadBeforeUndo(self): def checkLoadBeforeUndo(self):
...@@ -172,4 +172,4 @@ class RevisionStorage: ...@@ -172,4 +172,4 @@ class RevisionStorage:
results = self._storage.loadBefore(oid2, revid2) results = self._storage.loadBefore(oid2, revid2)
eq(results, None) eq(results, None)
# XXX There are other edge cases to handle, including pack. # TODO: There are other edge cases to handle, including pack.
...@@ -80,7 +80,7 @@ def zodb_unpickle(data): ...@@ -80,7 +80,7 @@ def zodb_unpickle(data):
klass_info = u.load() klass_info = u.load()
if isinstance(klass_info, types.TupleType): if isinstance(klass_info, types.TupleType):
if isinstance(klass_info[0], type): if isinstance(klass_info[0], type):
# XXX what is the second part of klass_info? # Unclear: what is the second part of klass_info?
klass, xxx = klass_info klass, xxx = klass_info
assert not xxx assert not xxx
else: else:
...@@ -144,7 +144,7 @@ def import_helper(name): ...@@ -144,7 +144,7 @@ def import_helper(name):
class StorageTestBase(unittest.TestCase): class StorageTestBase(unittest.TestCase):
# XXX It would be simpler if concrete tests didn't need to extend # It would be simpler if concrete tests didn't need to extend
# setUp() and tearDown(). # setUp() and tearDown().
def setUp(self): def setUp(self):
...@@ -210,7 +210,7 @@ class StorageTestBase(unittest.TestCase): ...@@ -210,7 +210,7 @@ class StorageTestBase(unittest.TestCase):
def _undo(self, tid, expected_oids=None, note=None): def _undo(self, tid, expected_oids=None, note=None):
# Undo a tid that affects a single object (oid). # Undo a tid that affects a single object (oid).
# XXX This is very specialized # This is very specialized.
t = transaction.Transaction() t = transaction.Transaction()
t.note(note or "undo") t.note(note or "undo")
self._storage.tpc_begin(t) self._storage.tpc_begin(t)
......
...@@ -142,4 +142,4 @@ class SynchronizedStorage: ...@@ -142,4 +142,4 @@ class SynchronizedStorage:
self._storage.tpc_begin(t) self._storage.tpc_begin(t)
self._storage.tpc_abort(t) self._storage.tpc_abort(t)
# XXX how to check undo? # TODO: how to check undo?
...@@ -107,8 +107,8 @@ class DBMethods(CacheTestBase): ...@@ -107,8 +107,8 @@ class DBMethods(CacheTestBase):
for k, v in dict.items(): for k, v in dict.items():
self.assert_(k in expected) self.assert_(k in expected)
# XXX not really sure how to do a black box test of the cache. # TODO: not really sure how to do a black box test of the cache.
# should the full sweep and minimize calls always remove things? # Should the full sweep and minimize calls always remove things?
def checkFullSweep(self): def checkFullSweep(self):
old_size = self.db.cacheSize() old_size = self.db.cacheSize()
...@@ -175,10 +175,9 @@ class DBMethods(CacheTestBase): ...@@ -175,10 +175,9 @@ class DBMethods(CacheTestBase):
self.fail("cacheMinimize still running after 30 seconds -- " self.fail("cacheMinimize still running after 30 seconds -- "
"almost certainly in an infinite loop") "almost certainly in an infinite loop")
# XXX don't have an explicit test for incrgc, because the # TODO: don't have an explicit test for incrgc, because the
# connection and database call it internally # connection and database call it internally.
# Same for the get and invalidate methods.
# XXX same for the get and invalidate methods
def checkLRUitems(self): def checkLRUitems(self):
# get a cache # get a cache
...@@ -264,7 +263,7 @@ class LRUCacheTests(CacheTestBase): ...@@ -264,7 +263,7 @@ class LRUCacheTests(CacheTestBase):
gc.collect() gc.collect()
# XXX The above gc.collect call is necessary to make this test # Obscure: The above gc.collect call is necessary to make this test
# pass. # pass.
# #
# This test then only works because the order of computations # This test then only works because the order of computations
......
...@@ -142,10 +142,10 @@ class ConnectionDotAdd(unittest.TestCase): ...@@ -142,10 +142,10 @@ class ConnectionDotAdd(unittest.TestCase):
class UserMethodTests(unittest.TestCase): class UserMethodTests(unittest.TestCase):
# XXX add isn't tested here, because there are a bunch of traditional # add isn't tested here, because there are a bunch of traditional
# unit tests for it. # unit tests for it.
# XXX the version tests would require a storage that supports versions # The version tests would require a storage that supports versions
# which is a bit more work. # which is a bit more work.
def test_root(self): def test_root(self):
......
...@@ -60,7 +60,7 @@ class DBTests(unittest.TestCase): ...@@ -60,7 +60,7 @@ class DBTests(unittest.TestCase):
self.db.setCacheDeactivateAfter, 12) self.db.setCacheDeactivateAfter, 12)
self.assertRaises(DeprecationWarning, self.assertRaises(DeprecationWarning,
self.db.setVersionCacheDeactivateAfter, 12) self.db.setVersionCacheDeactivateAfter, 12)
# XXX There is no API call for removing the warning we just # Obscure: There is no API call for removing the warning we just
# added, but filters appears to be a public variable. # added, but filters appears to be a public variable.
del warnings.filters[0] del warnings.filters[0]
self.db.setCacheSize(15) self.db.setCacheSize(15)
......
...@@ -36,11 +36,11 @@ class DemoStorageTests(StorageTestBase.StorageTestBase, ...@@ -36,11 +36,11 @@ class DemoStorageTests(StorageTestBase.StorageTestBase,
pass pass
def checkAbortVersionNonCurrent(self): def checkAbortVersionNonCurrent(self):
# XXX Need to implement a real loadBefore for DemoStorage? # TODO: Need to implement a real loadBefore for DemoStorage?
pass pass
def checkLoadBeforeVersion(self): def checkLoadBeforeVersion(self):
# XXX Need to implement a real loadBefore for DemoStorage? # TODO: Need to implement a real loadBefore for DemoStorage?
pass pass
# the next three pack tests depend on undo # the next three pack tests depend on undo
......
...@@ -104,7 +104,7 @@ class TimeStampTests(unittest.TestCase): ...@@ -104,7 +104,7 @@ class TimeStampTests(unittest.TestCase):
ts2 = ts.laterThan(ts) ts2 = ts.laterThan(ts)
self.assert_(ts2 > ts) self.assert_(ts2 > ts)
# XXX should test for bogus inputs to TimeStamp constructor # TODO: should test for bogus inputs to TimeStamp constructor
def checkTimeStamp(self): def checkTimeStamp(self):
# Alternate test suite # Alternate test suite
......
...@@ -11,8 +11,7 @@ ...@@ -11,8 +11,7 @@
# FOR A PARTICULAR PURPOSE. # FOR A PARTICULAR PURPOSE.
# #
############################################################################## ##############################################################################
"""XXX short summary goes here. """
$Id$ $Id$
""" """
import unittest import unittest
......
...@@ -89,7 +89,7 @@ monotonically increasing, so the first one seen during the current ...@@ -89,7 +89,7 @@ monotonically increasing, so the first one seen during the current
transaction remains the high-water mark for the duration of the transaction remains the high-water mark for the duration of the
transaction. transaction.
XXX We'd like simple abort and commit calls to make txn boundaries, We'd like simple abort and commit calls to make txn boundaries,
but that doesn't work unless an object is modified. sync() will abort but that doesn't work unless an object is modified. sync() will abort
a transaction and process invalidations. a transaction and process invalidations.
......
...@@ -245,7 +245,7 @@ class IPersistentNoReadConflicts(IPersistent): ...@@ -245,7 +245,7 @@ class IPersistentNoReadConflicts(IPersistent):
conflicts for this object. conflicts for this object.
""" """
# XXX TODO: document conflict resolution. # TODO: document conflict resolution.
class IPersistentDataManager(Interface): class IPersistentDataManager(Interface):
"""Provide services for managing persistent state. """Provide services for managing persistent state.
......
...@@ -47,7 +47,7 @@ class Test(unittest.TestCase): ...@@ -47,7 +47,7 @@ class Test(unittest.TestCase):
self.assertEqual(p._p_changed, 1) self.assertEqual(p._p_changed, 1)
self.assertEqual(dm.called, 1) self.assertEqual(dm.called, 1)
del p._p_changed del p._p_changed
# XXX deal with current cPersistence implementation # deal with current cPersistence implementation
if p._p_changed != 3: if p._p_changed != 3:
self.assertEqual(p._p_changed, None) self.assertEqual(p._p_changed, None)
self.assertEqual(dm.called, 1) self.assertEqual(dm.called, 1)
...@@ -164,7 +164,7 @@ class Test(unittest.TestCase): ...@@ -164,7 +164,7 @@ class Test(unittest.TestCase):
p._p_jar = dm p._p_jar = dm
p._p_changed = 0 p._p_changed = 0
p._p_deactivate() p._p_deactivate()
# XXX does this really test the activate method? # Unsure: does this really test the activate method?
p._p_activate() p._p_activate()
self.assertEqual(p._p_changed, 0) self.assertEqual(p._p_changed, 0)
self.assertEqual(p.x, 42) self.assertEqual(p.x, 42)
...@@ -266,10 +266,10 @@ class Test(unittest.TestCase): ...@@ -266,10 +266,10 @@ class Test(unittest.TestCase):
self.assert_(P.__dictoffset__ < P.__weakrefoffset__) self.assert_(P.__dictoffset__ < P.__weakrefoffset__)
self.assert_(P.__basicsize__ > Persistent.__basicsize__) self.assert_(P.__basicsize__ > Persistent.__basicsize__)
# XXX Can anyone defend/explain the test below? The tests classes defined here # Unsure: Can anyone defend/explain the test below? The test classes defined
# don't define __call__, so this weird test will always pass, but to what # here don't define __call__, so this weird test will always pass, but to what
# end? What the heck is the point. If a klass is given that happens # end? If a klass is given that happens to define __call__, the test *may*
# to define __call__, the test *may* mysteriously fail. Who cares? # mysteriously fail. Who cares?
## def testDeactivateErrors(self): ## def testDeactivateErrors(self):
## p = self.klass() ## p = self.klass()
......
...@@ -251,7 +251,7 @@ class PersistenceTest(unittest.TestCase): ...@@ -251,7 +251,7 @@ class PersistenceTest(unittest.TestCase):
self.assertEqual(obj.curly, 2) self.assertEqual(obj.curly, 2)
self.assertEqual(obj.moe, 3) self.assertEqual(obj.moe, 3)
# XXX Need to decide how __setattr__ and __delattr__ should work, # TODO: Need to decide how __setattr__ and __delattr__ should work,
# then write tests. # then write tests.
......
"""Verify that fstest.py can find errors. """Verify that fstest.py can find errors.
XXX To run this test script fstest.py must be on your PYTHONPATH. Note: To run this test script, fstest.py must be on your PYTHONPATH.
""" """
from cStringIO import StringIO from cStringIO import StringIO
...@@ -15,7 +15,6 @@ from fstest import FormatError, U64 ...@@ -15,7 +15,6 @@ from fstest import FormatError, U64
class TestCorruptedFS(unittest.TestCase): class TestCorruptedFS(unittest.TestCase):
# XXX path?
f = open('test-checker.fs', 'rb') f = open('test-checker.fs', 'rb')
datafs = f.read() datafs = f.read()
f.close() f.close()
......
...@@ -13,10 +13,10 @@ import threading ...@@ -13,10 +13,10 @@ import threading
import time import time
import unittest import unittest
# XXX The forker interface isn't clearly defined. It's different on # TODO: The forker interface isn't clearly defined. It's different on
# different branches of ZEO. This will break someday. # different branches of ZEO. This will break someday.
# XXX Only handle the Unix variant of the forker. Just to give Tim # TODO: Only handle the Unix variant of the forker. Just to give Tim
# something to do. # something to do.
class PackerTests(StorageTestBase): class PackerTests(StorageTestBase):
......
...@@ -20,10 +20,10 @@ def find_paths(root, maxdist): ...@@ -20,10 +20,10 @@ def find_paths(root, maxdist):
from the root, looking for persistent objects. Return a dict from the root, looking for persistent objects. Return a dict
mapping oids to traversal paths. mapping oids to traversal paths.
XXX Assumes that the keys of the root are not themselves TODO: Assumes that the keys of the root are not themselves
persistent objects. persistent objects.
XXX Doesn't traverse containers. TODO: Doesn't traverse containers.
""" """
paths = {} paths = {}
......
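For orientation, a hedged sketch of the kind of traversal that docstring describes (assumed structure; the real find_paths() in this file may differ): a breadth-first walk over instance attributes that records the first path found to each persistent object's oid and, as noted, does not look inside containers.

    def find_paths_sketch(root, maxdist):
        paths = {}
        queue = [(root, "root", 0)]
        while queue:
            obj, path, dist = queue.pop(0)
            oid = getattr(obj, "_p_oid", None)
            if oid is not None and oid not in paths:
                paths[oid] = path
            if dist < maxdist:
                # Follow attribute values only; container contents are skipped.
                for name, child in getattr(obj, "__dict__", {}).items():
                    queue.append((child, "%s.%s" % (path, name), dist + 1))
        return paths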
...@@ -473,9 +473,6 @@ def do_backup(options): ...@@ -473,9 +473,6 @@ def do_backup(options):
# then perhaps the file was packed at some point (or a # then perhaps the file was packed at some point (or a
# non-transactional undo was performed, but this is deprecated). Only # non-transactional undo was performed, but this is deprecated). Only
# do a full backup if forced to. # do a full backup if forced to.
#
# XXX For ZODB4, this needs to take into account the storage metadata
# header that FileStorage has grown at the front of the file.
if reposum == srcsum_backedup: if reposum == srcsum_backedup:
log('doing incremental, starting at: %s', reposz) log('doing incremental, starting at: %s', reposz)
do_incremental_backup(options, reposz, repofiles) do_incremental_backup(options, reposz, repofiles)
......
...@@ -347,7 +347,7 @@ class ITransaction(zope.interface.Interface): ...@@ -347,7 +347,7 @@ class ITransaction(zope.interface.Interface):
"The name of the user on whose behalf the transaction is being\n" "The name of the user on whose behalf the transaction is being\n"
"performed. The format of the user name is defined by the\n" "performed. The format of the user name is defined by the\n"
"application.") "application.")
# XXX required to be a string? # Unsure: required to be a string?
description = zope.interface.Attribute( description = zope.interface.Attribute(
"description", "description",
...@@ -392,7 +392,7 @@ class ITransaction(zope.interface.Interface): ...@@ -392,7 +392,7 @@ class ITransaction(zope.interface.Interface):
end of the description following two newline characters. end of the description following two newline characters.
Surrounding whitespace is stripped from text. Surrounding whitespace is stripped from text.
""" """
# XXX does impl do the right thing with ''? Not clear what # Unsure: does impl do the right thing with ''? Not clear what
# the "right thing" is. # the "right thing" is.
def setUser(user_name, path="/"): def setUser(user_name, path="/"):
...@@ -411,7 +411,7 @@ class ITransaction(zope.interface.Interface): ...@@ -411,7 +411,7 @@ class ITransaction(zope.interface.Interface):
Storage implementations may limit the amount of extension data Storage implementations may limit the amount of extension data
which can be stored. which can be stored.
""" """
# XXX is this this allowed to cause an exception here, during # Unsure: is this allowed to cause an exception here, during
# the two-phase commit, or can it toss data silently? # the two-phase commit, or can it toss data silently?
class ISavePoint(zope.interface.Interface): class ISavePoint(zope.interface.Interface):
......
...@@ -93,7 +93,7 @@ Test harness. ...@@ -93,7 +93,7 @@ Test harness.
This requires that Python was built --with-pydebug. This requires that Python was built --with-pydebug.
-T -T
Use the trace module from Python for code coverage. XXX This only works Use the trace module from Python for code coverage. This only works
if trace.py is explicitly added to PYTHONPATH. The current utility writes if trace.py is explicitly added to PYTHONPATH. The current utility writes
coverage files to a directory named `coverage' that is parallel to coverage files to a directory named `coverage' that is parallel to
`build'. It also prints a summary to stdout. `build'. It also prints a summary to stdout.
...@@ -208,8 +208,8 @@ class ImmediateTestResult(unittest._TextTestResult): ...@@ -208,8 +208,8 @@ class ImmediateTestResult(unittest._TextTestResult):
print "The following test left garbage:" print "The following test left garbage:"
print test print test
print gc.garbage print gc.garbage
# XXX Perhaps eat the garbage here, so that the garbage isn't # TODO: Perhaps eat the garbage here, so that the garbage isn't
# printed for every subsequent test. # printed for every subsequent test.
def print_times(self, stream, count=None): def print_times(self, stream, count=None):
results = self._testtimes.items() results = self._testtimes.items()
...@@ -244,7 +244,7 @@ class ImmediateTestResult(unittest._TextTestResult): ...@@ -244,7 +244,7 @@ class ImmediateTestResult(unittest._TextTestResult):
if self.showAll: if self.showAll:
self.stream.write(": ") self.stream.write(": ")
elif self._progressWithNames: elif self._progressWithNames:
# XXX will break with multibyte strings # TODO: will break with multibyte strings.
name = self.getShortDescription(test) name = self.getShortDescription(test)
width = len(name) width = len(name)
if width < self._lastWidth: if width < self._lastWidth:
...@@ -641,7 +641,7 @@ def configure_logging(): ...@@ -641,7 +641,7 @@ def configure_logging():
import logging.config import logging.config
# Get the log.ini file from the current directory instead of possibly # Get the log.ini file from the current directory instead of possibly
# buried in the build directory. XXX This isn't perfect because if # buried in the build directory. This isn't perfect because if
# log.ini specifies a log file, it'll be relative to the build directory. # log.ini specifies a log file, it'll be relative to the build directory.
# Hmm... # Hmm...
logini = os.path.abspath("log.ini") logini = os.path.abspath("log.ini")
......