Commit a4537cab authored by Gintautas Miliauskas's avatar Gintautas Miliauskas

This should finally rid ZODB of obsolete zLOG imports. I hope nothing's
processing the messages, because the format might have changed a little bit.
parent 149f15c5
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
############################################################################## ##############################################################################
"""Storage implementation using a log written to a single file. """Storage implementation using a log written to a single file.
$Revision: 1.12 $ $Revision: 1.13 $
""" """
import base64 import base64
...@@ -23,6 +23,7 @@ import os ...@@ -23,6 +23,7 @@ import os
import struct import struct
import sys import sys
import time import time
import logging
from types import StringType, DictType from types import StringType, DictType
from struct import pack, unpack from struct import pack, unpack
...@@ -46,34 +47,19 @@ except ImportError: ...@@ -46,34 +47,19 @@ except ImportError:
def fsIndex(): def fsIndex():
return {} return {}
from zLOG import LOG, BLATHER, INFO, WARNING, ERROR, PANIC
t32 = 1L << 32 t32 = 1L << 32
packed_version = "FS21" packed_version = "FS21"
def blather(message, *data): # XXX this isn't really needed, right?
LOG('ZODB FS', BLATHER, "%s blather: %s\n" % (packed_version, CUSTOM_BLATHER = 15
message % data)) logging.addLevelName("BLATHER", CUSTOM_BLATHER)
def info(message, *data):
LOG('ZODB FS', INFO, "%s info: %s\n" % (packed_version,
message % data))
def warn(message, *data):
LOG('ZODB FS', WARNING, "%s warn: %s\n" % (packed_version,
message % data))
def error(message, *data, **kwargs): logger = logging.getLogger('zodb.FileStorage')
LOG('ZODB FS', ERROR, "%s ERROR: %s\n" % (packed_version,
message % data), **kwargs)
def nearPanic(message, *data):
LOG('ZODB FS', PANIC, "%s ERROR: %s\n" % (packed_version,
message % data))
def panic(message, *data): def panic(message, *data):
message = message % data logger.critical(message, *data)
LOG('ZODB FS', PANIC, "%s ERROR: %s\n" % (packed_version, message))
raise CorruptedTransactionError(message) raise CorruptedTransactionError(message)
class FileStorageError(POSException.StorageError): class FileStorageError(POSException.StorageError):
...@@ -208,7 +194,7 @@ class FileStorage(BaseStorage.BaseStorage, ...@@ -208,7 +194,7 @@ class FileStorage(BaseStorage.BaseStorage,
t = time.time() t = time.time()
t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,)) t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
if tid > t: if tid > t:
warn("%s Database records in the future", file_name); logger.warning("%s Database records in the future", file_name);
if tid.timeTime() - t.timeTime() > 86400*30: if tid.timeTime() - t.timeTime() > 86400*30:
# a month in the future? This is bogus, use current time # a month in the future? This is bogus, use current time
self._ts = t self._ts = t
...@@ -300,7 +286,7 @@ class FileStorage(BaseStorage.BaseStorage, ...@@ -300,7 +286,7 @@ class FileStorage(BaseStorage.BaseStorage,
""" """
r = self._check_sanity(index, pos) r = self._check_sanity(index, pos)
if not r: if not r:
warn("Ignoring index for %s", self._file_name) logger.warning("Ignoring index for %s", self._file_name)
return r return r
def _check_sanity(self, index, pos): def _check_sanity(self, index, pos):
...@@ -368,8 +354,7 @@ class FileStorage(BaseStorage.BaseStorage, ...@@ -368,8 +354,7 @@ class FileStorage(BaseStorage.BaseStorage,
info=p.load() info=p.load()
except: except:
exc, err = sys.exc_info()[:2] exc, err = sys.exc_info()[:2]
warn("Failed to load database index: %s: %s" % logger.warning("Failed to load database index: %s: %s", exc, err)
(exc, err))
return None return None
index = info.get('index') index = info.get('index')
pos = info.get('pos') pos = info.get('pos')
...@@ -412,8 +397,7 @@ class FileStorage(BaseStorage.BaseStorage, ...@@ -412,8 +397,7 @@ class FileStorage(BaseStorage.BaseStorage,
self._save_index() self._save_index()
except: except:
# Log the error and continue # Log the error and continue
LOG("ZODB FS", ERROR, "Error saving index on close()", logger.error("Error saving index on close()", exc_info=True)
error=sys.exc_info())
# Return tid of most recent record for oid if that's in the # Return tid of most recent record for oid if that's in the
# _oid2tid cache. Else return None. It's important to use this # _oid2tid cache. Else return None. It's important to use this
...@@ -431,7 +415,8 @@ class FileStorage(BaseStorage.BaseStorage, ...@@ -431,7 +415,8 @@ class FileStorage(BaseStorage.BaseStorage,
# In older Pythons, we may overflow if we keep it an int. # In older Pythons, we may overflow if we keep it an int.
self._oid2tid_nlookups = long(self._oid2tid_nlookups) self._oid2tid_nlookups = long(self._oid2tid_nlookups)
self._oid2tid_nhits = long(self._oid2tid_nhits) self._oid2tid_nhits = long(self._oid2tid_nhits)
blather("_oid2tid size %s lookups %s hits %s rate %.1f%%", logger.log(BLATHER,
"_oid2tid size %s lookups %s hits %s rate %.1f%%",
len(self._oid2tid), len(self._oid2tid),
self._oid2tid_nlookups, self._oid2tid_nlookups,
self._oid2tid_nhits, self._oid2tid_nhits,
...@@ -723,7 +708,8 @@ class FileStorage(BaseStorage.BaseStorage, ...@@ -723,7 +708,8 @@ class FileStorage(BaseStorage.BaseStorage,
if h.plen != len(data): if h.plen != len(data):
# The expected data doesn't match what's in the # The expected data doesn't match what's in the
# backpointer. Something is wrong. # backpointer. Something is wrong.
error("Mismatch between data and backpointer at %d", pos) logger.error("Mismatch between data and"
" backpointer at %d", pos)
return 0 return 0
_data = self._file.read(h.plen) _data = self._file.read(h.plen)
if data != _data: if data != _data:
...@@ -1340,7 +1326,7 @@ class FileStorage(BaseStorage.BaseStorage, ...@@ -1340,7 +1326,7 @@ class FileStorage(BaseStorage.BaseStorage,
try: try:
opos = p.pack() opos = p.pack()
except RedundantPackWarning, detail: except RedundantPackWarning, detail:
info(str(detail)) logger.info(str(detail))
if opos is None: if opos is None:
return return
oldpath = self._file_name + ".old" oldpath = self._file_name + ".old"
...@@ -1612,7 +1598,7 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8, ...@@ -1612,7 +1598,7 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
if not h: break if not h: break
if len(h) != TRANS_HDR_LEN: if len(h) != TRANS_HDR_LEN:
if not read_only: if not read_only:
warn('%s truncated at %s', name, pos) logger.warning('%s truncated at %s', name, pos)
seek(pos) seek(pos)
file.truncate() file.truncate()
break break
...@@ -1621,7 +1607,7 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8, ...@@ -1621,7 +1607,7 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
if el < 0: el=t32-el if el < 0: el=t32-el
if tid <= ltid: if tid <= ltid:
warn("%s time-stamp reduction at %s", name, pos) logger.warning("%s time-stamp reduction at %s", name, pos)
ltid = tid ltid = tid
if pos+(tl+8) > file_size or status=='c': if pos+(tl+8) > file_size or status=='c':
...@@ -1629,13 +1615,14 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8, ...@@ -1629,13 +1615,14 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
# cleared. They may also be corrupted, # cleared. They may also be corrupted,
# in which case, we don't want to totally lose the data. # in which case, we don't want to totally lose the data.
if not read_only: if not read_only:
warn("%s truncated, possibly due to damaged records at %s", logger.warning("%s truncated, possibly due to damaged"
name, pos) " records at %s", name, pos)
_truncate(file, name, pos) _truncate(file, name, pos)
break break
if status not in ' up': if status not in ' up':
warn('%s has invalid status, %s, at %s', name, status, pos) logger.warning('%s has invalid status, %s, at %s',
name, status, pos)
if tl < (TRANS_HDR_LEN+ul+dl+el): if tl < (TRANS_HDR_LEN+ul+dl+el):
# We're in trouble. Find out if this is bad data in the # We're in trouble. Find out if this is bad data in the
...@@ -1648,12 +1635,13 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8, ...@@ -1648,12 +1635,13 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
# Now check to see if the redundant transaction length is # Now check to see if the redundant transaction length is
# reasonable: # reasonable:
if file_size - rtl < pos or rtl < TRANS_HDR_LEN: if file_size - rtl < pos or rtl < TRANS_HDR_LEN:
nearPanic('%s has invalid transaction header at %s', name, pos) logger.critical('%s has invalid transaction header at %s',
name, pos)
if not read_only: if not read_only:
warn("It appears that there is invalid data at the end of " logger.warning(
"It appears that there is invalid data at the end of "
"the file, possibly due to a system crash. %s " "the file, possibly due to a system crash. %s "
"truncated to recover from bad data at end." "truncated to recover from bad data at end." % name)
% name)
_truncate(file, name, pos) _truncate(file, name, pos)
break break
else: else:
...@@ -1696,9 +1684,11 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8, ...@@ -1696,9 +1684,11 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
if index_get(h.oid, 0) != h.prev: if index_get(h.oid, 0) != h.prev:
if prev: if prev:
if recover: return tpos, None, None if recover: return tpos, None, None
error("%s incorrect previous pointer at %s", name, pos) logger.error("%s incorrect previous pointer at %s",
name, pos)
else: else:
warn("%s incorrect previous pointer at %s", name, pos) logger.warning("%s incorrect previous pointer at %s",
name, pos)
pos=pos+dlen pos=pos+dlen
...@@ -1734,15 +1724,16 @@ def _truncate(file, name, pos): ...@@ -1734,15 +1724,16 @@ def _truncate(file, name, pos):
if os.path.exists(oname): if os.path.exists(oname):
i += 1 i += 1
else: else:
warn("Writing truncated data from %s to %s", name, oname) logger.warning("Writing truncated data from %s to %s",
name, oname)
o = open(oname,'wb') o = open(oname,'wb')
file.seek(pos) file.seek(pos)
cp(file, o, file_size-pos) cp(file, o, file_size-pos)
o.close() o.close()
break break
except: except:
error("couldn\'t write truncated data for %s", name, logger.error("couldn\'t write truncated data for %s", name,
error=sys.exc_info()) exc_info=True)
raise POSException.StorageSystemError, ( raise POSException.StorageSystemError, (
"Couldn't save truncated data") "Couldn't save truncated data")
...@@ -1851,7 +1842,8 @@ class FileIterator(Iterator, FileStorageFormatter): ...@@ -1851,7 +1842,8 @@ class FileIterator(Iterator, FileStorageFormatter):
raise raise
if h.tid <= self._ltid: if h.tid <= self._ltid:
warn("%s time-stamp reduction at %s", self._file.name, pos) logger.warning("%s time-stamp reduction at %s",
self._file.name, pos)
self._ltid = h.tid self._ltid = h.tid
if self._stop is not None and h.tid > self._stop: if self._stop is not None and h.tid > self._stop:
...@@ -1865,13 +1857,13 @@ class FileIterator(Iterator, FileStorageFormatter): ...@@ -1865,13 +1857,13 @@ class FileIterator(Iterator, FileStorageFormatter):
# Hm, the data were truncated or the checkpoint flag wasn't # Hm, the data were truncated or the checkpoint flag wasn't
# cleared. They may also be corrupted, # cleared. They may also be corrupted,
# in which case, we don't want to totally lose the data. # in which case, we don't want to totally lose the data.
warn("%s truncated, possibly due to damaged records at %s", logger.warning("%s truncated, possibly due to"
self._file.name, pos) " damaged records at %s", self._file.name, pos)
break break
if h.status not in " up": if h.status not in " up":
warn('%s has invalid status, %s, at %s', self._file.name, logger.warning('%s has invalid status,'
h.status, pos) ' %s, at %s', self._file.name, h.status, pos)
if h.tlen < h.headerlen(): if h.tlen < h.headerlen():
# We're in trouble. Find out if this is bad data in # We're in trouble. Find out if this is bad data in
...@@ -1884,16 +1876,17 @@ class FileIterator(Iterator, FileStorageFormatter): ...@@ -1884,16 +1876,17 @@ class FileIterator(Iterator, FileStorageFormatter):
# Now check to see if the redundant transaction length is # Now check to see if the redundant transaction length is
# reasonable: # reasonable:
if self._file_size - rtl < pos or rtl < TRANS_HDR_LEN: if self._file_size - rtl < pos or rtl < TRANS_HDR_LEN:
nearPanic("%s has invalid transaction header at %s", logger.critical("%s has invalid transaction header at %s",
self._file.name, pos) self._file.name, pos)
warn("It appears that there is invalid data at the end of " logger.warning(
"It appears that there is invalid data at the end of "
"the file, possibly due to a system crash. %s " "the file, possibly due to a system crash. %s "
"truncated to recover from bad data at end." "truncated to recover from bad data at end."
% self._file.name) % self._file.name)
break break
else: else:
warn("%s has invalid transaction header at %s", logger.warning("%s has invalid transaction header at %s",
self._file.name, pos) self._file.name, pos)
break break
tpos = pos tpos = pos
...@@ -1917,8 +1910,8 @@ class FileIterator(Iterator, FileStorageFormatter): ...@@ -1917,8 +1910,8 @@ class FileIterator(Iterator, FileStorageFormatter):
self._file.seek(tend) self._file.seek(tend)
rtl = u64(self._file.read(8)) rtl = u64(self._file.read(8))
if rtl != h.tlen: if rtl != h.tlen:
warn("%s redundant transaction length check failed at %s", logger.warning("%s redundant transaction length check"
self._file.name, tend) " failed at %s", self._file.name, tend)
break break
self._pos = tend + 8 self._pos = tend + 8
...@@ -1948,8 +1941,8 @@ class RecordIterator(Iterator, BaseStorage.TransactionRecord, ...@@ -1948,8 +1941,8 @@ class RecordIterator(Iterator, BaseStorage.TransactionRecord,
dlen = h.recordlen() dlen = h.recordlen()
if pos + dlen > self._tend or h.tloc != self._tpos: if pos + dlen > self._tend or h.tloc != self._tpos:
warn("%s data record exceeds transaction record at %s", logger.warning("%s data record exceeds transaction"
file.name, pos) " record at %s", file.name, pos)
break break
self._pos = pos + dlen self._pos = pos + dlen
......
...@@ -13,14 +13,20 @@ ...@@ -13,14 +13,20 @@
############################################################################## ##############################################################################
"""Mounted database support """Mounted database support
$Id: Mount.py,v 1.23 2004/03/04 22:41:50 jim Exp $""" $Id: Mount.py,v 1.24 2004/04/22 21:27:43 gintautasm Exp $"""
__version__='$Revision: 1.23 $'[11:-2] __version__='$Revision: 1.24 $'[11:-2]
import thread, persistent, Acquisition import string
import time
import sys
import thread
import logging
import persistent
import Acquisition
from Acquisition import aq_base from Acquisition import aq_base
import string, time, sys
from POSException import MountedStorageError from POSException import MountedStorageError
from zLOG import LOG, INFO, WARNING
logger = logging.getLogger('zodb.Mount')
# dbs is a holder for all DB objects, needed to overcome # dbs is a holder for all DB objects, needed to overcome
# threading issues. It maps connection params to a DB object # threading issues. It maps connection params to a DB object
...@@ -99,7 +105,7 @@ class MountPoint(persistent.Persistent, Acquisition.Implicit): ...@@ -99,7 +105,7 @@ class MountPoint(persistent.Persistent, Acquisition.Implicit):
params = self._params params = self._params
dbInfo = dbs.get(params, None) dbInfo = dbs.get(params, None)
if dbInfo is None: if dbInfo is None:
LOG('ZODB', INFO, 'Opening database for mounting: %s' % params) logger.info('Opening database for mounting: %s', params)
db = self._createDB() db = self._createDB()
newMount = 1 newMount = 1
dbs[params] = (db, {self.__mountpoint_id:1}) dbs[params] = (db, {self.__mountpoint_id:1})
...@@ -183,8 +189,8 @@ class MountPoint(persistent.Persistent, Acquisition.Implicit): ...@@ -183,8 +189,8 @@ class MountPoint(persistent.Persistent, Acquisition.Implicit):
try: id = data.getId() try: id = data.getId()
except: id = '???' # data has no getId() method. Bad. except: id = '???' # data has no getId() method. Bad.
p = string.join(parent.getPhysicalPath() + (id,), '/') p = string.join(parent.getPhysicalPath() + (id,), '/')
LOG('ZODB', INFO, 'Mounted database %s at %s' % logger.info('Mounted database %s at %s',
(self._getMountParams(), p)) self._getMountParams(), p)
else: else:
data = t[0] data = t[0]
...@@ -229,9 +235,8 @@ class MountPoint(persistent.Persistent, Acquisition.Implicit): ...@@ -229,9 +235,8 @@ class MountPoint(persistent.Persistent, Acquisition.Implicit):
except: except:
from StringIO import StringIO from StringIO import StringIO
import traceback import traceback
exc = sys.exc_info() logger.warning('Failed to mount database. %s (%s)', exc[:2],
LOG('ZODB', WARNING, 'Failed to mount database. %s (%s)' % exc[:2], exc_info=True)
error=exc)
f=StringIO() f=StringIO()
traceback.print_tb(exc[2], 100, f) traceback.print_tb(exc[2], 100, f)
self._v_connect_error = (exc[0], exc[1], f.getvalue()) self._v_connect_error = (exc[0], exc[1], f.getvalue())
...@@ -275,8 +280,10 @@ class MountedConnectionCloser: ...@@ -275,8 +280,10 @@ class MountedConnectionCloser:
del data.__dict__['_v__object_deleted__'] del data.__dict__['_v__object_deleted__']
close_db = 1 close_db = 1
# Close the child connection. # Close the child connection.
try: del conn._mount_parent_jar try:
except: pass del conn._mount_parent_jar
except:
pass
conn.close() conn.close()
if close_db: if close_db:
...@@ -295,6 +302,6 @@ class MountedConnectionCloser: ...@@ -295,6 +302,6 @@ class MountedConnectionCloser:
# No more mount points are using this database. # No more mount points are using this database.
del dbs[params] del dbs[params]
db.close() db.close()
LOG('ZODB', INFO, 'Closed database: %s' % params) logger.info('Closed database: %s', params)
finally: finally:
dblock.release() dblock.release()
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment