Commit 894e2937 authored by Jim Fulton

Refactored the way ClientStorage logs to:

- Include the storage name in all log messages

- Use the standard logging APIs directly, rather than going through an
  intermediate function.
parent c1021c81
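For context, here is a minimal sketch of the new logging pattern the diff below adopts (the ExampleClient class and its notifyConnected method are illustrative stand-ins, not ZEO code):

    import logging

    # The module logger now comes from the module name rather than the
    # hard-coded 'ZEO.ClientStorage' string, and the old log2() helper
    # (which prefixed every message with the process id) is removed.
    logger = logging.getLogger(__name__)

    class ExampleClient(object):
        def __init__(self, name):
            # Standard convention for storages; used to tag every log message.
            self.__name__ = name

        def notifyConnected(self, addr):
            # Level-specific logger methods are called directly; the storage
            # name is always the first format argument.
            logger.info("%s Connected to storage: %s", self.__name__, addr)

Messages that previously went through log2() at the ZODB BLATHER level are emitted with logger.debug() in the new code.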
@@ -44,16 +44,10 @@ import zc.lockfile
 import ZODB.BaseStorage
 from ZODB import POSException
 from ZODB import utils
-from ZODB.loglevels import BLATHER
 from ZODB.blob import rename_or_copy_blob
 from persistent.TimeStamp import TimeStamp
 
-logger = logging.getLogger('ZEO.ClientStorage')
-_pid = str(os.getpid())
-
-def log2(msg, level=logging.INFO, subsys=_pid, exc_info=False):
-    message = "(%s) %s" % (subsys, msg)
-    logger.log(level, message, exc_info=exc_info)
+logger = logging.getLogger(__name__)
 
 try:
     from ZODB.ConflictResolution import ResolvedSerial
@@ -212,16 +206,24 @@ class ClientStorage(object):
         testConnection() and doAuth() for details).
         """
-        log2("%s (pid=%d) created %s/%s for storage: %r" %
-             (self.__class__.__name__,
-              os.getpid(),
-              read_only and "RO" or "RW",
-              read_only_fallback and "fallback" or "normal",
-              storage))
+        self.__name__ = name or str(addr) # Standard convention for storages
+        logger.info(
+            "%s %s (pid=%d) created %s/%s for storage: %r",
+            self.__name__,
+            self.__class__.__name__,
+            os.getpid(),
+            read_only and "RO" or "RW",
+            read_only_fallback and "fallback" or "normal",
+            storage,
+            )
 
         if debug:
-            log2("ClientStorage(): debug argument is no longer used")
+            logger.warning(
+                "%s ClientStorage(): debug argument is no longer used",
+                self.__name__)
 
         # Remember some parameters for "_setupCache"
         self._var_ = var
         self._storage_ = storage
@@ -234,12 +236,15 @@ class ClientStorage(object):
         # if not None
         if wait_for_server_on_startup is not None:
             if wait is not None and wait != wait_for_server_on_startup:
-                log2("ClientStorage(): conflicting values for wait and "
-                     "wait_for_server_on_startup; wait prevails",
-                     level=logging.WARNING)
+                logger.warning(
+                    "%s ClientStorage(): conflicting values for wait and "
+                    "wait_for_server_on_startup; wait prevails",
+                    self.__name__)
             else:
-                log2("ClientStorage(): wait_for_server_on_startup "
-                     "is deprecated; please use wait instead")
+                logger.info(
+                    "%s ClientStorage(): wait_for_server_on_startup "
+                    "is deprecated; please use wait instead",
+                    self.__name__)
             wait = wait_for_server_on_startup
         elif wait is None:
             wait = 1
@@ -308,8 +313,6 @@ class ClientStorage(object):
         self._serials = []
         self._seriald = {}
 
-        self.__name__ = name or str(addr) # Standard convention for storages
-
         # A ClientStorage only allows one thread to commit at a time.
         # Mutual exclusion is achieved using _tpc_cond, which
         # protects _transaction. A thread that wants to assign to
@@ -385,7 +388,7 @@ class ClientStorage(object):
     def _wait(self, timeout=None):
         if timeout is not None:
             deadline = time.time() + timeout
-            log2("Setting deadline to %f" % deadline, level=BLATHER)
+            logger.debug("%s Setting deadline to %f", self.__name__, deadline)
         else:
             deadline = None
         # Wait for a connection to be established.
@@ -400,10 +403,11 @@ class ClientStorage(object):
             if self._ready.isSet():
                 break
             if timeout and time.time() > deadline:
-                log2("Timed out waiting for connection",
-                     level=logging.WARNING)
+                logger.warning("%s Timed out waiting for connection",
+                               self.__name__)
                 break
-            log2("Waiting for cache verification to finish")
+            logger.info("%s Waiting for cache verification to finish",
+                        self.__name__)
 
     def close(self):
         """Storage API: finalize the storage, releasing external resources."""
@@ -447,15 +451,16 @@ class ClientStorage(object):
         module = get_module(protocol)
         if not module:
-            log2("%s: no such an auth protocol: %s" %
-                 (self.__class__.__name__, protocol), level=logging.WARNING)
+            logger.error("%s %s: no such an auth protocol: %s",
+                         self.__name__, self.__class__.__name__, protocol)
             return
 
         storage_class, client, db_class = module
 
         if not client:
-            log2("%s: %s isn't a valid protocol, must have a Client class" %
-                 (self.__class__.__name__, protocol), level=logging.WARNING)
+            logger.error(
+                "%s %s: %s isn't a valid protocol, must have a Client class",
+                self.__name__, self.__class__.__name__, protocol)
             raise AuthError("invalid protocol")
 
         c = client(stub)
@@ -485,20 +490,22 @@ class ClientStorage(object):
         exception raised by register() is passed through.
         """
-        log2("Testing connection %r" % conn)
+        logger.info("%s Testing connection %r", self.__name__, conn)
         # TODO: Should we check the protocol version here?
         conn._is_read_only = self._is_read_only
         stub = self.StorageServerStubClass(conn)
 
         auth = stub.getAuthProtocol()
-        log2("Server authentication protocol %r" % auth)
+        logger.info("%s Server authentication protocol %r", self.__name__, auth)
         if auth:
             skey = self.doAuth(auth, stub)
             if skey:
-                log2("Client authentication successful")
+                logger.info("%s Client authentication successful",
+                            self.__name__)
                 conn.setSessionKey(skey)
             else:
-                log2("Authentication failed")
+                logger.info("%s Authentication failed",
+                            self.__name__)
                 raise AuthError("Authentication failed")
 
         try:
@@ -507,7 +514,8 @@ class ClientStorage(object):
         except POSException.ReadOnlyError:
             if not self._read_only_fallback:
                 raise
-            log2("Got ReadOnlyError; trying again with read_only=1")
+            logger.info("%s Got ReadOnlyError; trying again with read_only=1",
+                        self.__name__)
             stub.register(str(self._storage), read_only=1)
             conn._is_read_only = True
             return 0
@@ -544,9 +552,11 @@ class ClientStorage(object):
             self._db.invalidateCache()
 
         if reconnect:
-            log2("Reconnected to storage: %s" % self._server_addr)
+            logger.info("%s Reconnected to storage: %s",
+                        self.__name__, self._server_addr)
         else:
-            log2("Connected to storage: %s" % self._server_addr)
+            logger.info("%s Connected to storage: %s",
+                        self.__name__, self._server_addr)
         stub = self.StorageServerStubClass(conn)
 
         self._oids = []
@@ -591,8 +601,8 @@ class ClientStorage(object):
         try:
             canonical, aliases, addrs = socket.gethostbyaddr(host)
         except socket.error, err:
-            log2("Error resolving host: %s (%s)" % (host, err),
-                 level=BLATHER)
+            logger.debug("%s Error resolving host: %s (%s)",
+                         self.__name__, host, err)
             canonical = host
         self._server_addr = str((canonical, addr[1]))
@@ -619,7 +629,8 @@ class ClientStorage(object):
         closed or when certain problems with the connection occur.
         """
-        log2("Disconnected from storage: %s" % repr(self._server_addr))
+        logger.info("%s Disconnected from storage: %r",
+                    self.__name__, self._server_addr)
         self._connection = None
         self._ready.clear()
         self._server = disconnected_stub
@@ -877,8 +888,8 @@ class ClientStorage(object):
     def _have_blob(self, blob_filename, oid, serial):
         if os.path.exists(blob_filename):
-            log2("Found blob %s/%s in cache." % (utils.oid_repr(oid),
-                 utils.tid_repr(serial)), level=BLATHER)
+            logger.debug("%s Found blob %r/%r in cache.",
+                         self.__name__, oid, serial)
             return True
         return False
@@ -1052,8 +1063,8 @@ class ClientStorage(object):
             try:
                 self._server.tpc_abort(id(txn))
             except ClientDisconnected:
-                log2("ClientDisconnected in tpc_abort() ignored",
-                     level=BLATHER)
+                logger.debug("%s ClientDisconnected in tpc_abort() ignored",
+                             self.__name__)
         finally:
             self._tbuf.clear()
             self._seriald.clear()
@@ -1218,7 +1229,8 @@ class ClientStorage(object):
         ltid = server.lastTransaction()
 
         if not self._cache:
-            log2("No verification necessary -- empty cache")
+            logger.info("%s No verification necessary -- empty cache",
+                        self.__name__)
             if ltid and ltid != utils.z64:
                 self._cache.setLastTid(ltid)
             self.finish_verification()
@@ -1227,23 +1239,28 @@ class ClientStorage(object):
         last_inval_tid = self._cache.getLastTid()
         if last_inval_tid is not None:
             if ltid == last_inval_tid:
-                log2("No verification necessary (last_inval_tid up-to-date)")
+                logger.info(
+                    "%s No verification necessary (last_inval_tid up-to-date)",
+                    self.__name__)
                 self.finish_verification()
                 return "no verification"
             elif ltid < last_inval_tid:
-                message = "Client has seen newer transactions than server!"
-                log2(message, level=logging.CRITICAL)
+                logger.critical(
+                    "%s Client has seen newer transactions than server!",
+                    self.__name__)
                 raise ClientStorageError(message)
 
             # log some hints about last transaction
-            log2("last inval tid: %r %s\n"
-                 % (last_inval_tid, tid2time(last_inval_tid)))
-            log2("last transaction: %r %s" %
-                 (ltid, ltid and tid2time(ltid)))
+            logger.info("%s last inval tid: %r %s\n",
+                        self.__name__, last_inval_tid,
+                        tid2time(last_inval_tid))
+            logger.info("%s last transaction: %r %s",
+                        self.__name__, ltid, ltid and tid2time(ltid))
 
             pair = server.getInvalidations(last_inval_tid)
             if pair is not None:
-                log2("Recovering %d invalidations" % len(pair[1]))
+                logger.info("%s Recovering %d invalidations",
+                            self.__name__, len(pair[1]))
                 self.finish_verification(pair)
                 return "quick verification"
         elif ltid and ltid != utils.z64:
@@ -1259,14 +1276,14 @@ class ClientStorage(object):
             self._db.invalidateCache()
 
         if self._cache and self._drop_cache_rather_verify:
-            log2("dropping cache")
+            logger.info("%s dropping cache", self.__name__)
            self._cache.close()
            self._setupCache() # creates a new cache
            self._server = server
            self._ready.set()
            return "cache dropped"
 
-        log2("Verifying cache")
+        logger.info("%s Verifying cache", self.__name__)
         for oid, tid in self._cache.contents():
             server.verify(oid, tid)
         server.endZeoVerify()
@@ -1281,16 +1298,16 @@ class ClientStorage(object):
         # Queue an invalidate for the end the verification procedure.
         if self._pickler is None:
             # This should never happen.
-            log2("invalidateVerify with no _pickler", level = logging.ERROR)
+            logger.error("%s invalidateVerify with no _pickler", self.__name__)
             return
         self._pickler.dump((None, [args[0]]))
 
     def endVerify(self):
         """Server callback to signal end of cache validation."""
-        log2("endVerify finishing")
+        logger.info("%s endVerify finishing", self.__name__)
         self.finish_verification()
-        log2("endVerify finished")
+        logger.info("%s endVerify finished", self.__name__)
 
     def finish_verification(self, catch_up=None):
         self._lock.acquire()
@@ -1335,8 +1352,9 @@ class ClientStorage(object):
         self._lock.acquire()
         try:
             if self._pickler is not None:
-                log2("Transactional invalidation during cache verification",
-                     level=BLATHER)
+                logger.debug(
+                    "%s Transactional invalidation during cache verification",
+                    self.__name__)
                 self._pickler.dump((tid, [arg[0] for arg in args]))
             else:
                 self._process_invalidations(tid, (arg[0] for arg in args))
...