Commit 2739fae0 authored by Tres Seaver

Play with svn:externals stitching.

parent 508ace53
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""ZODB transfer activity monitoring
$Id$"""
import time
class ActivityMonitor:
"""ZODB load/store activity monitor
This simple implementation just keeps a small log in memory
and iterates over the log when getActivityAnalysis() is called.
It assumes that log entries are added in chronological sequence,
which is only guaranteed because DB.py holds a lock when calling
the closedConnection() method.
"""
def __init__(self, history_length=3600):
self.history_length = history_length # Number of seconds
self.log = [] # [(time, loads, stores)]
def closedConnection(self, conn):
log = self.log
now = time.time()
loads, stores = conn.getTransferCounts(1)
log.append((now, loads, stores))
self.trim(now)
def trim(self, now):
log = self.log
cutoff = now - self.history_length
n = 0
loglen = len(log)
while n < loglen and log[n][0] < cutoff:
n = n + 1
if n:
del log[:n]
def setHistoryLength(self, history_length):
self.history_length = history_length
self.trim(time.time())
def getHistoryLength(self):
return self.history_length
def getActivityAnalysis(self, start=0, end=0, divisions=10):
res = []
now = time.time()
if start == 0:
start = now - self.history_length
if end == 0:
end = now
for n in range(divisions):
res.append({
'start': start + (end - start) * n / divisions,
'end': start + (end - start) * (n + 1) / divisions,
'loads': 0,
'stores': 0,
'connections': 0,
})
div = res[0]
div_end = div['end']
div_index = 0
connections = 0
total_loads = 0
total_stores = 0
for t, loads, stores in self.log:
if t < start:
# We could use a binary search to find the start.
continue
elif t > end:
# We could use a binary search to find the end also.
break
while t > div_end:
div['loads'] = total_loads
div['stores'] = total_stores
div['connections'] = connections
total_loads = 0
total_stores = 0
connections = 0
div_index = div_index + 1
if div_index < divisions:
div = res[div_index]
div_end = div['end']
connections = connections + 1
total_loads = total_loads + loads
total_stores = total_stores + stores
div['stores'] = div['stores'] + total_stores
div['loads'] = div['loads'] + total_loads
div['connections'] = div['connections'] + connections
return res
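
A minimal usage sketch (not part of this commit): the monitor only needs an
object exposing getTransferCounts(), so a hypothetical FakeConnection stands
in for a real Connection below; in ZODB proper, DB.setActivityMonitor() hooks
a monitor up to closed connections.

from ZODB.ActivityMonitor import ActivityMonitor

class FakeConnection:
    # Stand-in for a Connection, exposing only the method the monitor calls.
    def __init__(self, loads, stores):
        self._counts = (loads, stores)
    def getTransferCounts(self, clear=0):
        return self._counts

am = ActivityMonitor(history_length=3600)
am.closedConnection(FakeConnection(10, 2))
am.closedConnection(FakeConnection(5, 1))
for div in am.getActivityAnalysis(divisions=3):
    print div['start'], div['end'], div['loads'], div['stores'], \
          div['connections']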
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import logging

from cStringIO import StringIO
from cPickle import Unpickler, Pickler
from pickle import PicklingError

from ZODB.POSException import ConflictError
from ZODB.loglevels import BLATHER

logger = logging.getLogger('ZODB.ConflictResolution')

ResolvedSerial = 'rs'

class BadClassName(Exception):
    pass

_class_cache = {}
_class_cache_get = _class_cache.get

def find_global(*args):
    cls = _class_cache_get(args, 0)
    if cls == 0:
        # Not cached. Try to import
        try:
            module = __import__(args[0], {}, {}, ['cluck'])
        except ImportError:
            cls = 1
        else:
            cls = getattr(module, args[1], 1)
        _class_cache[args] = cls

        if cls == 1:
            logger.log(BLATHER, "Unable to load class", exc_info=True)

    if cls == 1:
        # Not importable
        raise BadClassName(*args)
    return cls

def state(self, oid, serial, prfactory, p=''):
    p = p or self.loadSerial(oid, serial)
    file = StringIO(p)
    unpickler = Unpickler(file)
    unpickler.find_global = find_global
    unpickler.persistent_load = prfactory.persistent_load
    unpickler.load()  # skip the class tuple
    return unpickler.load()

class PersistentReference:

    def __repr__(self):
        return "PR(%s %s)" % (id(self), self.data)

    def __getstate__(self):
        raise PicklingError, "Can't pickle PersistentReference"

class PersistentReferenceFactory:

    data = None

    def persistent_load(self, oid):
        if self.data is None:
            self.data = {}

        r = self.data.get(oid, None)
        if r is None:
            r = PersistentReference()
            r.data = oid
            self.data[oid] = r

        return r

def persistent_id(object):
    if getattr(object, '__class__', 0) is not PersistentReference:
        return None
    return object.data

_unresolvable = {}

def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
                         committedData=''):
    # class_tuple, old, committed, newstate = ('',''), 0, 0, 0
    try:
        prfactory = PersistentReferenceFactory()
        file = StringIO(newpickle)
        unpickler = Unpickler(file)
        unpickler.find_global = find_global
        unpickler.persistent_load = prfactory.persistent_load
        meta = unpickler.load()
        if isinstance(meta, tuple):
            klass = meta[0]
            newargs = meta[1] or ()
            if isinstance(klass, tuple):
                klass = find_global(*klass)
        else:
            klass = meta
            newargs = ()

        if klass in _unresolvable:
            return None

        newstate = unpickler.load()
        inst = klass.__new__(klass, *newargs)

        try:
            resolve = inst._p_resolveConflict
        except AttributeError:
            _unresolvable[klass] = 1
            return None

        old = state(self, oid, oldSerial, prfactory)
        committed = state(self, oid, committedSerial, prfactory, committedData)

        resolved = resolve(old, committed, newstate)

        file = StringIO()
        pickler = Pickler(file, 1)
        pickler.persistent_id = persistent_id
        pickler.dump(meta)
        pickler.dump(resolved)
        return file.getvalue(1)
    except (ConflictError, BadClassName):
        return None
    except:
        # If anything else went wrong, catch it here and avoid passing an
        # arbitrary exception back to the client.  The error here will mask
        # the original ConflictError.  A client can recover from a
        # ConflictError, but not necessarily from other errors.  But log
        # the error so that any problems can be fixed.
        logger.error("Unexpected error", exc_info=True)
        return None

class ConflictResolvingStorage:
    "Mix-in class that provides conflict resolution handling for storages"

    tryToResolveConflict = tryToResolveConflict
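
A hedged illustration (not part of this commit) of the application-level hook
this module drives: a persistent counter whose _p_resolveConflict() merges
concurrent increments instead of letting the ConflictError propagate. The
class name and attribute are invented for the example.

from persistent import Persistent

class Counter(Persistent):
    # Invented example class; any persistent class may define
    # _p_resolveConflict(oldState, savedState, newState).

    def __init__(self):
        self.value = 0

    def increment(self, n=1):
        self.value = self.value + n

    def _p_resolveConflict(self, oldState, savedState, newState):
        # oldState:   state both transactions started from
        # savedState: state the other transaction already committed
        # newState:   state this transaction tried to commit
        resolved = oldState.copy()
        resolved['value'] = (savedState['value'] + newState['value']
                             - oldState['value'])
        return resolved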
BTrees
ZConfig
persistent
transaction
# referenced by ZODB.config and related tests
ZEO
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Support for database export and import."""
from cStringIO import StringIO
from cPickle import Pickler, Unpickler
from tempfile import TemporaryFile
import logging
from ZODB.POSException import ExportError
from ZODB.utils import p64, u64
from ZODB.serialize import referencesf
logger = logging.getLogger('ZODB.ExportImport')
class ExportImport:
def exportFile(self, oid, f=None):
if f is None:
f = TemporaryFile()
elif isinstance(f, str):
f = open(f,'w+b')
f.write('ZEXP')
oids = [oid]
done_oids = {}
done=done_oids.has_key
load=self._storage.load
while oids:
oid = oids.pop(0)
if oid in done_oids:
continue
done_oids[oid] = True
try:
p, serial = load(oid, self._version)
except:
logger.debug("broken reference for oid %s", repr(oid),
exc_info=True)
else:
referencesf(p, oids)
f.writelines([oid, p64(len(p)), p])
f.write(export_end_marker)
return f
def importFile(self, f, clue='', customImporters=None):
# This is tricky, because we need to work in a transaction!
if isinstance(f, str):
f = open(f,'rb')
magic = f.read(4)
if magic != 'ZEXP':
if customImporters and customImporters.has_key(magic):
f.seek(0)
return customImporters[magic](self, f, clue)
raise ExportError("Invalid export header")
t = self._txn_mgr.get()
if clue:
t.note(clue)
return_oid_list = []
self._import = f, return_oid_list
self._register()
t.commit(1)
# Return the root imported object.
if return_oid_list:
return self.get(return_oid_list[0])
else:
return None
def _importDuringCommit(self, transaction, f, return_oid_list):
"""Import data during two-phase commit.
Invoked by the transaction manager mid commit.
Appends one item, the OID of the first object created,
to return_oid_list.
"""
oids = {}
def persistent_load(ooid):
"""Remap a persistent id to a new ID and create a ghost for it."""
klass = None
if isinstance(ooid, tuple):
ooid, klass = ooid
if ooid in oids:
oid = oids[ooid]
else:
if klass is None:
oid = self._storage.new_oid()
else:
oid = self._storage.new_oid(), klass
oids[ooid] = oid
return Ghost(oid)
version = self._version
while 1:
h = f.read(16)
if h == export_end_marker:
break
if len(h) != 16:
raise ExportError("Truncated export file")
l = u64(h[8:16])
p = f.read(l)
if len(p) != l:
raise ExportError("Truncated export file")
ooid = h[:8]
if oids:
oid = oids[ooid]
if isinstance(oid, tuple):
oid = oid[0]
else:
oids[ooid] = oid = self._storage.new_oid()
return_oid_list.append(oid)
pfile = StringIO(p)
unpickler = Unpickler(pfile)
unpickler.persistent_load = persistent_load
newp = StringIO()
pickler = Pickler(newp, 1)
pickler.persistent_id = persistent_id
pickler.dump(unpickler.load())
pickler.dump(unpickler.load())
p = newp.getvalue()
self._storage.store(oid, None, p, version, transaction)
export_end_marker = '\377'*16
class Ghost(object):
__slots__ = ("oid",)
def __init__(self, oid):
self.oid = oid
def persistent_id(obj):
if isinstance(obj, Ghost):
return obj.oid
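
A hedged usage sketch (not part of this commit): Connection mixes
ExportImport in, so export and import are driven from open connections.
DB, MappingStorage and the transaction module are assumed to behave as in
this release; the objects stored are illustrative.

from ZODB import DB
from ZODB.MappingStorage import MappingStorage
import transaction

source = DB(MappingStorage()).open()
source.root()['data'] = {'answer': 42}
transaction.get().commit()

dump = source.exportFile(source.root()._p_oid)   # TemporaryFile by default
dump.seek(0)

target = DB(MappingStorage()).open()
copy = target.importFile(dump)                   # returns the imported root object
target.root()['copy'] = copy
transaction.get().commit()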
# this is a package
from ZODB.FileStorage.FileStorage \
import FileStorage, RecordIterator, FileIterator, Record, packed_version
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import md5
import struct

from ZODB.FileStorage import FileIterator
from ZODB.FileStorage.format \
     import TRANS_HDR, TRANS_HDR_LEN, DATA_HDR, DATA_HDR_LEN
from ZODB.TimeStamp import TimeStamp
from ZODB.utils import u64, get_pickle_metadata
from ZODB.tests.StorageTestBase import zodb_unpickle

def fsdump(path, file=None, with_offset=1):
    i = 0
    iter = FileIterator(path)
    for trans in iter:
        if with_offset:
            print >> file, "Trans #%05d tid=%016x time=%s offset=%d" % \
                  (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._pos)
        else:
            print >> file, "Trans #%05d tid=%016x time=%s" % \
                  (i, u64(trans.tid), str(TimeStamp(trans.tid)))
        print >> file, "\tstatus=%s user=%s description=%s" % \
              (`trans.status`, trans.user, trans.description)
        j = 0
        for rec in trans:
            if rec.data is None:
                fullclass = "undo or abort of object creation"
            else:
                modname, classname = get_pickle_metadata(rec.data)
                dig = md5.new(rec.data).hexdigest()
                fullclass = "%s.%s" % (modname, classname)
            # special case for testing purposes
            if fullclass == "ZODB.tests.MinPO.MinPO":
                obj = zodb_unpickle(rec.data)
                fullclass = "%s %s" % (fullclass, obj.value)
            if rec.version:
                version = "version=%s " % rec.version
            else:
                version = ''
            if rec.data_txn:
                # XXX It would be nice to print the transaction number
                # (i) but it would be too expensive to keep track of.
                bp = "bp=%016x" % u64(rec.data_txn)
            else:
                bp = ""
            print >> file, "  data #%05d oid=%016x %sclass=%s %s" % \
                  (j, u64(rec.oid), version, fullclass, bp)
            j += 1
        print >> file
        i += 1
    iter.close()

def fmt(p64):
    # Return a nicely formatted string for a packed 64-bit value
    return "%016x" % u64(p64)

class Dumper:
    """A very verbose dumper for debugging FileStorage problems."""

    # XXX Should revise this class to use FileStorageFormatter.

    def __init__(self, path, dest=None):
        self.file = open(path, "rb")
        self.dest = dest

    def dump(self):
        fid = self.file.read(4)
        print >> self.dest, "*" * 60
        print >> self.dest, "file identifier: %r" % fid
        while self.dump_txn():
            pass

    def dump_txn(self):
        pos = self.file.tell()
        h = self.file.read(TRANS_HDR_LEN)
        if not h:
            return False
        tid, tlen, status, ul, dl, el = struct.unpack(TRANS_HDR, h)
        end = pos + tlen
        print >> self.dest, "=" * 60
        print >> self.dest, "offset: %d" % pos
        print >> self.dest, "end pos: %d" % end
        print >> self.dest, "transaction id: %s" % fmt(tid)
        print >> self.dest, "trec len: %d" % tlen
        print >> self.dest, "status: %r" % status
        user = descr = extra = ""
        if ul:
            user = self.file.read(ul)
        if dl:
            descr = self.file.read(dl)
        if el:
            extra = self.file.read(el)
        print >> self.dest, "user: %r" % user
        print >> self.dest, "description: %r" % descr
        print >> self.dest, "len(extra): %d" % el
        while self.file.tell() < end:
            self.dump_data(pos)
        stlen = self.file.read(8)
        print >> self.dest, "redundant trec len: %d" % u64(stlen)
        return 1

    def dump_data(self, tloc):
        pos = self.file.tell()
        h = self.file.read(DATA_HDR_LEN)
        assert len(h) == DATA_HDR_LEN
        oid, revid, prev, tloc, vlen, dlen = struct.unpack(DATA_HDR, h)
        print >> self.dest, "-" * 60
        print >> self.dest, "offset: %d" % pos
        print >> self.dest, "oid: %s" % fmt(oid)
        print >> self.dest, "revid: %s" % fmt(revid)
        print >> self.dest, "previous record offset: %d" % prev
        print >> self.dest, "transaction offset: %d" % tloc
        if vlen:
            pnv = self.file.read(8)
            sprevdata = self.file.read(8)
            version = self.file.read(vlen)
            print >> self.dest, "version: %r" % version
            print >> self.dest, "non-version data offset: %d" % u64(pnv)
            print >> self.dest, \
                  "previous version data offset: %d" % u64(sprevdata)
        print >> self.dest, "len(data): %d" % dlen
        self.file.read(dlen)
        if not dlen:
            sbp = self.file.read(8)
            print >> self.dest, "backpointer: %d" % u64(sbp)
##############################################################################
#
# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Very Simple Mapping ZODB storage
The Mapping storage provides an extremely simple storage implementation that
doesn't provide undo or version support.
It is meant to illustrate the simplest possible storage.
The Mapping storage uses a single data structure to map object ids to data.
"""
from ZODB.utils import u64, z64
from ZODB.BaseStorage import BaseStorage
from ZODB import POSException
from persistent.TimeStamp import TimeStamp
class MappingStorage(BaseStorage):
def __init__(self, name='Mapping Storage'):
BaseStorage.__init__(self, name)
self._index = {}
# FIXME: Why we don't use dict for _tindex?
self._tindex = []
self._ltid = None
# Note: If you subclass this and use a persistent mapping facility
# (e.g. a dbm file), you will need to get the maximum key and save it
# as self._oid. See dbmStorage.
def __len__(self):
return len(self._index)
def getSize(self):
self._lock_acquire()
try:
# These constants are for Python object memory overheads
s = 32
for p in self._index.itervalues():
s += 56 + len(p)
return s
finally:
self._lock_release()
def load(self, oid, version):
self._lock_acquire()
try:
p = self._index[oid]
return p[8:], p[:8] # pickle, serial
finally:
self._lock_release()
def loadEx(self, oid, version):
self._lock_acquire()
try:
# Since this storage doesn't support versions, tid and
# serial will always be the same.
p = self._index[oid]
return p[8:], p[:8], "" # pickle, tid, version
finally:
self._lock_release()
def getTid(self, oid):
self._lock_acquire()
try:
# The tid is the first 8 bytes of the buffer.
return self._index[oid][:8]
finally:
self._lock_release()
def store(self, oid, serial, data, version, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
if version:
raise POSException.Unsupported("Versions aren't supported")
self._lock_acquire()
try:
if oid in self._index:
oserial = self._index[oid][:8]
if serial != oserial:
raise POSException.ConflictError(oid=oid,
serials=(oserial, serial),
data=data)
self._tindex.append((oid, self._tid + data))
finally:
self._lock_release()
return self._tid
def _clear_temp(self):
self._tindex = []
def _finish(self, tid, user, desc, ext):
self._index.update(dict(self._tindex))
self._ltid = self._tid
def lastTransaction(self):
return self._ltid
def pack(self, t, referencesf):
self._lock_acquire()
try:
if not self._index:
return
# Build an index of *only* those objects reachable from the root.
rootl = [z64]
pindex = {}
while rootl:
oid = rootl.pop()
if oid in pindex:
continue
# Scan non-version pickle for references
r = self._index[oid]
pindex[oid] = r
referencesf(r[8:], rootl)
# Now delete any unreferenced entries:
for oid in self._index.keys():
if oid not in pindex:
del self._index[oid]
finally:
self._lock_release()
def _splat(self):
"""Spit out a string showing state."""
o = ['Index:']
keys = self._index.keys()
keys.sort()
for oid in keys:
r = self._index[oid]
o.append(' %s: %s, %s' %
(u64(oid), TimeStamp(r[:8]), repr(r[8:])))
return '\n'.join(o)
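
A hedged usage sketch (not part of this commit): MappingStorage plugged into
a DB, holding everything in memory. DB and the transaction module are assumed
to behave as in this release.

from ZODB import DB
from ZODB.MappingStorage import MappingStorage
import transaction

db = DB(MappingStorage())
conn = db.open()
conn.root()['greeting'] = 'hello'
transaction.get().commit()
print conn.root()['greeting']    # -> hello
conn.close()
db.close()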
# Extension information for zpkg.

<extension winlock>
  source winlock.c
</extension>
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Provide backward compatibility with storages that only have undoLog()."""
class UndoLogCompatible:
def undoInfo(self, first=0, last=-20, specification=None):
if specification:
def filter(desc, spec=specification.items()):
get=desc.get
for k, v in spec:
if get(k, None) != v:
return 0
return 1
else: filter=None
return self.undoLog(first, last, filter)
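
A hedged sketch (not part of this commit) of how the specification filter
narrows undoInfo() results. FileStorage is assumed to pick this mix-in up via
BaseStorage; the path and the 'user_name' metadata key are illustrative.

from ZODB.FileStorage import FileStorage

st = FileStorage('Data.fs', create=True)
# Only undo records whose metadata matches every (key, value) pair in the
# specification are returned; an empty storage simply yields [].
entries = st.undoInfo(0, -20, {'user_name': 'alice'})
print entries
st.close()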
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# The next line must use double quotes, so release.py recognizes it.
__version__ = "3.3.1a1"
import sys
import __builtin__
from persistent import TimeStamp
from DB import DB
from transaction import get as get_transaction
# Backward compat for old imports. I don't think TimeStamp should
# really be in persistent anyway.
sys.modules['ZODB.TimeStamp'] = sys.modules['persistent.TimeStamp']
# TODO Issue deprecation warning if this variant is used?
__builtin__.get_transaction = get_transaction
del __builtin__
<schema prefix="ZODB.config">
<import package="ZODB"/>
<section type="ZODB.database" name="*" attribute="database"/>
</schema>
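
A hedged sketch (not part of this commit) of a configuration this schema
accepts, loaded through ZODB.config; the <zodb>/<mappingstorage> section
names and databaseFromString() are assumed from the ZODB.config module of
this release.

import ZODB.config

config = """
<zodb>
  <mappingstorage/>
</zodb>
"""

db = ZODB.config.databaseFromString(config)
conn = db.open()
print conn.root()
db.close()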
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import persistent.mapping

class fixer:
    def __of__(self, parent):
        def __setstate__(state, self=parent):
            self._container = state
            del self.__setstate__
        return __setstate__

fixer = fixer()

class hack: pass
hack = hack()

def __basicnew__():
    r = persistent.mapping.PersistentMapping()
    r.__setstate__ = fixer
    return r

hack.__basicnew__ = __basicnew__
<schema>
  <import package="ZODB"/>
  <section type="ZODB.storage" name="*" attribute="storage"/>
</schema>
# Having this makes debugging better.