nexedi / ZEO · Commits

Commit e8bec203 — authored Jul 07, 2016 by Jim Fulton

    Merge remote-tracking branch 'origin/asyncio' into uvloop

    Conflicts:
        setup.py

Parents: b76fea7d, e5653d43

Showing 21 changed files with 702 additions and 515 deletions (+702 / -515)
Changed files (additions / deletions per file):

    src/ZEO/ClientStorage.py                                 +44   -9
    src/ZEO/StorageServer.py                                 +78   -128
    src/ZEO/TransactionBuffer.py                             +19   -11
    src/ZEO/asyncio/base.py                                  +12   -0
    src/ZEO/asyncio/server.py                                +2    -3
    src/ZEO/asyncio/testing.py                               +1    -1
    src/ZEO/asyncio/tests.py                                 +3    -3
    src/ZEO/cache.py                                         +180  -166
    src/ZEO/interfaces.py                                    +1    -2
    src/ZEO/runzeo.py                                        +4    -0
    src/ZEO/server.xml                                       +8    -0
    src/ZEO/tests/CommitLockTests.py                         +2    -0
    src/ZEO/tests/ConnectionTests.py                         +3    -84
    src/ZEO/tests/InvalidationTests.py                       +2    -2
    src/ZEO/tests/forker.py                                  +10   -5
    src/ZEO/tests/protocols.test                             +65   -62
    src/ZEO/tests/testConversionSupport.py                   +2    -0
    src/ZEO/tests/testZEO.py                                 +37   -31
    src/ZEO/tests/testZEO2.py                                +17   -8
    src/ZEO/tests/test_client_side_conflict_resolution.py   +150  -0
    src/ZEO/tests/utils.py                                   +62   -0
src/ZEO/ClientStorage.py

@@ -34,6 +34,7 @@ import BTrees.OOBTree
import zc.lockfile
import ZODB
import ZODB.BaseStorage
import ZODB.ConflictResolution
import ZODB.interfaces
import zope.interface
import six

@@ -53,10 +54,7 @@ logger = logging.getLogger(__name__)
# max signed 64-bit value ~ infinity :)  Signed cuz LBTree and TimeStamp
m64 = b'\x7f\xff\xff\xff\xff\xff\xff\xff'

try:
    from ZODB.ConflictResolution import ResolvedSerial
except ImportError:
    ResolvedSerial = 'rs'
from ZODB.ConflictResolution import ResolvedSerial

def tid2time(tid):
    return str(TimeStamp(tid))

@@ -77,7 +75,8 @@ def get_timestamp(prev_ts=None):
MB = 1024**2

class ClientStorage(object):
@zope.interface.implementer(ZODB.interfaces.IMultiCommitStorage)
class ClientStorage(ZODB.ConflictResolution.ConflictResolvingStorage):
    """A storage class that is a network client to a remote storage.

    This is a faithful implementation of the Storage API.

@@ -333,6 +332,7 @@ class ClientStorage(object):
        The storage isn't really ready to use until after this call.
        """
        super(ClientStorage, self).registerDB(db)
        self._db = db

    def is_connected(self, test=False):

@@ -724,18 +724,51 @@ class ClientStorage(object):
        """
        tbuf = self._check_trans(txn, 'tpc_vote')
        try:
            self._call('vote', id(txn))
            conflicts = True
            vote_attempts = 0
            while conflicts and vote_attempts < 9: # 9? Mainly avoid inf. loop
                conflicts = False
                for oid in self._call('vote', id(txn)) or ():
                    if isinstance(oid, dict):
                        # Conflict, let's try to resolve it
                        conflicts = True
                        conflict = oid
                        oid = conflict['oid']
                        committed, read = conflict['serials']
                        data = self.tryToResolveConflict(
                            oid, committed, read, conflict['data'])
                        self._async('storea', oid, committed, data, id(txn))
                        tbuf.resolve(oid, data)
                    else:
                        tbuf.serial(oid, ResolvedSerial)
                vote_attempts += 1
        except POSException.StorageTransactionError:
            # Hm, we got disconnected and reconnected bwtween
            # _check_trans and voting. Let's chack the transaction again:
            tbuf = self._check_trans(txn, 'tpc_vote')
            self._check_trans(txn, 'tpc_vote')
            raise
        except POSException.ConflictError as err:
            oid = getattr(err, 'oid', None)
            if oid is not None:
                # This is a band-aid to help recover from a situation
                # that shouldn't happen. A Client somehow misses some
                # invalidations and has out of date data in its
                # cache. We need some whay to invalidate the cache
                # entry without invalidations. So, if we see a
                # (unresolved) conflict error, we assume that the
                # cache entry is bad and invalidate it.
                self._cache.invalidate(oid, None)
            raise

        if tbuf.exception:
            raise tbuf.exception

        if tbuf.serials:
            return list(tbuf.serials.items())
        if tbuf.server_resolved or tbuf.client_resolved:
            return list(tbuf.server_resolved) + list(tbuf.client_resolved)
        else:
            return None

@@ -830,6 +863,8 @@ class ClientStorage(object):
            self._update_blob_cache(tbuf, tid)

        return tid

    def _update_blob_cache(self, tbuf, tid):
        """Internal helper move blobs updated by a transaction to the cache.
        """
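For orientation (not part of the diff): with client-side conflict resolution enabled on the server, vote() can hand back conflict records instead of plain serials, and the loop above is what consumes them. A minimal sketch of that record shape, with placeholder byte strings:

# Illustration only -- the oid and tid values below are placeholders.
conflict = {
    'oid': b'\x00\x00\x00\x00\x00\x00\x00\x01',      # object the conflict is about
    'serials': (b'<committed-tid>', b'<read-tid>'),  # (committed, read) serials
    'data': b'<pickle the client tried to store>',
}
committed, read = conflict['serials']
# The client asks tryToResolveConflict(oid, committed, read, data) for a
# resolved pickle and, if that succeeds, re-sends it with
# self._async('storea', oid, committed, data, id(txn)) before voting again.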
src/ZEO/StorageServer.py

@@ -85,10 +85,11 @@ class ZEOStorage:
    blob_tempfile = None
    log_label = 'unconnected'
    locked = False             # Don't have storage lock
    verifying = store_failed = 0
    verifying = 0

    def __init__(self, server, read_only=0):
        self.server = server
        self.client_conflict_resolution = server.client_conflict_resolution
        # timeout and stats will be initialized in register()
        self.read_only = read_only
        # The authentication protocol may define extra methods.

@@ -334,12 +335,12 @@ class ZEOStorage:
        t._extension = ext

        self.serials = []
        self.conflicts = {}
        self.invalidated = []
        self.txnlog = CommitLog()
        self.blob_log = []
        self.tid = tid
        self.status = status
        self.store_failed = 0
        self.stats.active_txns += 1

        # Assign the transaction attribute last. This is so we don't

@@ -414,6 +415,7 @@ class ZEOStorage:
        self.locked, delay = self.server.lock_storage(self, delay)
        if self.locked:
            result = None
            try:
                self.log(
                    "Preparing to commit transaction: %d objects, %d bytes"

@@ -427,38 +429,56 @@ class ZEOStorage:
                self.storage.tpc_begin(self.transaction)

                for op, args in self.txnlog:
                    if not getattr(self, op)(*args):
                        break
                    getattr(self, op)(*args)

                # Blob support
                while self.blob_log and not self.store_failed:
                while self.blob_log:
                    oid, oldserial, data, blobfilename = self.blob_log.pop()
                    self._store(oid, oldserial, data, blobfilename)

                if not self.store_failed:
                    # Only call tpc_vote of no store call failed,
                    # otherwise the serialnos() call will deliver an
                    # exception that will be handled by the client in
                    # its tpc_vote() method.
                    serials = self.storage.tpc_vote(self.transaction)
                    if serials:
                        self.serials.extend(serials)

                self.connection.async('serialnos', self.serials)
            except Exception:
                if not self.conflicts:
                    try:
                        serials = self.storage.tpc_vote(self.transaction)
                    except ConflictError as err:
                        if (self.client_conflict_resolution and
                            err.oid and err.serials and err.data):
                            self.conflicts[err.oid] = dict(
                                oid=err.oid, serials=err.serials,
                                data=err.data)
                        else:
                            raise
                    else:
                        if serials:
                            self.serials.extend(serials)

                    result = self.serials

                if self.conflicts:
                    result = list(self.conflicts.values())
                    self.storage.tpc_abort(self.transaction)
                    self.server.unlock_storage(self)
                    self.locked = False
                    self.server.stop_waiting(self)
            except Exception as err:
                self.storage.tpc_abort(self.transaction)
                self._clear_transaction()
                if isinstance(err, ConflictError):
                    self.stats.conflicts += 1
                    self.log("conflict error %s" % err, BLATHER)
                if not isinstance(err, TransactionError):
                    logger.exception("While voting")

                if delay is not None:
                    delay.error(sys.exc_info())
                else:
                    raise
            else:
                if delay is not None:
                    delay.reply(None)
                    delay.reply(result)
                else:
                    return None
                    return result
        else:
            return delay

@@ -550,120 +570,41 @@ class ZEOStorage:
        self._check_tid(tid, exc=StorageTransactionError)
        self.txnlog.undo(trans_id)

    def _op_error(self, oid, err, op):
        self.store_failed = 1
        if isinstance(err, ConflictError):
            self.stats.conflicts += 1
            self.log("conflict error oid=%s msg=%s" %
                     (oid_repr(oid), str(err)), BLATHER)
        if not isinstance(err, TransactionError):
            # Unexpected errors are logged and passed to the client
            self.log("%s error: %s, %s" % ((op,) + sys.exc_info()[:2]),
                     logging.ERROR, exc_info=True)
        err = self._marshal_error(err)
        # The exception is reported back as newserial for this oid
        self.serials.append((oid, err))

    def _delete(self, oid, serial):
        err = None
        try:
            self.storage.deleteObject(oid, serial, self.transaction)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            err = e
            self._op_error(oid, err, 'delete')

        return err is None
        self.storage.deleteObject(oid, serial, self.transaction)

    def _checkread(self, oid, serial):
        err = None
        try:
            self.storage.checkCurrentSerialInTransaction(
                oid, serial, self.transaction)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            err = e
            self._op_error(oid, err, 'checkCurrentSerialInTransaction')

        return err is None
        self.storage.checkCurrentSerialInTransaction(
            oid, serial, self.transaction)

    def _store(self, oid, serial, data, blobfile=None):
        err = None
        try:
            if blobfile is None:
                newserial = self.storage.store(
                    oid, serial, data, '', self.transaction)
                self.storage.store(oid, serial, data, '', self.transaction)
            else:
                newserial = self.storage.storeBlob(
                self.storage.storeBlob(
                    oid, serial, data, blobfile, '', self.transaction)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as error:
            self._op_error(oid, error, 'store')
            err = error
        except ConflictError as err:
            if self.client_conflict_resolution and err.serials:
                self.conflicts[oid] = dict(
                    oid=oid, serials=err.serials, data=data)
            else:
                raise
        else:
            if oid in self.conflicts:
                del self.conflicts[oid]

            if serial != b"\0\0\0\0\0\0\0\0":
                self.invalidated.append(oid)

            if isinstance(newserial, bytes):
                newserial = [(oid, newserial)]

            for oid, s in newserial or ():
                if s == ResolvedSerial:
                    self.stats.conflicts_resolved += 1
                    self.log("conflict resolved oid=%s"
                             % oid_repr(oid), BLATHER)
                self.serials.append((oid, s))

        return err is None

    def _restore(self, oid, serial, data, prev_txn):
        err = None
        try:
            self.storage.restore(oid, serial, data, '', prev_txn,
                                 self.transaction)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as err:
            self._op_error(oid, err, 'restore')

        return err is None
        self.storage.restore(oid, serial, data, '', prev_txn,
                             self.transaction)

    def _undo(self, trans_id):
        err = None
        try:
            tid, oids = self.storage.undo(trans_id, self.transaction)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            err = e
            self._op_error(z64, err, 'undo')
        else:
            self.invalidated.extend(oids)
            self.serials.extend((oid, ResolvedSerial) for oid in oids)

        return err is None

    def _marshal_error(self, error):
        # Try to pickle the exception.  If it can't be pickled,
        # the RPC response would fail, so use something that can be pickled.
        if PY3:
            pickler = Pickler(BytesIO(), 3)
        else:
            # The pure-python version requires at least one argument (PyPy)
            pickler = Pickler(0)
        pickler.fast = 1
        try:
            pickler.dump(error)
        except:
            msg = "Couldn't pickle storage exception: %s" % repr(error)
            self.log(msg, logging.ERROR)
            error = StorageServerError(msg)
        return error
        tid, oids = self.storage.undo(trans_id, self.transaction)
        self.invalidated.extend(oids)
        self.serials.extend(oids)

    # IStorageIteration support

@@ -771,6 +712,7 @@ class StorageServer:
                 invalidation_age=None,
                 transaction_timeout=None,
                 ssl=None,
                 client_conflict_resolution=False,
                 ):
        """StorageServer constructor.

@@ -841,15 +783,23 @@ class StorageServer:
        for name, storage in storages.items():
            self._setup_invq(name, storage)
            storage.registerDB(StorageServerDB(self, name))
            if client_conflict_resolution:
                # XXX this may go away later, when storages grow
                # configuration for this.
                storage.tryToResolveConflict = never_resolve_conflict
        self.invalidation_age = invalidation_age
        self.zeo_storages_by_storage_id = {} # {storage_id -> [ZEOStorage]}

        self.acceptor = Acceptor(self, addr, ssl)
        if isinstance(addr, tuple) and addr[0]:
            self.addr = self.acceptor.addr
        else:
            self.addr = addr
        self.loop = self.acceptor.loop
        ZODB.event.notify(Serving(self, address=self.acceptor.addr))
        self.client_conflict_resolution = client_conflict_resolution

        if addr is not None:
            self.acceptor = Acceptor(self, addr, ssl)
            if isinstance(addr, tuple) and addr[0]:
                self.addr = self.acceptor.addr
            else:
                self.addr = addr
            self.loop = self.acceptor.loop
            ZODB.event.notify(Serving(self, address=self.acceptor.addr))

        self.stats = {}
        self.timeouts = {}
        for name in self.storages.keys():

@@ -1383,7 +1333,7 @@ class Serving(ServerEvent):
class Closed(ServerEvent):
    pass

default_cert_authenticate = 'SIGNED'

def ssl_config(section):
    from .sslconfig import ssl_config
    return ssl_config(section, True)

def never_resolve_conflict(oid, committedSerial, oldSerial, newpickle,
                           committedData=b''):
    raise ConflictError(oid=oid, serials=(committedSerial, oldSerial),
                        data=newpickle)
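For orientation (not part of the diff): a minimal sketch of driving the new client_conflict_resolution switch through the in-process test helper this commit adds in src/ZEO/tests/utils.py, assuming ZODB's DemoStorage as the backend (the None passed for the test argument is just a placeholder):

from ZODB.DemoStorage import DemoStorage
from ZEO.tests.utils import StorageServer   # test helper added in this commit

# A server wired to hand unresolved conflicts back to the client instead of
# resolving them with the storage's own tryToResolveConflict.
server = StorageServer(None, DemoStorage(), client_conflict_resolution=True)
zs = server.zs   # ZEOStorage handler, driven directly as in the new tests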
src/ZEO/TransactionBuffer.py

@@ -46,7 +46,8 @@ class TransactionBuffer:
        # stored are builtin types -- strings or None.
        self.pickler = Pickler(self.file, 1)
        self.pickler.fast = 1
        self.serials = {} # processed { oid -> serial }
        self.server_resolved = set() # {oid}
        self.client_resolved = {} # {oid -> buffer_record_number}
        self.exception = None

    def close(self):

@@ -59,12 +60,17 @@ class TransactionBuffer:
        # Estimate per-record cache size
        self.size = self.size + (data and len(data) or 0) + 31

    def resolve(self, oid, data):
        """Record client-resolved data
        """
        self.store(oid, data)
        self.client_resolved[oid] = self.count - 1

    def serial(self, oid, serial):
        if isinstance(serial, Exception):
            self.exception = serial
            self.serials[oid] = None
        else:
            self.serials[oid] = serial
            self.exception = serial # This transaction will never be committed
        elif serial == ResolvedSerial:
            self.server_resolved.add(oid)

    def storeBlob(self, oid, blobfilename):
        self.blobs.append((oid, blobfilename))

@@ -72,7 +78,8 @@ class TransactionBuffer:
    def __iter__(self):
        self.file.seek(0)
        unpickler = Unpickler(self.file)
        serials = self.serials
        server_resolved = self.server_resolved
        client_resolved = self.client_resolved

        # Gaaaa, this is awkward. There can be entries in serials that
        # aren't in the buffer, because undo. Entries can be repeated

@@ -82,10 +89,11 @@ class TransactionBuffer:
        seen = set()
        for i in range(self.count):
            oid, data = unpickler.load()
            seen.add(oid)
            yield oid, data, serials.get(oid) == ResolvedSerial
            if client_resolved.get(oid, i) == i:
                seen.add(oid)
                yield oid, data, oid in server_resolved

        # We may have leftover serials because undo
        for oid, serial in serials.items():
        # We may have leftover oids because undo
        for oid in server_resolved:
            if oid not in seen:
                yield oid, None, serial == ResolvedSerial
                yield oid, None, True
src/ZEO/asyncio/base.py

from struct import unpack

import asyncio
import logging
import socket
import sys

from .marshal import encoder

logger = logging.getLogger(__name__)

INET_FAMILIES = socket.AF_INET, socket.AF_INET6

class Protocol(asyncio.Protocol):
    """asyncio low-level ZEO base interface
    """

@@ -41,7 +45,15 @@ class Protocol(asyncio.Protocol):
    def connection_made(self, transport):
        logger.info("Connected %s", self)

        if sys.version_info < (3, 6):
            sock = transport.get_extra_info('socket')
            if sock is not None and sock.family in INET_FAMILIES:
                # See https://bugs.python.org/issue27456 :(
                sock.setsockopt(
                    socket.IPPROTO_TCP, socket.TCP_NODELAY, True)

        self.transport = transport
        paused = self.paused
        output = self.output
        append = output.append
src/ZEO/asyncio/server.py

@@ -17,7 +17,7 @@ class ServerProtocol(base.Protocol):
    """asyncio low-level ZEO server interface
    """

    protocols = b'Z4', b'Z5'
    protocols = (b'Z5', )

    name = 'server protocol'
    methods = set(('register', ))

@@ -162,7 +162,7 @@ class Delay:
    def error(self, exc_info):
        self.sent = 'error'
        log("Error raised in delayed method", logging.ERROR, exc_info=exc_info)
        logger.error("Error raised in delayed method", exc_info=exc_info)
        self.protocol.send_error(self.msgid, exc_info[1])

    def __repr__(self):

@@ -199,7 +199,6 @@ class MTDelay(Delay):
    def error(self, exc_info):
        self.ready.wait()
        log("Error raised in delayed method", logging.ERROR, exc_info=exc_info)
        self.protocol.call_soon_threadsafe(Delay.error, self, exc_info)
src/ZEO/asyncio/testing.py

@@ -86,7 +86,7 @@ class Transport:
    capacity = 1 << 64
    paused = False
    extra = dict(peername='1.2.3.4', sockname=('127.0.0.1', 4200))
    extra = dict(peername='1.2.3.4', sockname=('127.0.0.1', 4200),
                 socket=None)

    def __init__(self, protocol):
        self.data = []
src/ZEO/asyncio/tests.py

@@ -750,7 +750,7 @@ class ServerTests(Base, setupstack.TestCase):
        self.target = protocol.zeo_storage
        if finish:
            self.assertEqual(self.pop(parse=False), best_protocol_version)
            protocol.data_received(sized(b'Z4'))
            protocol.data_received(sized(b'Z5'))
        return protocol

    message_id = 0

@@ -788,9 +788,9 @@ class ServerTests(Base, setupstack.TestCase):
        self.assertEqual(self.pop(parse=False), best_protocol_version)

        # The client sends it's protocol:
        protocol.data_received(sized(b'Z4'))
        protocol.data_received(sized(b'Z5'))

        self.assertEqual(protocol.protocol_version, b'Z4')
        self.assertEqual(protocol.protocol_version, b'Z5')

        protocol.zeo_storage.notify_connected.assert_called_once_with(protocol)
src/ZEO/cache.py

@@ -33,7 +33,7 @@ import time
import ZODB.fsIndex
import zc.lockfile
from ZODB.utils import p64, u64, z64
from ZODB.utils import p64, u64, z64, RLock
import six

from ._compat import PYPY

@@ -182,6 +182,8 @@ class ClientCache(object):
        # currentofs.
        self.currentofs = ZEC_HEADER_SIZE

        self._lock = RLock()

        # self.f is the open file object.
        # When we're not reusing an existing file, self.f is left None
        # here -- the scan() method must be called then to open the file

@@ -239,9 +241,10 @@ class ClientCache(object):
        return self

    def clear(self):
        self.f.seek(ZEC_HEADER_SIZE)
        self.f.truncate()
        self._initfile(ZEC_HEADER_SIZE)
        with self._lock:
            self.f.seek(ZEC_HEADER_SIZE)
            self.f.truncate()
            self._initfile(ZEC_HEADER_SIZE)

    ##
    # Scan the current contents of the cache file, calling `install`

@@ -451,26 +454,28 @@ class ClientCache(object):
    # new tid must be strictly greater than our current idea of the most
    # recent tid.

    def setLastTid(self, tid):
        if (not tid) or (tid == z64):
            return
        if (tid <= self.tid) and self._len:
            if tid == self.tid:
                return # Be a little forgiving
            raise ValueError("new last tid (%s) must be greater than "
                             "previous one (%s)"
                             % (u64(tid), u64(self.tid)))
        assert isinstance(tid, bytes) and len(tid) == 8, tid
        self.tid = tid
        self.f.seek(len(magic))
        self.f.write(tid)
        self.f.flush()
        with self._lock:
            if (not tid) or (tid == z64):
                return
            if (tid <= self.tid) and self._len:
                if tid == self.tid:
                    return # Be a little forgiving
                raise ValueError("new last tid (%s) must be greater than "
                                 "previous one (%s)"
                                 % (u64(tid), u64(self.tid)))
            assert isinstance(tid, bytes) and len(tid) == 8, tid
            self.tid = tid
            self.f.seek(len(magic))
            self.f.write(tid)
            self.f.flush()

    ##
    # Return the last transaction seen by the cache.
    # @return a transaction id
    # @defreturn string, or 8 nulls if no transaction is yet known

    def getLastTid(self):
        return self.tid
        with self._lock:
            return self.tid

    ##
    # Return the current data record for oid.

@@ -479,52 +484,54 @@ class ClientCache(object):
    # in the cache
    # @defreturn 3-tuple: (string, string, string)

    def load(self, oid, before_tid=None):
        ofs = self.current.get(oid)
        if ofs is None:
            self._trace(0x20, oid)
            return None
        self.f.seek(ofs)
        read = self.f.read
        status = read(1)
        assert status == b'a', (ofs, self.f.tell(), oid)
        size, saved_oid, tid, end_tid, lver, ldata = unpack(
            ">I8s8s8sHI", read(34))
        assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
        assert end_tid == z64, (ofs, self.f.tell(), oid, tid, end_tid)
        assert lver == 0, "Versions aren't supported"

        if before_tid and tid >= before_tid:
            return None

        data = read(ldata)
        assert len(data) == ldata, (ofs, self.f.tell(), oid, len(data), ldata)

        # WARNING: The following assert changes the file position.
        # We must not depend on this below or we'll fail in optimized mode.
        assert read(8) == oid, (ofs, self.f.tell(), oid)

        self._n_accesses += 1
        self._trace(0x22, oid, tid, end_tid, ldata)

        ofsofs = self.currentofs - ofs
        if ofsofs < 0:
            ofsofs += self.maxsize

        if (ofsofs > self.rearrange and
            self.maxsize > 10*len(data) and
            size > 4):

            # The record is far back and might get evicted, but it's
            # valuable, so move it forward.

            # Remove fromn old loc:
            del self.current[oid]
        with self._lock:
            ofs = self.current.get(oid)
            if ofs is None:
                self._trace(0x20, oid)
                return None
            self.f.seek(ofs)
            self.f.write(b'f' + pack(">I", size))
            read = self.f.read
            status = read(1)
            assert status == b'a', (ofs, self.f.tell(), oid)
            size, saved_oid, tid, end_tid, lver, ldata = unpack(
                ">I8s8s8sHI", read(34))
            assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
            assert end_tid == z64, (ofs, self.f.tell(), oid, tid, end_tid)
            assert lver == 0, "Versions aren't supported"

            if before_tid and tid >= before_tid:
                return None

            data = read(ldata)
            assert len(data) == ldata, (ofs, self.f.tell(), oid, len(data), ldata)

            # WARNING: The following assert changes the file position.
            # We must not depend on this below or we'll fail in optimized mode.
            assert read(8) == oid, (ofs, self.f.tell(), oid)

            self._n_accesses += 1
            self._trace(0x22, oid, tid, end_tid, ldata)

            # Write to new location:
            self._store(oid, tid, None, data, size)
            ofsofs = self.currentofs - ofs
            if ofsofs < 0:
                ofsofs += self.maxsize

        return data, tid
            if (ofsofs > self.rearrange and
                self.maxsize > 10*len(data) and
                size > 4):

                # The record is far back and might get evicted, but it's
                # valuable, so move it forward.

                # Remove fromn old loc:
                del self.current[oid]
                self.f.seek(ofs)
                self.f.write(b'f' + pack(">I", size))

                # Write to new location:
                self._store(oid, tid, None, data, size)

            return data, tid

    ##
    # Return a non-current revision of oid that was current before tid.

@@ -533,54 +540,56 @@ class ClientCache(object):
    # @return data record, serial number, start tid, and end tid
    # @defreturn 4-tuple: (string, string, string, string)

    def loadBefore(self, oid, before_tid):
        noncurrent_for_oid = self.noncurrent.get(u64(oid))
        if noncurrent_for_oid is None:
            result = self.load(oid, before_tid)
            if result:
                return result[0], result[1], None
            else:
                self._trace(0x24, oid, "", before_tid)
                return result

        items = noncurrent_for_oid.items(None, u64(before_tid)-1)
        if not items:
            result = self.load(oid, before_tid)
            if result:
                return result[0], result[1], None
            else:
                self._trace(0x24, oid, "", before_tid)
                return result
        with self._lock:
            noncurrent_for_oid = self.noncurrent.get(u64(oid))
            if noncurrent_for_oid is None:
                result = self.load(oid, before_tid)
                if result:
                    return result[0], result[1], None
                else:
                    self._trace(0x24, oid, "", before_tid)
                    return result

            items = noncurrent_for_oid.items(None, u64(before_tid)-1)
            if not items:
                result = self.load(oid, before_tid)
                if result:
                    return result[0], result[1], None
                else:
                    self._trace(0x24, oid, "", before_tid)
                    return result

        tid, ofs = items[-1]
            tid, ofs = items[-1]

        self.f.seek(ofs)
        read = self.f.read
        status = read(1)
        assert status == b'a', (ofs, self.f.tell(), oid, before_tid)
        size, saved_oid, saved_tid, end_tid, lver, ldata = unpack(
            ">I8s8s8sHI", read(34))
        assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
        assert saved_tid == p64(tid), (ofs, self.f.tell(), oid, saved_tid, tid)
        assert end_tid != z64, (ofs, self.f.tell(), oid)
        assert lver == 0, "Versions aren't supported"

        data = read(ldata)
        assert len(data) == ldata, (ofs, self.f.tell())

        # WARNING: The following assert changes the file position.
        # We must not depend on this below or we'll fail in optimized mode.
        assert read(8) == oid, (ofs, self.f.tell(), oid)

        if end_tid < before_tid:
            result = self.load(oid, before_tid)
            if result:
                return result[0], result[1], None
            else:
                self._trace(0x24, oid, "", before_tid)
                return result

            self.f.seek(ofs)
            read = self.f.read
            status = read(1)
            assert status == b'a', (ofs, self.f.tell(), oid, before_tid)
            size, saved_oid, saved_tid, end_tid, lver, ldata = unpack(
                ">I8s8s8sHI", read(34))
            assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
            assert saved_tid == p64(tid), (ofs, self.f.tell(), oid, saved_tid, tid)
            assert end_tid != z64, (ofs, self.f.tell(), oid)
            assert lver == 0, "Versions aren't supported"

            data = read(ldata)
            assert len(data) == ldata, (ofs, self.f.tell())

            # WARNING: The following assert changes the file position.
            # We must not depend on this below or we'll fail in optimized mode.
            assert read(8) == oid, (ofs, self.f.tell(), oid)

            if end_tid < before_tid:
                result = self.load(oid, before_tid)
                if result:
                    return result[0], result[1], None
                else:
                    self._trace(0x24, oid, "", before_tid)
                    return result

        self._n_accesses += 1
        self._trace(0x26, oid, "", saved_tid)
        return data, saved_tid, end_tid
            self._n_accesses += 1
            self._trace(0x26, oid, "", saved_tid)
            return data, saved_tid, end_tid

    ##
    # Store a new data record in the cache.

@@ -591,45 +600,48 @@ class ClientCache(object):
    # current.
    # @param data the actual data

    def store(self, oid, start_tid, end_tid, data):
        seek = self.f.seek
        if end_tid is None:
            ofs = self.current.get(oid)
            if ofs:
                seek(ofs)
                read = self.f.read
                status = read(1)
                assert status == b'a', (ofs, self.f.tell(), oid)
                size, saved_oid, saved_tid, end_tid = unpack(
                    ">I8s8s8s", read(28))
                assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
                assert end_tid == z64, (ofs, self.f.tell(), oid)
                if saved_tid == start_tid:
        with self._lock:
            seek = self.f.seek
            if end_tid is None:
                ofs = self.current.get(oid)
                if ofs:
                    seek(ofs)
                    read = self.f.read
                    status = read(1)
                    assert status == b'a', (ofs, self.f.tell(), oid)
                    size, saved_oid, saved_tid, end_tid = unpack(
                        ">I8s8s8s", read(28))
                    assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
                    assert end_tid == z64, (ofs, self.f.tell(), oid)
                    if saved_tid == start_tid:
                        return
                    raise ValueError("already have current data for oid")
                else:
                    noncurrent_for_oid = self.noncurrent.get(u64(oid))
                    if noncurrent_for_oid and (u64(start_tid) in noncurrent_for_oid):
                        return
                    return
                raise ValueError("already have current data for oid")
            else:
                noncurrent_for_oid = self.noncurrent.get(u64(oid))
                if noncurrent_for_oid and (u64(start_tid) in noncurrent_for_oid):
                    return

        size = allocated_record_overhead + len(data)
            size = allocated_record_overhead + len(data)

        # A number of cache simulation experiments all concluded that the
        # 2nd-level ZEO cache got a much higher hit rate if "very large"
        # objects simply weren't cached.  For now, we ignore the request
        # only if the entire cache file is too small to hold the object.
        if size >= min(max_block_size, self.maxsize - ZEC_HEADER_SIZE):
            return
            # A number of cache simulation experiments all concluded that the
            # 2nd-level ZEO cache got a much higher hit rate if "very large"
            # objects simply weren't cached.  For now, we ignore the request
            # only if the entire cache file is too small to hold the object.
            if size >= min(max_block_size, self.maxsize - ZEC_HEADER_SIZE):
                return

        self._n_adds += 1
        self._n_added_bytes += size
        self._len += 1
            self._n_adds += 1
            self._n_added_bytes += size
            self._len += 1

        self._store(oid, start_tid, end_tid, data, size)
            self._store(oid, start_tid, end_tid, data, size)

        if end_tid:
            self._trace(0x54, oid, start_tid, end_tid, dlen=len(data))
        else:
            self._trace(0x52, oid, start_tid, dlen=len(data))
            if end_tid:
                self._trace(0x54, oid, start_tid, end_tid, dlen=len(data))
            else:
                self._trace(0x52, oid, start_tid, dlen=len(data))

    def _store(self, oid, start_tid, end_tid, data, size):
        # Low-level store used by store and load

@@ -696,35 +708,37 @@ class ClientCache(object):
    # - tid the id of the transaction that wrote a new revision of oid,
    #       or None to forget all cached info about oid.

    def invalidate(self, oid, tid):
        ofs = self.current.get(oid)
        if ofs is None:
            # 0x10 == invalidate (miss)
            self._trace(0x10, oid, tid)
            return
        with self._lock:
            ofs = self.current.get(oid)
            if ofs is None:
                # 0x10 == invalidate (miss)
                self._trace(0x10, oid, tid)
                return

        self.f.seek(ofs)
        read = self.f.read
        status = read(1)
        assert status == b'a', (ofs, self.f.tell(), oid)
        size, saved_oid, saved_tid, end_tid = unpack(">I8s8s8s", read(28))
        assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
        assert end_tid == z64, (ofs, self.f.tell(), oid)
        del self.current[oid]
        if tid is None:
            self.f.seek(ofs)
            self.f.write(b'f' + pack(">I", size))
            # 0x1E = invalidate (hit, discarding current or non-current)
            self._trace(0x1E, oid, tid)
            self._len -= 1
        else:
            if tid == saved_tid:
                logger.warning("Ignoring invalidation with same tid as current")
                return
            self.f.seek(ofs+21)
            self.f.write(tid)
            self._set_noncurrent(oid, saved_tid, ofs)
            # 0x1C = invalidate (hit, saving non-current)
            self._trace(0x1C, oid, tid)
            read = self.f.read
            status = read(1)
            assert status == b'a', (ofs, self.f.tell(), oid)
            size, saved_oid, saved_tid, end_tid = unpack(">I8s8s8s", read(28))
            assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
            assert end_tid == z64, (ofs, self.f.tell(), oid)
            del self.current[oid]
            if tid is None:
                self.f.seek(ofs)
                self.f.write(b'f' + pack(">I", size))
                # 0x1E = invalidate (hit, discarding current or non-current)
                self._trace(0x1E, oid, tid)
                self._len -= 1
            else:
                if tid == saved_tid:
                    logger.warning("Ignoring invalidation with same tid as current")
                    return
                self.f.seek(ofs+21)
                self.f.write(tid)
                self._set_noncurrent(oid, saved_tid, ofs)
                # 0x1C = invalidate (hit, saving non-current)
                self._trace(0x1C, oid, tid)

    ##
    # Generates (oid, serial) oairs for all objects in the
src/ZEO/interfaces.py

@@ -24,8 +24,7 @@ class StaleCache(object):
class IClientCache(zope.interface.Interface):
    """Client cache interface.

    Note that caches need not be thread safe, fpr the most part,
    except for getLastTid, which may be called from multiple threads.
    Note that caches need to be thread safe.
    """

    def close():
src/ZEO/runzeo.py

@@ -98,6 +98,9 @@ class ZEOOptionsMixin:
        self.add("address", "zeo.address.address",
                 required="no server address specified; use -a or -C")
        self.add("read_only", "zeo.read_only", default=0)
        self.add("client_conflict_resolution",
                 "zeo.client_conflict_resolution",
                 default=0)
        self.add("invalidation_queue_size", "zeo.invalidation_queue_size",
                 default=100)
        self.add("invalidation_age", "zeo.invalidation_age")

@@ -339,6 +342,7 @@ def create_server(storages, options):
        options.address,
        storages,
        read_only = options.read_only,
        client_conflict_resolution=options.client_conflict_resolution,
        invalidation_queue_size = options.invalidation_queue_size,
        invalidation_age = options.invalidation_age,
        transaction_timeout = options.transaction_timeout,
src/ZEO/server.xml

@@ -107,6 +107,14 @@
      <metadefault>$INSTANCE/var/ZEO.pid (or $clienthome/ZEO.pid)</metadefault>
    </key>

    <key name="client-conflict-resolution" datatype="boolean"
         required="no" default="false">
      <description>
        Flag indicating whether the server should return conflict
        errors to the client, for resolution there.
      </description>
    </key>

  </sectiontype>

</component>
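For orientation (not part of the diff): the new client-conflict-resolution key can be exercised end to end the way the new test module does, by passing it through zeo_conf when starting a test server. A minimal sketch, mirroring src/ZEO/tests/test_client_side_conflict_resolution.py:

import ZEO

addr, stop = ZEO.server(
    storage_conf='<mappingstorage>\n</mappingstorage>\n',
    zeo_conf=dict(client_conflict_resolution=True),
    )
db = ZEO.DB(addr)   # clients of this server resolve conflicts locally
db.close(); stop()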
src/ZEO/tests/CommitLockTests.py

@@ -30,6 +30,8 @@ class DummyDB:
    def invalidate(self, *args, **kwargs):
        pass

    transform_record_data = untransform_record_data = lambda self, data: data

class WorkerThread(TestThread):

    # run the entire test in a thread so that the blocking call for
src/ZEO/tests/ConnectionTests.py

@@ -59,6 +59,9 @@ class DummyDB:
    def invalidateCache(self):
        pass

    transform_record_data = untransform_record_data = lambda self, data: data

class CommonSetupTearDown(StorageTestBase):
    """Common boilerplate"""

@@ -1018,90 +1021,6 @@ class TimeoutTests(CommonSetupTearDown):
        # or the server.
        self.assertRaises(KeyError, storage.load, oid, '')

    def checkTimeoutProvokingConflicts(self):
        self._storage = storage = self.openClientStorage()
        # Assert that the zeo cache is empty.
        self.assert_(not list(storage._cache.contents()))
        # Create the object
        oid = storage.new_oid()
        obj = MinPO(7)
        # We need to successfully commit an object now so we have something to
        # conflict about.
        t = Transaction()
        storage.tpc_begin(t)
        revid1a = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
        revid1b = storage.tpc_vote(t)
        revid1 = handle_serials(oid, revid1a, revid1b)
        storage.tpc_finish(t)
        # Now do a store, sleeping before the finish so as to cause a timeout.
        obj.value = 8
        t = Transaction()
        old_connection_count = storage.connection_count_for_tests
        storage.tpc_begin(t)
        revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
        revid2b = storage.tpc_vote(t)
        revid2 = handle_serials(oid, revid2a, revid2b)

        # Now sleep long enough for the storage to time out.
        # This used to sleep for 3 seconds, and sometimes (but very rarely)
        # failed then.  Now we try for a minute.  It typically succeeds
        # on the second time thru the loop, and, since self.timeout is 1,
        # it's typically faster now (2/1.8 ~= 1.11 seconds sleeping instead
        # of 3).
        deadline = time.time() + 60 # wait up to a minute
        while time.time() < deadline:
            if (storage.is_connected() and
                (storage.connection_count_for_tests == old_connection_count)
                ):
                time.sleep(self.timeout / 1.8)
            else:
                break
        self.assert_(
            (not storage.is_connected())
            or
            (storage.connection_count_for_tests > old_connection_count)
            )
        storage._wait()
        self.assert_(storage.is_connected())
        # We expect finish to fail.
        self.assertRaises(ClientDisconnected, storage.tpc_finish, t)
        storage.tpc_abort(t)

        # Now we think we've committed the second transaction, but we really
        # haven't.  A third one should produce a POSKeyError on the server,
        # which manifests as a ConflictError on the client.
        obj.value = 9
        t = Transaction()
        storage.tpc_begin(t)
        storage.store(oid, revid2, zodb_pickle(obj), '', t)
        self.assertRaises(ConflictError, storage.tpc_vote, t)

        # Even aborting won't help.
        storage.tpc_abort(t)
        self.assertRaises(ZODB.POSException.StorageTransactionError,
                          storage.tpc_finish, t)

        # Try again.
        obj.value = 10
        t = Transaction()
        storage.tpc_begin(t)
        storage.store(oid, revid2, zodb_pickle(obj), '', t)
        # Even aborting won't help.
        self.assertRaises(ConflictError, storage.tpc_vote, t)

        # Abort this one and try a transaction that should succeed.
        storage.tpc_abort(t)

        # Now do a store.
        obj.value = 11
        t = Transaction()
        storage.tpc_begin(t)
        revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
        revid2b = storage.tpc_vote(t)
        revid2 = handle_serials(oid, revid2a, revid2b)
        storage.tpc_finish(t)

        # Now load the object and verify that it has a value of 11.
        data, revid = storage.load(oid, '')
        self.assertEqual(zodb_unpickle(data), MinPO(11))
        self.assertEqual(revid, revid2)

class MSTThread(threading.Thread):

    __super_init = threading.Thread.__init__
src/ZEO/tests/InvalidationTests.py

@@ -324,8 +324,8 @@ class InvalidationTests:
    def checkConcurrentUpdates2Storages_emulated(self):
        self._storage = storage1 = self.openClientStorage()
        storage2 = self.openClientStorage()
        db1 = DB(storage1)
        storage2 = self.openClientStorage()
        db2 = DB(storage2)

        cn = db1.open()

@@ -349,8 +349,8 @@ class InvalidationTests:
    def checkConcurrentUpdates2Storages(self):
        self._storage = storage1 = self.openClientStorage()
        storage2 = self.openClientStorage()
        db1 = DB(storage1)
        storage2 = self.openClientStorage()
        db2 = DB(storage2)
        stop = threading.Event()
src/ZEO/tests/forker.py

@@ -33,7 +33,7 @@ logger = logging.getLogger('ZEO.tests.forker')
class ZEOConfig:
    """Class to generate ZEO configuration file. """

    def __init__(self, addr):
    def __init__(self, addr, **options):
        if isinstance(addr, str):
            self.logpath = addr + '.log'
        else:

@@ -42,6 +42,7 @@ class ZEOConfig:
        self.address = addr
        self.read_only = None
        self.loglevel = 'INFO'
        self.__dict__.update(options)

    def dump(self, f):
        print("<zeo>", file=f)

@@ -52,7 +53,7 @@ class ZEOConfig:
        for name in (
            'invalidation_queue_size', 'invalidation_age',
            'transaction_timeout', 'pid_filename',
            'ssl_certificate', 'ssl_key',
            'ssl_certificate', 'ssl_key', 'client_conflict_resolution',
            ):
            v = getattr(self, name, None)
            if v:

@@ -95,6 +96,10 @@ def runner(config, qin, qout, timeout=None,
        import ZEO.asyncio.server
        old_protocol = ZEO.asyncio.server.best_protocol_version
        ZEO.asyncio.server.best_protocol_version = protocol
        old_protocols = ZEO.asyncio.server.ServerProtocol.protocols
        ZEO.asyncio.server.ServerProtocol.protocols = tuple(sorted(
            set(old_protocols) | set([protocol])
            ))

    try:
        import ZEO.runzeo, threading

@@ -142,8 +147,8 @@ def runner(config, qin, qout, timeout=None,
    finally:
        if old_protocol:
            ZEO.asyncio.server.best_protocol_version = protocol
            ZEO.asyncio.server.best_protocol_version = old_protocol
            ZEO.asyncio.server.ServerProtocol.protocols = old_protocols

def stop_runner(thread, config, qin, qout, stop_timeout=9, pid=None):
    qin.put('stop')

@@ -155,7 +160,7 @@ def stop_runner(thread, config, qin, qout, stop_timeout=9, pid=None):
        # The runner thread didn't stop. If it was a process,
        # give it some time to exit
        if hasattr(thread, 'pid') and thread.pid:
            os.waitpid(thread.pid)
            os.waitpid(thread.pid, 0)
        else:
            # Gaaaa, force gc in hopes of maybe getting the unclosed
            # sockets to get GCed
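For orientation (not part of the diff): ZEOConfig now forwards arbitrary keyword options into the generated configuration, and client_conflict_resolution is one of the names dump() iterates over. A minimal sketch (the zeo.conf file name is a placeholder):

from ZEO.tests import forker

config = forker.ZEOConfig(('', 0), client_conflict_resolution=True)
with open('zeo.conf', 'w') as f:    # placeholder file name
    config.dump(f)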
src/ZEO/tests/protocols.test

@@ -5,7 +5,7 @@ A full test of all protocols isn't practical.  But we'll do a limited
test that at least the current and previous protocols are supported in
both directions.

Let's start a Z309 server
Let's start a Z4 server

    >>> storage_conf = '''
    ...     <blobstorage>

@@ -94,82 +94,85 @@ A current client should be able to connect to a old server:

    >>> zope.testing.setupstack.rmtree('blobs')
    >>> zope.testing.setupstack.rmtree('server-blobs')

And the other way around:

#############################################################################
# Note that the ZEO 5.0 server only supports clients that use the Z5 protocol

    >>> addr, _ = start_server(storage_conf, dict(invalidation_queue_size=5))

# And the other way around:

Note that we'll have to pull some hijinks:

# >>> addr, _ = start_server(storage_conf, dict(invalidation_queue_size=5))

    >>> import ZEO.asyncio.client
    >>> old_protocols = ZEO.asyncio.client.Protocol.protocols
    >>> ZEO.asyncio.client.Protocol.protocols = [b'Z4']

# Note that we'll have to pull some hijinks:

    >>> db = ZEO.DB(addr, client='client', blob_dir='blobs')
    >>> db.storage.protocol_version
    b'Z4'
    >>> wait_connected(db.storage)
    >>> conn = db.open()
    >>> conn.root().x = 0
    >>> transaction.commit()
    >>> len(db.history(conn.root()._p_oid, 99))
    2

# >>> import ZEO.asyncio.client
# >>> old_protocols = ZEO.asyncio.client.Protocol.protocols
# >>> ZEO.asyncio.client.Protocol.protocols = [b'Z4']

    >>> conn.root()['blob1'] = ZODB.blob.Blob()
    >>> with conn.root()['blob1'].open('w') as f:
    ...     r = f.write(b'blob data 1')
    >>> transaction.commit()

# >>> db = ZEO.DB(addr, client='client', blob_dir='blobs')
# >>> db.storage.protocol_version
# b'Z4'
# >>> wait_connected(db.storage)
# >>> conn = db.open()
# >>> conn.root().x = 0
# >>> transaction.commit()
# >>> len(db.history(conn.root()._p_oid, 99))
# 2

    >>> db2 = ZEO.DB(addr, blob_dir='server-blobs', shared_blob_dir=True)
    >>> wait_connected(db2.storage)
    >>> conn2 = db2.open()
    >>> for i in range(5):
    ...     conn2.root().x += 1
    ...     transaction.commit()
    >>> conn2.root()['blob2'] = ZODB.blob.Blob()
    >>> with conn2.root()['blob2'].open('w') as f:
    ...     r = f.write(b'blob data 2')
    >>> transaction.commit()

# >>> conn.root()['blob1'] = ZODB.blob.Blob()
# >>> with conn.root()['blob1'].open('w') as f:
# ...     r = f.write(b'blob data 1')
# >>> transaction.commit()

# >>> db2 = ZEO.DB(addr, blob_dir='server-blobs', shared_blob_dir=True)
# >>> wait_connected(db2.storage)
# >>> conn2 = db2.open()
# >>> for i in range(5):
# ...     conn2.root().x += 1
# ...     transaction.commit()
# >>> conn2.root()['blob2'] = ZODB.blob.Blob()
# >>> with conn2.root()['blob2'].open('w') as f:
# ...     r = f.write(b'blob data 2')
# >>> transaction.commit()

    >>> @wait_until()
    ... def x_to_be_5():
    ...     conn.sync()
    ...     return conn.root().x == 5

    >>> db.close()

# >>> @wait_until()
# ... def x_to_be_5():
# ...     conn.sync()
# ...     return conn.root().x == 5

    >>> for i in range(2):
    ...     conn2.root().x += 1
    ...     transaction.commit()

# >>> db.close()

    >>> db = ZEO.DB(addr, client='client', blob_dir='blobs')
    >>> wait_connected(db.storage)
    >>> conn = db.open()
    >>> conn.root().x
    7

# >>> for i in range(2):
# ...     conn2.root().x += 1
# ...     transaction.commit()

    >>> db.close()

# >>> db = ZEO.DB(addr, client='client', blob_dir='blobs')
# >>> wait_connected(db.storage)
# >>> conn = db.open()
# >>> conn.root().x
# 7

    >>> for i in range(10):
    ...     conn2.root().x += 1
    ...     transaction.commit()

# >>> db.close()

    >>> db = ZEO.DB(addr, client='client', blob_dir='blobs')
    >>> wait_connected(db.storage)
    >>> conn = db.open()
    >>> conn.root().x
    17

# >>> for i in range(10):
# ...     conn2.root().x += 1
# ...     transaction.commit()

    >>> with conn.root()['blob1'].open() as f:
    ...     f.read()
    b'blob data 1'
    >>> with conn.root()['blob2'].open() as f:
    ...     f.read()
    b'blob data 2'

# >>> db = ZEO.DB(addr, client='client', blob_dir='blobs')
# >>> wait_connected(db.storage)
# >>> conn = db.open()
# >>> conn.root().x
# 17

    >>> db2.close()
    >>> db.close()

# >>> with conn.root()['blob1'].open() as f:
# ...     f.read()
# b'blob data 1'
# >>> with conn.root()['blob2'].open() as f:
# ...     f.read()
# b'blob data 2'
# >>> db2.close()
# >>> db.close()

Undo the hijinks:
# Undo the hijinks:

    >>> ZEO.asyncio.client.Protocol.protocols = old_protocols
# >>> ZEO.asyncio.client.Protocol.protocols = old_protocols
src/ZEO/tests/testConversionSupport.py

@@ -52,6 +52,8 @@ class FakeServer:
    def register_connection(*args):
        return None, None

    client_conflict_resolution = False

class FakeConnection:
    protocol_version = b'Z4'
    addr = 'test'
src/ZEO/tests/testZEO.py

@@ -143,23 +143,9 @@ class MiscZEOTests:
        self.assertNotEquals(ZODB.utils.z64, storage3.lastTransaction())
        storage3.close()

class GenericTests(
class GenericTestBase(
    # Base class for all ZODB tests
    StorageTestBase.StorageTestBase,
    # ZODB test mixin classes (in the same order as imported)
    BasicStorage.BasicStorage,
    PackableStorage.PackableStorage,
    Synchronization.SynchronizedStorage,
    MTStorage.MTStorage,
    ReadOnlyStorage.ReadOnlyStorage,
    # ZEO test mixin classes (in the same order as imported)
    CommitLockTests.CommitLockVoteTests,
    ThreadTests.ThreadTests,
    # Locally defined (see above)
    MiscZEOTests,
    ):
    """Combine tests from various origins in one class."""
    StorageTestBase.StorageTestBase):

    shared_blob_dir = False
    blob_cache_dir = None

@@ -200,14 +186,23 @@ class GenericTests(
            stop()
        StorageTestBase.StorageTestBase.tearDown(self)

    def runTest(self):
        try:
            super(GenericTests, self).runTest()
        except:
            self._failed = True
            raise
        else:
            self._failed = False

class GenericTests(
    GenericTestBase,
    # ZODB test mixin classes (in the same order as imported)
    BasicStorage.BasicStorage,
    PackableStorage.PackableStorage,
    Synchronization.SynchronizedStorage,
    MTStorage.MTStorage,
    ReadOnlyStorage.ReadOnlyStorage,
    # ZEO test mixin classes (in the same order as imported)
    CommitLockTests.CommitLockVoteTests,
    ThreadTests.ThreadTests,
    # Locally defined (see above)
    MiscZEOTests,
    ):
    """Combine tests from various origins in one class.
    """

    def open(self, read_only=0):
        # Needed to support ReadOnlyStorage tests.  Ought to be a

@@ -394,7 +389,16 @@ class FileStorageClientHexTests(FileStorageHexTests):
    def _wrap_client(self, client):
        return ZODB.tests.hexstorage.HexStorage(client)

class ClientConflictResolutionTests(
    GenericTestBase,
    ConflictResolution.ConflictResolvingStorage,
    ):

    def getConfig(self):
        return '<mappingstorage>\n</mappingstorage>\n'

    def getZEOConfig(self):
        return forker.ZEOConfig(('', 0), client_conflict_resolution=True)

class MappingStorageTests(GenericTests):
    """ZEO backed by a Mapping storage."""

@@ -492,6 +496,8 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
            self._invalidatedCache += 1
        def invalidate(*a, **k):
            pass
        transform_record_data = untransform_record_data = \
            lambda self, data: data

        db = DummyDB()
        storage.registerDB(db)

@@ -753,24 +759,23 @@ class StorageServerWrapper:
        self.server.tpc_begin(id(transaction), '', '', {}, None, ' ')

    def tpc_vote(self, transaction):
        vote_result = self.server.vote(id(transaction))
        assert vote_result is None
        result = self.server.connection.serials[:]
        result = self.server.vote(id(transaction))
        assert result == self.server.connection.serials[:]
        del self.server.connection.serials[:]
        return result

    def store(self, oid, serial, data, version_ignored, transaction):
        self.server.storea(oid, serial, data, id(transaction))

    def send_reply(self, *args):        # Masquerade as conn
        pass
    def send_reply(self, _, result):        # Masquerade as conn
        self._result = result

    def tpc_abort(self, transaction):
        self.server.tpc_abort(id(transaction))

    def tpc_finish(self, transaction, func = lambda: None):
        self.server.tpc_finish(id(transaction)).set_sender(0, self)
        return self._result

def multiple_storages_invalidation_queue_is_not_insane():
    """

@@ -937,7 +942,7 @@ def tpc_finish_error():
    buffer, sadly, using implementation details:

    >>> tbuf = t.data(client)
    >>> tbuf.serials = None
    >>> tbuf.client_resolved = None

    tpc_finish will fail:

@@ -1596,6 +1601,7 @@ def test_suite():
                       "ClientDisconnected"),
                )),
            ))
    zeo.addTest(unittest.makeSuite(ClientConflictResolutionTests, 'check'))
    zeo.layer = ZODB.tests.util.MininalTestLayer('testZeo-misc')
    suite.addTest(zeo)
...
src/ZEO/tests/testZEO2.py
View file @
e8bec203
...
...
@@ -78,6 +78,8 @@ will conflict. It will be blocked at the vote call.
>>> class Sender:
... def send_reply(self, id, reply):
... print('reply', id, reply)
... def send_error(self, id, err):
... print('error', id, err)
>>> delay.set_sender(1, Sender())
>>> logger = logging.getLogger('ZEO')
...
...
@@ -87,13 +89,20 @@ will conflict. It will be blocked at the vote call.
Now, when we abort the transaction for the first client. The second
client will be restarted. It will get a conflict error, that is
handled correctly
:
raised to the client
:
>>> zs1.tpc_abort('0') # doctest: +ELLIPSIS
reply 1 None
Error raised in delayed method
Traceback (most recent call last):
...
ZODB.POSException.ConflictError: ...
error 1 database conflict error ...
The transaction is aborted by the server:
>>> fs.tpc_transaction() is
not
None
>>> fs.tpc_transaction() is None
True
>>> zs2.connected
True
...
...
@@ -116,7 +125,7 @@ And an initial client.
>>> zs1 = ZEO.tests.servertesting.client(server, 1)
>>> zs1.tpc_begin('0', '', '', {})
>>> zs1.storea(ZODB.utils.p64(99), ZODB.utils.z64, 'x', '0')
>>> zs1.storea(ZODB.utils.p64(99), ZODB.utils.z64,
b
'x', '0')
Intentionally break zs1:
...
...
@@ -135,7 +144,7 @@ We can start another client and get the storage lock.
>>> zs1 = ZEO.tests.servertesting.client(server, 1)
>>> zs1.tpc_begin('1', '', '', {})
>>> zs1.storea(ZODB.utils.p64(99), ZODB.utils.z64, 'x', '1')
>>> zs1.storea(ZODB.utils.p64(99), ZODB.utils.z64,
b
'x', '1')
>>> _ = zs1.vote('1') # doctest: +ELLIPSIS
>>> zs1.tpc_finish('1').set_sender(0, zs1.connection)
...
...
@@ -220,7 +229,7 @@ We start a transaction and vote, this leads to getting the lock.
ZEO.asyncio.server INFO
received handshake b'Z5'
>>> tid1 = start_trans(zs1)
>>> zs1.vote(tid1) # doctest: +ELLIPSIS
>>>
resolved1 =
zs1.vote(tid1) # doctest: +ELLIPSIS
ZEO.StorageServer DEBUG
(test-addr-1) ('1') lock: transactions waiting: 0
ZEO.StorageServer BLATHER
...
...
@@ -477,7 +486,7 @@ ZEOStorage as closed and see if trying to get a lock cleans it up:
ZEO.asyncio.server INFO
received handshake b'Z5'
>>> tid1 = start_trans(zs1)
>>> zs1.vote(tid1) # doctest: +ELLIPSIS
>>>
resolved1 =
zs1.vote(tid1) # doctest: +ELLIPSIS
ZEO.StorageServer DEBUG
(test-addr-1) ('1') lock: transactions waiting: 0
ZEO.StorageServer BLATHER
...
...
@@ -493,7 +502,7 @@ ZEOStorage as closed and see if trying to get a lock cleans it up:
ZEO.asyncio.server INFO
received handshake b'Z5'
>>> tid2 = start_trans(zs2)
>>> zs2.vote(tid2) # doctest: +ELLIPSIS
>>>
resolved2 =
zs2.vote(tid2) # doctest: +ELLIPSIS
ZEO.StorageServer DEBUG
(test-addr-2) ('1') lock: transactions waiting: 0
ZEO.StorageServer BLATHER
...
...
src/ZEO/tests/test_client_side_conflict_resolution.py  (new file, mode 100644)

import unittest
import zope.testing.setupstack

from BTrees.Length import Length
from ZODB import serialize
from ZODB.DemoStorage import DemoStorage
from ZODB.utils import p64, z64, maxtid
from ZODB.broken import find_global

import ZEO

from .utils import StorageServer

class Var(object):
    def __eq__(self, other):
        self.value = other
        return True

class ClientSideConflictResolutionTests(zope.testing.setupstack.TestCase):

    def test_server_side(self):
        # First, verify default conflict resolution.
        server = StorageServer(self, DemoStorage())
        zs = server.zs

        reader = serialize.ObjectReader(
            factory=lambda conn, *args: find_global(*args))
        writer = serialize.ObjectWriter()
        ob = Length(0)
        ob._p_oid = z64

        # 2 non-conflicting transactions:

        zs.tpc_begin(1, '', '', {})
        zs.storea(ob._p_oid, z64, writer.serialize(ob), 1)
        self.assertEqual(zs.vote(1), [])
        tid1 = server.unpack_result(zs.tpc_finish(1))
        server.assert_calls(self, ('info', {'length': 1, 'size': Var()}))

        ob.change(1)
        zs.tpc_begin(2, '', '', {})
        zs.storea(ob._p_oid, tid1, writer.serialize(ob), 2)
        self.assertEqual(zs.vote(2), [])
        tid2 = server.unpack_result(zs.tpc_finish(2))
        server.assert_calls(self, ('info', {'size': Var(), 'length': 1}))

        # Now, a cnflicting one:
        zs.tpc_begin(3, '', '', {})
        zs.storea(ob._p_oid, tid1, writer.serialize(ob), 3)

        # Vote returns the object id, indicating that a conflict was resolved.
        self.assertEqual(zs.vote(3), [ob._p_oid])
        tid3 = server.unpack_result(zs.tpc_finish(3))

        p, serial, next_serial = zs.loadBefore(ob._p_oid, maxtid)
        self.assertEqual((serial, next_serial), (tid3, None))
        self.assertEqual(reader.getClassName(p), 'BTrees.Length.Length')
        self.assertEqual(reader.getState(p), 2)

        # Now, we'll create a server that expects the client to
        # resolve conflicts:

        server = StorageServer(
            self, DemoStorage(), client_conflict_resolution=True)
        zs = server.zs

        # 2 non-conflicting transactions:

        zs.tpc_begin(1, '', '', {})
        zs.storea(ob._p_oid, z64, writer.serialize(ob), 1)
        self.assertEqual(zs.vote(1), [])
        tid1 = server.unpack_result(zs.tpc_finish(1))
        server.assert_calls(self, ('info', {'size': Var(), 'length': 1}))

        ob.change(1)
        zs.tpc_begin(2, '', '', {})
        zs.storea(ob._p_oid, tid1, writer.serialize(ob), 2)
        self.assertEqual(zs.vote(2), [])
        tid2 = server.unpack_result(zs.tpc_finish(2))
        server.assert_calls(self, ('info', {'length': 1, 'size': Var()}))

        # Now, a conflicting one:
        zs.tpc_begin(3, '', '', {})
        zs.storea(ob._p_oid, tid1, writer.serialize(ob), 3)

        # Vote returns an object, indicating that a conflict was not resolved.
        self.assertEqual(
            zs.vote(3),
            [dict(oid=ob._p_oid,
                  serials=(tid2, tid1),
                  data=writer.serialize(ob),
                  )],
            )

        # Now, it's up to the client to resolve the conflict. It can
        # do this by making another store call. In this call, we use
        # tid2 as the starting tid:
        ob.change(1)
        zs.storea(ob._p_oid, tid2, writer.serialize(ob), 3)
        self.assertEqual(zs.vote(3), [])
        tid3 = server.unpack_result(zs.tpc_finish(3))
        server.assert_calls(self, ('info', {'size': Var(), 'length': 1}))

        p, serial, next_serial = zs.loadBefore(ob._p_oid, maxtid)
        self.assertEqual((serial, next_serial), (tid3, None))
        self.assertEqual(reader.getClassName(p), 'BTrees.Length.Length')
        self.assertEqual(reader.getState(p), 3)

    def test_client_side(self):
        # First, traditional:
        addr, stop = ZEO.server('data.fs')
        db = ZEO.DB(addr)
        with db.transaction() as conn:
            conn.root.l = Length(0)

        conn2 = db.open()
        conn2.root.l.change(1)

        with db.transaction() as conn:
            conn.root.l.change(1)

        conn2.transaction_manager.commit()

        self.assertEqual(conn2.root.l.value, 2)

        db.close(); stop()

        # Now, do conflict resolution on the client.
        addr2, stop = ZEO.server(
            storage_conf='<mappingstorage>\n</mappingstorage>\n',
            zeo_conf=dict(client_conflict_resolution=True),
            )

        db = ZEO.DB(addr2)
        with db.transaction() as conn:
            conn.root.l = Length(0)

        conn2 = db.open()
        conn2.root.l.change(1)

        with db.transaction() as conn:
            conn.root.l.change(1)

        self.assertEqual(conn2.root.l.value, 1)
        conn2.transaction_manager.commit()
        self.assertEqual(conn2.root.l.value, 2)

        db.close(); stop()

def test_suite():
    return unittest.makeSuite(ClientSideConflictResolutionTests)
src/ZEO/tests/utils.py  (new file, mode 100644)

"""Testing helpers
"""
import ZEO.StorageServer

from ..asyncio.server import best_protocol_version

class ServerProtocol:

    method = ('register', )

    def __init__(self, zs,
                 protocol_version=best_protocol_version,
                 addr='test-address'):
        self.calls = []
        self.addr = addr
        self.zs = zs
        self.protocol_version = protocol_version
        zs.notify_connected(self)

    closed = False
    def close(self):
        if not self.closed:
            self.closed = True
            self.zs.notify_disconnected()

    def call_soon_threadsafe(self, func, *args):
        func(*args)

    def async(self, *args):
        self.calls.append(args)

class StorageServer:
    """Create a client interface to a StorageServer.

    This is for testing StorageServer. It interacts with the storgr
    server through its network interface, but without creating a
    network connection.
    """

    def __init__(self, test, storage,
                 protocol_version=best_protocol_version,
                 **kw):
        self.test = test
        self.storage_server = ZEO.StorageServer.StorageServer(
            None, {'1': storage}, **kw)
        self.zs = self.storage_server.create_client_handler()
        self.protocol = ServerProtocol(self.zs,
                                       protocol_version=protocol_version)
        self.zs.register('1', kw.get('read_only', False))

    def assert_calls(self, test, *argss):
        if argss:
            for args in argss:
                test.assertEqual(self.protocol.calls.pop(0), args)
        else:
            test.assertEqual(self.protocol.calls, ())

    def unpack_result(self, result):
        """For methods that return Result objects, unwrap the results
        """
        result, callback = result.args
        callback()
        return result