Commit ef9e02df authored by Kirill Smelkov's avatar Kirill Smelkov
parent e923c9a8
@@ -24,6 +24,7 @@ from ZODB.Connection import TransactionMetaData
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
from ZODB.tests.StorageTestBase import ZERO
from ZODB.tests.util import with_high_concurrency
import threading
import time
@@ -392,6 +393,7 @@ class BasicStorage(object):
# verify storage/Connection for race in between load/open and local invalidations.
# https://github.com/zopefoundation/ZEO/issues/166
# https://github.com/zopefoundation/ZODB/issues/290
@with_high_concurrency
def check_race_loadopen_vs_local_invalidate(self):
db = DB(self._storage)
@@ -498,6 +500,7 @@ class BasicStorage(object):
# This test is similar to check_race_loadopen_vs_local_invalidate but does
# not reuse its code because the probability to reproduce external
# invalidation bug with only 1 mutator + 1 verifier is low.
@with_high_concurrency
def check_race_load_vs_external_invalidate(self):
# dbopen creates new client storage connection and wraps it with DB.
def dbopen():
...
@@ -19,6 +19,7 @@ import atexit
import os
import persistent
import re
import sys
import tempfile
import time
import transaction
@@ -338,3 +339,41 @@ class MonotonicallyIncreasingTimeMinimalTestLayer(MininalTestLayer):
def testTearDown(self):
self.time_manager.close()
reset_monotonic_time()
def with_high_concurrency(f):
    """Decorate test function *f* to run with a high frequency of thread
    context switches.

    Useful for tests that try to probabilistically reproduce race-condition
    scenarios: the interpreter is told to switch between threads much more
    often than by default, raising the chance that concurrent threads
    interleave at interesting points.

    The original switch frequency is restored when *f* returns, even if it
    raises.
    """
    @functools.wraps(f)
    def wrapper(*args, **kw):
        # NOTE(review): the original used six.PY3 here; sys.version_info
        # behaves identically and drops the third-party dependency.
        if sys.version_info[0] >= 3:
            # Python 3, by default, switches every 5ms, which turns threads in
            # intended "high concurrency" scenarios to execute almost serially.
            # Raise the frequency of context switches in order to increase the
            # probability to reproduce interesting/tricky overlapping of threads.
            #
            # See https://github.com/zopefoundation/ZODB/pull/345#issuecomment-822188305 and
            # https://github.com/zopefoundation/ZEO/issues/168#issuecomment-821829116 for details.
            saved = sys.getswitchinterval()
            sys.setswitchinterval(5e-6)  # ~ 100 simple instructions on modern hardware
            try:
                return f(*args, **kw)
            finally:
                sys.setswitchinterval(saved)
        else:
            # Python 2, by default, switches threads every "100 instructions".
            # Just make sure we run f with that default.
            # (sys.{get,set}checkinterval only exist on Python 2 / early 3;
            # they were removed in Python 3.12, hence the version guard.)
            saved = sys.getcheckinterval()
            sys.setcheckinterval(100)
            try:
                return f(*args, **kw)
            finally:
                sys.setcheckinterval(saved)
    return wrapper
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment