Commit a9baf7b0 authored by Alexandre Boeglin's avatar Alexandre Boeglin

Prevents TimerService from starting multiple threads in parallel (thus creating database conflicts).
Added missing "global" statement in distribute.


git-svn-id: https://svn.erp5.org/repos/public/erp5/trunk@5672 20353a03-c40f-0410-a6d1-a30d3c3de9de
parent 7efca437
...@@ -61,7 +61,8 @@ except ImportError: ...@@ -61,7 +61,8 @@ except ImportError:
active_threads = 0 active_threads = 0
max_active_threads = 1 # 2 will cause more bugs to appear (he he) max_active_threads = 1 # 2 will cause more bugs to appear (he he)
is_initialized = 0 is_initialized = 0
tic_lock = threading.Lock() # A RAM based lock tic_lock = threading.Lock() # A RAM based lock to prevent too many concurrent tic() calls
timerservice_lock = threading.Lock() # A RAM based lock to prevent TimerService spamming when busy
first_run = 1 first_run = 1
# Activity Registration # Activity Registration
...@@ -416,34 +417,44 @@ class ActivityTool (Folder, UniqueObject): ...@@ -416,34 +417,44 @@ class ActivityTool (Folder, UniqueObject):
This method is called by TimerService in the interval given This method is called by TimerService in the interval given
in zope.conf. The Default is every 5 seconds. in zope.conf. The Default is every 5 seconds.
""" """
# Prevent TimerService from starting multiple threads in parallel
# get owner of portal_catalog, so normally we should be able to acquired = timerservice_lock.acquire(0)
# have the permission to invoke all activities if not acquired:
user = self.portal_catalog.getOwner() return
newSecurityManager(self.REQUEST, user)
try:
currentNode = self.getCurrentNode() # get owner of portal_catalog, so normally we should be able to
# have the permission to invoke all activities
# only distribute when we are the distributingNode or if it's empty user = self.portal_catalog.getOwner()
if (self.distributingNode == self.getCurrentNode()): newSecurityManager(self.REQUEST, user)
self.distribute(len(self._nodes))
#LOG('CMFActivity:', INFO, 'self.distribute(node_count=%s)' %len(self._nodes)) currentNode = self.getCurrentNode()
elif not self.distributingNode: # only distribute when we are the distributingNode or if it's empty
self.distribute(1) if (self.distributingNode == self.getCurrentNode()):
#LOG('CMFActivity:', INFO, 'distributingNodes empty! Calling distribute(1)') self.distribute(len(self._nodes))
#LOG('CMFActivity:', INFO, 'self.distribute(node_count=%s)' %len(self._nodes))
# call tic for the current processing_node
# the processing_node numbers are the indices of the elements in the node tuple +1 elif not self.distributingNode:
# because processing_node starts from 1 self.distribute(1)
if currentNode in self._nodes: #LOG('CMFActivity:', INFO, 'distributingNodes empty! Calling distribute(1)')
self.tic(list(self._nodes).index(currentNode)+1)
#LOG('CMFActivity:', INFO, 'self.tic(processing_node=%s)' %str(list(self._nodes).index(currentNode)+1)) # call tic for the current processing_node
# the processing_node numbers are the indices of the elements in the node tuple +1
elif len(self._nodes) == 0: # because processing_node starts from 1
self.tic(1) if currentNode in self._nodes:
#LOG('CMFActivity:', INFO, 'Node List is empty! Calling tic(1)') self.tic(list(self._nodes).index(currentNode)+1)
#LOG('CMFActivity:', INFO, 'self.tic(processing_node=%s)' %str(list(self._nodes).index(currentNode)+1))
elif len(self._nodes) == 0:
self.tic(1)
#LOG('CMFActivity:', INFO, 'Node List is empty! Calling tic(1)')
except:
timerservice_lock.release()
raise
else:
timerservice_lock.release()
security.declarePublic('distribute') security.declarePublic('distribute')
def distribute(self, node_count=1): def distribute(self, node_count=1):
...@@ -451,6 +462,7 @@ class ActivityTool (Folder, UniqueObject): ...@@ -451,6 +462,7 @@ class ActivityTool (Folder, UniqueObject):
Distribute load Distribute load
""" """
# Initialize if needed # Initialize if needed
global is_initialized
if not is_initialized: self.initialize() if not is_initialized: self.initialize()
# Call distribute on each queue # Call distribute on each queue
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment