Commit c42272a3 authored by Kirill Smelkov's avatar Kirill Smelkov

Merge remote-tracking branch 'origin/master' into t

* origin/master:
  neomigrate: fix typo in a warning message
  neoctl: make cell padding consistent when displaying the partition table
  storage: fix possible crash when delaying replication requests
  qa: bug found in assignment of storage node ids, add test
  Update comment of RECOVERING state
  Add support for OpenSSL >= 1.1
No related merge requests found
......@@ -307,7 +307,10 @@ class _SSLHandshake(_SSL):
except ssl.SSLWantWriteError:
return read_buf is not None
except socket.error, e:
self._error('send' if read_buf is None else 'recv', e)
# OpenSSL 1.1 may raise socket.error(0)
# where previous versions raised SSLEOFError.
self._error('send' if read_buf is None else 'recv',
e if e.errno else None)
if not self.queued[0]:
del self.queued[0]
del self.receive, self.send
......
......@@ -76,13 +76,13 @@ def ErrorCodes():
@Enum
def ClusterStates():
# Once the primary master is elected, the cluster has a state, which is
# initially RECOVERING, during which the master:
# The cluster is initially in the RECOVERING state, and it goes back to
# this state whenever the partition table becomes non-operational again.
# An election of the primary master always happens, in case of a network
# cut between a primary master and all other nodes. The primary master:
# - first recovers its own data by reading it from storage nodes;
# - waits for the partition table be operational;
# - automatically switch to VERIFYING if the cluster can be safely started.
# Whenever the partition table becomes non-operational again, the cluster
# goes back to this state.
RECOVERING
# Transient state, used to:
# - replay the transaction log, in case of unclean shutdown;
......
......@@ -67,8 +67,8 @@ class TerminalNeoCTL(object):
asNode = staticmethod(uuid_int)
def formatRowList(self, row_list):
return '\n'.join('%03d | %s' % (offset,
''.join('%s - %s |' % (uuid_str(uuid), state)
return '\n'.join('%03d |%s' % (offset,
''.join(' %s - %s |' % (uuid_str(uuid), state)
for (uuid, state) in cell_list))
for (offset, cell_list) in row_list)
......
......@@ -45,7 +45,7 @@ def main(args=None):
from neo.client.Storage import Storage as NEOStorage
if os.path.exists(source):
print("WARNING: This is not the recommended way to import data to NEO:"
" you should use Imported backend instead.\n"
" you should use the Importer backend instead.\n"
"NEO also does not implement IStorageRestoreable interface,"
" which means that undo information is not preserved when using"
" this tool: conflict resolution could happen when undoing an"
......
......@@ -142,7 +142,7 @@ class StorageOperationHandler(EventHandler):
# if client tasks are finished)
def getEventQueue(self):
    # Delayed replication requests must be re-queued on the transaction
    # manager's read queue rather than on the transaction manager itself
    # (per the merged fix: "storage: fix possible crash when delaying
    # replication requests"). The scraped diff contained both the old and
    # the new return statement; only the fixed one is kept.
    return self.app.tm.read_queue
@checkFeedingConnection(check=True)
def askCheckTIDRange(self, conn, *args):
......
......@@ -32,7 +32,7 @@ from neo.storage.transactions import TransactionManager, ConflictError
from neo.lib.connection import ConnectionClosed, \
ServerConnection, MTClientConnection
from neo.lib.exception import DatabaseFailure, StoppedOperation
from neo.lib.handler import DelayEvent
from neo.lib.handler import DelayEvent, EventHandler
from neo.lib import logging
from neo.lib.protocol import (CellStates, ClusterStates, NodeStates, NodeTypes,
Packets, Packet, uuid_str, ZERO_OID, ZERO_TID, MAX_TID)
......@@ -2322,6 +2322,36 @@ class Test(NEOThreadedTest):
self.assertFalse(m1.primary)
self.assertTrue(m1.is_alive())
@with_cluster(start_cluster=0, master_count=2,
              partitions=2, storage_count=2, autostart=2)
def testSplitBrainAtCreation(self, cluster):
    """
    Check cluster creation when storage nodes are identified before all
    masters see each other and elect a primary.
    XXX: Do storage nodes need a node id before the cluster is created ?
         Another solution is that they treat their ids as temporary as long
         as the partition table is empty.
    """
    # Make each storage node know a different single master (drop the
    # master with the smallest address from one node, the largest from
    # the other), simulating a network split during cluster creation.
    for m, s in zip((min, max), cluster.storage_list):
        s.nm.remove(m(s.nm.getMasterList(),
                      key=lambda node: node.getAddress()))
    # Block master-to-master identification so that no primary election
    # between the two masters can complete yet.
    with ConnectionFilter() as f:
        f.add(lambda conn, packet:
            isinstance(packet, Packets.RequestIdentification)
            and packet.decode()[0] == NodeTypes.MASTER)
        cluster.start(recovering=True)
        neoctl = cluster.neoctl
        getClusterState = neoctl.getClusterState
        getStorageList = lambda: neoctl.getNodeList(NodeTypes.STORAGE)
        # With masters isolated, the cluster stays in RECOVERING and only
        # one storage is visible to the queried master.
        self.assertEqual(getClusterState(), ClusterStates.RECOVERING)
        self.assertEqual(1, len(getStorageList()))
    # Any protocol error after releasing the filter would mean the node-id
    # conflict crashed a node: abort the test process in that case.
    with Patch(EventHandler, protocolError=lambda *_: sys.exit()):
        self.tic()
    # Known bug in storage node id assignment (commit message: "qa: bug
    # found in assignment of storage node ids, add test"): the cluster
    # does not reach RUNNING yet, hence expectedFailure.
    expectedFailure(self.assertEqual)(neoctl.getClusterState(),
                                      ClusterStates.RUNNING)
    self.assertEqual({1: NodeStates.RUNNING, 2: NodeStates.RUNNING},
                     {x[2]: x[3] for x in neoctl.getNodeList(NodeTypes.STORAGE)})
@with_cluster(partitions=2, storage_count=2)
def testStorageBackendLastIDs(self, cluster):
"""
......
......@@ -314,12 +314,27 @@ class ReplicationTests(NEOThreadedTest):
the backup cluster reacts very quickly to a new transaction.
"""
upstream = backup.upstream
t1, c1 = upstream.getTransaction()
ob = c1.root()[''] = PCounterWithResolution()
t1.commit()
ob.value += 2
t2, c2 = upstream.getTransaction()
c2.root()[''].value += 3
self.tic()
with upstream.master.filterConnection(upstream.storage) as f:
f.delayNotifyUnlockInformation()
upstream.importZODB()(1)
delay = f.delayNotifyUnlockInformation()
t1.commit()
self.tic()
def storeObject(orig, *args, **kw):
p.revert()
f.remove(delay)
return orig(*args, **kw)
with Patch(upstream.storage.tm, storeObject=storeObject) as p:
t2.commit()
self.tic()
# TODO check tids
t1.begin()
self.assertEqual(5, ob.value)
self.assertEqual(1, self.checkBackup(backup))
@with_cluster()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment