neoppod · Commit f2070ca4
authored Dec 13, 2017 by Julien Muchembled
parent c76b3a0a

doc: comments, fixups

Showing 6 changed files with 38 additions and 7 deletions (+38 -7)
CHANGELOG.rst                     +1  -1
neo/scripts/neostorage.py         +3  -2
neo/storage/database/importer.py  +1  -0
neo/storage/database/manager.py   +22 -3
neo/storage/database/mysqldb.py   +4  -0
neo/storage/handlers/storage.py   +7  -1
CHANGELOG.rst
@@ -42,7 +42,7 @@ Other changes:
 - Proper handling of incoming packets for closed/aborted connections.
 - An exception while processing an answer could leave the handler switcher
   in the bad state.
-- In STOPPING cluster state, really wait for all transaction to be finished.
+- In STOPPING cluster state, really wait for all transactions to be finished.
 - Several issues when undoing transactions with conflict resolutions
   have been fixed.
 - Delayed connection acceptation when the storage node is ready.
neo/scripts/neostorage.py
@@ -31,8 +31,9 @@ parser.add_option('-e', '--engine', help = 'database engine')
 parser.add_option('-w', '--wait', help = 'seconds to wait for backend to be '
     'available, before erroring-out (-1 = infinite)', type = 'float', default = 0)
 parser.add_option('--dedup', action = 'store_true',
-    help = 'enable deduplication of data'
-           ' when setting up a new storage node')
+    help = 'enable deduplication of data when setting'
+           ' up a new storage node (for RocksDB, check'
+           ' https://github.com/facebook/mysql-5.6/issues/702)')
 parser.add_option('--disable-drop-partitions', action = 'store_true',
     help = 'do not delete data of discarded cells, which is'
            ' useful for big databases because the current'
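Note on the help strings above: Python concatenates adjacent string literals at compile time, so the new RocksDB note simply extends the single help string passed to add_option. A minimal stand-alone optparse sketch of the two options touched here (not the real neostorage parser; wording copied from the diff):

# Stand-alone sketch, assuming only the two options shown in this hunk.
from optparse import OptionParser

parser = OptionParser()
parser.add_option('-w', '--wait', type='float', default=0,
    help='seconds to wait for backend to be '
         'available, before erroring-out (-1 = infinite)')  # adjacent literals form one string
parser.add_option('--dedup', action='store_true',
    help='enable deduplication of data when setting'
         ' up a new storage node (for RocksDB, check'
         ' https://github.com/facebook/mysql-5.6/issues/702)')

options, args = parser.parse_args(['--dedup', '--wait', '5'])
print(options.dedup, options.wait)  # True 5.0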
neo/storage/database/importer.py
@@ -314,6 +314,7 @@ class ImporterDatabaseManager(DatabaseManager):

     def commit(self):
         self.db.commit()
+        # XXX: This misses commits done internally by self.db (lockTransaction).
         self._last_commit = time.time()

     def close(self):
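The new XXX comment flags a bookkeeping gap: _last_commit is only refreshed when commit() is called on the importer wrapper, not when the wrapped database commits internally (e.g. from lockTransaction). A minimal sketch of that gap, with hypothetical Backend/Wrapper names invented for the example:

import time

class Backend(object):
    """Hypothetical wrapped database; it may commit on its own."""
    def commit(self):
        pass                  # a real backend would flush here
    def lockTransaction(self):
        self.commit()         # internal commit: the wrapper never sees it

class Wrapper(object):
    """Mirrors the commit() bookkeeping shown in the diff."""
    def __init__(self, db):
        self.db = db
        self._last_commit = 0
    def commit(self):
        self.db.commit()
        # XXX: This misses commits done internally by self.db (lockTransaction).
        self._last_commit = time.time()

w = Wrapper(Backend())
w.db.lockTransaction()        # backend committed, but w._last_commit is still 0
w.commit()                    # only now is the timestamp updated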
neo/storage/database/manager.py
@@ -395,9 +395,20 @@ class DatabaseManager(object):
                 Identifier of object to retrieve.
             tid (int, None)
                 Exact serial to retrieve.
-            before_tid (packed, None)
+            before_tid (int, None)
                 Serial to retrieve is the highest existing one strictly below this
                 value.
+
+        Return value:
+            None: oid doesn't exist at requested tid/before_tid (getObject
+                  takes care of checking if the oid exists at other serial)
+            6-tuple: Record content.
+                - record serial (int)
+                - serial or next record modifying object (int, None)
+                - compression (boolean-ish, None)
+                - checksum (binary string, None)
+                - data (binary string, None)
+                - data_serial (int, None)
         """

     @requires(_getObject)
@@ -418,7 +429,7 @@ class DatabaseManager(object):
             - record serial (packed)
             - serial or next record modifying object (packed, None)
             - compression (boolean-ish, None)
-            - checksum (integer, None)
+            - checksum (binary string, None)
             - data (binary string, None)
             - data_serial (packed, None)
         """
@@ -437,11 +448,19 @@ class DatabaseManager(object):

     @fallback
     def _fetchObject(self, oid, tid):
+        """Specialized version of _getObject, for replication"""
         r = self._getObject(oid, tid)
         if r:
-            return r[:1] + r[2:]
+            return r[:1] + r[2:] # remove next_serial

     def fetchObject(self, oid, tid):
+        """
+        Specialized version of getObject, for replication:
+        - the oid can only be at an exact serial (parameter 'tid')
+        - next_serial is not part of the result
+        - if there's no result for the requested serial,
+          no need check if oid exists at other serial
+        """
         u64 = util.u64
         r = self._fetchObject(u64(oid), u64(tid))
         if r:
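For reference, the r[:1] + r[2:] expression in _fetchObject drops the second element of the 6-tuple documented above (the serial of the next record modifying the object) and keeps the other five fields. A tiny illustration; the field values are made up, only the tuple shape follows the docstring:

# 6-tuple as documented: (serial, next_serial, compression, checksum, data, data_serial)
r = (1024, 2048, 1, b'\x00' * 20, b'payload', None)

# Same slicing as _fetchObject: keep the serial, drop next_serial, keep the rest.
replicated = r[:1] + r[2:]
print(replicated)  # (1024, 1, ..., b'payload', None): five fields, no next_serial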
neo/storage/database/mysqldb.py
@@ -143,6 +143,10 @@ class MySQLDatabaseManager(DatabaseManager):
                 break
             except OperationalError as m:
                 code, m = m.args
+                # IDEA: Is it safe to retry in case of DISK_FULL ?
+                # XXX: However, this would another case of failure that would
+                #      be unnoticed by other nodes (ADMIN & MASTER). When
+                #      there are replicas, it may be preferred to not retry.
                 if self._active or SERVER_GONE_ERROR != code != SERVER_LOST \
                                 or not retry:
                     raise DatabaseFailure('MySQL error %d: %s\nQuery: %s'
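The condition guarding the raise uses a chained comparison: SERVER_GONE_ERROR != code != SERVER_LOST is true exactly when code is neither of those two client error codes, so only "server gone" and "server lost" errors are ever retried. A simplified stand-alone sketch of that retry shape, with a stub exception class and error codes, and without the self._active check of the real code:

class OperationalError(Exception):
    """Stand-in for MySQLdb.OperationalError in this sketch."""

# Stand-ins for the MySQLdb client error codes used in the diff.
SERVER_GONE_ERROR = 2006
SERVER_LOST = 2013

def query_with_retry(execute, retry=1):
    while True:
        try:
            return execute()
        except OperationalError as m:
            code, _ = m.args
            # Chained comparison: true when code is neither SERVER_GONE_ERROR
            # nor SERVER_LOST, i.e. only those two codes are ever retried.
            if SERVER_GONE_ERROR != code != SERVER_LOST or not retry:
                raise
            retry -= 1

calls = []
def flaky():
    calls.append(1)
    if len(calls) < 2:
        raise OperationalError(SERVER_LOST, 'lost connection')
    return 'ok'

print(query_with_retry(flaky))  # retries once after SERVER_LOST, then prints 'ok'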
neo/storage/handlers/storage.py
@@ -140,6 +140,9 @@ class StorageOperationHandler(EventHandler):

     # Server (all methods must set connection as server so that it isn't closed
     # if client tasks are finished)
+    #
+    # These are all low-priority packets, in that we don't want to delay
+    # answers to clients, so tasks are used to postpone work when we're idle.

     def getEventQueue(self):
         return self.app.tm.read_queue
@@ -157,6 +160,9 @@ class StorageOperationHandler(EventHandler):
                 conn.send(Packets.AnswerCheckTIDRange(*r), msg_id)
             except (weakref.ReferenceError, ConnectionClosed):
                 pass
+            # Splitting this task would cause useless overhead. However, a
+            # generator function is expected, hence the following fake yield
+            # so that iteration stops immediately.
             return; yield
         app.newTask(check())

@@ -173,7 +179,7 @@ class StorageOperationHandler(EventHandler):
                 conn.send(Packets.AnswerCheckSerialRange(*r), msg_id)
             except (weakref.ReferenceError, ConnectionClosed):
                 pass
-            return; yield
+            return; yield # same as in askCheckTIDRange
         app.newTask(check())

     @checkFeedingConnection(check=False)
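The comment added to askCheckTIDRange documents the return; yield idiom: because the body contains a yield, Python treats check() as a generator function, which is what app.newTask expects, while the leading return makes the resulting generator stop on its first iteration, so all the work happens eagerly before it. A stand-alone illustration of the idiom; the task name and the print are invented for the example:

def task():
    print('doing the work eagerly')  # stands in for sending the answer
    # A generator function is expected by the caller, hence the fake yield
    # below; the return makes iteration stop immediately (same idiom as check()).
    return; yield

g = task()      # calling task() runs nothing yet: it only creates the generator
print(list(g))  # prints 'doing the work eagerly', then []: exhausted immediately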