Commit 7d2cc57d authored by Roque

Use new data-stream-list server script data structure

- add to report method saves the new large-hash parameter (see the sketch below the commit header)
- delete from report updated
- split download updated
- resume operation (both full and split files) updated
- warn conflicts updated
- update dataset operation updated
- rename operation updated (download and ingest)
- download discard changes updated
- stage feature works properly (add/remove local changes)
- ingestion reportUpToDate updated
- split ingestion updated
- dataset utils: hardcoded url removed (server url now passed to initialize)
- report up to date method updated
parent 3c00a569
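The central change is that each file in the local task report now also carries a "large hash" (the MD5 digests of its uploaded chunks concatenated in order, see getHashFromChunk and the large_hash accumulation in the ingestion plugin below), and each remote file is described by a data-stream list instead of a single stream. Below is a minimal sketch of both ideas, assuming the chunk-MD5-concatenation convention used in this commit; the helper name, the example chunk size and all sample values are illustrative only and not part of the codebase.

require 'digest'

# Hypothetical helper (not in the codebase): the "large hash" of a file is
# assumed to be the MD5 hex digest of each chunk, concatenated in upload order,
# mirroring getHashFromChunk and the large_hash accumulation in the diff below.
def compute_large_hash(path, chunk_size = 50 * 1024 * 1024)
  large_hash = ""
  File.open(path, "rb") do |file|
    while (data = file.read(chunk_size))
      large_hash += Digest::MD5.hexdigest(data)
    end
  end
  large_hash
end

# Illustrative shape of one remote entry consumed by the downloader in this
# commit; the keys are the ones the code reads ("reference", "full-size",
# "large-hash", "data-stream-list", "id"), the values are made up.
remote_file = {
  "reference"        => "my_dataset/subdir/sample/csv",
  "full-size"        => 104857600,
  "large-hash"       => "9e107d9d372bb6826bd81d3542a419d6e4d909c290d0fb1ca068ffaddf22cbd0",
  "data-stream-list" => [
    { "id" => "data_stream_module/0001" },
    { "id" => "data_stream_module/0002" },
  ],
}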
......@@ -6,18 +6,9 @@ require 'open-uri'
# class that handles dataset tasks report
class DatasetUtils
begin
uri = URI(URI.escape("https://wendelin.io/ERP5Site_getIngestionConstantsJson"))
res = open(uri).read
json = JSON.parse(res)
EOF = json['split_end_suffix']
NONE_EXT = json['none_extension']
REFERENCE_SEPARATOR = json['reference_separator']
rescue
EOF = "EOF"
NONE_EXT = "none"
REFERENCE_SEPARATOR = "/"
end
EOF = "EOF"
NONE_EXT = "none"
REFERENCE_SEPARATOR = "/"
DATASET_REPORT_FILE = ".dataset-task-report"
DATASET_TEMP_REPORT_FILE = ".temp-dataset-task-report"
......@@ -68,7 +59,21 @@ class DatasetUtils
RECORD_SEPARATOR = ";"
DATE_FORMAT = "%Y-%m-%d-%H-%M-%S"
def initialize(data_set_directory)
def initialize(data_set_directory, url=FALSE)
if url
begin
uri = URI(URI.escape(url + "/ERP5Site_getIngestionConstantsJson"))
res = open(uri).read
json = JSON.parse(res)
@EOF = json['split_end_suffix']
@NONE_EXT = json['none_extension']
@REFERENCE_SEPARATOR = json['reference_separator']
rescue
@EOF = EOF
@NONE_EXT = NONE_EXT
@REFERENCE_SEPARATOR = REFERENCE_SEPARATOR
end
end
@data_set_directory = appendSlashTo(data_set_directory)
@logger = LogManager.instance()
@task_report_file = @data_set_directory + DATASET_REPORT_FILE
......@@ -179,7 +184,7 @@ class DatasetUtils
if record[1].chomp == RUN_DONE
if (remove.nil?) || (remove != record[0])
local_files[record[0]] = {"size" => record[2].chomp, "hash" => record[3].chomp,
"status" => record[1].chomp, "modification_date" => record[4].chomp }
"status" => record[1].chomp, "modification_date" => record[4].chomp, "large_hash" => record[5].chomp }
end
end
end
......@@ -197,7 +202,7 @@ class DatasetUtils
File.open(@temp_report_file, 'w') {}
else
local_files.each do |key, array|
record = [key, array["status"], array["size"].to_s, array["hash"], array["modification_date"]].join(RECORD_SEPARATOR)
record = [key, array["status"], array["size"].to_s, array["hash"], array["modification_date"], array["large_hash"]].join(RECORD_SEPARATOR)
File.open(@temp_report_file, 'ab') { |file| file.puts(record) }
end
end
......@@ -262,9 +267,10 @@ class DatasetUtils
else
file_path = referenceToPath(changes[0]["reference"], @data_set_directory, data_set)
end
size = changes[0]["size"]
hash = changes[0]["hash"]
addToReport(changes[0]["reference"], RUN_DONE, size, hash, data_set, new_reference)
size = changes[0]["full-size"]
large_hash = changes[0]["large-hash"]
hash = getHash(file_path).to_s
addToReport(changes[0]["reference"], RUN_DONE, size, hash, large_hash, data_set, new_reference)
File.delete(@resume_operation_file)
return TRUE
end
......@@ -376,9 +382,9 @@ class DatasetUtils
end
end
def saveSplitOperation(operation, reference, eof, hash, chunk_size)
def saveSplitOperation(operation, reference, eof, hash, chunk_size, large_hash="NONE")
file_reference = reference.gsub('/', '__')
record = [operation, reference, eof, hash, Integer(chunk_size)].join(RECORD_SEPARATOR)
record = [operation, reference, eof, hash, Integer(chunk_size), large_hash].join(RECORD_SEPARATOR)
File.open(@split_file + "__" + file_reference, 'w') { |file| file.puts(record) }
deleteSplitOperationControlFile(reference)
end
......@@ -388,9 +394,16 @@ class DatasetUtils
return File.exist?(@split_file + "__" + file_reference)
end
def getLastSplitOperation(operation, reference, hash, chunk_size)
def getLastSplitOperation(operation, reference, hash, chunk_size, large_hash=FALSE)
# returns last split operation index (and partial large hash if large_hash is TRUE)
file_reference = reference.gsub('/', '__')
return 0 if not File.exist?(@split_file + "__" + file_reference)
if not File.exist?(@split_file + "__" + file_reference)
if large_hash
return 0, ""
else
return 0
end
end
record = File.open(@split_file + "__" + file_reference).read.chomp.split(RECORD_SEPARATOR)
if record[0] == operation && record[1] == reference && record[3] == hash && record[4] == Integer(chunk_size).to_s && record[2] != EOF
# discard if user interrupted (ctrl+c) the operation
......@@ -398,16 +411,32 @@ class DatasetUtils
@logger.warn("Previous split operation attempt for file #{reference} was interrupt by user (aborted tool execution), it will be restarted.", print=TRUE)
deleteSplitOperationFile(file_reference)
deleteSplitOperationControlFile(file_reference)
return 0
if large_hash
return 0, ""
else
return 0
end
end
createSplitOperationControlFile(file_reference)
return record[2].to_i
if large_hash
return record[2].to_i, record[5]
else
return record[2].to_i
end
end
if large_hash
return 0, ""
else
return 0
end
return 0
rescue Exception => e
@logger.error("An error occurred in getLastSplitOperation method:" + e.to_s)
@logger.error(e.backtrace)
return 0
if large_hash
return 0, ""
else
return 0
end
end
def deleteDiscardChangesFile()
......@@ -502,7 +531,7 @@ class DatasetUtils
return filename, extension, reference
end
def addToReport(reference, status, size, hash, data_set, new_reference=FALSE)
def addToReport(reference, status, size, hash, large_hash, data_set, new_reference=FALSE)
local_files = {}
begin
file_path = referenceToPath(reference, @data_set_directory, data_set)
......@@ -518,18 +547,19 @@ class DatasetUtils
reference = new_reference
file_path = referenceToPath(reference, @data_set_directory, data_set)
modification_date = File.exist?(file_path) ? File.mtime(file_path).strftime(DATE_FORMAT) : "not-modification-date"
large_hash = record[5] if large_hash == ""
end
local_files[reference] = {"size" => size, "hash" => hash, "status" => status,
"modification_date" => modification_date }
"modification_date" => modification_date, "large_hash" => large_hash }
new_file = FALSE
else
local_files[record[0]] = {"size" => record[2].chomp, "hash" => record[3].chomp,
"status" => record[1].chomp, "modification_date" => record[4].chomp }
"status" => record[1].chomp, "modification_date" => record[4].chomp, "large_hash" => record[5] }
end
end
if new_file
local_files[reference] = {"size" => size, "hash" => hash, "status" => status,
"modification_date" => modification_date }
"modification_date" => modification_date, "large_hash" => large_hash }
end
rescue Exception => e
@logger.error("An error occurred in DatasetUtils method 'addToReport':" + e.to_s)
......@@ -543,9 +573,21 @@ class DatasetUtils
saveReport(local_files)
end
def getHashFromChunk(content)
begin
md5 = Digest::MD5.new
md5.update(content)
return md5.hexdigest
rescue Exception => e
@logger.error("An error occurred while getting hash from chunk:" + e.to_s, print=TRUE)
@logger.error(e.backtrace)
raise e
end
end
def getHash(file)
return "FILE-NOT-EXISTS" if ! File.exist?(file)
begin
raise "File doesn't exist" if ! File.exist?(file)
chunk_size = 4 * MEGA
md5 = Digest::MD5.new
open(file) do |f|
......@@ -608,16 +650,32 @@ class DatasetUtils
end
end
def isRenamed(file, file_dict_list, file_dict=FALSE)
hash = file_dict ? file_dict["hash"] : ""
size = file_dict ? file_dict["size"] : (File.size(file).to_s if File.exist?(file))
file_dict_list.each do |path, dict|
if size == dict["size"].to_s
hash = hash != "" ? hash : getHash(file).to_s
if hash == dict["hash"]
old_path = path
return {"key" => old_path, "size" => size, "hash" => hash}
end
def isLocalChangeRenamed(local_file_path, deleted_file_dict_list)
# check whether local_file_path, marked as new, matches a deleted file, in which case it is in fact a rename
size = File.size(local_file_path).to_s if File.exist?(local_file_path)
deleted_file_dict_list.each do |path, deleted_file_dict|
if size == deleted_file_dict["size"].to_s
hash = getHash(local_file_path).to_s
if hash == deleted_file_dict["hash"]
old_path = path
return {"key" => old_path, "size" => size, "hash" => hash}
end
end
end
return FALSE
end
def isRemoteChangeRenamed(new_remote_file_dict_list, local_report_file_dict)
# check whether a report file missing from the remote dataset matches one of the remote files marked as new/modified, in which case it is in fact a remote rename
hash = local_report_file_dict["large-hash"]
size = local_report_file_dict["size"]
new_remote_file_dict_list.each do |path, new_remote_file_dict|
if size == new_remote_file_dict["full-size"].to_s
hash = hash != "" ? hash : getHash(file).to_s
if hash == new_remote_file_dict["large-hash"]
old_path = path
return {"key" => old_path, "size" => size, "hash" => hash}
end
end
end
return FALSE
......@@ -643,10 +701,10 @@ class DatasetUtils
return staged_status["status"] == status
end
def checkRenamed(path, deleted_files, change)
renamed_dict = isRenamed(path, deleted_files)
def checkRenamed(path, deleted_file_dict_list, change)
renamed_dict = isLocalChangeRenamed(path, deleted_file_dict_list)
if renamed_dict
deleted_files.delete(renamed_dict["key"])
deleted_file_dict_list.delete(renamed_dict["key"])
change["status"] = STATUS_RENAMED
change["new_path"] = change["path"]
change["path"] = renamed_dict["key"]
......@@ -791,37 +849,37 @@ class DatasetUtils
File.readlines(@task_report_file).each do |line|
record = line.split(RECORD_SEPARATOR)
if record[1].chomp == RUN_DONE
local_files[record[0]] = {"size" => record[2].chomp, "hash" => record[3].chomp, }
local_files[record[0]] = {"size" => record[2].chomp, "hash" => record[3].chomp, "large-hash" => record[5].chomp, }
end
end
data_streams.each do |data_stream|
remote_files.push(data_stream["reference"])
pending = TRUE
reference = data_stream["reference"]
remote_files.push(reference)
pending = TRUE
if local_files.has_key? reference
size = local_files[reference]["size"]
if size.to_s == data_stream["size"].to_s
hash = local_files[reference]["hash"]
if hash == data_stream["hash"] or data_stream["hash"] == ""
if size.to_s == data_stream["full-size"].to_s
hash = local_files[reference]["large-hash"]
if hash == data_stream["large-hash"] or data_stream["large-hash"] == ""
pending = FALSE
end
end
end
if pending
local_files.delete(reference)
file_path = referenceToPath(data_stream["reference"], @data_set_directory, data_set)
file_path = referenceToPath(reference, @data_set_directory, data_set)
new_changed_files[file_path] = data_stream
end
end
local_files.each do |reference, file_dict|
if not remote_files.include? reference
changed_data_stream = {"reference" => reference, "hash" => DELETE }
changed_data_stream = {"reference" => reference, "large-hash" => DELETE }
file_path = referenceToPath(reference, @data_set_directory, data_set)
renamed_dict = isRenamed(file_path, new_changed_files, file_dict)
renamed_dict = isRemoteChangeRenamed(new_changed_files, file_dict)
if renamed_dict
changed_data_stream = {"reference" => reference, "id" => new_changed_files[renamed_dict["key"]]["id"],
"new_reference" => new_changed_files[renamed_dict["key"]]["reference"], "status" => STATUS_RENAMED,
"size" => renamed_dict["size"], "hash" => renamed_dict["hash"] }
"full-size" => renamed_dict["size"], "large-hash" => renamed_dict["hash"] }
new_changed_files.delete(renamed_dict["key"])
end
changed_data_streams.push(changed_data_stream)
......
......@@ -138,9 +138,9 @@ module Embulk
@logger.error("Your current dataset is outdated. Please, run a download to update it before ingest your changes.", print=TRUE)
puts
@logger.abortExecution(error=FALSE)
end
end
end
end
end
end
@logger.info("Supplier: #{task['supplier']}")
@logger.info("Dataset name: #{task['data_set']}")
......@@ -215,7 +215,7 @@ module Embulk
super
@supplier = task['supplier']
@dataset = task['data_set']
@chunk_size = task['chunk_size']
@chunk_size = DatasetUtils::CHUNK_SIZE
@data_set_directory = task['data_set_directory']
@logger = LogManager.instance()
@dataset_utils = DatasetUtils.new(@data_set_directory)
......@@ -239,13 +239,19 @@ module Embulk
filename, extension, reference = @dataset_utils.getPathInfo(path, @dataset)
operation = rename ? DatasetUtils::RENAME : DatasetUtils::INGESTION
@dataset_utils.saveCurrentOperation(operation, reference, new_reference)
resume_split = @dataset_utils.splitOperationFileExist(reference) ? @dataset_utils.getLastSplitOperation(operation, reference, hash, @chunk_size) : 0
resume_split, large_hash = 0, ""
if @dataset_utils.splitOperationFileExist(reference)
resume_split, large_hash = @dataset_utils.getLastSplitOperation(operation, reference, hash, @chunk_size, large_hash=TRUE)
end
each_chunk(path, filename, extension, size, hash, schema[1..-1].map{|elm| elm.name}, @chunk_size, delete, new_reference, resume_split) do |entry|
@dataset_utils.createSplitOperationControlFile(reference) if split
large_hash += entry[8]
# no need to send the large hash to the server
entry.pop()
@page_builder.add(entry)
if ! delete && ! rename && entry[5] != ""
split = TRUE
@dataset_utils.saveSplitOperation(operation, reference, entry[5], hash, @chunk_size)
@dataset_utils.saveSplitOperation(operation, reference, entry[5], hash, @chunk_size, large_hash)
end
end
@page_builder.finish
......@@ -269,7 +275,7 @@ module Embulk
end
else
if @dataset_utils.reportFileExist()
@dataset_utils.addToReport(reference, return_value, size, hash, task['data_set'], new_reference)
@dataset_utils.addToReport(reference, return_value, size, hash, large_hash, task['data_set'], new_reference)
end
end
end
......@@ -282,11 +288,11 @@ module Embulk
def each_chunk(path, filename, extension, size, hash, fields, chunk_size=DatasetUtils::CHUNK_SIZE, delete=FALSE, new_reference=FALSE, resume_split=0)
if delete
File.delete(path) if File.exist?(path)
values = [@supplier, @dataset, filename, extension, "", DatasetUtils::DELETE, "", ""]
values = [@supplier, @dataset, filename, extension, "", DatasetUtils::DELETE, "", "", ""]
yield(values)
elsif new_reference
File.delete(path) if File.exist?(path)
values = [@supplier, @dataset, filename, extension, new_reference, DatasetUtils::RENAME, "", ""]
values = [@supplier, @dataset, filename, extension, new_reference, DatasetUtils::RENAME, "", "", ""]
yield(values)
else
file_object = File.open(path, "rb")
......@@ -297,7 +303,7 @@ module Embulk
data = next_byte
if not next_byte
if first # this means this is an empty file
values = [@supplier, @dataset, filename, extension, "", "", size, hash]
values = [@supplier, @dataset, filename, extension, "", "", size, hash, hash]
yield(values)
end
break
......@@ -320,7 +326,8 @@ module Embulk
eof = npart.to_s.rjust(3, "0")
end
content = Base64.encode64(data)
values = [@supplier, @dataset, filename, extension, content, eof, size, hash]
chunk_hash = @dataset_utils.getHashFromChunk(data)
values = [@supplier, @dataset, filename, extension, content, eof, size, hash, chunk_hash]
first = FALSE
yield(values)
end
......
......@@ -242,7 +242,7 @@ module Embulk
def initialize(task, schema, index, page_builder)
super
@data_set = task['data_set']
@chunk_size = task['chunk_size']
@chunk_size = DatasetUtils::CHUNK_SIZE
@data_set_directory = task['data_set_directory']
@wendelin = WendelinClient.new(task['erp5_url'], task['user'], task['password'])
@logger = LogManager.instance()
......@@ -250,46 +250,52 @@ module Embulk
end
def run
data_stream = task['data_streams'][@index]
id = data_stream["id"]
reference = data_stream["reference"]
size = data_stream["size"]
hash = data_stream["hash"]
renamed = data_stream["status"] == DatasetUtils::STATUS_RENAMED
deleted = hash.to_s == DatasetUtils::DELETE
remote_file = task['data_streams'][@index]
reference = remote_file["reference"]
size = remote_file["full-size"]
large_hash = remote_file["large-hash"]
data_stream_chunk_list = remote_file["data-stream-list"]
renamed = remote_file["status"] == DatasetUtils::STATUS_RENAMED
deleted = large_hash.to_s == DatasetUtils::DELETE
begin
if deleted
entry = [reference, "", @data_set, DatasetUtils::DELETE, renamed]
page_builder.add(entry)
elsif renamed
new_reference = data_stream["new_reference"]
new_reference = remote_file["new_reference"]
entry = [reference, new_reference, @data_set, TRUE, renamed]
page_builder.add(entry)
else
@logger.info("Discarding local change on '#{data_stream["path"]}'", print=TRUE) if task['discard_changes']
@logger.info("Discarding local change on '#{remote_file["path"]}'", print=TRUE) if task['discard_changes']
@logger.info("Getting content from remote #{reference}", print=TRUE)
@logger.info("Downloading...", print=TRUE)
resume_split = @dataset_utils.splitOperationFileExist(reference) ? @dataset_utils.getLastSplitOperation(DatasetUtils::DOWNLOAD, reference, hash, @chunk_size) : 0
n_chunk = resume_split == 0 ? 0 : resume_split+1
split = n_chunk > 0
resume_split = @dataset_utils.splitOperationFileExist(reference) ? @dataset_utils.getLastSplitOperation(DatasetUtils::DOWNLOAD, reference, large_hash, @chunk_size) : 0
n_chunk = resume_split == 0 ? 0 : resume_split+1
split = n_chunk > 0
@logger.info("Resuming interrupted split download...", print=TRUE) if split
@wendelin.eachDataStreamContentChunk(id, @chunk_size, n_chunk) do |chunk|
content = chunk.nil? || chunk.empty? ? "" : Base64.encode64(chunk)
begin_of_file = n_chunk == 0
split = n_chunk > 0
@dataset_utils.createSplitOperationControlFile(reference) if split
entry = [reference, content, @data_set, begin_of_file, renamed]
page_builder.add(entry)
@dataset_utils.saveSplitOperation(DatasetUtils::DOWNLOAD, reference, n_chunk, hash, @chunk_size) if split
n_chunk += 1
end
data_stream_chunk_list.each_with_index do |data_stream_chunk, index|
# skip data streams/chunks already downloaded
if n_chunk == index
content = ""
@wendelin.eachDataStreamContentChunk(data_stream_chunk["id"], @chunk_size, 0) do |chunk|
content = chunk.nil? || chunk.empty? ? "" : Base64.encode64(chunk)
end
begin_of_file = n_chunk == 0
split = n_chunk > 0
@dataset_utils.createSplitOperationControlFile(reference) if split
entry = [reference, content, @data_set, begin_of_file, renamed]
page_builder.add(entry)
@dataset_utils.saveSplitOperation(DatasetUtils::DOWNLOAD, reference, n_chunk, large_hash, @chunk_size) if split
n_chunk += 1
end
end
end
page_builder.finish
@dataset_utils.deleteSplitOperationFile(reference) if split
rescue java.lang.OutOfMemoryError
@logger.logOutOfMemoryError(reference)
return_value = DatasetUtils::RUN_ABORTED
rescue Exception => e
rescue Exception => e
@logger.error(e.to_s, print=TRUE)
@logger.error(e.backtrace)
puts "[INFO] For more detailed information, please refer to the log file: " + @logger.getLogPath()
......@@ -302,7 +308,9 @@ module Embulk
if deleted
@dataset_utils.deleteFromReport(reference, return_value)
else
@dataset_utils.addToReport(reference, return_value, size, hash, task['data_set'], new_reference)
file_path = renamed ? @dataset_utils.referenceToPath(new_reference, @data_set_directory, @data_set) : @dataset_utils.referenceToPath(reference, @data_set_directory, @data_set)
hash = @dataset_utils.getHash(file_path).to_s
@dataset_utils.addToReport(reference, return_value, size, hash, large_hash, task['data_set'], new_reference)
end
end
return {return_value => reference}
......