Commit 446f7dad authored by Paul Graydon

wendelin_telecom_base: Add maximum data chunk size to KPI calculation

parent f172a89b
 import json
 import numpy as np
+MAX_CHUNK_SIZE = 10000000 # bytes
 HISTORY_LINE_COUNT = 64
 T_PERIOD = 30 # seconds
 QCI_COUNT = 256
@@ -9,7 +10,7 @@
 in_data_stream = in_stream['Data Stream']
 progress_indicator = in_stream['Progress Indicator']
 offset_index = progress_indicator.getIntOffsetIndex()
 start = 0
-end = in_data_stream.getSize()
+end = min(in_data_stream.getSize(), offset_index + MAX_CHUNK_SIZE)
 # No new data to process
 if offset_index >= end:
@@ -21,6 +22,15 @@
 new_log_data = ''.join(in_data_stream.readChunkList(offset_index, end))
 previous_log_data_line_list = previous_log_data.splitlines()
 new_log_data_line_list = new_log_data.splitlines()
+# Last new log line may not be valid JSON due to MAX_CHUNK_SIZE:
+# In that case, leave it to the next KPI calculation
+last_new_log_data_line = new_log_data_line_list[-1]
+try:
+  json.loads(last_new_log_data_line)
+except ValueError:
+  end -= len(last_new_log_data_line)
+  new_log_data_line_list = new_log_data_line_list[:-1]
 log_data_line_list = previous_log_data_line_list + new_log_data_line_list
 # Remove duplicate log lines
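The guard added in the second hunk matters because the chunk is capped by byte count, not by line boundaries, so the last line of a capped read may be an incomplete JSON record. Below is a minimal standalone sketch of the same bounded-read pattern; `read_complete_json_lines` and `stream_text` are hypothetical stand-ins introduced for illustration, while the real script reads through the Wendelin Data Stream API (`in_data_stream.readChunkList`) as shown in the diff above.

import json

MAX_CHUNK_SIZE = 10000000 # bytes, mirrors the value added in this commit

def read_complete_json_lines(stream_text, offset_index):
  """Read at most MAX_CHUNK_SIZE bytes starting at offset_index.

  Returns (line_list, new_offset), dropping a trailing partial line
  so that only complete JSON records are processed. `stream_text`
  stands in for the Data Stream contents in this sketch.
  """
  end = min(len(stream_text), offset_index + MAX_CHUNK_SIZE)
  # No new data to process
  if offset_index >= end:
    return [], offset_index
  chunk = stream_text[offset_index:end]
  line_list = chunk.splitlines()
  try:
    json.loads(line_list[-1])
  except ValueError:
    # Last line was truncated by MAX_CHUNK_SIZE:
    # leave it to the next calculation by rewinding the offset
    end -= len(line_list[-1])
    line_list = line_list[:-1]
  return line_list, end

# Example: the second record is cut off mid-line and is deferred
data = '{"qci": 7}\n{"qci"'
lines, new_offset = read_complete_json_lines(data, 0)
assert lines == ['{"qci": 7}']
assert data[new_offset:] == '{"qci"'

Rewinding `end` past the partial line means the next run re-reads at most one line, which is a cheap price for keeping every processed record a valid, whole JSON object.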