Commit 761a5f3a authored by Joanne Hugé

Fully implement table generation

parent 6dc0ca1c
@@ -5,6 +5,7 @@ import json
import markdown_table
import argparse
import os
import parse
class MeasureSetHandler:
@@ -28,7 +29,7 @@ class MeasureSetHandler:
def get_measure_set(self, measure_name):
measure_set = MeasureSet()
measure_path = "{}/{}".format(self.measures_dir, measure_name)
measure_path = "{}/{}.json".format(self.measures_dir, measure_name)
measure_set.import_from_json(measure_path, False)
return measure_set
@@ -40,7 +41,7 @@ class MeasureSetHandler:
self.measure_sets[mtype] = {'ids': [], 'next_id': 0}
next_id = self.measure_sets[mtype]['next_id']
measure_file_name = MeasureSetHandler.measures_dir + "/" + mtype + str(next_id)
measure_file_name = "{}/{}{}.json".format(MeasureSetHandler.measures_dir, mtype, next_id)
measure_set.export_to_json(measure_file_name)
self.measure_sets[mtype]['ids'].append(next_id)
@@ -54,28 +55,64 @@ class MeasureSetHandler:
if mtype in self.measure_sets and len(self.measure_sets[mtype]['ids']) > 0:
self.measure_sets[mtype]['ids'].remove(mid)
measure_file_name = "{}/{}{}".format(MeasureSetHandler.measures_dir, mtype, mid)
measure_file_name = "{}/{}{}.json".format(MeasureSetHandler.measures_dir, mtype, mid)
os.remove(measure_file_name)
self.save()
print("Removed measure {}{}".format(mtype, mid))
def remove_all(self):
for mtype in self.measure_sets:
for mid in self.measure_sets[mtype]['ids']:
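# remove_measure_set() below takes the id out of self.measure_sets[mtype]['ids'],
# so the list is drained with a while loop instead of being iterated over while it shrinks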
while True:
if len(self.measure_sets[mtype]['ids']) == 0:
break
mid = self.measure_sets[mtype]['ids'][0]
print(" Deleting {}{}...".format(mtype, mid))
self.remove_measure_set(mtype, mid)
print("Removed all measures".format(mtype, mid))
def generate_tables():
def generate_tables(self):
with open(self.measures_dir + "/" + "measure_tables.md") as measure_table:
with open(self.measures_dir + "/" + "measure_tables.md", 'w+') as measure_table:
measure_table.write("# Measure Tables\n\n")
measure_table.write("## Measurements tables\n\n")
measure_table.write("### Abbreviations used\n\n")
for abbr_name in MeasureSet.abbreviations:
measure_table.write("* {}: {}\n".format(abbr_name, MeasureSet.abbreviations[abbr_name]))
measure_table.write("\n")
for mtype in self.measure_sets:
need_header = True
props_lens = []
measure_table.write("### {} tables\n\n".format(mtype))
# Build the metadata mask: collect the metadata keys whose values differ across the measure sets
metadata_mask = []
measures = []
for mid in self.measure_sets[mtype]['ids']:
measures.append(self.get_measure_set("{}{}".format(mtype, mid)))
if len(measures) == 0:
continue
first_metadata = measures[0].metadata
measure_table.write("## {} Tables\n\n".format(mtype))
for measure in measures[1:]:
for metadata_name in measure.metadata:
# If it is not already in the metadata mask
if metadata_name not in metadata_mask:
# If the value differs from the first set's, add the key to the mask
if measure.metadata[metadata_name] != first_metadata[metadata_name]:
metadata_mask.append(metadata_name)
# Write the metadata values shared by all measure sets before the table
measure_table.write("**Common metadatas:** ")
common_metadatas = []
for metadata_name in first_metadata:
if metadata_name not in metadata_mask:
common_metadatas.append("{}: {}".format(MeasureSet.abbreviations[metadata_name], first_metadata[metadata_name]))
measure_table.write(", ".join(common_metadatas) + "\n\n")
for mid in self.measure_sets[mtype]['ids']:
@@ -83,85 +120,182 @@ class MeasureSetHandler:
if need_header:
measure_table.write(measure.generate_table(headers=True))
table_str, props_lens = measure.generate_table(headers=True, metadata_mask=metadata_mask)
measure_table.write(table_str)
need_header = False
else:
measure_table.write(measure.generate_table(headers=False))
measure_table.write(measure.generate_table(headers=False, props_lens=props_lens, metadata_mask=metadata_mask)[0])
measure_table.write("\n")
measure_table.write("\n")
measure_table.write("\n")
class MeasureSet:
abbreviations = {
'ker': 'Linux kernel version',
'prio': 'Task priority',
'i': 'Interval',
'board': 'Board name',
'boot_p': 'Boot Parameters',
'delta': 'ETF qdisc delta',
}
def __init__(self):
self.metadata = {}
self.metadata['board'] = "Emerald"
self.metadata['linux_version'] = "4.19"
self.metadata['boot_params'] = "isolcpus"
self.cols = {}
self.interval = 0
self.metadata['ker'] = "4.19"
self.metadata['boot_p'] = "isolcpus"
self.metadata['i'] = "200us"
self.metadata['delta'] = "200us"
self.metadata['prio'] = "200us"
self.props = []
self.props_names = []
self.units = []
def __str__(self):
return "Cols: " + str(self.cols) + "\nInterval: " + str(self.interval)
return "Cols: " + str(self.props) + "\nInterval: " + str(self.interval)
def input_metadata(self):
def set_meta_data(self, board, linux_version, boot_params):
metadata = {}
metadata.update(self.metadata)
self.board = board
self.linux_version = linux_version
self.boot_params = boot_params
metadata_name = ""
while True:
def add_results(self, measure_type, interval, col_names, units, cols, metadata):
print("Current metadata:\n")
for metadata_name in metadata:
print(" {}: {}".format(metadata_name, metadata[metadata_name]))
metadata_name = input('Enter metadata name (type "done" to exit): ')
if metadata_name == "done":
break
metadata_value = input('Enter metadata value (type "done" to exit, "cancel" to cancel current metadata): ')
if metadata_value == "done":
break
if metadata_value == "cancel":
continue
metadata[metadata_name] = metadata_value
return metadata
def add_metadata(self, measure_type, units, metadata):
self.measure_type = measure_type
self.cols = cols
self.col_names = col_names
self.interval = interval
self.units = units
self.metadata.update(metadata)
self.max = [max(col) for col in cols]
self.min = [min(col) for col in cols]
self.avg = [statistics.mean(col) for col in cols]
self.var = [statistics.variance(col) for col in cols]
def add_chronological(self, props_names, props):
self.props = props
self.props_names = props_names
self.props_type = 'chronological'
self.max = [max(prop) for prop in props]
self.min = [min(prop) for prop in props]
self.avg = [statistics.mean(prop) for prop in props]
self.var = [statistics.variance(prop) for prop in props]
def histogram_to_chronological(histogram):
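# Expands a histogram (index = measured value, element = number of samples) into the
# flat list of samples it represents, e.g. [2, 0, 3] -> [0, 0, 2, 2, 2]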
chrono = list(map(lambda x: [x[0]]*x[1], list(enumerate(histogram))))
chrono = [x for l in chrono for x in l]
return chrono
def add_histogram(self, props_names, props):
self.props = props
self.props_names = props_names
self.props_type = 'histogram'
self.max = []
self.min = []
self.avg = []
self.var = []
for prop in props:
chrono = MeasureSet.histogram_to_chronological(prop)
self.max.append(max(chrono))
self.min.append(min(chrono))
self.avg.append(statistics.mean(chrono))
self.var.append(statistics.variance(chrono))
def export_to_json(self, path):
with open(path, 'w') as outfile:
json.dump({'measure_type': self.measure_type, 'interval': self.interval, 'col_names': self.col_names, 'units': self.units, 'cols': self.cols, 'metadata': self.metadata}, outfile)
json.dump({'measure_type': self.measure_type,
'props_names': self.props_names,
'units': self.units,
'props': self.props,
'props_type': self.props_type,
'metadata': self.metadata}, outfile)
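# Shape of the exported file, with purely illustrative values:
# {"measure_type": "cyclictest_wake-up_latency", "props_type": "histogram",
#  "props_names": ["wake-up latency"], "units": ["us"], "props": [[12, 14, ...]],
#  "metadata": {"ker": "4.19", "boot_p": "isolcpus", ...}}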
def parse_cyclictest(infile):
data = {}
data['measure_type'] = 'cyclictest_wake-up_latency'
data['props_type'] = 'histogram'
data['props_names'] = ['wake-up latency']
data['units'] = ['us']
data['props'] = [[]]
def import_from_json(self, path, flat):
lines = [line for line in infile]
for line in lines[2:]:
if line[0] == '#':
break
i, x = parse.parse('{:d} {:d}', line)
data['props'][0].append(x)
return data
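# The histogram lines of a cyclictest report look like e.g. "000042 000137"
# (latency bucket in us, then a sample count); the first two lines of the file are
# skipped and parsing stops at the first '#'-prefixed summary line, keeping only
# the count column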
def import_from_json(self, path, flat=False, cyclictest=False):
with open(path) as infile:
data = json.load(infile)
if cyclictest:
data = MeasureSet.parse_cyclictest(infile)
data['metadata'] = self.input_metadata()
else:
data = json.load(infile)
measure_type = data['measure_type']
interval = data['interval']
col_names = data['col_names']
units = data['units']
metadata = data['metadata']
if flat:
self.add_metadata(measure_type, units, metadata)
values = data['values']
props_names = data['props_names']
nb_cols = len(col_names)
cols = [[] for c in range(nb_cols)]
if data['props_type'] == 'histogram':
for i,value in enumerate(values):
cols[i % nb_cols].append(value)
props = data['props']
self.add_histogram(props_names, props)
else:
cols = data['cols']
if flat:
values = data['values']
nb_props = len(props_names)
props = [[] for c in range(nb_props)]
self.add_results(measure_type, interval, col_names, units, cols, metadata)
for i,value in enumerate(values):
props[i % nb_props].append(value)
else:
props = data['props']
self.add_chronological(props_names, props)
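# Flat imports expect data['values'] as a single interleaved list: with two props,
# [a0, b0, a1, b1, ...] is split round-robin into [[a0, a1, ...], [b0, b1, ...]];
# non-flat imports read the already-nested data['props'] directly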
def generate_graph(self, path):
pass
def generate_table(self, headers=True, values=True, metadata_mask={}):
def generate_table(self, headers=True, values=True, metadata_mask=[], props_lens=[]):
if headers == False and values == False:
return ""
@@ -172,12 +306,12 @@ class MeasureSet:
headers = ["Min", "Max", "Avg", "Var"]
if metadata_mask != {}:
if metadata_mask != []:
table += [["Metadata"] + headers]
table += [["**" + ", ".join(metadata_mask) + "**"] + ["**" + " - ".join(self.col_names) + "**"] * len(headers)]
table += [["**" + ", ".join(metadata_mask) + "**"] + ["**" + " - ".join(self.props_names) + "**"] * len(headers)]
else:
table += [headers]
table += [["**" + " - ".join(self.col_names) + "**"] * len(headers)]
table += [["**" + " - ".join(self.props_names) + "**"] * len(headers)]
if values:
m = [self.min, self.max, self.avg, self.var]
@@ -190,27 +324,30 @@ class MeasureSet:
else:
table += [[" - ".join(values[i]) for i in range(len(values))]]
col_lens = [max([len(table[i][j]) for i in range(len(table))]) for j in range(len(table[0]))]
table = [[ table[i][j].ljust(col_lens[j]) for j in range(len(table[0]))] for i in range(len(table))]
if props_lens == []:
props_lens = [max([len(table[i][j]) for i in range(len(table))]) for j in range(len(table[0]))]
table = [[ table[i][j].ljust(props_lens[j]) for j in range(len(table[0]))] for i in range(len(table))]
table_str = ""
if headers:
table_str += " | ".join(table[0]) + "\n"
table_str += " | ".join([ ("-" * col_lens[i]) for i in range(len(col_lens)) ]) + "\n"
table_str += " | ".join([ ("-" * props_lens[i]) for i in range(len(props_lens)) ]) + "\n"
if values:
table_str += "\n".join([" | ".join(line) for line in table[1:]])
else:
table_str += "\n".join([" | ".join(line) for line in table])
return table_str
return (table_str, props_lens)
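# How generate_tables() uses this return value (variable names here are illustrative):
# the first measure set of a type supplies the column widths and the following sets
# reuse them so the stacked tables stay aligned, e.g.
#   table_str, props_lens = first_set.generate_table(headers=True, metadata_mask=mask)
#   next_str = other_set.generate_table(headers=False, props_lens=props_lens, metadata_mask=mask)[0]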
def parse():
def parse_args():
parser = argparse
parser = argparse.ArgumentParser(description='Measure analysis')
parser.add_argument('-i', nargs=1, required=False, help='import file')
parser.add_argument('--remove_all', action='store_true', help='remove all measure sets')
parser.add_argument('-c', action='store_true', required=False, help='parse cyclictest histogram')
parser.add_argument('--remove-all', action='store_true', help='remove all measure sets')
parser.add_argument('-t', nargs='?', const='input_file', required=False, help='generate table')
parser.add_argument('-T', action='store_true', required=False, help='generate all tables')
parser.add_argument('-s', action='store_true', help='show measures')
args = parser.parse_args()
@@ -219,15 +356,23 @@ def parse():
if args.i is not None:
measure_set = MeasureSet()
measure_set.import_from_json(args.i[0], True)
if args.c:
measure_set.import_from_json(args.i[0], cyclictest=True)
else:
measure_set.import_from_json(args.i[0], flat=True)
ms_handler.add_measure_set(measure_set)
if args.t is not None:
print(measure_set.generate_table())
print(measure_set.generate_table()[0])
elif args.t is not None and args.t != "input_file":
measure_set = ms_handler.get_measure_set(args.t)
print(measure_set.generate_table(True, True, {'board', 'linux_version', 'boot_params'}))
print(measure_set.generate_table(True, True, ['board', 'ker', 'boot_p'])[0])
if args.T:
ms_handler.generate_tables()
if args.remove_all:
confirm = input("Are you sure all measure sets should be removed? [Yes] / [No]: ")
@@ -237,4 +382,4 @@ def parse():
if args.s:
print(ms_handler)
parse()
parse_args()
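For reference, a few illustrative invocations of the updated script; the file name measure.py and the input file names are assumptions, not part of this commit:

    python measure.py -i cyclic_hist.txt -c -t   # import file, parse cyclictest histogram, generate table
    python measure.py -T                         # generate all tables
    python measure.py --remove-all               # remove all measure sets (confirmation is asked)
    python measure.py -s                         # show measures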