Commit a0f1cc18 authored by Weilin Wang's avatar Weilin Wang Committed by Namhyung Kim

perf test: Add skip list for metrics known to fail

Add a skip list for metrics that are known to fail, because some of the
metrics are very likely to fail due to multiplexing or other errors. All
of the known-flaky metrics are added to this skip list.
Signed-off-by: default avatarWeilin Wang <weilin.wang@intel.com>
Tested-by: default avatarNamhyung Kim <namhyung@kernel.org>
Cc: ravi.bangoria@amd.com
Cc: Ian Rogers <irogers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Samantha Alt <samantha.alt@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Link: https://lore.kernel.org/r/20230620170027.1861012-3-weilin.wang@intel.com
Signed-off-by: default avatarNamhyung Kim <namhyung@kernel.org>
parent 3ad7092f
...@@ -12,7 +12,7 @@ class Validator: ...@@ -12,7 +12,7 @@ class Validator:
self.reportfname = reportfname self.reportfname = reportfname
self.rules = None self.rules = None
self.collectlist=metrics self.collectlist=metrics
self.metrics = set() self.metrics = set(metrics)
self.tolerance = t self.tolerance = t
self.workloads = [x for x in workload.split(",") if x] self.workloads = [x for x in workload.split(",") if x]
...@@ -148,6 +148,7 @@ class Validator: ...@@ -148,6 +148,7 @@ class Validator:
self.errlist.append("Metric '%s' is not collected"%(name)) self.errlist.append("Metric '%s' is not collected"%(name))
elif val < 0: elif val < 0:
negmetric.add("{0}(={1:.4f})".format(name, val)) negmetric.add("{0}(={1:.4f})".format(name, val))
self.collectlist[0].append(name)
else: else:
pcnt += 1 pcnt += 1
tcnt += 1 tcnt += 1
...@@ -266,6 +267,7 @@ class Validator: ...@@ -266,6 +267,7 @@ class Validator:
passcnt += 1 passcnt += 1
else: else:
faillist.append({'MetricName':m['Name'], 'CollectedValue':result}) faillist.append({'MetricName':m['Name'], 'CollectedValue':result})
self.collectlist[0].append(m['Name'])
self.totalcnt += totalcnt self.totalcnt += totalcnt
self.passedcnt += passcnt self.passedcnt += passcnt
...@@ -348,7 +350,7 @@ class Validator: ...@@ -348,7 +350,7 @@ class Validator:
if rule["TestType"] == "RelationshipTest": if rule["TestType"] == "RelationshipTest":
metrics = [m["Name"] for m in rule["Metrics"]] metrics = [m["Name"] for m in rule["Metrics"]]
if not any(m not in collectlist[0] for m in metrics): if not any(m not in collectlist[0] for m in metrics):
collectlist[rule["RuleIndex"]] = set(metrics) collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]
for idx, metrics in collectlist.items(): for idx, metrics in collectlist.items():
if idx == 0: wl = "sleep 0.5".split() if idx == 0: wl = "sleep 0.5".split()
...@@ -356,9 +358,12 @@ class Validator: ...@@ -356,9 +358,12 @@ class Validator:
for metric in metrics: for metric in metrics:
command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"] command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
command.extend(wl) command.extend(wl)
print(" ".join(command))
cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8') cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
data = [x+'}' for x in cmd.stderr.split('}\n') if x] data = [x+'}' for x in cmd.stderr.split('}\n') if x]
self.convert(data, idx) self.convert(data, idx)
self.collectlist = dict()
self.collectlist[0] = list()
# End of Collector and Converter # End of Collector and Converter
# Start of Rule Generator # Start of Rule Generator
...@@ -386,6 +391,20 @@ class Validator: ...@@ -386,6 +391,20 @@ class Validator:
return return
def remove_unsupported_rules(self, rules, skiplist: set = None):
    """Filter out rules that reference skipped or uncollected metrics.

    First discards every metric named in *skiplist* from self.metrics
    (these are metrics known to be flaky, e.g. due to multiplexing),
    then keeps only the rules whose metrics all remain in self.metrics.

    Args:
        rules: list of rule dicts, each carrying a "Metrics" list of
            {"Name": <metric name>, ...} entries.
        skiplist: optional set of metric names to exclude; a None value
            is treated as an empty set.

    Returns:
        A new list containing only the rules whose metrics are all
        still present in self.metrics. *rules* itself is not mutated.
    """
    # Guard against the None default: iterating None raises TypeError.
    for m in (skiplist or ()):
        self.metrics.discard(m)
    new_rules = []
    for rule in rules:
        # Keep the rule only if every metric it references survived
        # both the skip list and the supported-metric filter.
        if all(m["Name"] in self.metrics for m in rule["Metrics"]):
            new_rules.append(rule)
    return new_rules
def create_rules(self): def create_rules(self):
""" """
Create full rules which includes: Create full rules which includes:
...@@ -394,7 +413,10 @@ class Validator: ...@@ -394,7 +413,10 @@ class Validator:
Reindex all the rules to avoid repeated RuleIndex Reindex all the rules to avoid repeated RuleIndex
""" """
self.rules = self.read_json(self.rulefname)['RelationshipRules'] data = self.read_json(self.rulefname)
rules = data['RelationshipRules']
skiplist = set(data['SkipList'])
self.rules = self.remove_unsupported_rules(rules, skiplist)
pctgrule = {'RuleIndex':0, pctgrule = {'RuleIndex':0,
'TestType':'SingleMetricTest', 'TestType':'SingleMetricTest',
'RangeLower':'0', 'RangeLower':'0',
...@@ -453,6 +475,7 @@ class Validator: ...@@ -453,6 +475,7 @@ class Validator:
The final report is written into a JSON file. The final report is written into a JSON file.
''' '''
if not self.collectlist:
self.parse_perf_metrics() self.parse_perf_metrics()
self.create_rules() self.create_rules()
for i in range(0, len(self.workloads)): for i in range(0, len(self.workloads)):
......
{ {
"SkipList": [
"tsx_aborted_cycles",
"tsx_transactional_cycles",
"C2_Pkg_Residency",
"C6_Pkg_Residency",
"C1_Core_Residency",
"C6_Core_Residency",
"tma_false_sharing",
"tma_remote_cache",
"tma_contested_accesses"
],
"RelationshipRules": [ "RelationshipRules": [
{ {
"RuleIndex": 1, "RuleIndex": 1,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment