Lines Matching +full:test +full:- +full:rules
1 # SPDX-License-Identifier: GPL-2.0
13 self.rules = None
29 # vars for test pass/failure statistics
30 …oremetrics= set() # metrics with no results or negative results, neg result counts as a failed test
51 def read_json(self, filename: str) -> dict:
75 def get_bounds(self, lb, ub, error, alias={}, ridx:int = 0) -> list:
84 upper bound; return -1 if the upper bound is a metric value that has not been collected
107 ubv = get_bound_value(ub, -1, ridx)
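Read literally, a bound here is either a numeric literal or the name of another metric whose collected value supplies the bound, with -1 signalling "not collected". A hypothetical standalone sketch of that resolution (the function and the values dict are assumptions, not the file's API):

    # hypothetical sketch: resolve a literal or metric-name bound
    def get_bound_value(bound, values, default=-1):
        if isinstance(bound, (int, float)):
            return bound                           # literal bound
        return values.get(str(bound).lower(), default)  # metric-name bound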
116 def get_value(self, name:str, ridx:int = 0) -> list:
120 All future test(s) on this metric will fail.
137 return lb - err <= val <= ub + err
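The check on line 137 is a symmetric tolerance band around the bounds; for example, with a 5-unit tolerance a value slightly above the upper bound still passes:

    val, lb, ub, err = 102.0, 0.0, 100.0, 5.0
    ok = lb - err <= val <= ub + err    # True: -5 <= 102 <= 105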
142 Check if metric values are non-negative.
143 One metric is counted as one test.
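Per the two docstring lines above, every collected metric contributes exactly one test. A minimal sketch of such a pass, where the input dict and the return shape are assumptions:

    # hypothetical sketch of the positive-value pass over collected metrics
    def positive_value_test(metricvalues):
        passed, failed = 0, []
        for name, val in metricvalues.items():
            if val < 0:
                failed.append(name)   # a negative result counts as a failed test
            else:
                passed += 1
        return passed, failed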
182 @returns: value of the formula on success; -1 if one or more metric values are not provided
192 if i+1 == len(formula) or formula[i] in ('+', '-', '*', '/'):
200 stack[-1] = stack[-1] * v[0]
202 stack[-1] = stack[-1] / v[0]
203 elif sign == '-':
204 stack.append(-v[0])
213 return -1, "Metric value missing: "+','.join(errs)
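Lines 192-204 evaluate the formula left to right with a stack that defers '+'/'-' terms and folds '*'/'/' into the stack top, which yields correct operator precedence without needing parentheses. A self-contained sketch of the same idea, assuming a well-formed expression and a plain {name: value} dict in place of self.get_value():

    def eval_formula(formula, values):
        stack, sign, b = [], '+', 0
        for i, ch in enumerate(formula + '+'):   # '+' sentinel flushes the last operand
            if ch in '+-*/':
                v = values[formula[b:i]]         # operand scanned since position b
                if sign == '*':
                    stack[-1] *= v               # '*' binds tighter: fold into stack top
                elif sign == '/':
                    stack[-1] /= v
                elif sign == '-':
                    stack.append(-v)
                else:
                    stack.append(v)
                sign, b = ch, i + 1
        return sum(stack)                        # remaining '+'/'-' terms

    # eval_formula("a+b*c", {"a": 1, "b": 2, "c": 3}) == 7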
223 One rule is counted as one test.
233 if val == -1:
257 # Single Metric Test
262 One metric is counted as one test in this type of test.
265 … This test updates self.totalcnt and records failed tests in self.failtests['SingleMetricTest'].
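A rough sketch of one such single-metric test, assuming each metric in the rule carries its own bounds (the 'Metrics', 'Name', 'RangeLower', and 'RangeUpper' field names are illustrative, not taken from the file):

    # hypothetical sketch: one bounds check per metric in the rule
    def single_metric_test(rule, values, err=0.0):
        passed, failed = 0, []
        for m in rule['Metrics']:                # illustrative field name
            name = m['Name'].lower()             # illustrative field name
            val = values.get(name)
            lb, ub = m['RangeLower'], m['RangeUpper']
            if val is None or not (lb - err <= val <= ub + err):
                failed.append(name)
            else:
                passed += 1
        return passed, failed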
323 print("Test validation finished. Final report: ")
347 Convert collected metric data from the -j output to a dict of {metric_name: value}.
352 … if "metric-unit" in result and result["metric-unit"] != "(null)" and result["metric-unit"] != "":
353 … name = result["metric-unit"].split(" ")[1] if len(result["metric-unit"].split(" ")) > 1 \
354 else result["metric-unit"]
355 metricvalues[name.lower()] = float(result["metric-value"])
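Lines 352-355 pull the metric name out of the "metric-unit" string (the second whitespace-separated token when one exists) and key the value by its lowercase form. A standalone sketch of the same conversion, assuming `out` holds the line-delimited JSON that perf stat -j emits:

    import json

    def json_to_metric_dict(out):
        metricvalues = {}
        for line in out.splitlines():
            if not line.strip():
                continue
            result = json.loads(line)
            unit = result.get("metric-unit", "")
            if unit and unit != "(null)":
                parts = unit.split(" ")
                name = parts[1] if len(parts) > 1 else unit
                metricvalues[name.lower()] = float(result["metric-value"])
        return metricvalues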
362 command = [tool, 'stat', '-j', '-M', metric, '-a']
366 cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
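Note that line 366 captures stderr rather than stdout: perf stat prints its output, including the -j JSON records, to stderr unless redirected. A hedged end-to-end example, where the metric group name and workload are placeholders:

    import subprocess

    # placeholders: 'TopdownL1' for the metric group, 'sleep 1' for the workload
    command = ['perf', 'stat', '-j', '-M', 'TopdownL1', '-a', 'sleep', '1']
    cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
    data = cmd.stderr    # one JSON object per line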
373 Collect metric data with "perf stat -M" on a given workload, with -a and -j.
383 # Create metric set for relationship rules
384 for rule in self.rules:
414 command = ['perf', 'list', '-j', '--details', 'metrics']
415 … cmd = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
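A sketch of turning that listing into the supported-metric name set used by the rule filtering below, assuming the JSON array on stdout carries a "MetricName" field per entry:

    import json
    import subprocess

    cmd = subprocess.run(['perf', 'list', '-j', '--details', 'metrics'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         encoding='utf-8')
    metrics = {m['MetricName'].lower()
               for m in json.loads(cmd.stdout) if 'MetricName' in m}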
432 def remove_unsupported_rules(self, rules):
434 for rule in rules:
446 Create full rules, which include:
447 1) All the rules from the "relationship_rules" file
450 Reindex all the rules to avoid repeated RuleIndex
453 rules = data['RelationshipRules']
455 self.rules = self.remove_unsupported_rules(rules)
463 self.rules.append(pctgrule)
465 # Re-index all rules to avoid repeated RuleIndex
467 for r in self.rules:
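The re-index pass starting at line 465 amounts to a fresh sequential assignment after the rule sources are merged; a minimal standalone sketch:

    # assign fresh, unique indexes so no two merged rules share a RuleIndex
    def reindex(rules):
        for idx, r in enumerate(rules, start=1):
            r['RuleIndex'] = idx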
472 # TODO: need to test and generate the file name correctly
473 …data = {'RelationshipRules':self.rules, 'SupportedMetrics': [{"MetricName": name} for name in self…
502 def test(self):
504 The real entry point of the test framework.
505 … This function loads the validation rule JSON file and Standard Metric file to create rules for
509 … In the test process, it passes through each rule and launches the correct test function based on the
521 # Run positive value test
523 for r in self.rules:
524 # skip rules that use metrics that do not exist on this platform
533 print("Unsupported Test Type: ", testtype)
534 self.errlist.append("Unsupported Test Type from rule: " + str(r['RuleIndex']))
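Pulled together, lines 523-534 are a per-rule dispatch on 'TestType'. A compact sketch with the handler names as assumptions (str() on RuleIndex avoids a TypeError when the index is an integer, as the re-index pass produces):

    # hypothetical handler names; only the dispatch shape is taken from the file
    def run_rule(validator, r):
        testtype = r['TestType']
        if testtype == 'RelationshipTest':
            validator.relationship_test(r)
        elif testtype == 'SingleMetricTest':
            validator.single_test(r)
        else:
            print("Unsupported Test Type: ", testtype)
            validator.errlist.append(
                "Unsupported Test Type from rule: " + str(r['RuleIndex']))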
538 … print("Non-negative metric count: ", self.failtests['PositiveValueTest']['Passed Tests'])
539 print("Total Test Count: ", self.totalcnt)
540 print("Passed Test Count: ", self.passedcnt)
547 def main() -> None:
550 parser.add_argument("-rule", help="Base validation rule file", required=True)
551 …parser.add_argument("-output_dir", help="Path for validator output file, report file", required=Tr…
552 …parser.add_argument("-debug", help="Debug run, save intermediate data to files", action="store_tru…
553 parser.add_argument("-wl", help="Workload to run while data collection", default="true")
554 parser.add_argument("-m", help="Metric list to validate", default="")
564 ret = validator.test()
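For reference, a hypothetical invocation matching the flags above (the script name, paths, workload, and metric name are all placeholders):

    python3 perf_metric_validation.py -rule rules.json -output_dir /tmp/validator \
            -wl 'perf bench futex hash' -m Backend_Bound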