From: Weilin Wang
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo, Jiri Olsa,
 Namhyung Kim, Adrian Hunter, Ian Rogers, linux-perf-users@vger.kernel.org,
 linux-kernel@vger.kernel.org
Cc: Weilin Wang, Kan Liang, Samantha Alt, Perry Taylor, Caleb Biggers,
 ravi.bangoria@amd.com
Subject: [PATCH v5 1/3] perf test: Add metric value validation test
Date: Tue, 20 Jun 2023 10:00:25 -0700
Message-Id: <20230620170027.1861012-2-weilin.wang@intel.com>
In-Reply-To: <20230620170027.1861012-1-weilin.wang@intel.com>
References: <20230620170027.1861012-1-weilin.wang@intel.com>

Add a metric value validation test that checks whether metric values fall
within their correct value ranges. Three types of tests are included:
1) the positive-value test checks that all collected metrics are
non-negative; 2) the single-value test checks that a list of metrics has
values within given value ranges; 3) the relationship test checks that
multiple metrics follow a given relationship, e.g.
memory_bandwidth_read + memory_bandwidth_write = memory_bandwidth_total.
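As an illustration of the relationship test, the sketch below pairs one
rule from the rules file with made-up collected values; the alias
resolution, tolerance denormalization, and bound check mirror what
get_bounds() and check_bound() do in the script. This snippet is
illustrative only and is not part of the patch:

    rule = {"Formula": "a+b", "RangeLower": "c", "RangeUpper": "c",
            "ErrorThreshold": 5.0,
            "Metrics": [{"Name": "memory_bandwidth_read", "Alias": "a"},
                        {"Name": "memory_bandwidth_write", "Alias": "b"},
                        {"Name": "memory_bandwidth_total", "Alias": "c"}]}
    results = {"memory_bandwidth_read": 10.2, "memory_bandwidth_write": 4.8,
               "memory_bandwidth_total": 15.1}

    alias = {m["Alias"]: m["Name"] for m in rule["Metrics"]}
    val = results[alias["a"]] + results[alias["b"]]  # evaluate "a+b"
    bound = results[alias["c"]]                      # lower == upper == "c"
    err = rule["ErrorThreshold"] * bound / 100       # denormalized tolerance
    assert bound - err <= val <= bound + err         # the rule passes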
Signed-off-by: Weilin Wang
Tested-by: Namhyung Kim
---
 .../tests/shell/lib/perf_metric_validation.py | 514 ++++++++++++++++++
 .../lib/perf_metric_validation_rules.json     | 387 +++++++++++++
 tools/perf/tests/shell/stat_metrics_values.sh |  30 +
 3 files changed, 931 insertions(+)
 create mode 100644 tools/perf/tests/shell/lib/perf_metric_validation.py
 create mode 100644 tools/perf/tests/shell/lib/perf_metric_validation_rules.json
 create mode 100755 tools/perf/tests/shell/stat_metrics_values.sh

diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
new file mode 100644
index 000000000000..81bd2bf38b67
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -0,0 +1,514 @@
+# SPDX-License-Identifier: GPL-2.0
+import re
+import csv
+import json
+import argparse
+from pathlib import Path
+import subprocess
+
+class Validator:
+    def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
+        self.rulefname = rulefname
+        self.reportfname = reportfname
+        self.rules = None
+        self.collectlist = metrics
+        self.metrics = set()
+        self.tolerance = t
+
+        self.workloads = [x for x in workload.split(",") if x]
+        self.wlidx = 0  # idx of the current workload
+        self.allresults = dict()  # metric results of all workloads
+        self.allignoremetrics = dict()  # metrics with no results or negative results
+        self.allfailtests = dict()
+        self.alltotalcnt = dict()
+        self.allpassedcnt = dict()
+        self.allerrlist = dict()
+
+        self.results = dict()  # metric results of the current workload
+        # vars for test pass/failure statistics
+        self.ignoremetrics = set()  # metrics with no results or negative results; a negative result counts as a failed test
+        self.failtests = dict()
+        self.totalcnt = 0
+        self.passedcnt = 0
+        # vars for errors
+        self.errlist = list()
+
+        # vars for Rule Generator
+        self.pctgmetrics = set()  # percentage rule
+
+        # vars for debug
+        self.datafname = datafname
+        self.debug = debug
+        self.fullrulefname = fullrulefname
+
+    def read_json(self, filename: str) -> dict:
+        try:
+            with open(Path(filename).resolve(), "r") as f:
+                data = json.loads(f.read())
+        except OSError as e:
+            print(f"Error when reading file {e}")
+            sys.exit()
+
+        return data
+
+    def json_dump(self, data, output_file):
+        parent = Path(output_file).parent
+        if not parent.exists():
+            parent.mkdir(parents=True)
+
+        with open(output_file, "w+") as output_file:
+            json.dump(data,
+                      output_file,
+                      ensure_ascii=True,
+                      indent=4)
+
+    def get_results(self, idx: int = 0):
+        return self.results[idx]
+
+    def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list:
+        """
+        Get bounds and tolerance from lb, ub, and error.
+        If lb is missing, use 0.0; if ub is missing, use float('inf'); if error is missing, use self.tolerance.
+
+        @param lb: str/float, lower bound
+        @param ub: str/float, upper bound
+        @param error: float/str, error tolerance
+        @returns: lower bound, return inf if the lower bound is a metric value and is not collected
+                  upper bound, return -1 if the upper bound is a metric value and is not collected
+                  tolerance, denormalized based on the upper bound value
+        """
+        # init ubv and lbv to invalid values
+        def get_bound_value(bound, initval, ridx):
+            val = initval
+            if isinstance(bound, int) or isinstance(bound, float):
+                val = bound
+            elif isinstance(bound, str):
+                if bound == '':
+                    val = float("inf")
+                elif bound in alias:
+                    vall = self.get_value(alias[bound], ridx)
+                    if vall:
+                        val = vall[0]
+                elif bound.replace('.', '1').isdigit():
+                    val = float(bound)
+                else:
+                    print("Wrong bound: {0}".format(bound))
+            else:
+                print("Wrong bound: {0}".format(bound))
+            return val
+
+        ubv = get_bound_value(ub, -1, ridx)
+        lbv = get_bound_value(lb, float('inf'), ridx)
+        t = get_bound_value(error, self.tolerance, ridx)
+
+        # denormalize the error threshold
+        denormerr = t * ubv / 100 if ubv != 100 and ubv > 0 else t
+
+        return lbv, ubv, denormerr
+
+    def get_value(self, name: str, ridx: int = 0) -> list:
+        """
+        Get the value of the metric from self.results.
+        If the result of this metric is not provided, the metric name will be added into self.ignoremetrics and self.errlist.
+        All future test(s) on this metric will fail.
+
+        @param name: name of the metric
+        @returns: list with value found in self.results; list is empty when not value found.
+        """
+        results = []
+        data = self.results[ridx] if ridx in self.results else self.results[0]
+        if name not in self.ignoremetrics:
+            if name in data:
+                results.append(data[name])
+            elif name.replace('.', '1').isdigit():
+                results.append(float(name))
+            else:
+                self.errlist.append("Metric '%s' is not collected or the value format is incorrect" % (name))
+                self.ignoremetrics.add(name)
+        return results
+
+    def check_bound(self, val, lb, ub, err):
+        return True if val <= ub + err and val >= lb - err else False
+
+    # Positive Value Sanity check
+    def pos_val_test(self):
+        """
+        Check if metric values are non-negative.
+        One metric is counted as one test.
+        Failure: when metric value is negative or not provided.
+        Metrics with negative value will be added into the self.failtests['PositiveValueTest'] and self.ignoremetrics.
+        """
+        negmetric = set()
+        missmetric = set()
+        pcnt = 0
+        tcnt = 0
+        for name, val in self.get_results().items():
+            if val is None or val == '':
+                missmetric.add(name)
+                self.errlist.append("Metric '%s' is not collected" % (name))
+            elif val < 0:
+                negmetric.add("{0}(={1:.4f})".format(name, val))
+            else:
+                pcnt += 1
+            tcnt += 1
+
+        self.failtests['PositiveValueTest']['Total Tests'] = tcnt
+        self.failtests['PositiveValueTest']['Passed Tests'] = pcnt
+        if len(negmetric) or len(missmetric) > 0:
+            self.ignoremetrics.update(negmetric)
+            self.ignoremetrics.update(missmetric)
+            self.failtests['PositiveValueTest']['Failed Tests'].append({'NegativeValue': list(negmetric), 'MissingValue': list(missmetric)})
+
+        return
+
+    def evaluate_formula(self, formula: str, alias: dict, ridx: int = 0):
+        """
+        Evaluate the value of the formula.
+
+        @param formula: the formula to be evaluated
+        @param alias: dict mapping aliases to metric names
+        @returns: value of the formula on success; -1 if one or more metric values are not provided
+        """
+        stack = []
+        b = 0
+        errs = []
+        sign = "+"
+        f = str()
+
+        # TODO: support parentheses?
+        for i in range(len(formula)):
+            if i+1 == len(formula) or formula[i] in ('+', '-', '*', '/'):
+                s = alias[formula[b:i]] if i+1 < len(formula) else alias[formula[b:]]
+                v = self.get_value(s, ridx)
+                if not v:
+                    errs.append(s)
+                else:
+                    f = f + "{0}(={1:.4f})".format(s, v[0])
+                    if sign == "*":
+                        stack[-1] = stack[-1] * v[0]
+                    elif sign == "/":
+                        stack[-1] = stack[-1] / v[0]
+                    elif sign == '-':
+                        stack.append(-v[0])
+                    else:
+                        stack.append(v[0])
+                if i + 1 < len(formula):
+                    sign = formula[i]
+                    f += sign
+                    b = i + 1
+
+        if len(errs) > 0:
+            return -1, "Metric value missing: " + ','.join(errs)
+
+        val = sum(stack)
+        return val, f
+
+    # Relationship Tests
+    def relationship_test(self, rule: dict):
+        """
+        Validate if the metrics follow the required relationship in the rule,
+        e.g. lower_bound <= eval(formula) <= upper_bound.
+        One rule is counted as one test.
+        Failure: when one or more metric result(s) are not provided, or when the formula evaluates outside of the upper/lower bounds.
+
+        @param rule: dict with metric names (and aliases), formula, and the required upper and lower bounds.
+        """
+        alias = dict()
+        for m in rule['Metrics']:
+            alias[m['Alias']] = m['Name']
+        lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex'])
+        val, f = self.evaluate_formula(rule['Formula'], alias, ridx=rule['RuleIndex'])
+        if val == -1:
+            self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Description': f})
+        elif not self.check_bound(val, lbv, ubv, t):
+            lb = rule['RangeLower']
+            ub = rule['RangeUpper']
+            if isinstance(lb, str):
+                if lb in alias:
+                    lb = alias[lb]
+            if isinstance(ub, str):
+                if ub in alias:
+                    ub = alias[ub]
+            self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Formula': f,
+                                                                       'RangeLower': lb, 'LowerBoundValue': self.get_value(lb),
+                                                                       'RangeUpper': ub, 'UpperBoundValue': self.get_value(ub),
+                                                                       'ErrorThreshold': t, 'CollectedValue': val})
+        else:
+            self.passedcnt += 1
+            self.failtests['RelationshipTest']['Passed Tests'] += 1
+        self.totalcnt += 1
+        self.failtests['RelationshipTest']['Total Tests'] += 1
+
+        return
+
+
+    # Single Metric Test
+    def single_test(self, rule: dict):
+        """
+        Validate if the metrics are in the required value range,
+        e.g. lower_bound <= metric_value <= upper_bound.
+        One metric is counted as one test in this type of test.
+        One rule may include one or more metrics.
+        Failure: when a metric value is not provided or the value is outside the bounds.
+        This test updates self.totalcnt and records failed tests in self.failtests['SingleMetricTest'].
+
+        @param rule: dict with metrics to validate and the value range requirement
+        """
+        lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
+        metrics = rule['Metrics']
+        passcnt = 0
+        totalcnt = 0
+        faillist = []
+        for m in metrics:
+            totalcnt += 1
+            result = self.get_value(m['Name'])
+            if len(result) > 0 and self.check_bound(result[0], lbv, ubv, t):
+                passcnt += 1
+            else:
+                faillist.append({'MetricName': m['Name'], 'CollectedValue': result})
+
+        self.totalcnt += totalcnt
+        self.passedcnt += passcnt
+        self.failtests['SingleMetricTest']['Total Tests'] += totalcnt
+        self.failtests['SingleMetricTest']['Passed Tests'] += passcnt
+        if len(faillist) != 0:
+            self.failtests['SingleMetricTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'],
+                                                                       'RangeLower': rule['RangeLower'],
+                                                                       'RangeUpper': rule['RangeUpper'],
+                                                                       'ErrorThreshold': rule['ErrorThreshold'],
+                                                                       'Failure': faillist})
+
+        return
+
+    def create_report(self):
+        """
+        Create the final report and write it into a JSON file.
+        """
+        alldata = list()
+        for i in range(0, len(self.workloads)):
+            reportstas = {"Total Rule Count": self.alltotalcnt[i], "Passed Rule Count": self.allpassedcnt[i]}
+            data = {"Metric Validation Statistics": reportstas, "Tests in Category": self.allfailtests[i],
+                    "Errors": self.allerrlist[i]}
+            alldata.append({"Workload": self.workloads[i], "Report": data})
+
+        json_str = json.dumps(alldata, indent=4)
+        print("Test validation finished. Final report: ")
+        print(json_str)
+
+        if self.debug:
+            allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]} for i in range(0, len(self.workloads))]
+            self.json_dump(allres, self.datafname)
+
+    def check_rule(self, testtype, metric_list):
+        """
+        Check if the rule uses metric(s) that do not exist on the current platform.
+
+        @param metric_list: list of metrics from the rule.
+        @return: False when one of the metrics is not found in the metric file (this rule should be skipped),
+                 True when all metrics used in the rule are found in the metric file.
+        """
+        if testtype == "RelationshipTest":
+            for m in metric_list:
+                if m['Name'] not in self.metrics:
+                    return False
+        return True
+
+    # Start of Collector and Converter
+    def convert(self, data: list, idx: int):
+        """
+        Convert collected metric data from the -j output to dict of {metric_name: value}.
+        """
+        for json_string in data:
+            try:
+                result = json.loads(json_string)
+                if "metric-unit" in result and result["metric-unit"] != "(null)" and result["metric-unit"] != "":
+                    name = result["metric-unit"].split(" ")[1] if len(result["metric-unit"].split(" ")) > 1 \
+                        else result["metric-unit"]
+                    if idx not in self.results: self.results[idx] = dict()
+                    self.results[idx][name.lower()] = float(result["metric-value"])
+            except ValueError as error:
+                continue
+        return
+
+    def collect_perf(self, data_file: str, workload: str):
+        """
+        Collect metric data with "perf stat -M" on given workload with -a and -j.
+ """ + self.results =3D dict() + tool =3D 'perf' + print(f"Starting perf collection") + print(f"Workload: {workload}") + collectlist =3D dict() + if self.collectlist !=3D "": + collectlist[0] =3D {x for x in self.collectlist.split(",")} + else: + collectlist[0] =3D set(list(self.metrics)) + # Create metric set for relationship rules + for rule in self.rules: + if rule["TestType"] =3D=3D "RelationshipTest": + metrics =3D [m["Name"] for m in rule["Metrics"]] + if not any(m not in collectlist[0] for m in metrics): + collectlist[rule["RuleIndex"]] =3D set(metrics) + + for idx, metrics in collectlist.items(): + if idx =3D=3D 0: wl =3D "sleep 0.5".split() + else: wl =3D workload.split() + for metric in metrics: + command =3D [tool, 'stat', '-j', '-M', f"{metric}", "-a"] + command.extend(wl) + cmd =3D subprocess.run(command, stderr=3Dsubprocess.PIPE, = encoding=3D'utf-8') + data =3D [x+'}' for x in cmd.stderr.split('}\n') if x] + self.convert(data, idx) + # End of Collector and Converter + + # Start of Rule Generator + def parse_perf_metrics(self): + """ + Read and parse perf metric file: + 1) find metrics with '1%' or '100%' as ScaleUnit for Percent check + 2) create metric name list + """ + command =3D ['perf', 'list', '-j', '--details', 'metrics'] + cmd =3D subprocess.run(command, stdout=3Dsubprocess.PIPE, stderr= =3Dsubprocess.PIPE, encoding=3D'utf-8') + try: + data =3D json.loads(cmd.stdout) + for m in data: + if 'MetricName' not in m: + print("Warning: no metric name") + continue + name =3D m['MetricName'] + self.metrics.add(name) + if 'ScaleUnit' in m and (m['ScaleUnit'] =3D=3D '1%' or m['= ScaleUnit'] =3D=3D '100%'): + self.pctgmetrics.add(name.lower()) + except ValueError as error: + print(f"Error when parsing metric data") + sys.exit() + + return + + def create_rules(self): + """ + Create full rules which includes: + 1) All the rules from the "relationshi_rules" file + 2) SingleMetric rule for all the 'percent' metrics + + Reindex all the rules to avoid repeated RuleIndex + """ + self.rules =3D self.read_json(self.rulefname)['RelationshipRules'] + pctgrule =3D {'RuleIndex':0, + 'TestType':'SingleMetricTest', + 'RangeLower':'0', + 'RangeUpper': '100', + 'ErrorThreshold': self.tolerance, + 'Description':'Metrics in percent unit have value with= in [0, 100]', + 'Metrics': [{'Name': m} for m in self.pctgmetrics]} + self.rules.append(pctgrule) + + # Re-index all rules to avoid repeated RuleIndex + idx =3D 1 + for r in self.rules: + r['RuleIndex'] =3D idx + idx +=3D 1 + + if self.debug: + #TODO: need to test and generate file name correctly + data =3D {'RelationshipRules':self.rules, 'SupportedMetrics': = [{"MetricName": name} for name in self.metrics]} + self.json_dump(data, self.fullrulefname) + + return + # End of Rule Generator + + def _storewldata(self, key): + ''' + Store all the data of one workload into the corresponding data str= ucture for all workloads. + @param key: key to the dictionaries (index of self.workloads). 
+        '''
+        self.allresults[key] = self.results
+        self.allignoremetrics[key] = self.ignoremetrics
+        self.allfailtests[key] = self.failtests
+        self.alltotalcnt[key] = self.totalcnt
+        self.allpassedcnt[key] = self.passedcnt
+        self.allerrlist[key] = self.errlist
+
+    # Initialize data structures before data validation of each workload
+    def _init_data(self):
+        testtypes = ['PositiveValueTest', 'RelationshipTest', 'SingleMetricTest']
+        self.results = dict()
+        self.ignoremetrics = set()
+        self.errlist = list()
+        self.failtests = {k: {'Total Tests': 0, 'Passed Tests': 0, 'Failed Tests': []} for k in testtypes}
+        self.totalcnt = 0
+        self.passedcnt = 0
+
+    def test(self):
+        '''
+        The real entry point of the test framework.
+        This function loads the validation rule JSON file and the standard metric file to create rules
+        for testing and name-map dictionaries.
+        It also reads in the result JSON file for testing.
+
+        In the test process, it passes through each rule and launches the correct test function based on the
+        'TestType' field of the rule.
+
+        The final report is written into a JSON file.
+        '''
+        self.parse_perf_metrics()
+        self.create_rules()
+        for i in range(0, len(self.workloads)):
+            self._init_data()
+            self.collect_perf(self.datafname, self.workloads[i])
+            # Run positive value test
+            self.pos_val_test()
+            for r in self.rules:
+                # skip rules that use metrics that do not exist on this platform
+                testtype = r['TestType']
+                if not self.check_rule(testtype, r['Metrics']):
+                    continue
+                if testtype == 'RelationshipTest':
+                    self.relationship_test(r)
+                elif testtype == 'SingleMetricTest':
+                    self.single_test(r)
+                else:
+                    print("Unsupported Test Type: ", testtype)
+                    self.errlist.append("Unsupported Test Type from rule: " + str(r['RuleIndex']))
+            self._storewldata(i)
+            print("Workload: ", self.workloads[i])
+            print("Total metrics collected: ", self.failtests['PositiveValueTest']['Total Tests'])
+            print("Non-negative metric count: ", self.failtests['PositiveValueTest']['Passed Tests'])
+            print("Total Test Count: ", self.totalcnt)
+            print("Passed Test Count: ", self.passedcnt)
+
+        self.create_report()
+        return sum(self.alltotalcnt.values()) != sum(self.allpassedcnt.values())
+# End of Class Validator
+
+
+def main() -> int:
+    parser = argparse.ArgumentParser(description="Launch metric value validation")
+
+    parser.add_argument("-rule", help="Base validation rule file", required=True)
+    parser.add_argument("-output_dir", help="Path for validator output file, report file", required=True)
+    parser.add_argument("-debug", help="Debug run, save intermediate data to files", action="store_true", default=False)
+    parser.add_argument("-wl", help="Workload to run during data collection", default="true")
+    parser.add_argument("-m", help="Metric list to validate", default="")
+    args = parser.parse_args()
+    outpath = Path(args.output_dir)
+    reportf = Path.joinpath(outpath, 'perf_report.json')
+    fullrule = Path.joinpath(outpath, 'full_rule.json')
+    datafile = Path.joinpath(outpath, 'perf_data.json')
+
+    validator = Validator(args.rule, reportf, debug=args.debug,
+                          datafname=datafile, fullrulefname=fullrule, workload=args.wl,
+                          metrics=args.m)
+    ret = validator.test()
+
+    return ret
+
+
+if __name__ == "__main__":
+    import sys
+    sys.exit(main())
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
new file mode 100644
index 000000000000..debaa910da9f
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
@@ -0,0 +1,387 @@
+{
+    "RelationshipRules": [
+        {
+            "RuleIndex": 1,
+            "Formula": "a+b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "c",
+            "RangeUpper": "c",
+            "ErrorThreshold": 5.0,
+            "Description": "Intel(R) Optane(TM) Persistent Memory (PMEM) bandwidth total includes Intel(R) Optane(TM) Persistent Memory (PMEM) read bandwidth and Intel(R) Optane(TM) Persistent Memory (PMEM) write bandwidth",
+            "Metrics": [
+                {
+                    "Name": "pmem_memory_bandwidth_read",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "pmem_memory_bandwidth_write",
+                    "Alias": "b"
+                },
+                {
+                    "Name": "pmem_memory_bandwidth_total",
+                    "Alias": "c"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 2,
+            "Formula": "a+b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "c",
+            "RangeUpper": "c",
+            "ErrorThreshold": 5.0,
+            "Description": "DDR memory bandwidth total includes DDR memory read bandwidth and DDR memory write bandwidth",
+            "Metrics": [
+                {
+                    "Name": "memory_bandwidth_read",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "memory_bandwidth_write",
+                    "Alias": "b"
+                },
+                {
+                    "Name": "memory_bandwidth_total",
+                    "Alias": "c"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 3,
+            "Formula": "a+b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "100",
+            "RangeUpper": "100",
+            "ErrorThreshold": 5.0,
+            "Description": "Total memory read accesses includes memory reads from the last level cache (LLC) addressed to local DRAM and memory reads from the last level cache (LLC) addressed to remote DRAM.",
+            "Metrics": [
+                {
+                    "Name": "numa_reads_addressed_to_local_dram",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "numa_reads_addressed_to_remote_dram",
+                    "Alias": "b"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 4,
+            "Formula": "a",
+            "TestType": "SingleMetricTest",
+            "RangeLower": "0.125",
+            "RangeUpper": "",
+            "ErrorThreshold": "",
+            "Description": "",
+            "Metrics": [
+                {
+                    "Name": "cpi",
+                    "Alias": "a"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 5,
+            "Formula": "",
+            "TestType": "SingleMetricTest",
+            "RangeLower": "0",
+            "RangeUpper": "1",
+            "ErrorThreshold": 5.0,
+            "Description": "Ratio values should be within value range [0,1)",
+            "Metrics": [
+                {
+                    "Name": "loads_per_instr",
+                    "Alias": ""
+                },
+                {
+                    "Name": "stores_per_instr",
+                    "Alias": ""
+                },
+                {
+                    "Name": "l1d_mpi",
+                    "Alias": ""
+                },
+                {
+                    "Name": "l1d_demand_data_read_hits_per_instr",
+                    "Alias": ""
+                },
+                {
+                    "Name": "l1_i_code_read_misses_with_prefetches_per_instr",
+                    "Alias": ""
+                },
+                {
+                    "Name": "l2_demand_data_read_hits_per_instr",
+                    "Alias": ""
+                },
+                {
+                    "Name": "l2_mpi",
+                    "Alias": ""
+                },
+                {
+                    "Name": "l2_demand_data_read_mpi",
+                    "Alias": ""
+                },
+                {
+                    "Name": "l2_demand_code_mpi",
+                    "Alias": ""
+                }
+            ]
+        },
+        {
+            "RuleIndex": 6,
+            "Formula": "a+b+c+d",
+            "TestType": "RelationshipTest",
+            "RangeLower": "100",
+            "RangeUpper": "100",
+            "ErrorThreshold": 5.0,
+            "Description": "Sum of TMA level 1 metrics should be 100%",
+            "Metrics": [
+                {
+                    "Name": "tma_frontend_bound",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "tma_bad_speculation",
+                    "Alias": "b"
+                },
+                {
+                    "Name": "tma_backend_bound",
+                    "Alias": "c"
+                },
+                {
+                    "Name": "tma_retiring",
+                    "Alias": "d"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 7,
+            "Formula": "a+b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "c",
+            "RangeUpper": "c",
+            "ErrorThreshold": 5.0,
+            "Description": "Sum of the level 2 children should equal the level 1 parent",
+            "Metrics": [
+                {
+                    "Name": "tma_fetch_latency",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "tma_fetch_bandwidth",
+                    "Alias": "b"
+                },
+                {
+                    "Name": "tma_frontend_bound",
+                    "Alias": "c"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 8,
+            "Formula": "a+b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "c",
+            "RangeUpper": "c",
+            "ErrorThreshold": 5.0,
+            "Description": "Sum of the level 2 children should equal the level 1 parent",
+            "Metrics": [
+                {
+                    "Name": "tma_branch_mispredicts",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "tma_machine_clears",
+                    "Alias": "b"
+                },
+                {
+                    "Name": "tma_bad_speculation",
+                    "Alias": "c"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 9,
+            "Formula": "a+b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "c",
+            "RangeUpper": "c",
+            "ErrorThreshold": 5.0,
+            "Description": "Sum of the level 2 children should equal the level 1 parent",
+            "Metrics": [
+                {
+                    "Name": "tma_memory_bound",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "tma_core_bound",
+                    "Alias": "b"
+                },
+                {
+                    "Name": "tma_backend_bound",
+                    "Alias": "c"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 10,
+            "Formula": "a+b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "c",
+            "RangeUpper": "c",
+            "ErrorThreshold": 5.0,
+            "Description": "Sum of the level 2 children should equal the level 1 parent",
+            "Metrics": [
+                {
+                    "Name": "tma_light_operations",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "tma_heavy_operations",
+                    "Alias": "b"
+                },
+                {
+                    "Name": "tma_retiring",
+                    "Alias": "c"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 11,
+            "Formula": "a+b+c",
+            "TestType": "RelationshipTest",
+            "RangeLower": "100",
+            "RangeUpper": "100",
+            "ErrorThreshold": 5.0,
+            "Description": "all_requests equals the sum of memory_page_empty, memory_page_misses, and memory_page_hits.",
+            "Metrics": [
+                {
+                    "Name": "memory_page_empty_vs_all_requests",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "memory_page_misses_vs_all_requests",
+                    "Alias": "b"
+                },
+                {
+                    "Name": "memory_page_hits_vs_all_requests",
+                    "Alias": "c"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 12,
+            "Formula": "a-b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "0",
+            "RangeUpper": "",
+            "ErrorThreshold": 5.0,
+            "Description": "CPU utilization in kernel mode should always be <= CPU utilization",
+            "Metrics": [
+                {
+                    "Name": "cpu_utilization",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "cpu_utilization_in_kernel_mode",
+                    "Alias": "b"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 13,
+            "Formula": "a-b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "0",
+            "RangeUpper": "",
+            "ErrorThreshold": 5.0,
+            "Description": "Total L2 misses per instruction should be >= L2 demand data read misses per instruction",
+            "Metrics": [
+                {
+                    "Name": "l2_mpi",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "l2_demand_data_read_mpi",
+                    "Alias": "b"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 14,
+            "Formula": "a-b",
+            "TestType": "RelationshipTest",
+            "RangeLower": "0",
+            "RangeUpper": "",
+            "ErrorThreshold": 5.0,
+            "Description": "Total L2 misses per instruction should be >= L2 demand code misses per instruction",
+            "Metrics": [
+                {
+                    "Name": "l2_mpi",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "l2_demand_code_mpi",
+                    "Alias": "b"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 15,
+            "Formula": "b+c+d",
+            "TestType": "RelationshipTest",
+            "RangeLower": "a",
+            "RangeUpper": "a",
+            "ErrorThreshold": 5.0,
+            "Description": "The sum of L3 data read, RFO, and code misses per instruction equals the total L3 misses per instruction.",
+            "Metrics": [
+                {
+                    "Name": "llc_mpi",
+                    "Alias": "a"
+                },
+                {
+                    "Name": "llc_data_read_mpi_demand_plus_prefetch",
+                    "Alias": "b"
+                },
+                {
+                    "Name": "llc_rfo_read_mpi_demand_plus_prefetch",
+                    "Alias": "c"
+                },
+                {
+                    "Name": "llc_code_read_mpi_demand_plus_prefetch",
+                    "Alias": "d"
+                }
+            ]
+        },
+        {
+            "RuleIndex": 16,
+            "Formula": "a",
+            "TestType": "SingleMetricTest",
+            "RangeLower": "0",
+            "RangeUpper": "8",
"ErrorThreshold": 0.0, + "Description": "Setting generous range for allowable frequenci= es", + "Metrics": [ + { + "Name": "uncore_freq", + "Alias": "a" + } + ] + }, + { + "RuleIndex": 17, + "Formula": "a", + "TestType": "SingleMetricTest", + "RangeLower": "0", + "RangeUpper": "8", + "ErrorThreshold": 0.0, + "Description": "Setting generous range for allowable frequenci= es", + "Metrics": [ + { + "Name": "cpu_operating_frequency", + "Alias": "a" + } + ] + } + ] +} \ No newline at end of file diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tes= ts/shell/stat_metrics_values.sh new file mode 100755 index 000000000000..ad94c936de7e --- /dev/null +++ b/tools/perf/tests/shell/stat_metrics_values.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# perf metrics value validation +# SPDX-License-Identifier: GPL-2.0 +if [ "x$PYTHON" =3D=3D "x" ] +then + if which python3 > /dev/null + then + PYTHON=3Dpython3 + else + echo Skipping test, python3 not detected please set environment variable= PYTHON. + exit 2 + fi +fi + +grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; } + +pythonvalidator=3D$(dirname $0)/lib/perf_metric_validation.py +rulefile=3D$(dirname $0)/lib/perf_metric_validation_rules.json +tmpdir=3D$(mktemp -d /tmp/__perf_test.program.XXXXX) +workload=3D"perf bench futex hash -r 2 -s" + +# Add -debug, save data file and full rule file +echo "Launch python validation script $pythonvalidator" +echo "Output will be stored in: $tmpdir" +$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${worklo= ad}" +ret=3D$? +rm -rf $tmpdir + +exit $ret + --=20 2.39.1 From nobody Sat Feb 7 11:05:23 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id A5BF2EB64D8 for ; Tue, 20 Jun 2023 17:00:50 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230241AbjFTRAt (ORCPT ); Tue, 20 Jun 2023 13:00:49 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:45332 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230515AbjFTRAl (ORCPT ); Tue, 20 Jun 2023 13:00:41 -0400 Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 531C31731; Tue, 20 Jun 2023 10:00:37 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1687280437; x=1718816437; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=k5j1Yn2V9lz4sCs0dVmB92SlhzxFJ8oMSsBzj0DJzm4=; b=VaWO+QTW18dgDO76kjXjksOIKC4Co+83BDHy7w5apZdfPuXcCeCqxia1 iY/lTVMUOfRaa7NitgeutHMDoozB3CH9hzuY8azklJikymCOao4g9OmOt Lh3uWcVn2Wvn5/zIY3e9JbBvAHXyyEHX3Etr1QNi2j5hK2zc8t/ukh5Sa ZRXSPPA8qiXFucyY1WvfMp/kkHoSqpmZRmKpa5qrxmI24A2aOlZy1Bq7l cmAQN+CKQBTiwBBksXKM3W7lzvc00Aa5g7/OJc4fpCiRuoJ1aOpOoZAk/ qQPTc4qQ+UgU/f3JZZiPC8rZMfKZqezFn252UEAglysXSALEge+2n7b86 A==; X-IronPort-AV: E=McAfee;i="6600,9927,10747"; a="359929632" X-IronPort-AV: E=Sophos;i="6.00,257,1681196400"; d="scan'208";a="359929632" Received: from orsmga005.jf.intel.com ([10.7.209.41]) by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 20 Jun 2023 10:00:34 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10747"; a="888314364" X-IronPort-AV: E=Sophos;i="6.00,257,1681196400"; d="scan'208";a="888314364" Received: from 
Signed-off-by: Weilin Wang
Tested-by: Namhyung Kim
---
 .../tests/shell/lib/perf_metric_validation.py | 31 ++++++++++++++++---
 .../lib/perf_metric_validation_rules.json     | 11 +++++++
 2 files changed, 38 insertions(+), 4 deletions(-)

diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 81bd2bf38b67..3c3a9b4f8b82 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -12,7 +12,7 @@ class Validator:
         self.reportfname = reportfname
         self.rules = None
         self.collectlist = metrics
-        self.metrics = set()
+        self.metrics = set(metrics)
         self.tolerance = t
 
         self.workloads = [x for x in workload.split(",") if x]
@@ -148,6 +148,7 @@ class Validator:
                 self.errlist.append("Metric '%s' is not collected" % (name))
             elif val < 0:
                 negmetric.add("{0}(={1:.4f})".format(name, val))
+                self.collectlist[0].append(name)
             else:
                 pcnt += 1
             tcnt += 1
@@ -266,6 +267,7 @@ class Validator:
                 passcnt += 1
             else:
                 faillist.append({'MetricName': m['Name'], 'CollectedValue': result})
+                self.collectlist[0].append(m['Name'])
 
         self.totalcnt += totalcnt
         self.passedcnt += passcnt
@@ -348,7 +350,7 @@ class Validator:
             if rule["TestType"] == "RelationshipTest":
                 metrics = [m["Name"] for m in rule["Metrics"]]
                 if not any(m not in collectlist[0] for m in metrics):
-                    collectlist[rule["RuleIndex"]] = set(metrics)
+                    collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]
 
         for idx, metrics in collectlist.items():
             if idx == 0: wl = "sleep 0.5".split()
@@ -356,9 +358,12 @@ class Validator:
             for metric in metrics:
                 command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
                 command.extend(wl)
+                print(" ".join(command))
                 cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
                 data = [x+'}' for x in cmd.stderr.split('}\n') if x]
                 self.convert(data, idx)
+        self.collectlist = dict()
+        self.collectlist[0] = list()
     # End of Collector and Converter
 
     # Start of Rule Generator
@@ -386,6 +391,20 @@ class Validator:
 
         return
 
+    def remove_unsupported_rules(self, rules, skiplist: set = None):
+        for m in skiplist:
+            self.metrics.discard(m)
+        new_rules = []
+        for rule in rules:
+            add_rule = True
+            for m in rule["Metrics"]:
+                if m["Name"] not in self.metrics:
+                    add_rule = False
+                    break
+            if add_rule:
+                new_rules.append(rule)
+        return new_rules
+
     def create_rules(self):
         """
         Create the full set of rules, which includes:
@@ -394,7 +413,10 @@ class Validator:
 
         Reindex all the rules to avoid repeated RuleIndex values.
         """
-        self.rules = self.read_json(self.rulefname)['RelationshipRules']
+        data = self.read_json(self.rulefname)
+        rules = data['RelationshipRules']
+        skiplist = set(data['SkipList'])
+        self.rules = self.remove_unsupported_rules(rules, skiplist)
         pctgrule = {'RuleIndex': 0,
                     'TestType': 'SingleMetricTest',
                     'RangeLower': '0',
@@ -453,7 +475,8 @@ class Validator:
 
         The final report is written into a JSON file.
         '''
-        self.parse_perf_metrics()
+        if not self.collectlist:
+            self.parse_perf_metrics()
         self.create_rules()
         for i in range(0, len(self.workloads)):
             self._init_data()
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
index debaa910da9f..eb6f59e018b7 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
+++ b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
@@ -1,4 +1,15 @@
 {
+    "SkipList": [
+        "tsx_aborted_cycles",
+        "tsx_transactional_cycles",
+        "C2_Pkg_Residency",
+        "C6_Pkg_Residency",
+        "C1_Core_Residency",
+        "C6_Core_Residency",
+        "tma_false_sharing",
+        "tma_remote_cache",
+        "tma_contested_accesses"
+    ],
     "RelationshipRules": [
         {
             "RuleIndex": 1,
-- 
2.39.1

From: Weilin Wang
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo, Jiri Olsa,
 Namhyung Kim, Adrian Hunter, Ian Rogers, linux-perf-users@vger.kernel.org,
 linux-kernel@vger.kernel.org
Cc: Weilin Wang, Kan Liang, Samantha Alt, Perry Taylor, Caleb Biggers,
 ravi.bangoria@amd.com
Subject: [PATCH v5 3/3] perf test: Rerun failed metrics with longer workload
Date: Tue, 20 Jun 2023 10:00:27 -0700
Message-Id: <20230620170027.1861012-4-weilin.wang@intel.com>
In-Reply-To: <20230620170027.1861012-1-weilin.wang@intel.com>
References: <20230620170027.1861012-1-weilin.wang@intel.com>

Rerun failed metrics with a longer workload to avoid false failures:
the metric value test sometimes fails when the collection runs for a
very short amount of time. Skip the rerun if 20 or more metrics fail.
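The retry gate can be pictured as follows; second_test() in the patch
performs the real second collection, while here the second-run values
are made up for illustration:

    failures = {"uncore_freq": -1.0, "tma_retiring": -0.2}  # first pass
    rerun = list(failures)

    if 0 < len(rerun) < 20:                    # skip the rerun for mass failures
        second_results = {"uncore_freq": 2.1, "tma_retiring": 28.0}
        for name, val in second_results.items():
            if name in failures and val >= 0:  # positive-value criterion
                del failures[name]             # the rerun cleared the failure

    assert not failures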
Signed-off-by: Weilin Wang
Tested-by: Namhyung Kim
---
 .../tests/shell/lib/perf_metric_validation.py | 129 +++++++++++-------
 1 file changed, 83 insertions(+), 46 deletions(-)

diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 3c3a9b4f8b82..50a34a9cc040 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -11,8 +11,9 @@ class Validator:
         self.rulefname = rulefname
         self.reportfname = reportfname
         self.rules = None
-        self.collectlist = metrics
-        self.metrics = set(metrics)
+        self.collectlist: str = metrics
+        self.metrics = self.__set_metrics(metrics)
+        self.skiplist = set()
         self.tolerance = t
 
         self.workloads = [x for x in workload.split(",") if x]
@@ -41,6 +42,12 @@ class Validator:
         self.debug = debug
         self.fullrulefname = fullrulefname
 
+    def __set_metrics(self, metrics=''):
+        if metrics != '':
+            return set(metrics.split(","))
+        else:
+            return set()
+
     def read_json(self, filename: str) -> dict:
         try:
             with open(Path(filename).resolve(), "r") as f:
@@ -113,7 +120,7 @@ class Validator:
         All future test(s) on this metric will fail.
 
         @param name: name of the metric
-        @returns: list with value found in self.results; list is empty when not value found.
+        @returns: list with value found in self.results; list is empty when value is not found.
         """
         results = []
         data = self.results[ridx] if ridx in self.results else self.results[0]
@@ -123,7 +130,6 @@ class Validator:
             elif name.replace('.', '1').isdigit():
                 results.append(float(name))
             else:
-                self.errlist.append("Metric '%s' is not collected or the value format is incorrect" % (name))
                 self.ignoremetrics.add(name)
         return results
 
@@ -138,27 +144,32 @@ class Validator:
         Failure: when metric value is negative or not provided.
         Metrics with negative value will be added into the self.failtests['PositiveValueTest'] and self.ignoremetrics.
""" - negmetric =3D set() - missmetric =3D set() + negmetric =3D dict() pcnt =3D 0 tcnt =3D 0 + rerun =3D list() for name, val in self.get_results().items(): - if val is None or val =3D=3D '': - missmetric.add(name) - self.errlist.append("Metric '%s' is not collected"%(name)) - elif val < 0: - negmetric.add("{0}(=3D{1:.4f})".format(name, val)) - self.collectlist[0].append(name) + if val < 0: + negmetric[name] =3D val + rerun.append(name) else: pcnt +=3D 1 tcnt +=3D 1 + if len(rerun) > 0 and len(rerun) < 20: + second_results =3D dict() + self.second_test(rerun, second_results) + for name, val in second_results.items(): + if name not in negmetric: continue + if val >=3D 0: + del negmetric[name] + pcnt +=3D 1 =20 self.failtests['PositiveValueTest']['Total Tests'] =3D tcnt self.failtests['PositiveValueTest']['Passed Tests'] =3D pcnt - if len(negmetric) or len(missmetric)> 0: - self.ignoremetrics.update(negmetric) - self.ignoremetrics.update(missmetric) - self.failtests['PositiveValueTest']['Failed Tests'].append({'N= egativeValue':list(negmetric), 'MissingValue':list(missmetric)}) + if len(negmetric.keys()): + self.ignoremetrics.update(negmetric.keys()) + negmessage =3D ["{0}(=3D{1:.4f})".format(name, val) for name, = val in negmetric.items()] + self.failtests['PositiveValueTest']['Failed Tests'].append({'N= egativeValue': negmessage}) =20 return =20 @@ -259,21 +270,36 @@ class Validator: metrics =3D rule['Metrics'] passcnt =3D 0 totalcnt =3D 0 - faillist =3D [] + faillist =3D list() + failures =3D dict() + rerun =3D list() for m in metrics: totalcnt +=3D 1 result =3D self.get_value(m['Name']) - if len(result) > 0 and self.check_bound(result[0], lbv, ubv, t= ): + if len(result) > 0 and self.check_bound(result[0], lbv, ubv, t= ) or m['Name'] in self.skiplist: passcnt +=3D 1 else: - faillist.append({'MetricName':m['Name'], 'CollectedValue':= result}) - self.collectlist[0].append(m['Name']) + failures[m['Name']] =3D result + rerun.append(m['Name']) + + if len(rerun) > 0 and len(rerun) < 20: + second_results =3D dict() + self.second_test(rerun, second_results) + for name, val in second_results.items(): + if name not in failures: continue + if self.check_bound(val, lbv, ubv, t): + passcnt +=3D 1 + del failures[name] + else: + failures[name] =3D val + self.results[0][name] =3D val =20 self.totalcnt +=3D totalcnt self.passedcnt +=3D passcnt self.failtests['SingleMetricTest']['Total Tests'] +=3D totalcnt self.failtests['SingleMetricTest']['Passed Tests'] +=3D passcnt - if len(faillist) !=3D 0: + if len(failures.keys()) !=3D 0: + faillist =3D [{'MetricName':name, 'CollectedValue':val} for na= me, val in failures.items()] self.failtests['SingleMetricTest']['Failed Tests'].append({'Ru= leIndex':rule['RuleIndex'], 'Ra= ngeLower': rule['RangeLower'], 'Ra= ngeUpper': rule['RangeUpper'], @@ -316,7 +342,7 @@ class Validator: return True =20 # Start of Collector and Converter - def convert(self, data: list, idx: int): + def convert(self, data: list, metricvalues:dict): """ Convert collected metric data from the -j output to dict of {metri= c_name:value}. 
""" @@ -326,20 +352,29 @@ class Validator: if "metric-unit" in result and result["metric-unit"] !=3D = "(null)" and result["metric-unit"] !=3D "": name =3D result["metric-unit"].split(" ")[1] if len(r= esult["metric-unit"].split(" ")) > 1 \ else result["metric-unit"] - if idx not in self.results: self.results[idx] =3D dict= () - self.results[idx][name.lower()] =3D float(result["metr= ic-value"]) + metricvalues[name.lower()] =3D float(result["metric-va= lue"]) except ValueError as error: continue return =20 - def collect_perf(self, data_file: str, workload: str): + def _run_perf(self, metric, workload: str): + tool =3D 'perf' + command =3D [tool, 'stat', '-j', '-M', f"{metric}", "-a"] + wl =3D workload.split() + command.extend(wl) + print(" ".join(command)) + cmd =3D subprocess.run(command, stderr=3Dsubprocess.PIPE, encoding= =3D'utf-8') + data =3D [x+'}' for x in cmd.stderr.split('}\n') if x] + return data + + + def collect_perf(self, workload: str): """ Collect metric data with "perf stat -M" on given workload with -a = and -j. """ self.results =3D dict() - tool =3D 'perf' print(f"Starting perf collection") - print(f"Workload: {workload}") + print(f"Long workload: {workload}") collectlist =3D dict() if self.collectlist !=3D "": collectlist[0] =3D {x for x in self.collectlist.split(",")} @@ -353,17 +388,20 @@ class Validator: collectlist[rule["RuleIndex"]] =3D [",".join(list(set(= metrics)))] =20 for idx, metrics in collectlist.items(): - if idx =3D=3D 0: wl =3D "sleep 0.5".split() - else: wl =3D workload.split() + if idx =3D=3D 0: wl =3D "true" + else: wl =3D workload for metric in metrics: - command =3D [tool, 'stat', '-j', '-M', f"{metric}", "-a"] - command.extend(wl) - print(" ".join(command)) - cmd =3D subprocess.run(command, stderr=3Dsubprocess.PIPE, = encoding=3D'utf-8') - data =3D [x+'}' for x in cmd.stderr.split('}\n') if x] - self.convert(data, idx) - self.collectlist =3D dict() - self.collectlist[0] =3D list() + data =3D self._run_perf(metric, wl) + if idx not in self.results: self.results[idx] =3D dict() + self.convert(data, self.results[idx]) + return + + def second_test(self, collectlist, second_results): + workload =3D self.workloads[self.wlidx] + for metric in collectlist: + data =3D self._run_perf(metric, workload) + self.convert(data, second_results) + # End of Collector and Converter =20 # Start of Rule Generator @@ -381,7 +419,7 @@ class Validator: if 'MetricName' not in m: print("Warning: no metric name") continue - name =3D m['MetricName'] + name =3D m['MetricName'].lower() self.metrics.add(name) if 'ScaleUnit' in m and (m['ScaleUnit'] =3D=3D '1%' or m['= ScaleUnit'] =3D=3D '100%'): self.pctgmetrics.add(name.lower()) @@ -391,14 +429,12 @@ class Validator: =20 return =20 - def remove_unsupported_rules(self, rules, skiplist: set =3D None): - for m in skiplist: - self.metrics.discard(m) + def remove_unsupported_rules(self, rules): new_rules =3D [] for rule in rules: add_rule =3D True for m in rule["Metrics"]: - if m["Name"] not in self.metrics: + if m["Name"] in self.skiplist or m["Name"] not in self.met= rics: add_rule =3D False break if add_rule: @@ -415,15 +451,15 @@ class Validator: """ data =3D self.read_json(self.rulefname) rules =3D data['RelationshipRules'] - skiplist =3D set(data['SkipList']) - self.rules =3D self.remove_unsupported_rules(rules, skiplist) + self.skiplist =3D set([name.lower() for name in data['SkipList']]) + self.rules =3D self.remove_unsupported_rules(rules) pctgrule =3D {'RuleIndex':0, 'TestType':'SingleMetricTest', 'RangeLower':'0', 
                     'RangeUpper': '100',
                     'ErrorThreshold': self.tolerance,
                     'Description': 'Metrics in percent unit have value within [0, 100]',
-                    'Metrics': [{'Name': m} for m in self.pctgmetrics]}
+                    'Metrics': [{'Name': m.lower()} for m in self.pctgmetrics]}
         self.rules.append(pctgrule)
 
         # Re-index all rules to avoid repeated RuleIndex
@@ -479,8 +515,9 @@ class Validator:
             self.parse_perf_metrics()
         self.create_rules()
         for i in range(0, len(self.workloads)):
+            self.wlidx = i
             self._init_data()
-            self.collect_perf(self.datafname, self.workloads[i])
+            self.collect_perf(self.workloads[i])
             # Run positive value test
             self.pos_val_test()
             for r in self.rules:
-- 
2.39.1
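For reference, the converter keys each result off the "metric-unit"
field of the perf stat -j JSON lines. A standalone sketch of that
parsing, with a made-up sample line (real output varies by platform
and perf version):

    import json

    line = '{"metric-value": "1.23", "metric-unit": "GHz uncore_freq"}'

    result = json.loads(line)
    metricvalues = {}
    if result.get("metric-unit") not in (None, "", "(null)"):
        unit = result["metric-unit"]
        # the metric name is the second whitespace-separated token when present
        name = unit.split(" ")[1] if len(unit.split(" ")) > 1 else unit
        metricvalues[name.lower()] = float(result["metric-value"])

    print(metricvalues)  # {'uncore_freq': 1.23}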