Source code for tests.analyze_results

#!/usr/bin/python3

import json
import collections
import re
import argparse

from ssg_test_suite import common


class Difference(object):
    """Describes how a failed rule test result differs from a passing one."""

    def __init__(self, what_failed, why_failed, what_not_failed):
        self.what_failed = what_failed
        self.why_failed = why_failed
        self.what_not_failed = what_not_failed

    def __str__(self):
        ret = ("failed {failed_stage}\n\t\tfailed on {failed_config}"
               "\n\t\tpassed on {good_config}"
               .format(
                   failed_stage=self.why_failed,
                   failed_config=self.what_failed,
                   good_config=self.what_not_failed))
        return ret
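
A minimal usage sketch; the condition strings below are made up for illustration, real values come from the test suite's result records:

diff = Difference(
    what_failed="rhel8, ansible",
    why_failed="remediation",
    what_not_failed="rhel8, bash")
print(diff)
# failed remediation
#         failed on rhel8, ansible
#         passed on rhel8, bash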
def aggregate_results_by_scenarios(rule_results):
    """Group rule test results into a dict keyed by their scenario."""
    aggregated = collections.defaultdict(list)
    for result in rule_results:
        aggregated[result.scenario].append(result)
    return aggregated
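
For illustration, a stand-in result type with the .scenario attribute the function expects (the namedtuple and its fields are hypothetical; real results come from the test suite):

import collections

RuleResult = collections.namedtuple("RuleResult", ["scenario", "conditions"])

results = [
    RuleResult("correct_value.pass.sh", "rhel8, bash"),
    RuleResult("correct_value.pass.sh", "rhel8, ansible"),
    RuleResult("wrong_value.fail.sh", "rhel8, bash"),
]
aggregated = aggregate_results_by_scenarios(results)
# aggregated["correct_value.pass.sh"] now holds two results,
# aggregated["wrong_value.fail.sh"] holds one.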
def analyze_differences(rules):
    rules = sorted(rules)
    # For the time being, support only comparison of two results -
    # compare the best one and the worst one
    return analyze_pair(rules[0], rules[-1])
def analyze_pair(best, other):
    if best == other:
        return None
    # Name the first stage that the worse result failed to pass.
    if other.passed_stages_count < common.Stage.PREPARATION:
        failure_string = "preparation"
    elif other.passed_stages_count < common.Stage.INITIAL_SCAN:
        failure_string = "initial scan"
    elif other.passed_stages_count < common.Stage.REMEDIATION:
        failure_string = "remediation"
    else:
        failure_string = "final scan"
    good_conditions, bad_conditions = best.relative_conditions_to(other)
    return Difference(bad_conditions, failure_string, good_conditions)
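
The cascade above maps a result's passed_stages_count to the name of the stage where it stopped. A self-contained sketch of that logic, assuming the Stage constants form an ascending integer ordering (the real constants live in ssg_test_suite.common and may differ):

class Stage:
    # Hypothetical integer ordering standing in for ssg_test_suite.common.Stage.
    PREPARATION, INITIAL_SCAN, REMEDIATION, FINAL_SCAN = 1, 2, 3, 4

def failure_stage(passed_stages_count):
    # The first threshold the count falls short of names the failed stage.
    if passed_stages_count < Stage.PREPARATION:
        return "preparation"
    elif passed_stages_count < Stage.INITIAL_SCAN:
        return "initial scan"
    elif passed_stages_count < Stage.REMEDIATION:
        return "remediation"
    return "final scan"

print(failure_stage(0))  # preparation
print(failure_stage(2))  # remediation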
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("json_results", nargs="+")
    return parser.parse_args()
def main():
    args = parse_args()
    # print_result_differences is not shown in this excerpt of the module.
    print_result_differences(args.json_results)


if __name__ == "__main__":
    main()
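
Run directly, the script takes one or more JSON result files produced by the test suite; for example (the file names here are illustrative):

$ python3 analyze_results.py results-bash.json results-ansible.json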