tests package

tests.analyze_results module

class tests.analyze_results.Difference(what_failed, why_failed, what_not_failed)[source]

Bases: object
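
The constructor signature above suggests Difference is a simple value object grouping what failed, why it failed, and what did not fail. A minimal usage sketch follows; it assumes the constructor merely stores its three arguments, and the sample values are illustrative, not taken from the module:

    from tests.analyze_results import Difference

    # Illustrative only: attribute storage is assumed and the sample
    # values below are made up; only the argument names come from the
    # documented signature.
    diff = Difference(
        what_failed=["sshd_disable_root_login"],
        why_failed="remediation did not produce the expected state",
        what_not_failed=["sshd_set_idle_timeout"],
    )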

tests.analyze_results.aggregate_results_by_scenarios(rule_results)[source]
tests.analyze_results.analyze_differences(rules)[source]
tests.analyze_results.analyze_pair(best, other)[source]
tests.analyze_results.main()[source]
tests.analyze_results.parse_args()[source]
tests.analyze_results.print_result_differences(json_results)[source]

tests.ansible_playbooks_generated_for_all_rules module

tests.ansible_playbooks_generated_for_all_rules.compare_ds_with_playbooks_dir(ds_path, playbooks_dir_path)[source]
tests.ansible_playbooks_generated_for_all_rules.compare_lists(rules_in_ds_with_ansible_fix, playbooks_in_dir)[source]
tests.ansible_playbooks_generated_for_all_rules.main()[source]

tests.assert_ansible_schema module

tests.assert_ansible_schema.make_parser()[source]
tests.assert_ansible_schema.validate_comments(fname, args)[source]
tests.assert_ansible_schema.validate_input(fname, args)[source]
tests.assert_ansible_schema.validate_playbook(playbook, args)[source]
tests.assert_ansible_schema.validate_yaml(fname, args)[source]

tests.ensure_paths_are_short module

tests.ensure_paths_are_short.main()[source]

tests.install_vm module

tests.install_vm.err(rc, msg)[source]
tests.install_vm.get_virt_install_command(data)[source]
tests.install_vm.give_info(data)[source]
tests.install_vm.handle_disk(data)[source]
tests.install_vm.handle_kickstart(data)[source]
tests.install_vm.handle_rest(data)[source]
tests.install_vm.handle_ssh_pubkey(data)[source]
tests.install_vm.handle_url(data)[source]
tests.install_vm.join_extented_opt(opt_name, delim, opts)[source]
tests.install_vm.main()[source]
tests.install_vm.parse_args()[source]
tests.install_vm.path_from_tests(path)[source]
tests.install_vm.run_virt_install(data, command)[source]
tests.install_vm.wait_vm_not_running(domain)[source]

tests.missing_cces module

tests.missing_cces.check_all_rules(root, filter_profiles)[source]
tests.missing_cces.get_selected_rules(benchmark, filter_profiles)[source]
tests.missing_cces.get_selections_by_profile(benchmark, filter_profiles)[source]
tests.missing_cces.good_profile(profile_id, filter_profiles)[source]
tests.missing_cces.match_profile(needle, haystack)[source]

tests.oval_tester module

class tests.oval_tester.OVALTester(verbose)[source]

Bases: object

finish()[source]

Exit the test run with an appropriate return code.

test(description, oval_content, config_file_content, expected_result)[source]

Execute a test.

description: a very short description to be displayed in the test output

oval_content: content of the OVAL shorthand file, written the way OVALs are written in SSG rules (not a standalone valid OVAL)

config_file_content: content of the text configuration file that the OVAL will check

expected_result: expected result of evaluating the OVAL definition
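
For orientation, a minimal usage sketch of this API follows. The shorthand OVAL body, the configuration file content, and the expected-result value are assumptions made for illustration; only the class name, constructor, and method signatures come from the listing above:

    from tests.oval_tester import OVALTester

    tester = OVALTester(verbose=True)

    # Hypothetical OVAL shorthand, written as in SSG rule checks
    # (not a standalone valid OVAL document).
    oval_content = """
    <def-group>
      <definition class="compliance" id="example_check" version="1">
        ...
      </definition>
    </def-group>
    """

    # Hypothetical text configuration file that the OVAL definition inspects.
    config_file_content = "PermitRootLogin no\n"

    tester.test(
        description="sshd disallows root login",
        oval_content=oval_content,
        config_file_content=config_file_content,
        expected_result="true",  # assumed form of the expected-result value
    )

    # Exits the process with a return code reflecting the outcomes of
    # all executed tests.
    tester.finish()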

tests.run_scapval module

tests.run_scapval.main()[source]
tests.run_scapval.parse_args()[source]
tests.run_scapval.print_requirement_feedback(req_id, message)[source]
tests.run_scapval.process_results(result_path)[source]
tests.run_scapval.test_datastream(datastream_path, scapval_path, scap_version)[source]

tests.stable_profile_ids module

tests.stable_profile_ids.check_build_dir(build_dir)[source]
tests.stable_profile_ids.gather_profiles_from_datastream(path, build_dir, profiles_per_benchmark)[source]
tests.stable_profile_ids.main()[source]
tests.stable_profile_ids.parse_args()[source]
tests.stable_profile_ids.respective_datastream_absent(bench_id, build_dir)[source]

tests.test_machine_only_rules module

tests.test_machine_only_rules.check_if_machine_only(dirpath, name, is_machine_only_group)[source]
tests.test_machine_only_rules.check_product(ds_path, rules_dirs)[source]
tests.test_machine_only_rules.get_element_fix_text_by_system(element)[source]
tests.test_machine_only_rules.get_only_elements_to_check_from_benchmark(benchmark, element_query, short_ids_to_check)[source]
tests.test_machine_only_rules.machine_platform_missing_in_rules(ds_path, short_ids_to_check)[source]
tests.test_machine_only_rules.main()[source]
tests.test_machine_only_rules.parse_command_line_args()[source]
tests.test_machine_only_rules.scan_rules_group(dir_path, parent_machine_only, groups, rules)[source]
tests.test_machine_only_rules.scan_rules_groups(dir_paths, parent_machine_only)[source]
tests.test_machine_only_rules.shorten_id(full_id)[source]

tests.test_macros_oval module

tests.test_macros_oval.main()[source]

tests.test_profile_stability module

tests.test_profile_stability.compare_sets(reference, sample)[source]
tests.test_profile_stability.corresponding_product_built(build_dir, reference_fname)[source]
tests.test_profile_stability.describe_change(difference, name)[source]
tests.test_profile_stability.get_matching_compiled_profile_filename(build_dir, reference_fname)[source]
tests.test_profile_stability.get_profile_name_from_reference_filename(fname)[source]
tests.test_profile_stability.get_reference_vs_built_difference(reference_fname, built_fname)[source]
tests.test_profile_stability.get_references_filenames(ref_root)[source]
tests.test_profile_stability.get_selections_key_from_yaml(yaml_fname)[source]
tests.test_profile_stability.inform_and_append_fix_based_on_reference_compiled_profile(ref, build_root, fix_commands)[source]
tests.test_profile_stability.main()[source]

tests.test_suite module

Module contents