smart-tests-cli 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96) hide show
  1. smart_tests/__init__.py +0 -0
  2. smart_tests/__main__.py +60 -0
  3. smart_tests/app.py +67 -0
  4. smart_tests/args4p/README.md +102 -0
  5. smart_tests/args4p/__init__.py +13 -0
  6. smart_tests/args4p/argument.py +45 -0
  7. smart_tests/args4p/command.py +593 -0
  8. smart_tests/args4p/converters/__init__.py +75 -0
  9. smart_tests/args4p/decorators.py +98 -0
  10. smart_tests/args4p/exceptions.py +12 -0
  11. smart_tests/args4p/option.py +85 -0
  12. smart_tests/args4p/parameter.py +84 -0
  13. smart_tests/args4p/typer/__init__.py +42 -0
  14. smart_tests/commands/__init__.py +0 -0
  15. smart_tests/commands/compare/__init__.py +11 -0
  16. smart_tests/commands/compare/subsets.py +58 -0
  17. smart_tests/commands/detect_flakes.py +105 -0
  18. smart_tests/commands/inspect/__init__.py +13 -0
  19. smart_tests/commands/inspect/model.py +52 -0
  20. smart_tests/commands/inspect/subset.py +138 -0
  21. smart_tests/commands/record/__init__.py +19 -0
  22. smart_tests/commands/record/attachment.py +38 -0
  23. smart_tests/commands/record/build.py +356 -0
  24. smart_tests/commands/record/case_event.py +190 -0
  25. smart_tests/commands/record/commit.py +157 -0
  26. smart_tests/commands/record/session.py +120 -0
  27. smart_tests/commands/record/tests.py +498 -0
  28. smart_tests/commands/stats/__init__.py +11 -0
  29. smart_tests/commands/stats/test_sessions.py +45 -0
  30. smart_tests/commands/subset.py +567 -0
  31. smart_tests/commands/test_path_writer.py +51 -0
  32. smart_tests/commands/verify.py +153 -0
  33. smart_tests/jar/exe_deploy.jar +0 -0
  34. smart_tests/plugins/__init__.py +0 -0
  35. smart_tests/test_runners/__init__.py +0 -0
  36. smart_tests/test_runners/adb.py +24 -0
  37. smart_tests/test_runners/ant.py +35 -0
  38. smart_tests/test_runners/bazel.py +103 -0
  39. smart_tests/test_runners/behave.py +62 -0
  40. smart_tests/test_runners/codeceptjs.py +33 -0
  41. smart_tests/test_runners/ctest.py +164 -0
  42. smart_tests/test_runners/cts.py +189 -0
  43. smart_tests/test_runners/cucumber.py +451 -0
  44. smart_tests/test_runners/cypress.py +46 -0
  45. smart_tests/test_runners/dotnet.py +106 -0
  46. smart_tests/test_runners/file.py +20 -0
  47. smart_tests/test_runners/flutter.py +251 -0
  48. smart_tests/test_runners/go_test.py +99 -0
  49. smart_tests/test_runners/googletest.py +34 -0
  50. smart_tests/test_runners/gradle.py +96 -0
  51. smart_tests/test_runners/jest.py +52 -0
  52. smart_tests/test_runners/maven.py +149 -0
  53. smart_tests/test_runners/minitest.py +40 -0
  54. smart_tests/test_runners/nunit.py +190 -0
  55. smart_tests/test_runners/playwright.py +252 -0
  56. smart_tests/test_runners/prove.py +74 -0
  57. smart_tests/test_runners/pytest.py +358 -0
  58. smart_tests/test_runners/raw.py +238 -0
  59. smart_tests/test_runners/robot.py +125 -0
  60. smart_tests/test_runners/rspec.py +5 -0
  61. smart_tests/test_runners/smart_tests.py +235 -0
  62. smart_tests/test_runners/vitest.py +49 -0
  63. smart_tests/test_runners/xctest.py +79 -0
  64. smart_tests/testpath.py +154 -0
  65. smart_tests/utils/__init__.py +0 -0
  66. smart_tests/utils/authentication.py +78 -0
  67. smart_tests/utils/ci_provider.py +7 -0
  68. smart_tests/utils/commands.py +14 -0
  69. smart_tests/utils/commit_ingester.py +59 -0
  70. smart_tests/utils/common_tz.py +12 -0
  71. smart_tests/utils/edit_distance.py +11 -0
  72. smart_tests/utils/env_keys.py +19 -0
  73. smart_tests/utils/exceptions.py +34 -0
  74. smart_tests/utils/fail_fast_mode.py +99 -0
  75. smart_tests/utils/file_name_pattern.py +4 -0
  76. smart_tests/utils/git_log_parser.py +53 -0
  77. smart_tests/utils/glob.py +44 -0
  78. smart_tests/utils/gzipgen.py +46 -0
  79. smart_tests/utils/http_client.py +169 -0
  80. smart_tests/utils/java.py +61 -0
  81. smart_tests/utils/link.py +149 -0
  82. smart_tests/utils/logger.py +53 -0
  83. smart_tests/utils/no_build.py +2 -0
  84. smart_tests/utils/sax.py +119 -0
  85. smart_tests/utils/session.py +73 -0
  86. smart_tests/utils/smart_tests_client.py +134 -0
  87. smart_tests/utils/subprocess.py +12 -0
  88. smart_tests/utils/tracking.py +95 -0
  89. smart_tests/utils/typer_types.py +241 -0
  90. smart_tests/version.py +7 -0
  91. smart_tests_cli-2.0.0.dist-info/METADATA +168 -0
  92. smart_tests_cli-2.0.0.dist-info/RECORD +96 -0
  93. smart_tests_cli-2.0.0.dist-info/WHEEL +5 -0
  94. smart_tests_cli-2.0.0.dist-info/entry_points.txt +2 -0
  95. smart_tests_cli-2.0.0.dist-info/licenses/LICENSE.txt +202 -0
  96. smart_tests_cli-2.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,153 @@
1
+ import os
2
+ import platform
3
+ import re
4
+ import subprocess
5
+ from typing import List
6
+
7
+ import click
8
+
9
+ import smart_tests.args4p.typer as typer
10
+ from smart_tests.utils.commands import Command
11
+ from smart_tests.utils.env_keys import TOKEN_KEY
12
+ from smart_tests.utils.tracking import Tracking, TrackingClient
13
+
14
+ from .. import args4p
15
+ from ..app import Application
16
+ from ..utils.authentication import get_org_workspace
17
+ from ..utils.java import get_java_command
18
+ from ..utils.smart_tests_client import SmartTestsClient
19
+ from ..utils.typer_types import emoji
20
+ from ..version import __version__ as version
21
+
22
+
23
def compare_version(a: List[int], b: List[int]):
    """Compare two version numbers represented as int arrays.

    Returns a negative number when a < b, positive when a > b, and 0 when
    they are equal. Missing trailing components are treated as 0, so
    [1, 8] compares equal to [1, 8, 0].
    """
    width = max(len(a), len(b))
    padded_a = a + [0] * (width - len(a))
    padded_b = b + [0] * (width - len(b))
    for x, y in zip(padded_a, padded_b):
        if x != y:
            return x - y  # first differing component decides the order
    return 0  # identical
34
+
35
+
36
def compare_java_version(output: str) -> int:
    """Check if the Java version meets what we need. returns >=0 if we meet the requirement.

    Parses `java -version` style output; when no version can be determined,
    returns 0 (err on the safe side).
    """
    quoted = re.compile('"([^"]+)"')
    for line in output.splitlines():
        if "java version" not in line:
            continue
        # line is like: java version "1.8.0_144"
        match = quoted.search(line)
        if not match:
            continue
        tokens = match.group(1).split(".")
        if len(tokens) >= 2:
            # compare only major.minor against the required minimum 1.8
            return compare_version([int(t) for t in tokens[:2]], [1, 8])
    # couldn't determine, so err on the safe side
    return 0
51
+
52
+
53
def check_java_version(javacmd: str) -> int:
    """Check if the Java version meets what we need. returns >=0 if we meet the requirement.

    Runs `javacmd -version` and parses its stderr output. Returns -1 when the
    command fails or cannot be launched at all.
    """
    try:
        v = subprocess.run([javacmd, "-version"], check=True, stderr=subprocess.PIPE, universal_newlines=True)
        return compare_java_version(v.stderr)
    except (subprocess.CalledProcessError, OSError):
        # CalledProcessError: the command ran but exited non-zero.
        # OSError (incl. FileNotFoundError): the command could not be launched,
        # e.g. a stale or non-executable java path — previously this crashed
        # the whole `verify` command instead of reporting the failure.
        return -1
60
+
61
+
62
@args4p.command(help="Verify CLI setup and connectivity")
def verify(app_instance: Application):
    """Print environment diagnostics and verify CLI prerequisites and connectivity.

    Exits with code 1 when org/workspace cannot be determined, Java is missing,
    or Python/Java are older than the required minimums; exits with code 2 on
    an authentication failure (HTTP 401) against the verification endpoint.
    """
    # Run the verification (no subcommands in this app)
    # In this command, regardless of REPORT_ERROR_KEY, always report an unexpected error with full stack trace
    # to assist troubleshooting. `typer.BadParameter` is handled by the invoking
    # Click gracefully.

    org, workspace = get_org_workspace()
    tracking_client = TrackingClient(Command.VERIFY, app=app_instance)
    client = SmartTestsClient(tracking_client=tracking_client, app=app_instance)
    java = get_java_command()

    # Print the system information first so that we can get them even if there's
    # an issue.

    click.echo("Organization: " + repr(org))
    click.echo("Workspace: " + repr(workspace))
    click.echo("Proxy: " + repr(os.getenv("HTTPS_PROXY")))
    click.echo("Platform: " + repr(platform.platform()))
    click.echo("Python version: " + repr(platform.python_version()))
    click.echo("Java command: " + repr(java))
    click.echo("smart-tests version: " + repr(version))

    if org is None or workspace is None:
        msg = (
            "Could not identify Smart Tests organization/workspace. "
            "Please confirm if you set SMART_TESTS_TOKEN or SMART_TESTS_ORGANIZATION and SMART_TESTS_WORKSPACE "
            "environment variables"
        )
        tracking_client.send_error_event(
            event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
            stack_trace=msg
        )
        click.secho(msg, fg='red', err=True)
        raise typer.Exit(1)

    try:
        res = client.request("get", "verification")
        if res.status_code == 401:
            # Distinguish "token present but rejected" from "no token set"
            # to give the user an actionable message.
            if os.getenv(TOKEN_KEY):
                msg = ("Authentication failed. Most likely the value for the SMART_TESTS_TOKEN "
                       "environment variable is invalid.")
            else:
                msg = ("Authentication failed. Please set the SMART_TESTS_TOKEN. "
                       "If you intend to use tokenless authentication, "
                       "kindly reach out to our support team for further assistance.")
            click.secho(msg, fg='red', err=True)
            tracking_client.send_error_event(
                event_name=Tracking.ErrorEvent.USER_ERROR,
                stack_trace=msg,
            )
            raise typer.Exit(2)
        res.raise_for_status()
    except Exception as e:
        # NOTE(review): if typer.Exit derives from Exception, the Exit(2)
        # raised above is caught here and verification continues with exit
        # code 0 — confirm whether that is intended.
        tracking_client.send_error_event(
            event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
            stack_trace=str(e),
            api="verification",
        )
        client.print_exception_and_recover(e)

    if java is None:
        msg = "Java is not installed. Install Java version 8 or newer to use the Smart Tests CLI."
        tracking_client.send_error_event(
            event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
            stack_trace=msg
        )
        click.secho(msg, fg='red', err=True)
        raise typer.Exit(1)

    # Level 2 check: versions. This is more fragile than just reporting the number, so we move
    # this out here

    if compare_version([int(x) for x in platform.python_version().split('.')], [3, 6]) < 0:
        msg = "Python 3.6 or later is required"
        tracking_client.send_error_event(
            event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
            stack_trace=msg
        )
        click.secho(msg, fg='red', err=True)
        raise typer.Exit(1)

    if check_java_version(java) < 0:
        msg = "Java 8 or later is required"
        tracking_client.send_error_event(
            event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
            stack_trace=msg
        )
        click.secho(msg, fg='red', err=True)
        raise typer.Exit(1)

    click.secho("Your CLI configuration is successfully verified" + emoji(" \U0001f389"), fg='green')
Binary file
File without changes
File without changes
@@ -0,0 +1,24 @@
1
+ import re
2
+
3
+ from ..commands.subset import Subset
4
+ from . import smart_tests
5
+
6
+
7
@smart_tests.subset
def subset(client: Subset):
    """Read `adb` instrumentation output from stdin and emit one test path per class.

    Consecutive duplicate class names are collapsed, so each class in a
    contiguous run is registered only once.
    """
    status_line = re.compile(r'^INSTRUMENTATION_STATUS: class=(.+)$')
    last_seen = None
    for raw in client.stdin():
        m = status_line.match(raw)
        if m is None:
            continue
        current = m.group(1)
        if current == last_seen:
            continue
        client.test_path([{"type": "class", "name": current}])
        last_seen = current

    client.separator = ','

    client.run()
22
+
23
+
24
# `record tests` delegates to the shared report-file implementation
# provided by CommonRecordTestImpls.
record_tests = smart_tests.CommonRecordTestImpls(__name__).report_files()
@@ -0,0 +1,35 @@
1
+ import os
2
+ from typing import Annotated, List
3
+
4
+ import smart_tests.args4p.typer as typer
5
+
6
+ from ..commands.subset import Subset
7
+ from ..utils.file_name_pattern import jvm_test_pattern
8
+ from . import smart_tests
9
+
10
+
11
+ @smart_tests.subset
12
+ def subset(
13
+ client: Subset,
14
+ source_roots: Annotated[List[str], typer.Argument(
15
+ multiple=True,
16
+ required=False,
17
+ help="Source directories to scan for test files"
18
+ )] = []
19
+ ):
20
+ def file2test(f: str):
21
+ if jvm_test_pattern.match(f):
22
+ f = f[:f.rindex('.')] # remove extension
23
+ # directory -> package name conversion
24
+ cls_name = f.replace(os.path.sep, '.')
25
+ return [{"type": "class", "name": cls_name}]
26
+ else:
27
+ return None
28
+
29
+ for root in source_roots:
30
+ client.scan(root.rstrip('/'), "**/*Test.java", file2test)
31
+
32
+ client.run()
33
+
34
+
35
# `record tests` delegates to the shared report-file implementation
# provided by CommonRecordTestImpls.
record_tests = smart_tests.CommonRecordTestImpls(__name__).report_files()
@@ -0,0 +1,103 @@
1
+ import json
2
+ import os
3
+ import sys
4
+ from pathlib import Path
5
+ from typing import Annotated, Generator, List
6
+
7
+ from junitparser import TestCase, TestSuite # type: ignore
8
+
9
+ import smart_tests.args4p.typer as typer
10
+
11
+ from ..args4p.converters import path
12
+ from ..commands.subset import Subset
13
+ from ..testpath import TestPath
14
+ from ..utils.logger import Logger
15
+ from . import smart_tests
16
+
17
+
18
def make_test_path(pkg, target) -> TestPath:
    """Build the two-component test path (package, target) for a Bazel label."""
    package_component = {'type': 'package', 'name': pkg}
    target_component = {'type': 'target', 'name': target}
    return [package_component, target_component]
20
+
21
+
22
@smart_tests.subset
def subset(client: Subset):
    """Read Bazel labels (e.g. ``//foo/bar:zot``) from stdin and subset them."""
    for raw in client.stdin():
        # Only label lines like //foo/bar:zot are recognized.
        if not raw.startswith('//'):
            continue
        # //foo/bar:zot -> package "foo/bar" and target "zot"
        pkg, target = raw.rstrip('\n').split(':')
        # TODO: error checks and more robustness
        client.test_path(make_test_path(pkg.lstrip('//'), target))

    def format_label(tp):
        return tp[0]['name'] + ":" + tp[1]['name']

    client.formatter = format_label
    client.run()
34
+
35
+
36
+ smart_tests.CommonDetectFlakesImpls(__name__, formatter=lambda x: x[0]['name'] + ":" + x[1]['name']).detect_flakes()
37
+
38
+
39
@smart_tests.record.tests
def record_tests(
    client,
    workspace: Annotated[str, typer.Argument(help="Bazel workspace directory")],
    build_event_json_files: Annotated[List[Path] | None, typer.Option(
        "--build-event-json",
        help="set file path generated by --build_event_json_file",
        multiple=True,
        type=path(exists=True)
    )] = None,
):
    """
    Takes Bazel workspace, then report all its test results.

    Reports are read from <workspace>/bazel-testlogs. When --build-event-json
    files are given, only the test.xml files for the labels listed there are
    reported; otherwise the whole tree is scanned.
    """
    base = Path(workspace).joinpath('bazel-testlogs').resolve()
    if not base.exists():
        sys.exit("No such directory: %s" % str(base))

    # Keep the stock path builder so we can append its components after ours.
    default_path_builder = client.path_builder

    def f(case: TestCase, suite: TestSuite, report_file: str) -> TestPath:
        # In Bazel, report path name contains package & target.
        # for example, for //foo/bar:zot, the report file is at bazel-testlogs/foo/bar/zot/test.xml
        # TODO: robustness
        pkgNtarget = report_file[len(str(base)) + 1:-len("/test.xml")]

        # last path component is the target, the rest is package
        # TODO: does this work correctly when on Windows?
        path = make_test_path(os.path.dirname(pkgNtarget), os.path.basename(pkgNtarget))

        # let the normal path building kicks in
        path.extend(default_path_builder(case, suite, report_file))
        return path

    client.path_builder = f
    # Bazel caches test logs, so stale timestamps are expected; don't filter on them.
    client.check_timestamp = False

    if build_event_json_files:
        for l in parse_build_event_json(build_event_json_files):
            # None marks an unparseable build-event line; skip it.
            if l is None:
                continue

            client.report(str(Path(base).joinpath(l, 'test.xml')))
    else:
        client.scan(str(base), '**/test.xml')

    client.run()
86
+
87
+
88
def parse_build_event_json(files: List[Path]) -> Generator:
    """Yield test labels (as ``foo/bar/zot`` relative paths) from Bazel
    build-event JSON files.

    Each input file is newline-delimited JSON. A line that fails to parse is
    logged and yields ``None`` so the caller can skip it; other lines yield a
    label only when they describe a testResult event.
    """
    for file in files:
        with open(file) as f:
            for line in f:
                try:
                    d = json.loads(line)
                except Exception:
                    Logger().error(f"Can not parse build event json {line}")
                    yield None
                    # Bug fix: previously execution fell through to the checks
                    # below with `d` undefined (first line) or stale from the
                    # previous line, causing a NameError or duplicate labels.
                    continue
                if "id" in d:
                    if "testResult" in d["id"]:
                        if "label" in d["id"]["testResult"]:
                            label = d["id"]["testResult"]["label"]
                            # replace //foo/bar:zot to /foo/bar/zot
                            label = label.lstrip("/").replace(":", "/")
                            yield label
@@ -0,0 +1,62 @@
1
+ import os
2
+ from typing import Annotated, List, cast
3
+ from xml.etree import ElementTree as ET
4
+
5
+ import smart_tests.args4p.typer as typer
6
+
7
+ from ..commands.record.tests import RecordTests
8
+ from ..commands.subset import Subset
9
+ from . import smart_tests
10
+
11
+
12
@smart_tests.record.tests
def record_tests(
    client: RecordTests,
    reports: Annotated[List[str], typer.Argument(
        multiple=True,
        help="Test report files to process"
    )],
):
    """Record Behave test results, stamping each suite with its .feature file."""
    for report in reports:
        client.report(report)

    def enrich_report(xml_path: str) -> ET.ElementTree:
        # Behave names suites "<feature>.<class>"; split that back apart and
        # attach the pieces to the XML so path building can use them.
        tree = cast(ET.ElementTree, ET.parse(xml_path))
        for suite in tree.iter("testsuite"):
            if len(suite) == 0:
                continue

            suite_name = suite.get('name')
            if suite_name is None:
                continue

            parts = suite_name.split('.')
            if len(parts) < 2:
                continue

            suite.attrib.update({"filepath": parts[0] + ".feature"})
            for case in suite:
                case.attrib.update({"classname": parts[1]})

        return tree

    client.junitxml_parse_func = enrich_report
    client.run()
48
+
49
+
50
@smart_tests.subset
def subset(client: Subset):
    """Read feature file paths from stdin and subset by bare file name."""
    for line in client.stdin():
        # find() must be > 0: also skips names where ".feature" sits at index 0.
        if line.find(".feature") <= 0:
            continue
        head_tail = os.path.split(line)
        if len(head_tail) < 2:
            # os.path.split always returns a 2-tuple; defensive guard kept as-is.
            continue
        client.test_path(head_tail[1].rstrip('\n'))

    client.separator = "|"
    client.run()
@@ -0,0 +1,33 @@
1
+ import json
2
+ from typing import List
3
+
4
+ import click
5
+
6
+ from ..commands.subset import Subset
7
+ from ..testpath import TestPath
8
+ from . import smart_tests
9
+
10
+
11
@smart_tests.subset
def subset(client: Subset):
    """Read test file names from stdin and emit the subset as CodeceptJS JSON."""
    def as_json(paths: List[TestPath]) -> str:
        # The output would be something like this:
        # {"tests": ["test/example_test.js", "test/login_test.js"]}
        return json.dumps({"tests": [client.formatter(t) for t in paths]})

    def emit(output: List[TestPath], rests: List[TestPath]):
        if client.rest:
            with open(client.rest, "w+", encoding="utf-8") as f:
                f.write(as_json(rests))
        if output:
            click.echo(as_json(output))

    # read lines as test file names
    for line in client.stdin():
        name = line.rstrip("\n")
        if name:
            client.test_path(name)
    client.output_handler = emit

    client.run()
29
+
30
+
31
# `record tests` delegates to the shared file-profile report implementation
# provided by CommonRecordTestImpls.
record_tests = smart_tests.CommonRecordTestImpls(__name__).file_profile_report_files()
32
+
33
+ # split_subset = smart_tests.CommonSplitSubsetImpls(__name__).split_subset()
@@ -0,0 +1,164 @@
1
+ import glob
2
+ import json
3
+ import os
4
+ import re
5
+ from pathlib import Path
6
+ from typing import Annotated, List
7
+ from xml.etree import ElementTree as ET
8
+
9
+ import click
10
+
11
+ import smart_tests.args4p.typer as typer
12
+
13
+ from ..commands.record.tests import RecordTests
14
+ from ..commands.subset import Subset
15
+ from . import smart_tests
16
+
17
+
18
@smart_tests.subset
def subset(
    client: Subset,
    file: Annotated[str, typer.Argument(
        help="JSON file to process"
    )],
    output_regex_files: Annotated[bool, typer.Option(
        "--output-regex-files",
        help="Output test regex to files"
    )] = False,
    output_regex_files_dir: Annotated[str, typer.Option(
        "--output-regex-files-dir",
        help="Output directory for test regex"
    )] = ".",
    output_regex_files_size: Annotated[int, typer.Option(
        "--output-regex-files-size",
        help="Max size of each regex file"
    )] = 60 * 1024,
):
    """Subset CTest cases from a ``{"tests": [{"name": ...}, ...]}`` JSON file.

    By default the subset is printed as '|'-joined anchored regexes; with
    --output-regex-files, the subset and remainder regexes are instead written
    to size-capped files in --output-regex-files-dir.
    """
    if file:
        with Path(file).open() as json_file:
            data = json.load(json_file)
    else:
        # NOTE(review): client.stdin() is iterated line-by-line elsewhere in
        # this package — passing it straight to json.loads looks suspect;
        # the inline TODO suggests this branch is known-unfinished.
        data = json.loads(client.stdin())  # type: ignore # TODO

    for test in data['tests']:
        case = test['name']
        client.test_path([{'type': 'testcase', 'name': case}])

    if output_regex_files:
        def handler(output, rests):
            # Write subset_N / rest_N regex files instead of printing.
            _write_regex_files(output_regex_files_dir, 'subset', output_regex_files_size, output)
            _write_regex_files(output_regex_files_dir, 'rest', output_regex_files_size, rests)
        client.output_handler = handler
        client.run()
    else:
        client.formatter = lambda x: f"^{x[0]['name']}$"
        client.separator = '|'
        client.run()
57
+
58
+
59
def _write_regex_files(output_dir, prefix, max_size, paths):
    """Write anchored test-name regexes to ``{prefix}_{i}`` files in
    *output_dir*, each holding at most *max_size* characters of
    '|'-joined patterns."""
    # Python's regexp spec and CTest's regexp spec would be different, but
    # this escape would work in most of the cases.
    patterns = ['^' + re.escape(tp[0]['name']) + '$' for tp in paths]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for index, chunk in enumerate(_group_by_size(patterns, max_size)):
        target = os.path.join(output_dir, f"{prefix}_{index}")
        with open(target, 'w') as f:
            f.write('|'.join(chunk) + '\n')
68
+
69
+
70
def _group_by_size(elems, max_size):
    """Partition *elems* into consecutive groups whose '|'-joined length
    stays within *max_size* (counting +1 per separator).

    A single element longer than *max_size* still forms its own group.
    """
    ret = []
    curr = []
    curr_size = 0
    for elem in elems:
        # +1 for the separator
        if max_size < curr_size + len(elem) + 1:
            # Bug fix: don't emit an empty leading group when the very first
            # element already exceeds max_size.
            if curr:
                ret.append(curr)
            curr = [elem]
            curr_size = len(elem)
        else:
            curr.append(elem)
            # Bug fix: the size must accumulate; the previous code reset it to
            # len(elem) + 1, letting groups grow far beyond max_size.
            curr_size += len(elem) + 1
    if len(curr) != 0:
        ret.append(curr)
    return ret
86
+
87
+
88
@smart_tests.record.tests
def record_tests(
    client: RecordTests,
    source_roots: Annotated[List[str], typer.Argument(
        multiple=True,
        help="Source root directories or files to process"
    )],
):
    """Record CTest results from the given roots (globs, directories, or files),
    converting CTest's native XML into JUnit XML on the fly."""
    for root in source_roots:
        match = False
        # Each root may be a glob pattern; directories are scanned for *.xml,
        # plain files are reported directly.
        for t in glob.iglob(root, recursive=True):
            match = True
            if os.path.isdir(t):
                client.scan(t, "*.xml")
            else:
                client.report(t)
        if not match:
            click.echo(f"No matches found: {root}", err=True)

    def parse_func(p: str) -> ET.ElementTree:
        """
        Convert from CTest own XML format to JUnit XML format
        The projections of these properties are based on
        https://github.com/rpavlik/jenkins-ctest-plugin/blob/master/ctest-to-junit.xsl
        """
        original_tree = ET.parse(p)

        # Build a single synthetic <testsuite> holding every CTest <Test>.
        testsuite = ET.Element("testsuite", {"name": "CTest"})
        test_count = 0
        failure_count = 0
        skip_count = 0

        for test in original_tree.findall("./Testing/Test"):
            test_name = test.find("Name")
            if test_name is not None:
                # Execution time and captured output live under <Results>.
                duration_node = test.find("./Results/NamedMeasurement[@name=\"Execution Time\"]/Value")
                measurement_node = test.find("Results/Measurement/Value")

                stdout = measurement_node.text if measurement_node is not None else ''
                duration = duration_node.text if duration_node is not None else '0'

                testcase = ET.SubElement(testsuite, "testcase",
                                         {"name": test_name.text or '',
                                          "time": str(duration),
                                          "system-out": stdout or '',
                                          })

                system_out = ET.SubElement(testcase, "system-out")
                system_out.text = stdout

                test_count += 1
                # CTest marks outcome via the Status attribute on <Test>.
                status = test.get("Status")
                if status is not None:
                    if status == "failed":
                        failure = ET.SubElement(testcase, "failure")
                        failure.text = stdout

                        failure_count += 1

                    if status == "notrun":
                        skipped = ET.SubElement(testcase, "skipped")
                        skipped.text = stdout

                        skip_count += 1

        # Summary counters expected by JUnit consumers.
        testsuite.attrib.update({
            "tests": str(test_count),
            "time": "0",
            "failures": str(failure_count),
            "errors": "0",
            "skipped": str(skip_count)
        })

        return ET.ElementTree(testsuite)

    client.junitxml_parse_func = parse_func
    client.run()