artefacts-cli 0.6.8 (artefacts_cli-0.6.8-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- artefacts/cli/__init__.py +342 -0
- artefacts/cli/app.py +617 -0
- artefacts/cli/bagparser.py +98 -0
- artefacts/cli/constants.py +16 -0
- artefacts/cli/other.py +40 -0
- artefacts/cli/parameters.py +23 -0
- artefacts/cli/ros1.py +240 -0
- artefacts/cli/ros2.py +125 -0
- artefacts/cli/utils.py +35 -0
- artefacts/cli/utils_ros.py +68 -0
- artefacts/cli/version.py +16 -0
- artefacts/wrappers/artefacts_ros1_meta.launch +45 -0
- artefacts_cli-0.6.8.dist-info/METADATA +101 -0
- artefacts_cli-0.6.8.dist-info/RECORD +17 -0
- artefacts_cli-0.6.8.dist-info/WHEEL +5 -0
- artefacts_cli-0.6.8.dist-info/entry_points.txt +2 -0
- artefacts_cli-0.6.8.dist-info/top_level.txt +1 -0
artefacts/cli/bagparser.py
ADDED
@@ -0,0 +1,98 @@
+import sqlite3
+from rosidl_runtime_py.utilities import get_message
+from rclpy.serialization import deserialize_message
+from mcap.reader import make_reader
+from mcap_ros2.decoder import DecoderFactory
+
+
+class BagFileParser:
+    def __init__(self, bag_file):
+        self.bag_file = bag_file
+        if bag_file.endswith(".db3"):
+            self.conn = sqlite3.connect(bag_file)
+            self.cursor = self.conn.cursor()
+
+            # create a message type map
+            topics_data = self.cursor.execute(
+                "SELECT id, name, type FROM topics"
+            ).fetchall()
+            self.topic_type = {
+                name_of: type_of for id_of, name_of, type_of in topics_data
+            }
+            self.topic_id = {name_of: id_of for id_of, name_of, type_of in topics_data}
+            self.topic_msg_message = {
+                name_of: get_message(type_of) for id_of, name_of, type_of in topics_data
+            }
+        elif bag_file.endswith(".mcap"):
+            self.mcap_file = open(bag_file, "rb")
+            self.mcap_reader = make_reader(
+                self.mcap_file, decoder_factories=[DecoderFactory()]
+            )
+            self.topic_type = {}
+            self.topic_msg_message = {}
+            for schema, channel, _, _ in self.mcap_reader.iter_decoded_messages():
+                if channel.topic not in self.topic_type:
+                    self.topic_type[channel.topic] = schema.name
+                    self.topic_msg_message[channel.topic] = get_message(schema.name)
+
+    def __del__(self):
+        if hasattr(self, "conn"):
+            self.conn.close()
+        elif hasattr(self, "mcap_file"):
+            self.mcap_file.close()
+
+    def get_messages(self, topic_name):
+        """
+        Return [(timestamp0, message0), (timestamp1, message1), ...]
+        """
+        if hasattr(self, "conn"):
+            topic_id = self.topic_id[topic_name]
+            rows = self.cursor.execute(
+                "SELECT timestamp, data FROM messages WHERE topic_id = {}".format(
+                    topic_id
+                )
+            ).fetchall()
+            # Deserialize all and timestamp them
+            return [
+                (
+                    timestamp,
+                    deserialize_message(data, self.topic_msg_message[topic_name]),
+                )
+                for timestamp, data in rows
+            ]
+        elif hasattr(self, "mcap_reader"):
+            messages = []
+            for (
+                schema,
+                channel,
+                message,
+                ros_msg,
+            ) in self.mcap_reader.iter_decoded_messages():
+                if channel.topic == topic_name:
+                    timestamp = message.log_time
+                    messages.append((timestamp, ros_msg))
+            return messages
+
+    def get_last_message(self, topic_name):
+        if hasattr(self, "conn"):
+            topic_id = self.topic_id[topic_name]
+            timestamp, msg = self.cursor.execute(
+                "SELECT timestamp, data FROM messages WHERE topic_id = {} ORDER BY timestamp DESC LIMIT 1".format(
+                    topic_id
+                )
+            ).fetchone()
+            return (
+                timestamp,
+                deserialize_message(msg, self.topic_msg_message[topic_name]),
+            )
+        elif hasattr(self, "mcap_reader"):
+            last_message = None
+            for (
+                schema,
+                channel,
+                message,
+                ros_msg,
+            ) in self.mcap_reader.iter_decoded_messages():
+                if channel.topic == topic_name:
+                    last_message = (message.log_time, ros_msg)
+            return last_message
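For context, a minimal usage sketch of this parser outside the CLI (not part of the package): it assumes a ROS 2 environment where the decoder dependencies import cleanly, and a bag path that is purely illustrative.

# Hypothetical usage sketch (not shipped in the wheel): read messages from a
# recorded bag; works for both .mcap and legacy .db3 files.
from artefacts.cli.bagparser import BagFileParser

bag = BagFileParser("rosbag2_demo/rosbag2_demo_0.mcap")  # or a .db3 file
messages = bag.get_messages("/odom")              # [(timestamp, msg), ...]
timestamp, last = bag.get_last_message("/odom")   # most recent sample only
print(timestamp, last)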
artefacts/cli/constants.py
ADDED
@@ -0,0 +1,16 @@
+SUPPORTED_FRAMEWORKS = [
+    "ros2:iron",
+    "ros2:humble",
+    "ros2:galactic",
+    "ros1:noetic",
+    "maniskill:challenge2022",
+    "None",
+    "none",
+    "null",
+    None,
+]
+
+DEPRECATED_FRAMEWORKS = {
+    "ros2:0": "ros2:galactic",
+    "ros1:0": "ros1:noetic",
+}
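The deprecated names map onto current equivalents. A small helper along these lines (illustrative only, not a function shipped in the package) shows the intended lookup:

# Illustrative helper (not part of the package): normalize a framework string
# via DEPRECATED_FRAMEWORKS before checking it against SUPPORTED_FRAMEWORKS.
from artefacts.cli.constants import SUPPORTED_FRAMEWORKS, DEPRECATED_FRAMEWORKS

def resolve_framework(name):
    name = DEPRECATED_FRAMEWORKS.get(name, name)  # e.g. "ros2:0" -> "ros2:galactic"
    if name not in SUPPORTED_FRAMEWORKS:
        raise ValueError(f"Unsupported framework: {name}")
    return name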
artefacts/cli/other.py
ADDED
@@ -0,0 +1,40 @@
+import json
+import yaml
+import os
+from .utils import run_and_save_logs
+from .parameters import TMP_SCENARIO_PARAMS_YAML, TMP_SCENARIO_PARAMS_JSON
+
+
+def generate_parameter_output(params: dict):
+    """Store `params` in both json and yaml temporary files
+    Note: fixed filenames will lead concurrent executions to overwrite each other
+    """
+    with open(TMP_SCENARIO_PARAMS_JSON, "w") as f:
+        json.dump(params, f)
+    with open(TMP_SCENARIO_PARAMS_YAML, "w") as f:
+        yaml.dump(params, f)
+
+
+def run_other_tests(run):
+    scenario = run.params
+    if "params" in scenario:
+        generate_parameter_output(scenario["params"])
+    full_env = {**os.environ, **scenario.get("params", {})}
+
+    command = scenario["run"]
+    run_and_save_logs(
+        command,
+        shell=True,
+        env={k: str(v) for k, v in full_env.items()},
+        output_path=os.path.join(run.output_path, "test_process_log.txt"),
+    )
+
+    results = []
+    success = True
+    run.log_artifacts(run.output_path)
+
+    for output in scenario.get("output_dirs", []):
+        run.log_artifacts(output)
+
+    run.log_tests_results(results, success)
+    return results, success
artefacts/cli/parameters.py
ADDED
@@ -0,0 +1,23 @@
+from itertools import product
+from typing import Iterable
+
+# constants for internal temporary files
+
+TMP_RUNTIME_PARAMS_YAML = "/tmp/runtime_params.yaml"
+TMP_SCENARIO_PARAMS_YAML = "/tmp/scenario_params.yaml"
+TMP_SCENARIO_PARAMS_JSON = "/tmp/scenario_params.json"
+
+
+def iter_grid(grid_spec: dict) -> Iterable[dict]:
+    """Iterate over the points defined by the `grid_spec`"""
+    if not grid_spec:
+        yield {}
+    else:
+        # Sort the keys of the dictionary, for reproducibility
+        items = sorted(grid_spec.items())
+        keys, values = zip(*items)
+        # Make sure single values are converted to lists
+        values = [x if type(x) == list else [x] for x in values]
+        for v in product(*values):
+            params = dict(zip(keys, v))
+            yield params
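iter_grid expands a grid spec into the cartesian product of its values, one dict per combination. For example:

# Example of the expansion performed by iter_grid: keys are sorted first and
# single values are wrapped into one-element lists.
from artefacts.cli.parameters import iter_grid

list(iter_grid({"speed": [0.5, 1.0], "controller": "pid"}))
# -> [{'controller': 'pid', 'speed': 0.5}, {'controller': 'pid', 'speed': 1.0}]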
artefacts/cli/ros1.py
ADDED
@@ -0,0 +1,240 @@
+import yaml
+import subprocess
+from glob import glob
+import os
+import logging
+
+from .utils import run_and_save_logs
+from .utils_ros import parse_tests_results
+from .parameters import TMP_RUNTIME_PARAMS_YAML, TMP_SCENARIO_PARAMS_YAML
+
+logging.basicConfig(level=logging.INFO)
+
+
+def generate_scenario_parameter_output(params: dict, param_file: str) -> None:
+    """
+    Write `params` in `param_file` as YAML format
+    (supports namespace nesting via forward slashes)
+    roslaunch will then load them
+    """
+    with open(param_file, "w") as f:
+        yaml.dump(params, f)
+
+
+def generate_runtime_parameter_output(params: dict, param_file: str) -> None:
+    """
+    Write `params` in `param_file` as YAML format
+    A strict dump is performed to avoid ambiguity:
+    whatever the user specifies in artefacts.yaml/runtime/params will be made available in param_file
+    """
+    with open(param_file, "w") as f:
+        yaml.dump(params, f)
+
+
+def generate_rosbag_args(scenario: dict) -> str:
+    if "rosbag_record" in scenario.keys():
+        rosbag_record = scenario["rosbag_record"]
+    else:
+        # default behavior: do not record any rosbag
+        rosbag_record = "none"
+
+    # return the rosbag args as a string with the proper format
+    if rosbag_record == "none":
+        return "none"
+    elif rosbag_record == "all":
+        return "--all"
+    elif rosbag_record == "subscriptions":
+        if "subscriptions" in scenario.keys():
+            sub = scenario["subscriptions"]
+            topics = " ".join(list(sub.values()))
+            return topics
+        else:
+            logging.warning(
+                "[warning in generate_rosbag_args] rosbag_record asks for 'subscriptions' but they are not specified. Falling back to default: no rosbag will be recorded"
+            )
+            return "none"
+    else:
+        assert (
+            type(rosbag_record) == list
+        ), "rosbag_record supports 'all', 'none', 'subscriptions' or a list of strings interpreted as a list of ROS topics, regex supported"
+        for e in rosbag_record:
+            assert (
+                type(e) == str
+            ), "Elements of the rosbag_record list must only be strings. They are interpreted as a list of ROS topics, regex supported"
+        return f"--regex {' '.join(rosbag_record)}"
+
+
+def get_result_path(scenario, PKGDIR, PACKAGE):
+    """Can't choose the unittest output .xml filename with rostest.
+    Instead, re-create here the hardcoded naming logic of rostest:
+    need to find out the name of the test method
+    """
+    try:
+        import rospkg
+        import ast
+        import xml.etree.ElementTree as ET
+
+        # find the path of the user's launch file
+        launch_file = (
+            rospkg.RosPack().get_path(scenario["ros_testpackage"])
+            + "/launch/"
+            + scenario["ros_testfile"]
+        )
+        # find the user's test file and test package
+        xml_root = ET.parse(launch_file).getroot()
+        test_package = xml_root.find("test").get("pkg")
+        test_file = xml_root.find("test").get("type")
+        # find the path of the user's test file
+        full_path = rospkg.RosPack().get_path(test_package) + "/src/" + test_file
+        # parse the python file
+        with open(full_path) as file:
+            node = ast.parse(file.read())
+        # find the class and method that match unittest convention naming (start with 'Test')
+        # note: returns the first match
+        test_class = [
+            n for n in node.body if isinstance(n, ast.ClassDef) and "Test" in n.name
+        ][0]
+        test_class_suffix = test_class.name.split("Test")[-1].lower()
+        # Finally, build the unittest result file path
+        test_result_file_path = os.path.expanduser(
+            f"{PKGDIR}/{PACKAGE}/rosunit-{test_class_suffix}.xml"
+        )
+        return test_result_file_path
+    except Exception as e:
+        logging.error(f"[Exception in get_result_path()] {e}")
+        logging.error(
+            f"Unable to parse the ros1 .launch specified ({scenario['ros_testfile']}) and the <test> tag within to find the unittest test method's name.",
+            "Please ensure all ROS and unittest naming conventions are respected. Exiting..",
+        )
+        return None
+
+
+def get_wrapper_path():
+    """Get the absolute path of the artefacts_ros1_meta.launch wrapper inside our warp-client repo"""
+    current_dir = os.path.abspath(os.path.dirname(__file__))
+    wrapper_path = current_dir + "/wrappers/artefacts_ros1_meta.launch"
+    return wrapper_path
+
+
+def generate_sim_args(simulator_value, nosim_flag):
+    if nosim_flag:
+        return "none"
+    else:
+        return simulator_value
+
+
+def run_ros1_tests(run):
+    scenario = run.params
+    job = run.job.params
+
+    # ROS1 specific naming conventions for outputting results (can be chosen arbitrarily)
+    PKGDIR = f"{os.path.expanduser(os.getenv('ROS_HOME', '~/.ros'))}/test_results"
+    PACKAGE = "artefacts"
+
+    # dump params specified by the user in the scenario section of the artefacts.yaml
+    # to load them onto the rosparam server
+    if "params" in scenario:
+        param_file = TMP_SCENARIO_PARAMS_YAML  # note: fixed filename will lead concurrent executions to overwrite each other
+        generate_scenario_parameter_output(scenario["params"], param_file)
+    else:
+        param_file = "none"
+
+    # dump params specified by the user in the runtime section of the artefacts.yaml
+    # to make them available for the prelaunch script
+    if "params" in job["runtime"]:
+        # note: fixed filename will lead concurrent executions to overwrite each other
+        generate_runtime_parameter_output(
+            job["runtime"]["params"], TMP_RUNTIME_PARAMS_YAML
+        )
+
+    # take note of previous rosbags
+    rosbag_path = os.path.expanduser("~/.ros/*.bag")
+    preexisting_rosbags = glob(rosbag_path)
+
+    # get the unittest result path
+    test_result_file_path = get_result_path(scenario, PKGDIR, PACKAGE)
+    if test_result_file_path is None:
+        return {}, False
+
+    ## Main launch of the test sequence:
+    # command line arguments control user specified settings for resource provisions
+    # get_wrapper_path() is the absolute path of artefacts_ros1_meta.launch inside the warp-client repo
+    command = [
+        "rostest",
+        get_wrapper_path(),
+        "--package",
+        PACKAGE,
+        "--pkgdir",
+        PKGDIR,
+        f"rosbag_args:='{generate_rosbag_args(scenario)}'",
+        f"simulator:={generate_sim_args(job['runtime']['simulator'], run.job.nosim)}",
+        f"param_file:={param_file}",
+        f"ros_testfile:={scenario['ros_testfile']}",
+        f"ros_testpackage:={scenario['ros_testpackage']}",
+    ]
+    # for debugging: break rostest isolation from the host's ROS master
+    if run.job.noisolation:
+        command.insert(1, "--reuse-master")
+    # last step to prepare the command for execution with subprocess.run():
+    if "pre_launch" in job["runtime"].keys():
+        # join the user's specified pre_launch command with the regular rostest command
+        # note that a single subprocess.run() call is required for any environment variables sourced in the pre_launch command to be available for the rostest command
+        command = f"{job['runtime']['pre_launch']} && {' '.join(command)}"
+    else:
+        command = " ".join(command)
+
+    # Main: test execution
+    # shell=True required to execute two commands in the same shell environment
+    run_and_save_logs(
+        command,
+        shell=True,
+        executable="/bin/bash",
+        output_path=os.path.join(run.output_path, "test_process_log.txt"),
+    )
+
+    # parse xml generated by rostest
+    results, success = parse_tests_results(test_result_file_path)
+
+    # upload artefacts generated by rostest
+    run.log_artifacts(os.path.expanduser(f"{PKGDIR}/{PACKAGE}"))
+
+    # upload files from the general output folder
+    run.log_artifacts(run.output_path)
+
+    # upload any additional files in the folders specified by the user in artefacts.yaml
+    for output in scenario.get("output_dirs", []):
+        run.log_artifacts(output)
+
+    # check if any rosbag was created
+    rosbags = glob(rosbag_path)
+    new_rosbags = set(rosbags).difference(set(preexisting_rosbags))
+    if len(new_rosbags) > 0:
+        new_rosbag = new_rosbags.pop()
+        logging.info(f"new rosbag found: {new_rosbag}")
+        # upload rosbag to dashboard
+        run.log_single_artifact(new_rosbag, "rosbag")
+        # perform any post processing, using the rosbag
+        if "rosbag_postprocess" in scenario.keys():
+            logging.info("starting rosbag postprocess")
+            post_process_folder = os.path.expanduser(f"{PKGDIR}/{PACKAGE}_postprocess")
+            os.makedirs(post_process_folder, exist_ok=True)
+            existing_files = glob(f"{post_process_folder}/*")
+            for f in existing_files:
+                os.remove(f)
+            command = [
+                "rosrun",
+                scenario["ros_testpackage"],
+                scenario["rosbag_postprocess"],
+                "--bag_path",
+                f"{new_rosbag}",
+                "--out_folder",
+                f"{post_process_folder}",
+            ]
+            # shell=True required to support command list items that are strings with spaces
+            # (allows passing additional arguments from the convenience of artefacts.yaml)
+            subprocess.run(" ".join(command), shell=True, executable="/bin/bash")
+            run.log_artifacts(post_process_folder)
+            run.log_post_process_metrics(post_process_folder)
+
+    run.log_tests_results(results, success)
+    return results, success
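For a scenario without pre_launch, the rostest invocation assembled above collapses into a single shell string roughly like the following sketch (all values are placeholders; the wrapper path comes from get_wrapper_path()):

# Rough shape of the command string built by run_ros1_tests (placeholder values).
command = (
    "rostest /path/to/artefacts/cli/wrappers/artefacts_ros1_meta.launch "
    "--package artefacts --pkgdir ~/.ros/test_results "
    "rosbag_args:='--all' simulator:=gazebo "
    "param_file:=/tmp/scenario_params.yaml "
    "ros_testfile:=my_test.launch ros_testpackage:=my_package"
)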
artefacts/cli/ros2.py
ADDED
@@ -0,0 +1,125 @@
+import yaml
+from glob import glob
+import os
+import shutil
+
+from .utils import run_and_save_logs
+from .utils_ros import parse_tests_results
+from .parameters import TMP_SCENARIO_PARAMS_YAML
+
+
+def generate_scenario_parameter_output(params: dict, param_file: str):
+    """
+    Store `params` in `param_file` and convert to ros2 param file nested format,
+    to be used by the launch file
+    """
+    content = {}
+    for k, v in params.items():
+        try:
+            node, pname = k.split("/")
+        except Exception:
+            print("ensure params are in the format `{node_name}/{param_name}`")
+            return
+        if node not in content:
+            content[node] = {"ros__parameters": {}}
+        content[node]["ros__parameters"][pname] = v
+    with open(param_file, "w") as f:
+        yaml.dump(content, f)
+
+
+def run_ros2_tests(run):
+    scenario = run.params
+    # TODO: HOW TO ADD NODE to launch
+    # TODO: set params from conf
+    # TODO: get params to log
+    # TODO: where is the rosbag
+    if "params" in run.params:
+        # note: fixed filename will lead concurrent executions to overwrite each other
+        generate_scenario_parameter_output(
+            run.params["params"], TMP_SCENARIO_PARAMS_YAML
+        )
+
+    preexisting_rosbags = glob("rosbag2*")
+    test_result_file_path = f"{run.output_path}/tests_junit.xml"
+    launch_arguments = [
+        f"{k}:={v}" for k, v in run.params.get("launch_arguments", {}).items()
+    ]
+    command = [
+        "launch_test",
+        "--junit-xml",
+        test_result_file_path,
+        scenario["ros_testfile"],
+    ] + launch_arguments
+
+    # save ROS logs in the output dir
+    ros_log_dir = os.path.join(run.output_path, "ros_logs")
+
+    # Main: test execution
+    # shell=True required to support command list items that are strings with spaces
+    # (this way, scenario["ros_testfile"] can be either a path to the launch file or '<package_name> <launch_name>')
+    return_code = run_and_save_logs(
+        " ".join(command),
+        shell=True,
+        executable="/bin/bash",
+        env={
+            **os.environ,
+            **{
+                "ROS_LOG_DIR": ros_log_dir,
+                "ARTEFACTS_SCENARIO_PARAMS_FILE": TMP_SCENARIO_PARAMS_YAML,
+            },
+        },
+        output_path=os.path.join(run.output_path, "test_process_log.txt"),
+    )
+    if return_code == 2:
+        raise Exception(
+            f"Running {scenario['ros_testfile']} failed. Please check that the launch file exists."
+        )
+
+    # zip ROS logs and delete the original folder
+    if os.path.exists(ros_log_dir):
+        shutil.make_archive(ros_log_dir, "zip", ros_log_dir)
+        shutil.rmtree(ros_log_dir)
+
+    # parse xml generated by launch_test
+    results, success = parse_tests_results(test_result_file_path)
+    if success is None:
+        run.log_tests_results(results, False)
+        return results, success
+    run.log_artifacts(run.output_path)
+
+    # upload any additional files in the folders specified by the user in artefacts.yaml
+    for output in scenario.get("output_dirs", []):
+        run.log_artifacts(output)
+
+    # check if any rosbag was created
+    rosbags = glob("rosbag2*")
+    new_rosbags = set(rosbags).difference(set(preexisting_rosbags))
+    from artefacts.bagparser import BagFileParser
+
+    if len(new_rosbags) > 0:
+        rosbag_path = new_rosbags.pop()
+        run.log_artifacts(rosbag_path, "rosbag")
+        if "metrics" in run.params:
+            # TODO should go inside BagFileParser?
+            db_files = glob(f"{rosbag_path}/*.mcap")  # Ros2 Default
+            if not db_files:
+                db_files = glob(f"{rosbag_path}/*.db3")  # Legacy
+            if not db_files:
+                raise FileNotFoundError(
+                    "No .mcap or .db3 files found in the specified path."
+                )
+            db_file = db_files[0]
+            bag = BagFileParser(db_file)
+            for metric in run.params["metrics"]:
+                try:
+                    last_value = bag.get_last_message(metric)[1].data
+                    run.log_metric(metric, last_value)
+                except KeyError:
+                    print(f"Metric {metric} not found in rosbag, skipping.")
+                except (TypeError, IndexError):
+                    print(
+                        f"Metric {metric} not found. Is it being published? Skipping."
+                    )
+
+    run.log_tests_results(results, success)
+    return results, success
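The metrics loop above reads the last message on each listed topic and logs its .data field. A scenario exercising it could look roughly like this (values are illustrative, not package defaults):

# Illustrative scenario fragment: each entry in "metrics" is treated as a topic
# whose last message carries a .data field, e.g. a std_msgs/Float64 published by the test.
scenario = {
    "ros_testfile": "test/launch_test_nav.py",
    "launch_arguments": {"world": "empty"},
    "metrics": ["/metrics/tracking_error", "/metrics/completion_time"],
}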
artefacts/cli/utils.py
ADDED
@@ -0,0 +1,35 @@
+import subprocess
+import sys
+
+
+def run_and_save_logs(
+    args, output_path, shell=False, executable=None, env=None, cwd=None
+):
+    """
+    Run a command and save stdout and stderr to a file in output_path
+
+    Note: explicitly list used named params instead of using **kwargs to avoid typing issue: https://github.com/microsoft/pyright/issues/455#issuecomment-780076232
+    """
+    output_file = open(output_path, "wb")
+    proc = subprocess.Popen(
+        args,
+        stdout=subprocess.PIPE,  # Capture stdout
+        stderr=subprocess.PIPE,  # Capture stderr
+        shell=shell,
+        executable=executable,
+        env=env,
+        cwd=cwd,
+    )
+    # write test-process stdout and stderr into file and stdout
+    if proc.stdout:
+        for line in proc.stdout:
+            decoded_line = line.decode()
+            sys.stdout.write(decoded_line)
+            output_file.write(line)
+    if proc.stderr:
+        for line in proc.stderr:
+            decoded_line = line.decode()
+            sys.stderr.write(decoded_line)
+            output_file.write(line)
+    proc.wait()
+    return proc.returncode
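A minimal usage sketch of run_and_save_logs, mirroring how the ROS runners call it above:

# Minimal usage sketch: run a shell command, echo its output to the console,
# persist it to a log file, and get back the subprocess exit status.
from artefacts.cli.utils import run_and_save_logs

return_code = run_and_save_logs(
    "echo hello && echo oops >&2",
    shell=True,
    executable="/bin/bash",
    output_path="test_process_log.txt",
)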
artefacts/cli/utils_ros.py
ADDED
@@ -0,0 +1,68 @@
+from junitparser import JUnitXml, Attr, Element
+
+
+class FailureElement(Element):
+    _tag = "failure"
+    message = Attr()
+
+
+def parse_tests_results(file):
+    def parse_suite(suite):
+        nonlocal success, results
+        suite_results = {
+            "suite": suite.name,
+            "errors": suite.errors,
+            "failures": suite.failures,
+            "tests": suite.tests,
+        }
+        details = []
+        for case in suite:
+            case_details = {
+                "name": case.name,
+            }
+            try:
+                case_details["failure_message"] = case.child(FailureElement).message
+                case_details["result"] = "failure"
+                success = False
+            except AttributeError:
+                case_details["result"] = "success"
+            details.append(case_details)
+        suite_results["details"] = details
+        results.append(suite_results)
+
+    try:
+        xml = JUnitXml.fromfile(file)
+
+        results = []
+        success = True
+        # some xml files do not have the <testsuites> tag, just a single <testsuite>
+        if xml._tag == "testsuite":
+            # handle single suite
+            suite = xml
+            parse_suite(suite)
+        elif xml._tag == "testsuites":
+            # handle suites
+            for suite in xml:
+                parse_suite(suite)
+        # else: TODO
+        return results, success
+
+    except Exception as e:
+        print(f"[Exception in parse_tests_results] {e}")
+        print("Test result xml could not be loaded, marking success as False")
+        results = [
+            {
+                "suite": "unittest.suite.TestSuite",
+                "errors": 1,
+                "failures": 0,
+                "tests": 1,
+                "details": [
+                    {
+                        "name": "Error parsing XML test results",
+                        "failure_message": f"The test may have timed out. Exception: {e}",
+                        "result": "failure",
+                    }
+                ],
+            }
+        ]
+        return results, None
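parse_tests_results returns a list of per-suite dicts plus an overall success flag (None when the XML cannot be read). For a simple JUnit file it behaves roughly like this sketch (file name and contents are made up for illustration):

# Illustrative round trip: write a tiny JUnit XML report and parse it back into
# the (results, success) structure consumed by the runners above.
from artefacts.cli.utils_ros import parse_tests_results

with open("tests_junit.xml", "w") as f:
    f.write(
        '<testsuite name="demo" tests="1" errors="0" failures="0">'
        '<testcase name="test_reaches_goal"/></testsuite>'
    )

results, success = parse_tests_results("tests_junit.xml")
# results -> [{'suite': 'demo', 'errors': 0, 'failures': 0, 'tests': 1,
#              'details': [{'name': 'test_reaches_goal', 'result': 'success'}]}]
# success -> True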
artefacts/cli/version.py
ADDED
@@ -0,0 +1,16 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple, Union
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+    VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '0.6.8'
+__version_tuple__ = version_tuple = (0, 6, 8)