artefacts_cli-0.6.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- artefacts/cli/__init__.py +342 -0
- artefacts/cli/app.py +617 -0
- artefacts/cli/bagparser.py +98 -0
- artefacts/cli/constants.py +16 -0
- artefacts/cli/other.py +40 -0
- artefacts/cli/parameters.py +23 -0
- artefacts/cli/ros1.py +240 -0
- artefacts/cli/ros2.py +125 -0
- artefacts/cli/utils.py +35 -0
- artefacts/cli/utils_ros.py +68 -0
- artefacts/cli/version.py +16 -0
- artefacts/wrappers/artefacts_ros1_meta.launch +45 -0
- artefacts_cli-0.6.8.dist-info/METADATA +101 -0
- artefacts_cli-0.6.8.dist-info/RECORD +17 -0
- artefacts_cli-0.6.8.dist-info/WHEEL +5 -0
- artefacts_cli-0.6.8.dist-info/entry_points.txt +2 -0
- artefacts_cli-0.6.8.dist-info/top_level.txt +1 -0
artefacts/cli/__init__.py
@@ -0,0 +1,342 @@
from importlib.metadata import version, PackageNotFoundError
import json
import glob
from datetime import datetime, timezone
import logging
import os
import math
import requests
import copy
from typing import Optional

from .parameters import iter_grid

logging.basicConfig(level=logging.INFO)


try:
    __version__ = version("artefacts-cli")
except PackageNotFoundError:
    try:
        # Package is not installed, most likely dev/test mode
        from setuptools_scm import get_version

        __version__ = get_version()
    except Exception as e:
        logging.warning(f"Could not determine package version: {e}. Defaulting to 0.0.0")
        __version__ = "0.0.0"


class AuthenticationError(Exception):
    """Raised when artefacts authentication fails"""

    pass


class WarpJob:
    def __init__(
        self,
        project_id,
        api_conf,
        jobname,
        jobconf,
        dryrun=False,
        nosim=False,
        noisolation=False,
        context=None,
        run_offset=0,
    ):
        self.project_id = project_id
        self.job_id = os.environ.get("ARTEFACTS_JOB_ID", None)
        self.api_conf = api_conf
        self.start = datetime.now(timezone.utc).timestamp()
        self.uploads = {}
        self.jobname = jobname
        self.params = jobconf
        self.success = False
        self.n_runs = run_offset
        self.dryrun = dryrun
        self.nosim = nosim
        self.noisolation = noisolation
        self.context = context

        if dryrun:
            self.job_id = "dryrun"
        if self.job_id is None:
            # Only create a new job if job_id is not specified
            data = {
                "start": round(self.start),
                "status": "in progress",
                "params": json.dumps(self.params),
                "project": self.project_id,
                "jobname": self.jobname,
                "timeout": self.params.get("timeout", 5) * 60,
            }
            if context is not None:
                data["message"] = context["description"]
                data["commit"] = context["commit"]
                data["ref"] = context["ref"]
            response = requests.post(
                f"{api_conf.api_url}/{self.project_id}/job",
                json=data,
                headers=api_conf.headers,
            )
            if response.status_code != 200:
                if response.status_code == 403:
                    msg = response.json()["message"]
                    logging.warning(msg)
                    raise AuthenticationError(msg)
                logging.warning(f"Error on job creation: {response.status_code}")
                logging.warning(response.text)
                raise AuthenticationError(str(response.status_code))
            self.job_id = response.json()["job_id"]
        self.output_path = self.params.get("output_path", f"/tmp/{self.job_id}")
        os.makedirs(self.output_path, exist_ok=True)

    def log_tests_result(self, success):
        self.success = success

    def stop(self):
        end = datetime.now(timezone.utc).timestamp()
        if self.dryrun:
            return
        # Log metadata
        data = {
            "end": round(end),
            "duration": round(end - self.start),
            "success": self.success,  # needs to be determined based on all runs; can be an AND in the API
            "status": "finished",  # needs to be determined based on all runs
        }
        requests.put(
            f"{self.api_conf.api_url}/{self.project_id}/job/{self.job_id}",
            json=data,
            headers=self.api_conf.headers,
        )

    def new_run(self, scenario):
        run = WarpRun(self, scenario, self.n_runs)
        self.n_runs += 1
        return run
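
A minimal usage sketch (illustrative, not part of the package): it exercises the dry-run path of the class above together with WarpRun below. FakeAPIConf is a hypothetical stand-in for the CLI's API configuration object, of which WarpJob only reads api_url and headers; in dry-run mode no HTTP requests are made, so the sketch runs offline:

class FakeAPIConf:  # hypothetical stand-in for the CLI's API configuration
    api_url = "https://example.invalid/api"
    headers = {"Authorization": "Bearer <token>"}

job = WarpJob("my-project", FakeAPIConf(), "nightly-sim", {"timeout": 10}, dryrun=True)
run = job.new_run({"name": "basic"})   # writes under /tmp/dryrun/0
run.log_metric("collisions", 0)
run.log_tests_results([], success=True)
run.stop()                             # no-op in dry-run mode
job.log_tests_result(run.success)
job.stop()                             # no-op in dry-run mode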


class WarpRun:
    def __init__(self, job, scenario, run_n):
        self.job = job
        self.start = datetime.now(timezone.utc).timestamp()
        self.uploads = {}
        self.params = scenario
        self.metrics = {}
        self.run_n = run_n
        self.output_path = self.params.get(
            "output_path", f"{self.job.output_path}/{self.run_n}"
        )
        os.makedirs(self.output_path, exist_ok=True)
        data = {
            "job_id": job.job_id,
            "run_n": self.run_n,
            "start": round(self.start),
            "tests": [],
            "params": json.dumps(self.params),
        }

        if self.job.dryrun:
            return
        query_url = (
            f"{self.job.api_conf.api_url}/{self.job.project_id}/job/{job.job_id}/run"
        )
        response = requests.post(
            query_url,
            json=data,
            headers=self.job.api_conf.headers,
        )
        if response.status_code != 200:
            if response.status_code == 403:
                msg = response.json()["message"]
                logging.warning(msg)
                raise AuthenticationError(msg)
            logging.warning(f"Error on scenario creation: {response.status_code}")
            logging.warning(response.text)
            raise AuthenticationError(str(response.status_code))

    def log_params(self, params):
        self.params = params

    def log_metric(self, name, value):
        self.metrics[name] = value

    def log_metrics(self):
        metrics = self.params.get("metrics", None)
        if isinstance(metrics, str):
            with open(f"{self.output_path}/{metrics}") as f:
                metric_values = json.load(f)
            for k, v in metric_values.items():
                self.log_metric(k, v)

    def log_post_process_metrics(self, post_process_folder):
        try:
            with open(f"{post_process_folder}/metrics.json") as f:
                metric_values = json.load(f)
            for k, v in metric_values.items():
                self.log_metric(k, v)
        except FileNotFoundError:
            # If the metrics.json file does not exist, do nothing
            pass
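
    # Editor's note (not part of the package): given scenario params such as
    # {"metrics": "metrics.json"}, log_metrics() reads <output_path>/metrics.json,
    # e.g. {"collisions": 0, "time_to_goal": 12.3}, and merges each key/value
    # pair into self.metrics via log_metric(). log_post_process_metrics() does
    # the same for <post_process_folder>/metrics.json, silently skipping a
    # missing file.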

    def log_tests_results(self, test_results, success):
        self.test_results = test_results
        self.success = success

    def log_artifacts(self, output_path, prefix=None):
        """Log all files within the folder output_path"""

        def _get_filename(root_path, full_path):
            return full_path.split(f"{root_path}/")[-1]

        files = [
            f
            for f in glob.glob(f"{output_path}/**", recursive=True)
            if "." in f and f[-1] != "/"
        ]
        # Careful: glob with recursive sometimes returns non-existent paths!
        # https://stackoverflow.com/questions/72366844/unexpected-result-with-recursive-glob-glob-using-pattern

        # Update the dictionary of uploads (key = filename, value = file path).
        # Note: the filename must not be an empty string (this happened when
        # '.ros' was in the root path).
        if prefix is not None:
            self.uploads.update(
                {f"{prefix}/{_get_filename(output_path, f)}": f for f in files}
            )
        else:
            self.uploads.update(
                {
                    _get_filename(output_path, f): f
                    for f in files
                    if _get_filename(output_path, f) != ""
                }
            )

    def log_single_artifact(self, filename, prefix=None):
        """Log a single file filename"""

        def _get_filename(path):
            return path.split("/")[-1]

        if prefix is not None:
            self.uploads.update({f"{prefix}/{_get_filename(filename)}": filename})
        else:
            self.uploads.update({_get_filename(filename): filename})
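
    # Editor's note (not part of the package): after, for example,
    # log_artifacts("/tmp/job1/0", prefix="run0") with rosbag.bag and
    # logs/test.log under that folder, self.uploads holds
    # {"run0/rosbag.bag": "/tmp/job1/0/rosbag.bag",
    #  "run0/logs/test.log": "/tmp/job1/0/logs/test.log"};
    # these keys match the upload_urls keys consumed by stop() below.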

    def stop(self):
        end = datetime.now(timezone.utc).timestamp()
        if self.job.dryrun:
            return
        # Log metadata
        data = {
            "job_id": self.job.job_id,
            "run_n": self.run_n,
            "start": math.floor(self.start),
            "params": json.dumps(self.params),
            "end": round(end),
            "duration": math.ceil(end - self.start),
            "tests": self.test_results,
            "success": self.success,
            "uploads": self.uploads,
            "metrics": self.metrics,
        }
        response = requests.put(
            f"{self.job.api_conf.api_url}/{self.job.project_id}/job/{self.job.job_id}/run/{self.run_n}",
            json=data,
            headers=self.job.api_conf.headers,
        )
        # Use S3 presigned URLs to upload the artifacts
        upload_urls = response.json()["upload_urls"]
        for key, file_name in self.uploads.items():
            upload_info = upload_urls[key]
            file_size_mb = os.path.getsize(file_name) / 1024 / 1024
            try:
                print(f"Uploading {file_name} ({file_size_mb:.2f} MB)")
                # TODO: add a retry policy
                with open(file_name, "rb") as f:
                    requests.post(
                        upload_info["url"],
                        data=upload_info["fields"],
                        files={"file": f},
                    )
            except OverflowError:
                logging.warning(f"File too large: {file_name} could not be uploaded")
            except Exception as e:
                logging.warning(f"Error uploading {file_name}: {e}, skipping")
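
A note on the upload handshake (illustrative, not part of the package): stop() assumes the PUT response maps every key of self.uploads to an S3 presigned POST. The shape sketched below is inferred from the code above, not from API documentation:

# Inferred payload consumed by WarpRun.stop():
# {
#     "upload_urls": {
#         "run0/rosbag.bag": {
#             "url": "https://<bucket>.s3.amazonaws.com/",  # presigned POST URL
#             "fields": {"key": "...", "policy": "..."},    # S3 form fields
#         },
#         ...
#     }
# }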


def init_job(
    project_id: str,
    api_conf,  # APIConf-like object exposing api_url and headers attributes
    jobname: str,
    jobconf: dict,
    dryrun: bool = False,
    nosim: bool = False,
    noisolation: bool = False,
    context: Optional[dict] = None,
    run_offset=0,
):
    return WarpJob(
        project_id,
        api_conf,
        jobname,
        jobconf,
        dryrun,
        nosim,
        noisolation,
        context,
        run_offset,
    )
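
A hypothetical invocation (illustrative, not part of the package), reusing the FakeAPIConf stand-in from the earlier sketch; the jobconf keys are taken from the code in this file (timeout in minutes, plus the scenarios block consumed by generate_scenarios below):

jobconf = {
    "timeout": 15,  # minutes; WarpJob stores timeout * 60 seconds
    "scenarios": {
        "defaults": {"params": {"gravity": [9.8]}},
        "settings": [{"name": "slalom"}, {"name": "straight"}],
    },
}
job = init_job("my-project", FakeAPIConf(), "nightly-sim", jobconf, dryrun=True)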


def generate_scenarios(jobconf, scenario_n=None):
    """Create each scenario conf by:
    1. selecting only the named scenario specified by scenario_n (for parallel processing)
    2. merging default values into each scenario
    3. generating parameter grids
    """
    scenarios = sorted(jobconf["scenarios"]["settings"], key=lambda x: x["name"])
    defaults = jobconf["scenarios"].get("defaults", {})
    first_scenario = 0
    last_scenario = None
    generated_scenarios = []
    for n, scenario_settings in enumerate(scenarios):
        if scenario_n is not None:
            if n == scenario_n:
                first_scenario = len(generated_scenarios)
            if n == scenario_n + 1:
                last_scenario = len(generated_scenarios)
        # Add `settings` keys on top of `defaults` keys
        # (taking special care to merge the `params` keys)
        scenario = copy.deepcopy(defaults)  # deepcopy mandatory
        for k in scenario_settings:
            if k in ("params", "launch_arguments"):
                # Merge scenario dict values into default dict values
                scenario[k] = {
                    **scenario.get(k, {}),
                    **scenario_settings[k],
                }
            else:
                # Add all other keys (overwriting defaults if already present)
                scenario[k] = scenario_settings[k]

        # Generate scenarios for each combination of parameter values (grid coverage)
        if "params" in scenario:
            grid_values = iter_grid(scenario["params"])
            for value in grid_values:
                grid_scenario = scenario.copy()
                grid_scenario["params"] = value
                generated_scenarios.append(grid_scenario)
        else:
            generated_scenarios.append(scenario)
    return generated_scenarios[first_scenario:last_scenario], first_scenario
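
A worked example (illustrative, not part of the package), assuming iter_grid from artefacts/cli/parameters.py (not shown in this diff) yields one dict per combination of list-valued params, as the "grid coverage" comment indicates:

jobconf = {
    "scenarios": {
        "defaults": {"params": {"gravity": [9.8]}},
        "settings": [
            {"name": "b", "params": {"speed": [1, 2]}},
            {"name": "a"},
        ],
    },
}
scenarios, first = generate_scenarios(jobconf)
# Scenarios are sorted by name, so "a" comes first and keeps only the defaults:
# [{"params": {"gravity": 9.8}, "name": "a"},
#  {"params": {"gravity": 9.8, "speed": 1}, "name": "b"},
#  {"params": {"gravity": 9.8, "speed": 2}, "name": "b"}]
# first == 0. Passing scenario_n=1 instead returns only the two "b" scenarios,
# with first == 1, the index of the first generated "b" scenario.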