humalab 0.0.5-py3-none-any.whl → 0.0.6-py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of humalab might be problematic.
- humalab/__init__.py +11 -0
- humalab/assets/__init__.py +2 -2
- humalab/assets/files/resource_file.py +29 -3
- humalab/assets/files/urdf_file.py +14 -10
- humalab/assets/resource_operator.py +91 -0
- humalab/constants.py +39 -5
- humalab/dists/bernoulli.py +2 -1
- humalab/dists/discrete.py +2 -2
- humalab/dists/gaussian.py +2 -2
- humalab/dists/log_uniform.py +2 -2
- humalab/dists/truncated_gaussian.py +4 -4
- humalab/episode.py +181 -11
- humalab/humalab.py +44 -28
- humalab/humalab_api_client.py +301 -94
- humalab/humalab_test.py +46 -17
- humalab/metrics/__init__.py +5 -5
- humalab/metrics/code.py +28 -0
- humalab/metrics/metric.py +41 -108
- humalab/metrics/scenario_stats.py +95 -0
- humalab/metrics/summary.py +24 -18
- humalab/run.py +180 -103
- humalab/scenarios/__init__.py +4 -0
- humalab/{scenario.py → scenarios/scenario.py} +120 -129
- humalab/scenarios/scenario_operator.py +82 -0
- humalab/{scenario_test.py → scenarios/scenario_test.py} +150 -269
- humalab/utils.py +37 -0
- {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/METADATA +1 -1
- humalab-0.0.6.dist-info/RECORD +39 -0
- humalab/assets/resource_manager.py +0 -58
- humalab/evaluators/__init__.py +0 -16
- humalab/humalab_main.py +0 -119
- humalab/metrics/dist_metric.py +0 -22
- humalab-0.0.5.dist-info/RECORD +0 -37
- {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/WHEEL +0 -0
- {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/entry_points.txt +0 -0
- {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/licenses/LICENSE +0 -0
- {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/top_level.txt +0 -0
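
Because `scenario.py` moved into the new `scenarios/` package (see the rename entry above), callers that import `Scenario` directly need the updated module path. A minimal sketch: the new path appears verbatim in the `run.py` diff below, while the old path is inferred from the 0.0.5 layout.

```python
# 0.0.5 (inferred from the old file layout)
# from humalab.scenario import Scenario

# 0.0.6 (new package location, as imported by run.py below)
from humalab.scenarios.scenario import Scenario
```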
humalab/run.py
CHANGED
```diff
@@ -1,19 +1,32 @@
 import uuid
-
-
-
+import traceback
+import pickle
+import base64
 
+from humalab.metrics.code import Code
 from humalab.metrics.summary import Summary
-
+
+from humalab.constants import DEFAULT_PROJECT, RESERVED_NAMES, ArtifactType
+from humalab.metrics.scenario_stats import ScenarioStats
+from humalab.humalab_api_client import EpisodeStatus, HumaLabApiClient, RunStatus
+from humalab.metrics.metric import Metrics
+from humalab.episode import Episode
+from humalab.utils import is_standard_type
+
+from humalab.scenarios.scenario import Scenario
 
 class Run:
     def __init__(self,
-                 project: str,
                  scenario: Scenario,
+                 project: str = DEFAULT_PROJECT,
                  name: str | None = None,
                  description: str | None = None,
                  id: str | None = None,
                  tags: list[str] | None = None,
+
+                 base_url: str | None = None,
+                 api_key: str | None = None,
+                 timeout: float | None = None,
                  ) -> None:
         """
         Initialize a new Run instance.
@@ -31,13 +44,16 @@ class Run:
         self._name = name or ""
         self._description = description or ""
         self._tags = tags or []
-        self._finished = False
-
-        self._episode = str(uuid.uuid4())
 
         self._scenario = scenario
+        self._logs = {}
+        self._episodes = {}
+        self._is_finished = False
+
+        self._api_client = HumaLabApiClient(base_url=base_url,
+                                            api_key=api_key,
+                                            timeout=timeout)
 
-        self._metrics = {}
 
     @property
     def project(self) -> str:
@@ -84,15 +100,6 @@ class Run:
         """
         return self._tags
 
-    @property
-    def episode(self) -> str:
-        """The episode ID for the run.
-
-        Returns:
-            str: The episode ID.
-        """
-        return self._episode
-
     @property
     def scenario(self) -> Scenario:
         """The scenario associated with the run.
@@ -101,102 +108,172 @@ class Run:
             Scenario: The scenario instance.
         """
         return self._scenario
+
+    def __enter__(self):
+        return self
 
-    def
-
-
-
+    def __exit__(self, exception_type, exception_value, exception_traceback):
+        if self._is_finished:
+            return
+        if exception_type is not None:
+            err_msg = "".join(traceback.format_exception(exception_type, exception_value, exception_traceback))
+            self.finish(status=RunStatus.ERRORED, err_msg=err_msg)
+        else:
+            self.finish()
+
+    def create_episode(self, episode_id: str | None = None) -> Episode:
+        """Reset the run for a new episode.
 
         Args:
-            status (EpisodeStatus): The
-            quiet (bool | None): Whether to suppress output.
+            status (EpisodeStatus): The status of the current episode before reset.
         """
-
-
-
-
+        episode = None
+        episode_id = episode_id or str(uuid.uuid4())
+        cur_scenario, episode_vals = self._scenario.resolve()
+        episode = Episode(run_id=self._id,
+                          episode_id=episode_id,
+                          scenario_conf=cur_scenario,
+                          episode_vals=episode_vals)
+        self._handle_scenario_stats(episode, episode_vals)
+
+        return episode
+
+    def _handle_scenario_stats(self, episode: Episode, episode_vals: dict) -> None:
+        for metric_name, value in episode_vals.items():
+            if metric_name not in self._logs:
+                stat = ScenarioStats(name=metric_name,
+                                     distribution_type=value["distribution"],
+                                     metric_dim_type=value["metric_dim_type"],
+                                     graph_type=value["graph_type"])
+                self._logs[metric_name] = stat
+            self._logs[metric_name].log(data=value["value"],
+                                        x=episode.episode_id)
+        self._episodes[episode.episode_id] = episode
 
-    def
-
-
-
-
-
+    def add_metric(self, name: str, metric: Metrics) -> None:
+        if name in self._logs:
+            raise ValueError(f"{name} is a reserved name and is not allowed.")
+        self._logs[name] = metric
+
+    def log_code(self, key: str, code_content: str) -> None:
+        """Log code content as an artifact.
 
         Args:
-
-
-            commit (bool): Whether to commit the metrics immediately.
+            key (str): The key for the code artifact.
+            code_content (str): The code content to log.
         """
+        if key in RESERVED_NAMES:
+            raise ValueError(f"{key} is a reserved name and is not allowed.")
+        self._logs[key] = Code(
+            run_id=self._id,
+            key=key,
+            code_content=code_content,
+        )
+
+
+    def log(self, data: dict, x: dict | None = None, replace: bool = False) -> None:
         for key, value in data.items():
-            if key in
-
-
+            if key in RESERVED_NAMES:
+                raise ValueError(f"{key} is a reserved name and is not allowed.")
+            if key not in self._logs:
+                self._logs[key] = value
             else:
-                self.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    def _submit_episode_status(self, status: EpisodeStatus, episode: str) -> None:
-        # TODO: Implement submission of episode status
-        pass
-
-    def define_metric(self,
-                      name: str,
-                      metric_type: MetricType = MetricType.DEFAULT,
-                      granularity: MetricGranularity = MetricGranularity.RUN,
-                      distribution_type: str | None = None,
-                      summary: str | None = None,
-                      replace: bool = False) -> None:
-        """Define a new metric for the run.
+                cur_val = self._logs[key]
+                if isinstance(cur_val, Metrics):
+                    cur_x = x.get(key) if x is not None else None
+                    cur_val.log(value, x=cur_x, replace=replace)
+                else:
+                    if replace:
+                        self._logs[key] = value
+                    else:
+                        raise ValueError(f"Cannot log value for key '{key}' as there is already a value logged.")
+    def _finish_episodes(self,
+                         status: RunStatus,
+                         err_msg: str | None = None) -> None:
+        for episode in self._episodes.values():
+            if not episode.is_finished:
+                if status == RunStatus.FINISHED:
+                    episode.finish(status=EpisodeStatus.CANCELED, err_msg=err_msg)
+                elif status == RunStatus.ERRORED:
+                    episode.finish(status=EpisodeStatus.ERRORED, err_msg=err_msg)
+                elif status == RunStatus.CANCELED:
+                    episode.finish(status=EpisodeStatus.CANCELED, err_msg=err_msg)
 
+
+    def finish(self,
+               status: RunStatus = RunStatus.FINISHED,
+               err_msg: str | None = None) -> None:
+        """Finish the run and submit final metrics.
+
         Args:
-
-
-            granularity (MetricGranularity): The granularity of the metric.
-            distribution_type (str | None): The type of distribution if metric_type is DISTRIBUTION.
-            summary (str | None): Specify aggregate metrics added to summary.
-                Supported aggregations include "min", "max", "mean", "last",
-                "first", and "none". "none" prevents a summary
-                from being generated.
-            replace (bool): Whether to replace the metric if it already exists.
+            status (RunStatus): The final status of the run.
+            err_msg (str | None): An optional error message.
         """
-        if
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if self._is_finished:
+            return
+        self._is_finished = True
+        self._finish_episodes(status=status, err_msg=err_msg)
+
+        self._api_client.upload_code(
+            artifact_key="scenario",
+            run_id=self._id,
+            code_content=self.scenario.yaml
+        )
+        # TODO: submit final metrics
+        for key, value in self._logs.items():
+            if isinstance(value, ScenarioStats):
+                for episode_id, episode in self._episodes.items():
+                    episode_status = episode.status
+                    value.log_status(
+                        episode_id=episode_id,
+                        episode_status=episode_status
+                    )
+                metric_val = value.finalize()
+                pickled = pickle.dumps(metric_val)
+                self._api_client.upload_scenario_stats_artifact(
+                    artifact_key=key,
+                    run_id=self._id,
+                    pickled_bytes=pickled,
+                    graph_type=value.graph_type.value,
+                    metric_dim_type=value.metric_dim_type.value
+                )
+            elif isinstance(value, Summary):
+                metric_val = value.finalize()
+                pickled = pickle.dumps(metric_val["value"])
+                self._api_client.upload_python(
+                    artifact_key=key,
+                    run_id=self._id,
+                    pickled_bytes=pickled
+                )
+            elif isinstance(value, Metrics):
+                metric_val = value.finalize()
+                pickled = pickle.dumps(metric_val)
+                self._api_client.upload_metrics(
+                    artifact_key=key,
+                    run_id=self._id,
+                    pickled_bytes=pickled,
+                    graph_type=value.graph_type.value,
+                    metric_dim_type=value.metric_dim_type.value
+                )
+            elif isinstance(value, Code):
+                self._api_client.upload_code(
+                    artifact_key=value.key,
+                    run_id=value.run_id,
+                    episode_id=value.episode_id,
+                    code_content=value.code_content
+                )
             else:
-
-
-
-
-
-
-
+                if not is_standard_type(value):
+                    raise ValueError(f"Value for key '{key}' is not a standard type.")
+                pickled = pickle.dumps(value)
+                self._api_client.upload_python(
+                    artifact_key=key,
+                    run_id=self._id,
+                    pickled_bytes=pickled
+                )
+
+        self._api_client.update_run(
+            run_id=self._id,
+            status=status,
+            err_msg=err_msg
+        )
```