humalab 0.0.5__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of humalab might be problematic.

Files changed (37)
  1. humalab/__init__.py +11 -0
  2. humalab/assets/__init__.py +2 -2
  3. humalab/assets/files/resource_file.py +29 -3
  4. humalab/assets/files/urdf_file.py +14 -10
  5. humalab/assets/resource_operator.py +91 -0
  6. humalab/constants.py +39 -5
  7. humalab/dists/bernoulli.py +2 -1
  8. humalab/dists/discrete.py +2 -2
  9. humalab/dists/gaussian.py +2 -2
  10. humalab/dists/log_uniform.py +2 -2
  11. humalab/dists/truncated_gaussian.py +4 -4
  12. humalab/episode.py +181 -11
  13. humalab/humalab.py +44 -28
  14. humalab/humalab_api_client.py +301 -94
  15. humalab/humalab_test.py +46 -17
  16. humalab/metrics/__init__.py +5 -5
  17. humalab/metrics/code.py +28 -0
  18. humalab/metrics/metric.py +41 -108
  19. humalab/metrics/scenario_stats.py +95 -0
  20. humalab/metrics/summary.py +24 -18
  21. humalab/run.py +180 -103
  22. humalab/scenarios/__init__.py +4 -0
  23. humalab/{scenario.py → scenarios/scenario.py} +120 -129
  24. humalab/scenarios/scenario_operator.py +82 -0
  25. humalab/{scenario_test.py → scenarios/scenario_test.py} +150 -269
  26. humalab/utils.py +37 -0
  27. {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/METADATA +1 -1
  28. humalab-0.0.6.dist-info/RECORD +39 -0
  29. humalab/assets/resource_manager.py +0 -58
  30. humalab/evaluators/__init__.py +0 -16
  31. humalab/humalab_main.py +0 -119
  32. humalab/metrics/dist_metric.py +0 -22
  33. humalab-0.0.5.dist-info/RECORD +0 -37
  34. {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/WHEEL +0 -0
  35. {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/entry_points.txt +0 -0
  36. {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/licenses/LICENSE +0 -0
  37. {humalab-0.0.5.dist-info → humalab-0.0.6.dist-info}/top_level.txt +0 -0
humalab/run.py CHANGED
@@ -1,19 +1,32 @@
 import uuid
-from humalab.metrics.dist_metric import DistributionMetric
-from humalab.metrics.metric import MetricGranularity, MetricType, Metrics
-from humalab.constants import EpisodeStatus
+import traceback
+import pickle
+import base64

+from humalab.metrics.code import Code
 from humalab.metrics.summary import Summary
-from humalab.scenario import Scenario
+
+from humalab.constants import DEFAULT_PROJECT, RESERVED_NAMES, ArtifactType
+from humalab.metrics.scenario_stats import ScenarioStats
+from humalab.humalab_api_client import EpisodeStatus, HumaLabApiClient, RunStatus
+from humalab.metrics.metric import Metrics
+from humalab.episode import Episode
+from humalab.utils import is_standard_type
+
+from humalab.scenarios.scenario import Scenario

 class Run:
     def __init__(self,
-                 project: str,
                  scenario: Scenario,
+                 project: str = DEFAULT_PROJECT,
                  name: str | None = None,
                  description: str | None = None,
                  id: str | None = None,
                  tags: list[str] | None = None,
+
+                 base_url: str | None = None,
+                 api_key: str | None = None,
+                 timeout: float | None = None,
                  ) -> None:
         """
         Initialize a new Run instance.
@@ -31,13 +44,16 @@ class Run:
         self._name = name or ""
         self._description = description or ""
         self._tags = tags or []
-        self._finished = False
-
-        self._episode = str(uuid.uuid4())

         self._scenario = scenario
+        self._logs = {}
+        self._episodes = {}
+        self._is_finished = False
+
+        self._api_client = HumaLabApiClient(base_url=base_url,
+                                            api_key=api_key,
+                                            timeout=timeout)

-        self._metrics = {}

     @property
     def project(self) -> str:
@@ -84,15 +100,6 @@
         """
         return self._tags

-    @property
-    def episode(self) -> str:
-        """The episode ID for the run.
-
-        Returns:
-            str: The episode ID.
-        """
-        return self._episode
-
     @property
     def scenario(self) -> Scenario:
         """The scenario associated with the run.
@@ -101,102 +108,172 @@
             Scenario: The scenario instance.
         """
         return self._scenario
+
+    def __enter__(self):
+        return self

-    def finish(self,
-               status: EpisodeStatus = EpisodeStatus.PASS,
-               quiet: bool | None = None) -> None:
-        """Finish the run and submit final metrics.
+    def __exit__(self, exception_type, exception_value, exception_traceback):
+        if self._is_finished:
+            return
+        if exception_type is not None:
+            err_msg = "".join(traceback.format_exception(exception_type, exception_value, exception_traceback))
+            self.finish(status=RunStatus.ERRORED, err_msg=err_msg)
+        else:
+            self.finish()
+
+    def create_episode(self, episode_id: str | None = None) -> Episode:
+        """Reset the run for a new episode.

         Args:
-            status (EpisodeStatus): The final status of the episode.
-            quiet (bool | None): Whether to suppress output.
+            status (EpisodeStatus): The status of the current episode before reset.
         """
-        self._finished = True
-        self._scenario.finish()
-        for metric in self._metrics.values():
-            metric.finish(status=status)
+        episode = None
+        episode_id = episode_id or str(uuid.uuid4())
+        cur_scenario, episode_vals = self._scenario.resolve()
+        episode = Episode(run_id=self._id,
+                          episode_id=episode_id,
+                          scenario_conf=cur_scenario,
+                          episode_vals=episode_vals)
+        self._handle_scenario_stats(episode, episode_vals)
+
+        return episode
+
+    def _handle_scenario_stats(self, episode: Episode, episode_vals: dict) -> None:
+        for metric_name, value in episode_vals.items():
+            if metric_name not in self._logs:
+                stat = ScenarioStats(name=metric_name,
+                                     distribution_type=value["distribution"],
+                                     metric_dim_type=value["metric_dim_type"],
+                                     graph_type=value["graph_type"])
+                self._logs[metric_name] = stat
+            self._logs[metric_name].log(data=value["value"],
+                                        x=episode.episode_id)
+        self._episodes[episode.episode_id] = episode

-    def log(self,
-            data: dict,
-            step: int | None = None,
-            commit: bool = True,
-            ) -> None:
-        """Log metrics for the run.
+    def add_metric(self, name: str, metric: Metrics) -> None:
+        if name in self._logs:
+            raise ValueError(f"{name} is a reserved name and is not allowed.")
+        self._logs[name] = metric
+
+    def log_code(self, key: str, code_content: str) -> None:
+        """Log code content as an artifact.

         Args:
-            data (dict): A dictionary of metric names and their values.
-            step (int | None): The step number for the metrics.
-            commit (bool): Whether to commit the metrics immediately.
+            key (str): The key for the code artifact.
+            code_content (str): The code content to log.
         """
+        if key in RESERVED_NAMES:
+            raise ValueError(f"{key} is a reserved name and is not allowed.")
+        self._logs[key] = Code(
+            run_id=self._id,
+            key=key,
+            code_content=code_content,
+        )
+
+
+    def log(self, data: dict, x: dict | None = None, replace: bool = False) -> None:
         for key, value in data.items():
-            if key in self._metrics:
-                metric = self._metrics[key]
-                metric.log(value, step=step, commit=commit)
+            if key in RESERVED_NAMES:
+                raise ValueError(f"{key} is a reserved name and is not allowed.")
+            if key not in self._logs:
+                self._logs[key] = value
             else:
-                self._metrics[key] = Metrics(key,
-                                             metric_type=MetricType.DEFAULT,
-                                             run_id=self._id,
-                                             granularity=MetricGranularity.EPISODE,
-                                             episode_id=self._episode)
-                self._metrics[key].log(value, step=step, commit=commit)
-
-    def reset(self, status: EpisodeStatus = EpisodeStatus.PASS) -> None:
-        """Reset the run for a new episode.
-
-        Args:
-            status (EpisodeStatus): The status of the current episode before reset.
-        """
-        self._submit_episode_status(status=status, episode=self._episode)
-        self._episode = str(uuid.uuid4())
-        self._finished = False
-        self._scenario.reset(episode_id=self._episode)
-        for metric in self._metrics.values():
-            metric.reset(episode=self._episode)
-
-    def _submit_episode_status(self, status: EpisodeStatus, episode: str) -> None:
-        # TODO: Implement submission of episode status
-        pass
-
-    def define_metric(self,
-                      name: str,
-                      metric_type: MetricType = MetricType.DEFAULT,
-                      granularity: MetricGranularity = MetricGranularity.RUN,
-                      distribution_type: str | None = None,
-                      summary: str | None = None,
-                      replace: bool = False) -> None:
-        """Define a new metric for the run.
+                cur_val = self._logs[key]
+                if isinstance(cur_val, Metrics):
+                    cur_x = x.get(key) if x is not None else None
+                    cur_val.log(value, x=cur_x, replace=replace)
+                else:
+                    if replace:
+                        self._logs[key] = value
+                    else:
+                        raise ValueError(f"Cannot log value for key '{key}' as there is already a value logged.")
+    def _finish_episodes(self,
+                         status: RunStatus,
+                         err_msg: str | None = None) -> None:
+        for episode in self._episodes.values():
+            if not episode.is_finished:
+                if status == RunStatus.FINISHED:
+                    episode.finish(status=EpisodeStatus.CANCELED, err_msg=err_msg)
+                elif status == RunStatus.ERRORED:
+                    episode.finish(status=EpisodeStatus.ERRORED, err_msg=err_msg)
+                elif status == RunStatus.CANCELED:
+                    episode.finish(status=EpisodeStatus.CANCELED, err_msg=err_msg)

+
+    def finish(self,
+               status: RunStatus = RunStatus.FINISHED,
+               err_msg: str | None = None) -> None:
+        """Finish the run and submit final metrics.
+
         Args:
-            name (str): The name of the metric.
-            metric_type (MetricType): The type of the metric.
-            granularity (MetricGranularity): The granularity of the metric.
-            distribution_type (str | None): The type of distribution if metric_type is DISTRIBUTION.
-            summary (str | None): Specify aggregate metrics added to summary.
-                Supported aggregations include "min", "max", "mean", "last",
-                "first", and "none". "none" prevents a summary
-                from being generated.
-            replace (bool): Whether to replace the metric if it already exists.
+            status (RunStatus): The final status of the run.
+            err_msg (str | None): An optional error message.
         """
-        if name not in self._metrics or replace:
-            if metric_type == MetricType.DISTRIBUTION:
-                if distribution_type is None:
-                    raise ValueError("distribution_type must be specified for distribution metrics.")
-                self._metrics[name] = DistributionMetric(name=name,
-                                                         distribution_type=distribution_type,
-                                                         run_id=self._id,
-                                                         episode_id=self._episode,
-                                                         granularity=granularity)
-            elif summary is not None:
-                self._metrics[name] = Summary(name=name,
-                                              summary=summary,
-                                              run_id=self._id,
-                                              episode_id=self._episode,
-                                              granularity=granularity)
+        if self._is_finished:
+            return
+        self._is_finished = True
+        self._finish_episodes(status=status, err_msg=err_msg)
+
+        self._api_client.upload_code(
+            artifact_key="scenario",
+            run_id=self._id,
+            code_content=self.scenario.yaml
+        )
+        # TODO: submit final metrics
+        for key, value in self._logs.items():
+            if isinstance(value, ScenarioStats):
+                for episode_id, episode in self._episodes.items():
+                    episode_status = episode.status
+                    value.log_status(
+                        episode_id=episode_id,
+                        episode_status=episode_status
+                    )
+                metric_val = value.finalize()
+                pickled = pickle.dumps(metric_val)
+                self._api_client.upload_scenario_stats_artifact(
+                    artifact_key=key,
+                    run_id=self._id,
+                    pickled_bytes=pickled,
+                    graph_type=value.graph_type.value,
+                    metric_dim_type=value.metric_dim_type.value
+                )
+            elif isinstance(value, Summary):
+                metric_val = value.finalize()
+                pickled = pickle.dumps(metric_val["value"])
+                self._api_client.upload_python(
+                    artifact_key=key,
+                    run_id=self._id,
+                    pickled_bytes=pickled
+                )
+            elif isinstance(value, Metrics):
+                metric_val = value.finalize()
+                pickled = pickle.dumps(metric_val)
+                self._api_client.upload_metrics(
+                    artifact_key=key,
+                    run_id=self._id,
+                    pickled_bytes=pickled,
+                    graph_type=value.graph_type.value,
+                    metric_dim_type=value.metric_dim_type.value
+                )
+            elif isinstance(value, Code):
+                self._api_client.upload_code(
+                    artifact_key=value.key,
+                    run_id=value.run_id,
+                    episode_id=value.episode_id,
+                    code_content=value.code_content
+                )
             else:
-                self._metrics[name] = Metrics(name=name,
-                                              metric_type=metric_type,
-                                              run_id=self._id,
-                                              episode_id=self._episode,
-                                              granularity=granularity)
-        else:
-            raise ValueError(f"Metric {name} already exists.")
+                if not is_standard_type(value):
+                    raise ValueError(f"Value for key '{key}' is not a standard type.")
+                pickled = pickle.dumps(value)
+                self._api_client.upload_python(
+                    artifact_key=key,
+                    run_id=self._id,
+                    pickled_bytes=pickled
+                )
+
+        self._api_client.update_run(
+            run_id=self._id,
+            status=status,
+            err_msg=err_msg
+        )
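
For orientation, the sketch below shows how the reworked Run API in this diff appears to fit together (context manager, create_episode, log, log_code, and an automatic finish on exit). It is inferred from the diff only: the get_scenario call, the run name, the logged keys, and the EpisodeStatus.PASS member are illustrative assumptions; the diff itself only shows the CANCELED and ERRORED members.

# Hedged usage sketch of the 0.0.6 Run workflow, inferred from this diff only.
from humalab.humalab_api_client import EpisodeStatus
from humalab.run import Run
from humalab.scenarios import get_scenario

scenario = get_scenario("pick_and_place")  # hypothetical call; the helper's signature is not shown in this diff

with Run(scenario=scenario, name="nightly-eval") as run:  # project defaults to DEFAULT_PROJECT
    episode = run.create_episode()               # resolves the scenario and records per-episode ScenarioStats
    run.log({"reward": 1.0})                     # plain values are pickled and uploaded when the run finishes
    run.log_code("policy", "def act(obs): ...")  # stored as a Code artifact and uploaded on finish
    episode.finish(status=EpisodeStatus.PASS)    # PASS is assumed; unfinished episodes are auto-closed on exit
# __exit__ calls finish(): scenario stats, metrics, code, and plain values are uploaded,
# and an uncaught exception marks the run as RunStatus.ERRORED with the traceback as err_msg.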
humalab/scenarios/__init__.py ADDED
@@ -0,0 +1,4 @@
+from .scenario import Scenario
+from .scenario_operator import list_scenarios, get_scenario
+
+__all__ = ["Scenario", "list_scenarios", "get_scenario"]
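
Since scenario.py now lives in the scenarios package (entry 23 in the file list), existing imports would presumably move to the package root, which re-exports the helpers shown above; a minimal sketch of the adjusted import:

# Pre-0.0.6 import path (module was humalab/scenario.py):
#   from humalab.scenario import Scenario
# From 0.0.6 on, per the re-exports in humalab/scenarios/__init__.py:
from humalab.scenarios import Scenario, list_scenarios, get_scenario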