humalab 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of humalab might be problematic. Click here for more details.
- humalab/__init__.py +9 -0
- humalab/assets/__init__.py +0 -0
- humalab/assets/archive.py +101 -0
- humalab/assets/resource_file.py +28 -0
- humalab/assets/resource_handler.py +175 -0
- humalab/constants.py +7 -0
- humalab/dists/__init__.py +17 -0
- humalab/dists/bernoulli.py +44 -0
- humalab/dists/categorical.py +49 -0
- humalab/dists/discrete.py +56 -0
- humalab/dists/distribution.py +38 -0
- humalab/dists/gaussian.py +49 -0
- humalab/dists/log_uniform.py +49 -0
- humalab/dists/truncated_gaussian.py +64 -0
- humalab/dists/uniform.py +49 -0
- humalab/humalab.py +149 -0
- humalab/humalab_api_client.py +273 -0
- humalab/humalab_config.py +86 -0
- humalab/humalab_test.py +510 -0
- humalab/metrics/__init__.py +11 -0
- humalab/metrics/dist_metric.py +22 -0
- humalab/metrics/metric.py +129 -0
- humalab/metrics/summary.py +54 -0
- humalab/run.py +214 -0
- humalab/scenario.py +225 -0
- humalab/scenario_test.py +911 -0
- humalab-0.0.1.dist-info/METADATA +43 -0
- humalab-0.0.1.dist-info/RECORD +32 -0
- humalab-0.0.1.dist-info/WHEEL +5 -0
- humalab-0.0.1.dist-info/entry_points.txt +2 -0
- humalab-0.0.1.dist-info/licenses/LICENSE +21 -0
- humalab-0.0.1.dist-info/top_level.txt +1 -0
humalab/scenario_test.py
ADDED
|
@@ -0,0 +1,911 @@
|
|
|
1
|
+
import unittest

import numpy as np

from humalab.scenario import Scenario
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class ScenarioTest(unittest.TestCase):
    """Unit tests for the Scenario class.

    Every test follows the pre-condition / in-test / post-condition layout.
    Scenario values may be literals or resolver expressions such as
    ``${uniform: a, b}``; resolved values can come back as ListConfig-like
    iterables, so ``_as_list`` normalizes them before length/range checks.
    """

    def setUp(self):
        """Create a fresh Scenario and the shared ids before each test."""
        self.scenario = Scenario()
        self.run_id = "test_run_id"
        self.episode_id = "test_episode_id"

    def tearDown(self):
        """Unregister resolvers so global state does not leak between tests."""
        self.scenario._clear_resolvers()

    # ------------------------------------------------------------------
    # Private helpers (reduce the init/list-conversion boilerplate that
    # was previously duplicated in almost every test method).
    # ------------------------------------------------------------------

    def _init_scenario(self, scenario, run_id=None, episode_id=None, **kwargs):
        """Initialize ``self.scenario``, defaulting to the shared ids.

        Extra keyword arguments (``seed``, ``num_env``, ``scenario_id``, ...)
        are forwarded to ``Scenario.init`` unchanged.
        """
        self.scenario.init(
            run_id=run_id if run_id is not None else self.run_id,
            episode_id=episode_id if episode_id is not None else self.episode_id,
            scenario=scenario,
            **kwargs,
        )

    @staticmethod
    def _as_list(value):
        """Return *value* as a plain list (handles ListConfig and scalars)."""
        return list(value) if hasattr(value, "__iter__") else [value]

    def _assert_all_in_range(self, values, low, high):
        """Assert every element of *values* lies in the closed [low, high]."""
        for val in values:
            self.assertGreaterEqual(val, low)
            self.assertLessEqual(val, high)

    # ------------------------------------------------------------------
    # init()
    # ------------------------------------------------------------------

    def test_init_should_initialize_with_empty_scenario(self):
        """Test that init() initializes with empty scenario when none provided."""
        # Pre-condition
        self.assertIsNone(self.scenario._scenario_id)

        # In-test
        self._init_scenario(None, seed=42)

        # Post-condition
        self.assertEqual(self.scenario._run_id, self.run_id)
        self.assertEqual(self.scenario._episode_id, self.episode_id)
        self.assertIsNotNone(self.scenario._scenario_id)
        self.assertEqual(len(self.scenario._cur_scenario), 0)

    def test_init_should_initialize_with_dict_scenario(self):
        """Test that init() correctly processes dict-based scenario."""
        # Pre-condition
        scenario_dict = {
            "test_key": "test_value",
            "nested": {"inner_key": "inner_value"},
        }

        # In-test
        self._init_scenario(scenario_dict, seed=42)

        # Post-condition
        self.assertEqual(self.scenario.test_key, "test_value")
        self.assertEqual(self.scenario.nested.inner_key, "inner_value")

    def test_init_should_use_provided_scenario_id(self):
        """Test that init() uses provided scenario_id."""
        # Pre-condition
        custom_id = "custom_scenario_id"

        # In-test
        self._init_scenario({}, scenario_id=custom_id)

        # Post-condition
        self.assertEqual(self.scenario._scenario_id, custom_id)

    def test_init_should_set_seed_for_reproducibility(self):
        """Test that init() with same seed produces reproducible results."""
        # Pre-condition
        scenario_config = {"value": "${uniform: 0.0, 1.0}"}
        seed = 42

        # In-test: two independent scenarios seeded identically
        scenario1 = Scenario()
        scenario1.init(
            run_id=self.run_id,
            episode_id=self.episode_id,
            scenario=scenario_config,
            seed=seed,
        )
        value1 = scenario1.value

        scenario2 = Scenario()
        scenario2.init(
            run_id=self.run_id,
            episode_id=self.episode_id,
            scenario=scenario_config,
            seed=seed,
        )
        value2 = scenario2.value

        # Post-condition
        self.assertEqual(value1, value2)

        # Cleanup
        scenario1._clear_resolvers()
        scenario2._clear_resolvers()

    # ------------------------------------------------------------------
    # Distribution resolvers
    # ------------------------------------------------------------------

    def test_uniform_distribution_should_resolve_correctly(self):
        """Test that uniform distribution resolver works correctly."""
        # In-test
        self._init_scenario({"uniform_value": "${uniform: 0.0, 1.0}"}, seed=42)

        # Post-condition
        value = self.scenario.uniform_value
        self.assertIsInstance(value, (int, float))
        self.assertGreaterEqual(value, 0.0)
        self.assertLessEqual(value, 1.0)

    def test_uniform_distribution_should_handle_size_parameter(self):
        """Test that uniform distribution with size parameter returns list."""
        # In-test
        self._init_scenario({"uniform_array": "${uniform: 0.0, 1.0, 5}"}, seed=42)

        # Post-condition
        values = self._as_list(self.scenario.uniform_array)
        self.assertEqual(len(values), 5)
        self._assert_all_in_range(values, 0.0, 1.0)

    def test_gaussian_distribution_should_resolve_correctly(self):
        """Test that gaussian distribution resolver works correctly."""
        # In-test
        self._init_scenario({"gaussian_value": "${gaussian: 0.0, 1.0}"}, seed=42)

        # Post-condition (gaussian is unbounded, so only the type is checked)
        self.assertIsInstance(self.scenario.gaussian_value, (int, float))

    def test_gaussian_distribution_should_handle_size_parameter(self):
        """Test that gaussian distribution with size parameter returns list."""
        # In-test
        self._init_scenario({"gaussian_array": "${gaussian: 0.0, 1.0, 3}"}, seed=42)

        # Post-condition
        values = self._as_list(self.scenario.gaussian_array)
        self.assertEqual(len(values), 3)

    def test_bernoulli_distribution_should_resolve_correctly(self):
        """Test that bernoulli distribution resolver works correctly."""
        # In-test
        self._init_scenario({"bernoulli_value": "${bernoulli: 0.5}"}, seed=42)

        # Post-condition
        self.assertIn(self.scenario.bernoulli_value, [0, 1, True, False])

    def test_reset_should_regenerate_distribution_values(self):
        """Test that reset() regenerates new values from distributions."""
        # Pre-condition: no seed so values are random across resets
        self._init_scenario({"random_value": "${uniform: 0.0, 100.0}"}, seed=None)
        _ = self.scenario.random_value  # Access once to populate cache

        # In-test
        self.scenario.reset(episode_id="new_episode")

        # Post-condition: a value collision is theoretically possible, so
        # only the type of the regenerated value is asserted.
        second_value = self.scenario.random_value
        self.assertIsInstance(second_value, (int, float))

    # ------------------------------------------------------------------
    # Attribute / item access
    # ------------------------------------------------------------------

    def test_getattr_should_access_scenario_values(self):
        """Test that __getattr__ allows attribute-style access."""
        # Pre-condition
        self._init_scenario({"test_attribute": "test_value"})

        # In-test & Post-condition
        self.assertEqual(self.scenario.test_attribute, "test_value")

    def test_getattr_should_raise_error_for_missing_attribute(self):
        """Test that __getattr__ raises AttributeError for missing attributes."""
        # Pre-condition
        self._init_scenario({})

        # In-test & Post-condition
        with self.assertRaises(AttributeError) as context:
            _ = self.scenario.nonexistent_attribute
        self.assertIn("nonexistent_attribute", str(context.exception))

    def test_getitem_should_access_scenario_values(self):
        """Test that __getitem__ allows dict-style access."""
        # Pre-condition
        self._init_scenario({"test_key": "test_value"})

        # In-test & Post-condition
        self.assertEqual(self.scenario["test_key"], "test_value")

    def test_getitem_should_raise_error_for_missing_key(self):
        """Test that __getitem__ raises KeyError for missing keys."""
        # Pre-condition
        self._init_scenario({})

        # In-test & Post-condition
        with self.assertRaises(KeyError) as context:
            _ = self.scenario["nonexistent_key"]
        self.assertIn("nonexistent_key", str(context.exception))

    # ------------------------------------------------------------------
    # _get_final_size()
    # ------------------------------------------------------------------

    def test_get_final_size_should_handle_none_size_with_num_env(self):
        """Test _get_final_size with None size and num_env set."""
        self._init_scenario({}, num_env=4)
        self.assertEqual(self.scenario._get_final_size(None), 4)

    def test_get_final_size_should_handle_int_size_with_num_env(self):
        """Test _get_final_size with int size and num_env set."""
        self._init_scenario({}, num_env=4)
        # num_env is prepended as the leading dimension
        self.assertEqual(self.scenario._get_final_size(3), (4, 3))

    def test_get_final_size_should_handle_tuple_size_with_num_env(self):
        """Test _get_final_size with tuple size and num_env set."""
        self._init_scenario({}, num_env=4)
        self.assertEqual(self.scenario._get_final_size((2, 3)), (4, 2, 3))

    def test_get_final_size_should_handle_size_without_num_env(self):
        """Test _get_final_size with size but no num_env."""
        self._init_scenario({}, num_env=None)
        self.assertEqual(self.scenario._get_final_size(5), 5)

    # ------------------------------------------------------------------
    # _convert_to_python()
    # ------------------------------------------------------------------

    def test_convert_to_python_should_handle_numpy_scalar(self):
        """Test _convert_to_python with numpy scalar."""
        result = Scenario._convert_to_python(np.float64(3.14))
        self.assertIsInstance(result, float)
        self.assertEqual(result, 3.14)

    def test_convert_to_python_should_handle_numpy_array(self):
        """Test _convert_to_python with numpy array."""
        result = Scenario._convert_to_python(np.array([1, 2, 3]))
        self.assertIsInstance(result, list)
        self.assertEqual(result, [1, 2, 3])

    def test_convert_to_python_should_handle_zero_dim_array(self):
        """Test _convert_to_python with 0-dimensional numpy array."""
        result = Scenario._convert_to_python(np.array(42))
        self.assertIsInstance(result, int)
        self.assertEqual(result, 42)

    def test_convert_to_python_should_handle_regular_python_types(self):
        """Test _convert_to_python with regular Python types."""
        regular_values = [42, 3.14, "string", [1, 2, 3], {"key": "value"}]
        for value in regular_values:
            self.assertEqual(Scenario._convert_to_python(value), value)

    # ------------------------------------------------------------------
    # _get_node_path()
    # ------------------------------------------------------------------

    def test_get_node_path_should_find_simple_key(self):
        """Test _get_node_path with simple dictionary key."""
        root = {"key1": "target_node", "key2": "other"}
        self._init_scenario({})
        self.assertEqual(self.scenario._get_node_path(root, "target_node"), "key1")

    def test_get_node_path_should_find_nested_key(self):
        """Test _get_node_path with nested dictionary."""
        root = {"level1": {"level2": "target_node"}}
        self._init_scenario({})
        self.assertEqual(
            self.scenario._get_node_path(root, "target_node"), "level1.level2"
        )

    def test_get_node_path_should_find_in_list(self):
        """Test _get_node_path with list containing target."""
        root = {"key": ["item1", "target_node", "item3"]}
        self._init_scenario({})
        self.assertEqual(self.scenario._get_node_path(root, "target_node"), "key[1]")

    def test_get_node_path_should_return_empty_for_missing_node(self):
        """Test _get_node_path returns empty string when node not found."""
        root = {"key": "value"}
        self._init_scenario({})
        self.assertEqual(self.scenario._get_node_path(root, "nonexistent"), "")

    # ------------------------------------------------------------------
    # Properties
    # ------------------------------------------------------------------

    def test_template_property_should_return_scenario_template(self):
        """Test that template property returns the scenario template."""
        self._init_scenario({"key": "value"})

        template = self.scenario.template

        self.assertIsNotNone(template)
        self.assertEqual(template.key, "value")

    def test_cur_scenario_property_should_return_current_scenario(self):
        """Test that cur_scenario property returns the current scenario."""
        self._init_scenario({"key": "value"})

        cur_scenario = self.scenario.cur_scenario

        self.assertIsNotNone(cur_scenario)
        self.assertEqual(cur_scenario.key, "value")

    def test_yaml_property_should_return_yaml_representation(self):
        """Test that yaml property returns YAML string."""
        self._init_scenario({"key": "value"})

        yaml_str = self.scenario.yaml

        self.assertIsInstance(yaml_str, str)
        self.assertIn("key:", yaml_str)
        self.assertIn("value", yaml_str)

    # ------------------------------------------------------------------
    # Metrics / misc
    # ------------------------------------------------------------------

    def test_finish_should_call_finish_on_metrics(self):
        """Test that finish() calls finish on all metrics."""
        # Pre-condition
        self._init_scenario({"dist_value": "${uniform: 0.0, 1.0}"}, seed=42)
        _ = self.scenario.dist_value  # Accessing the value creates the metric

        # In-test
        self.scenario.finish()

        # Post-condition: metrics exist and finish was called without error
        self.assertGreater(len(self.scenario._metrics), 0)

    def test_nested_scenario_access_should_work(self):
        """Test accessing deeply nested scenario values."""
        scenario_config = {"level1": {"level2": {"level3": "deep_value"}}}
        self._init_scenario(scenario_config)

        self.assertEqual(self.scenario.level1.level2.level3, "deep_value")

    def test_multiple_distributions_should_work_together(self):
        """Test scenario with multiple different distributions."""
        scenario_config = {
            "uniform_val": "${uniform: 0.0, 1.0}",
            "gaussian_val": "${gaussian: 0.0, 1.0}",
            "bernoulli_val": "${bernoulli: 0.5}",
        }

        self._init_scenario(scenario_config, seed=42)

        self.assertIsInstance(self.scenario.uniform_val, (int, float))
        self.assertIsInstance(self.scenario.gaussian_val, (int, float))
        self.assertIn(self.scenario.bernoulli_val, [0, 1, True, False])

    def test_num_env_should_affect_distribution_size(self):
        """Test that num_env parameter affects distribution output size."""
        self._init_scenario({"value": "${uniform: 0.0, 1.0}"}, num_env=3, seed=42)

        values = self._as_list(self.scenario.value)
        self.assertEqual(len(values), 3)

    def test_clear_resolvers_should_clear_dist_cache(self):
        """Test that _clear_resolvers clears the distribution cache."""
        # Pre-condition
        self._init_scenario({"value": "${uniform: 0.0, 1.0}"}, seed=42)
        _ = self.scenario.value  # Trigger cache population

        # In-test
        self.scenario._clear_resolvers()

        # Post-condition
        self.assertEqual(len(Scenario.dist_cache), 0)

    # ------------------------------------------------------------------
    # End-to-end flows mirroring the __main__ script example
    # ------------------------------------------------------------------

    def test_main_script_scenario_should_initialize_with_nested_structure(self):
        """Test scenario initialization matching the __main__ script example."""
        scenario_config = {
            "scenario": {
                "scenario_id": "scenario_1",
                "cup_x": "${uniform: 0.7, 1.5}",
                "cup_y": "${uniform: 0.3, 0.7}",
            }
        }

        self._init_scenario(
            scenario_config,
            run_id="run_id",
            episode_id="episode_id",
            seed=42,
            num_env=2,
        )

        # Structure exists
        self.assertIsNotNone(self.scenario.scenario)
        self.assertEqual(self.scenario.scenario.scenario_id, "scenario_1")

        # cup_x / cup_y are resolved as lists of length num_env=2
        cup_x_list = self._as_list(self.scenario.scenario.cup_x)
        cup_y_list = self._as_list(self.scenario.scenario.cup_y)
        self.assertEqual(len(cup_x_list), 2)
        self.assertEqual(len(cup_y_list), 2)

        # Values fall inside their declared uniform ranges
        self._assert_all_in_range(cup_x_list, 0.7, 1.5)
        self._assert_all_in_range(cup_y_list, 0.3, 0.7)

    def test_main_script_scenario_should_allow_both_access_methods(self):
        """Test that both attribute and dict access work as shown in __main__ script."""
        scenario_config = {
            "scenario": {
                "scenario_id": "scenario_1",
                "cup_x": "${uniform: 0.7, 1.5}",
            }
        }

        self._init_scenario(
            scenario_config,
            run_id="run_id",
            episode_id="episode_id",
            seed=42,
            num_env=2,
        )

        # Both access methods should return the same value
        cup_x_attr = self._as_list(self.scenario.scenario.cup_x)
        cup_x_dict = self._as_list(self.scenario["scenario"].cup_x)
        self.assertEqual(cup_x_attr, cup_x_dict)

    def test_main_script_scenario_should_regenerate_on_reset(self):
        """Test that reset regenerates values as shown in __main__ script."""
        scenario_config = {"scenario": {"cup_x": "${uniform: 0.7, 1.5}"}}
        self._init_scenario(
            scenario_config,
            run_id="run_id",
            episode_id="episode_id",
            seed=None,  # No seed for random values
            num_env=2,
        )

        first_list = self._as_list(self.scenario.scenario.cup_x)

        # In-test
        self.scenario.reset()

        # Post-condition
        second_list = self._as_list(self.scenario.scenario.cup_x)
        self.assertEqual(len(first_list), 2)
        self.assertEqual(len(second_list), 2)
        self._assert_all_in_range(second_list, 0.7, 1.5)

    def test_main_script_scenario_should_convert_to_numpy_array(self):
        """Test that scenario values can be converted to numpy arrays."""
        scenario_config = {"scenario": {"cup_x": "${uniform: 0.7, 1.5}"}}
        self._init_scenario(
            scenario_config,
            run_id="run_id",
            episode_id="episode_id",
            seed=42,
            num_env=2,
        )

        np_array = np.array(self.scenario.scenario.cup_x)

        self.assertIsInstance(np_array, np.ndarray)
        self.assertEqual(len(np_array), 2)
        self._assert_all_in_range(np_array, 0.7, 1.5)

    def test_main_script_scenario_should_produce_valid_yaml(self):
        """Test that scenario.yaml returns valid YAML string."""
        scenario_config = {
            "scenario": {
                "scenario_id": "scenario_1",
                "cup_x": "${uniform: 0.7, 1.5}",
                "cup_y": "${uniform: 0.3, 0.7}",
            }
        }
        self._init_scenario(
            scenario_config,
            run_id="run_id",
            episode_id="episode_id",
            seed=42,
            num_env=2,
        )

        yaml_str = self.scenario.yaml

        self.assertIsInstance(yaml_str, str)
        self.assertIn("scenario:", yaml_str)
        self.assertIn("scenario_id:", yaml_str)
        self.assertIn("scenario_1", yaml_str)
        self.assertIn("cup_x:", yaml_str)
        self.assertIn("cup_y:", yaml_str)

    def test_main_script_scenario_should_handle_multiple_resets(self):
        """Test multiple reset calls as shown in __main__ script."""
        scenario_config = {"scenario": {"cup_x": "${uniform: 0.7, 1.5}"}}
        self._init_scenario(
            scenario_config,
            run_id="run_id",
            episode_id="episode_id",
            seed=42,
            num_env=2,
        )

        first_values = list(self.scenario.scenario.cup_x)

        self.scenario.reset()
        second_values = list(self.scenario.scenario.cup_x)

        self.scenario.reset()
        third_values = list(self.scenario.scenario.cup_x)

        # All should be valid lists of size 2 with in-range values
        for vals in (first_values, second_values, third_values):
            self.assertEqual(len(vals), 2)
            self._assert_all_in_range(vals, 0.7, 1.5)

    def test_main_script_scenario_should_reinitialize_with_none(self):
        """Test reinitializing scenario with None as shown in __main__ script."""
        scenario_config = {"scenario": {"cup_x": "${uniform: 0.7, 1.5}"}}
        self._init_scenario(
            scenario_config,
            run_id="run_id",
            episode_id="episode_id",
            seed=42,
            num_env=2,
        )

        # Verify initial scenario has content
        first_yaml = self.scenario.yaml
        self.assertIn("cup_x:", first_yaml)

        # In-test: reinitialize with None
        self._init_scenario(
            None, run_id="run_id", episode_id="episode_id", seed=42
        )

        # Post-condition: should now be an empty scenario
        second_yaml = self.scenario.yaml
        self.assertEqual(second_yaml.strip(), "{}")

    def test_main_script_scenario_should_handle_seed_consistency(self):
        """Test that same seed produces consistent results across resets."""
        scenario_config = {
            "scenario": {
                "cup_x": "${uniform: 0.7, 1.5}",
                "cup_y": "${uniform: 0.3, 0.7}",
            }
        }

        # Two independently constructed scenarios with the same seed
        scenario1 = Scenario()
        scenario1.init(
            run_id="run_id",
            episode_id="episode_id",
            scenario=scenario_config,
            seed=42,
            num_env=2,
        )
        values1_x = list(scenario1.scenario.cup_x)
        values1_y = list(scenario1.scenario.cup_y)

        scenario2 = Scenario()
        scenario2.init(
            run_id="run_id",
            episode_id="episode_id",
            scenario=scenario_config,
            seed=42,
            num_env=2,
        )
        values2_x = list(scenario2.scenario.cup_x)
        values2_y = list(scenario2.scenario.cup_y)

        # Post-condition
        self.assertEqual(values1_x, values2_x)
        self.assertEqual(values1_y, values2_y)

        # Cleanup
        scenario1._clear_resolvers()
        scenario2._clear_resolvers()
|
|
908
|
+
|
|
909
|
+
|
|
910
|
+
# Allow running this test module directly: `python scenario_test.py`.
if __name__ == "__main__":
    unittest.main()
|