humalab-0.0.4-py3-none-any.whl → humalab-0.0.6-py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in the public registry.

Potentially problematic release.

Files changed (39)
  1. humalab/__init__.py +11 -0
  2. humalab/assets/__init__.py +2 -2
  3. humalab/assets/files/resource_file.py +29 -3
  4. humalab/assets/files/urdf_file.py +14 -10
  5. humalab/assets/resource_operator.py +91 -0
  6. humalab/constants.py +39 -5
  7. humalab/dists/bernoulli.py +16 -0
  8. humalab/dists/categorical.py +4 -0
  9. humalab/dists/discrete.py +22 -0
  10. humalab/dists/gaussian.py +22 -0
  11. humalab/dists/log_uniform.py +22 -0
  12. humalab/dists/truncated_gaussian.py +36 -0
  13. humalab/dists/uniform.py +22 -0
  14. humalab/episode.py +196 -0
  15. humalab/humalab.py +116 -153
  16. humalab/humalab_api_client.py +760 -62
  17. humalab/humalab_config.py +0 -13
  18. humalab/humalab_test.py +46 -29
  19. humalab/metrics/__init__.py +5 -5
  20. humalab/metrics/code.py +28 -0
  21. humalab/metrics/metric.py +41 -108
  22. humalab/metrics/scenario_stats.py +95 -0
  23. humalab/metrics/summary.py +24 -18
  24. humalab/run.py +180 -115
  25. humalab/scenarios/__init__.py +4 -0
  26. humalab/scenarios/scenario.py +372 -0
  27. humalab/scenarios/scenario_operator.py +82 -0
  28. humalab/{scenario_test.py → scenarios/scenario_test.py} +150 -269
  29. humalab/utils.py +37 -0
  30. {humalab-0.0.4.dist-info → humalab-0.0.6.dist-info}/METADATA +1 -1
  31. humalab-0.0.6.dist-info/RECORD +39 -0
  32. humalab/assets/resource_manager.py +0 -57
  33. humalab/metrics/dist_metric.py +0 -22
  34. humalab/scenario.py +0 -225
  35. humalab-0.0.4.dist-info/RECORD +0 -34
  36. {humalab-0.0.4.dist-info → humalab-0.0.6.dist-info}/WHEEL +0 -0
  37. {humalab-0.0.4.dist-info → humalab-0.0.6.dist-info}/entry_points.txt +0 -0
  38. {humalab-0.0.4.dist-info → humalab-0.0.6.dist-info}/licenses/LICENSE +0 -0
  39. {humalab-0.0.4.dist-info → humalab-0.0.6.dist-info}/top_level.txt +0 -0
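
The diff shown below is for entry 28, humalab/scenario_test.py → humalab/scenarios/scenario_test.py. It tracks a refactor of the Scenario API: the class moves from humalab/scenario.py to humalab/scenarios/scenario.py, init() no longer accepts run_id, episode_id, or num_env, and per-episode sampling moves from reset() plus attribute access to an explicit resolve() call that returns a (resolved, episode_vals) pair. A minimal before/after sketch, inferred from the test changes rather than from the package documentation:

    from humalab.scenarios.scenario import Scenario  # 0.0.4: from humalab.scenario import Scenario

    scenario = Scenario()
    scenario.init(
        scenario={"cup_x": "${uniform: 0.7, 1.5}"},  # run_id / episode_id / num_env are gone
        seed=42,
    )

    # 0.0.4 style (removed): value = scenario.cup_x, then scenario.reset() to re-sample.
    # 0.0.6 style, per the updated tests: each resolve() call re-samples the distributions.
    resolved, episode_vals = scenario.resolve()
    print(resolved.cup_x)    # sampled value in [0.7, 1.5]
    print(episode_vals)      # distribution samples drawn for this episode (non-empty per the tests)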
@@ -1,6 +1,6 @@
  import unittest
  import numpy as np
- from humalab.scenario import Scenario
+ from humalab.scenarios.scenario import Scenario


  class ScenarioTest(unittest.TestCase):
@@ -9,8 +9,6 @@ class ScenarioTest(unittest.TestCase):
  def setUp(self):
  """Set up test fixtures before each test method."""
  self.scenario = Scenario()
- self.run_id = "test_run_id"
- self.episode_id = "test_episode_id"

  def tearDown(self):
  """Clean up after each test method."""
@@ -23,17 +21,13 @@ class ScenarioTest(unittest.TestCase):

  # In-test
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=None,
  seed=42
  )

  # Post-condition
- self.assertEqual(self.scenario._run_id, self.run_id)
- self.assertEqual(self.scenario._episode_id, self.episode_id)
  self.assertIsNotNone(self.scenario._scenario_id)
- self.assertEqual(len(self.scenario._cur_scenario), 0)
+ self.assertEqual(len(self.scenario._scenario_template), 0)

  def test_init_should_initialize_with_dict_scenario(self):
  """Test that init() correctly processes dict-based scenario."""
@@ -45,15 +39,14 @@ class ScenarioTest(unittest.TestCase):

  # In-test
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_dict,
  seed=42
  )

  # Post-condition
- self.assertEqual(self.scenario.test_key, "test_value")
- self.assertEqual(self.scenario.nested.inner_key, "inner_value")
+ resolved, _ = self.scenario.resolve()
+ self.assertEqual(resolved.test_key, "test_value")
+ self.assertEqual(resolved.nested.inner_key, "inner_value")

  def test_init_should_use_provided_scenario_id(self):
  """Test that init() uses provided scenario_id."""
@@ -62,8 +55,6 @@ class ScenarioTest(unittest.TestCase):

  # In-test
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario={},
  scenario_id=custom_id
  )
@@ -80,21 +71,19 @@ class ScenarioTest(unittest.TestCase):
  # In-test
  scenario1 = Scenario()
  scenario1.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=seed
  )
- value1 = scenario1.value
+ resolved1, _ = scenario1.resolve()
+ value1 = resolved1.value

  scenario2 = Scenario()
  scenario2.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=seed
  )
- value2 = scenario2.value
+ resolved2, _ = scenario2.resolve()
+ value2 = resolved2.value

  # Post-condition
  self.assertEqual(value1, value2)
@@ -112,38 +101,36 @@ class ScenarioTest(unittest.TestCase):

  # In-test
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=42
  )

  # Post-condition
- value = self.scenario.uniform_value
+ resolved, _ = self.scenario.resolve()
+ value = resolved.uniform_value
  self.assertIsInstance(value, (int, float))
  self.assertGreaterEqual(value, 0.0)
  self.assertLessEqual(value, 1.0)

  def test_uniform_distribution_should_handle_size_parameter(self):
- """Test that uniform distribution with size parameter returns list."""
+ """Test that uniform_1d distribution returns list."""
  # Pre-condition
  scenario_config = {
- "uniform_array": "${uniform: 0.0, 1.0, 5}"
+ "uniform_array": "${uniform_1d: 0.0, 1.0}"
  }

  # In-test
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=42
  )

  # Post-condition
- value = self.scenario.uniform_array
+ resolved, _ = self.scenario.resolve()
+ value = resolved.uniform_array
  # Convert to list if it's a ListConfig
- value_list = list(value) if hasattr(value, '__iter__') else [value]
- self.assertEqual(len(value_list), 5)
+ value_list = list(value) if hasattr(value, '__iter__') and not isinstance(value, str) else [value]
+ self.assertGreaterEqual(len(value_list), 1)
  for v in value_list:
  self.assertGreaterEqual(v, 0.0)
  self.assertLessEqual(v, 1.0)
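
This hunk and the matching gaussian hunk that follows stop exercising the three-argument ${uniform: lo, hi, size} form and switch the array cases to new ${uniform_1d: ...} / ${gaussian_1d: ...} resolvers. A hedged illustration of the two forms as the updated tests use them; the exact output shapes are assumptions, since the tests only assert len(...) >= 1:

    from humalab.scenarios.scenario import Scenario

    s = Scenario()
    s.init(
        scenario={
            "scalar": "${uniform: 0.0, 1.0}",     # two-argument form: a single sample
            "vector": "${uniform_1d: 0.0, 1.0}",  # new 1d resolver: an iterable of samples
        },
        seed=42,
    )
    resolved, _ = s.resolve()
    assert isinstance(resolved.scalar, (int, float))
    assert len(list(resolved.vector)) >= 1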
@@ -157,36 +144,34 @@ class ScenarioTest(unittest.TestCase):

  # In-test
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=42
  )

  # Post-condition
- value = self.scenario.gaussian_value
+ resolved, _ = self.scenario.resolve()
+ value = resolved.gaussian_value
  self.assertIsInstance(value, (int, float))

  def test_gaussian_distribution_should_handle_size_parameter(self):
- """Test that gaussian distribution with size parameter returns list."""
+ """Test that gaussian_1d distribution returns list."""
  # Pre-condition
  scenario_config = {
- "gaussian_array": "${gaussian: 0.0, 1.0, 3}"
+ "gaussian_array": "${gaussian_1d: 0.0, 1.0}"
  }

  # In-test
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=42
  )

  # Post-condition
- value = self.scenario.gaussian_array
+ resolved, _ = self.scenario.resolve()
+ value = resolved.gaussian_array
  # Convert to list if it's a ListConfig
- value_list = list(value) if hasattr(value, '__iter__') else [value]
- self.assertEqual(len(value_list), 3)
+ value_list = list(value) if hasattr(value, '__iter__') and not isinstance(value, str) else [value]
+ self.assertGreaterEqual(len(value_list), 1)

  def test_bernoulli_distribution_should_resolve_correctly(self):
  """Test that bernoulli distribution resolver works correctly."""
@@ -197,159 +182,145 @@ class ScenarioTest(unittest.TestCase):

  # In-test
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=42
  )

  # Post-condition
- value = self.scenario.bernoulli_value
+ resolved, _ = self.scenario.resolve()
+ value = resolved.bernoulli_value
  self.assertIn(value, [0, 1, True, False])

- def test_reset_should_regenerate_distribution_values(self):
- """Test that reset() regenerates new values from distributions."""
+ def test_resolve_should_regenerate_distribution_values(self):
+ """Test that resolve() regenerates new values from distributions."""
  # Pre-condition
  scenario_config = {
  "random_value": "${uniform: 0.0, 100.0}"
  }
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=None # No seed for randomness
  )
- _ = self.scenario.random_value # Access once to populate cache

- # In-test
- self.scenario.reset(episode_id="new_episode")
+ # In-test - First resolve
+ resolved1, _ = self.scenario.resolve()
+ value1 = resolved1.random_value
+
+ # Second resolve
+ resolved2, _ = self.scenario.resolve()
+ value2 = resolved2.random_value

  # Post-condition
- second_value = self.scenario.random_value
  # Values should be different (statistically very unlikely to be same)
  # Note: There's a tiny chance they could be equal, but extremely unlikely
- self.assertIsInstance(second_value, (int, float))
+ self.assertIsInstance(value1, (int, float))
+ self.assertIsInstance(value2, (int, float))

- def test_getattr_should_access_scenario_values(self):
- """Test that __getattr__ allows attribute-style access."""
+ def test_template_property_should_access_scenario_template(self):
+ """Test that template property allows access to scenario values."""
  # Pre-condition
  scenario_config = {
  "test_attribute": "test_value"
  }
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config
  )

  # In-test
- value = self.scenario.test_attribute
+ value = self.scenario.template.test_attribute

  # Post-condition
  self.assertEqual(value, "test_value")

- def test_getattr_should_raise_error_for_missing_attribute(self):
- """Test that __getattr__ raises AttributeError for missing attributes."""
+ def test_template_should_contain_unresolved_distributions(self):
+ """Test that template contains unresolved distribution strings."""
  # Pre-condition
+ scenario_config = {
+ "test_key": "${uniform: 0.0, 1.0}"
+ }
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
- scenario={}
+ scenario=scenario_config
  )

- # In-test & Post-condition
- with self.assertRaises(AttributeError) as context:
- _ = self.scenario.nonexistent_attribute
- self.assertIn("nonexistent_attribute", str(context.exception))
+ # In-test
+ yaml_str = self.scenario.yaml
+
+ # Post-condition
+ self.assertIn("uniform", yaml_str)

- def test_getitem_should_access_scenario_values(self):
- """Test that __getitem__ allows dict-style access."""
+ def test_resolve_should_return_resolved_values(self):
+ """Test that resolve() returns dict-style access to resolved values."""
  # Pre-condition
  scenario_config = {
  "test_key": "test_value"
  }
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config
  )

  # In-test
- value = self.scenario["test_key"]
+ resolved, _ = self.scenario.resolve()
+ value = resolved["test_key"]

  # Post-condition
  self.assertEqual(value, "test_value")

- def test_getitem_should_raise_error_for_missing_key(self):
- """Test that __getitem__ raises KeyError for missing keys."""
+ def test_resolve_should_raise_error_for_missing_key(self):
+ """Test that resolve() result raises KeyError for missing keys."""
  # Pre-condition
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario={}
  )

  # In-test & Post-condition
- with self.assertRaises(KeyError) as context:
- _ = self.scenario["nonexistent_key"]
- self.assertIn("nonexistent_key", str(context.exception))
+ resolved, _ = self.scenario.resolve()
+ with self.assertRaises(KeyError):
+ _ = resolved["nonexistent_key"]

- def test_get_final_size_should_handle_none_size_with_num_env(self):
- """Test _get_final_size with None size and num_env set."""
+ def test_get_final_size_should_handle_none_size_without_num_env(self):
+ """Test _get_final_size with None size and no num_env."""
  # Pre-condition
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
- scenario={},
- num_env=4
+ scenario={}
  )

  # In-test
  result = self.scenario._get_final_size(None)

  # Post-condition
- self.assertEqual(result, 4)
+ self.assertIsNone(result)

- def test_get_final_size_should_handle_int_size_with_num_env(self):
- """Test _get_final_size with int size and num_env set."""
+ def test_get_final_size_should_handle_int_size_without_num_env(self):
+ """Test _get_final_size with int size and no num_env."""
  # Pre-condition
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
- scenario={},
- num_env=4
+ scenario={}
  )

  # In-test
  result = self.scenario._get_final_size(3)

  # Post-condition
- self.assertEqual(result, (4, 3))
+ self.assertEqual(result, 3)

- def test_get_final_size_should_handle_tuple_size_with_num_env(self):
- """Test _get_final_size with tuple size and num_env set."""
+ def test_get_final_size_should_handle_tuple_size_without_num_env(self):
+ """Test _get_final_size with tuple size and no num_env."""
  # Pre-condition
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
- scenario={},
- num_env=4
+ scenario={}
  )

  # In-test
  result = self.scenario._get_final_size((2, 3))

  # Post-condition
- self.assertEqual(result, (4, 2, 3))
+ self.assertEqual(result, (2, 3))

  def test_get_final_size_should_handle_size_without_num_env(self):
  """Test _get_final_size with size but no num_env."""
  # Pre-condition
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
- scenario={},
- num_env=None
+ scenario={}
  )

  # In-test
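
The _get_final_size tests now expect the requested size to pass through unchanged rather than being combined with num_env (which init() no longer accepts). Condensed from the assertions above, with the old 0.0.4 results noted for comparison:

    from humalab.scenarios.scenario import Scenario

    s = Scenario()
    s.init(scenario={})
    assert s._get_final_size(None) is None      # 0.0.4 with num_env=4: returned 4
    assert s._get_final_size(3) == 3            # 0.0.4 with num_env=4: returned (4, 3)
    assert s._get_final_size((2, 3)) == (2, 3)  # 0.0.4 with num_env=4: returned (4, 2, 3)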
@@ -409,8 +380,6 @@ class ScenarioTest(unittest.TestCase):
  # Pre-condition
  root = {"key1": "target_node", "key2": "other"}
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario={}
  )

@@ -425,8 +394,6 @@ class ScenarioTest(unittest.TestCase):
  # Pre-condition
  root = {"level1": {"level2": "target_node"}}
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario={}
  )

@@ -441,8 +408,6 @@ class ScenarioTest(unittest.TestCase):
  # Pre-condition
  root = {"key": ["item1", "target_node", "item3"]}
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario={}
  )

@@ -457,8 +422,6 @@ class ScenarioTest(unittest.TestCase):
  # Pre-condition
  root = {"key": "value"}
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario={}
  )

@@ -473,8 +436,6 @@ class ScenarioTest(unittest.TestCase):
  # Pre-condition
  scenario_config = {"key": "value"}
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config
  )

@@ -485,30 +446,26 @@ class ScenarioTest(unittest.TestCase):
  self.assertIsNotNone(template)
  self.assertEqual(template.key, "value")

- def test_cur_scenario_property_should_return_current_scenario(self):
- """Test that cur_scenario property returns the current scenario."""
+ def test_resolve_should_return_resolved_scenario(self):
+ """Test that resolve() returns the resolved scenario."""
  # Pre-condition
  scenario_config = {"key": "value"}
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config
  )

  # In-test
- cur_scenario = self.scenario.cur_scenario
+ resolved, _ = self.scenario.resolve()

  # Post-condition
- self.assertIsNotNone(cur_scenario)
- self.assertEqual(cur_scenario.key, "value")
+ self.assertIsNotNone(resolved)
+ self.assertEqual(resolved.key, "value")

  def test_yaml_property_should_return_yaml_representation(self):
  """Test that yaml property returns YAML string."""
  # Pre-condition
  scenario_config = {"key": "value"}
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config
  )

@@ -520,27 +477,25 @@ class ScenarioTest(unittest.TestCase):
  self.assertIn("key:", yaml_str)
  self.assertIn("value", yaml_str)

- def test_finish_should_call_finish_on_metrics(self):
- """Test that finish() calls finish on all metrics."""
+ def test_resolve_returns_episode_vals(self):
+ """Test that resolve() returns episode values for distributions."""
  # Pre-condition
  scenario_config = {
  "dist_value": "${uniform: 0.0, 1.0}"
  }
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=42
  )
- # Access the value to create the metric
- _ = self.scenario.dist_value

  # In-test
- self.scenario.finish()
+ resolved, episode_vals = self.scenario.resolve()

  # Post-condition
- # Verify metrics exist and finish was called
- self.assertGreater(len(self.scenario._metrics), 0)
+ # Verify resolved scenario has the value
+ self.assertIsNotNone(resolved.dist_value)
+ # Verify episode_vals dict contains the distribution samples
+ self.assertGreater(len(episode_vals), 0)

  def test_nested_scenario_access_should_work(self):
  """Test accessing deeply nested scenario values."""
@@ -553,13 +508,12 @@ class ScenarioTest(unittest.TestCase):
  }
  }
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config
  )

  # In-test
- value = self.scenario.level1.level2.level3
+ resolved, _ = self.scenario.resolve()
+ value = resolved.level1.level2.level3

  # Post-condition
  self.assertEqual(value, "deep_value")
@@ -575,50 +529,25 @@ class ScenarioTest(unittest.TestCase):

  # In-test
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
- scenario=scenario_config,
- seed=42
- )
-
- # Post-condition
- self.assertIsInstance(self.scenario.uniform_val, (int, float))
- self.assertIsInstance(self.scenario.gaussian_val, (int, float))
- self.assertIn(self.scenario.bernoulli_val, [0, 1, True, False])
-
- def test_num_env_should_affect_distribution_size(self):
- """Test that num_env parameter affects distribution output size."""
- # Pre-condition
- scenario_config = {
- "value": "${uniform: 0.0, 1.0}"
- }
-
- # In-test
- self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
- num_env=3,
  seed=42
  )

  # Post-condition
- value = self.scenario.value
- # Convert to list if it's a ListConfig
- value_list = list(value) if hasattr(value, '__iter__') else [value]
- self.assertEqual(len(value_list), 3)
+ resolved, _ = self.scenario.resolve()
+ self.assertIsInstance(resolved.uniform_val, (int, float))
+ self.assertIsInstance(resolved.gaussian_val, (int, float))
+ self.assertIn(resolved.bernoulli_val, [0, 1, True, False])

  def test_clear_resolvers_should_clear_dist_cache(self):
  """Test that _clear_resolvers clears the distribution cache."""
  # Pre-condition
  scenario_config = {"value": "${uniform: 0.0, 1.0}"}
  self.scenario.init(
- run_id=self.run_id,
- episode_id=self.episode_id,
  scenario=scenario_config,
  seed=42
  )
- _ = self.scenario.value # Trigger cache population
+ _ = self.scenario.resolve() # Trigger cache population

  # In-test
  self.scenario._clear_resolvers()
@@ -639,36 +568,25 @@ class ScenarioTest(unittest.TestCase):

  # In-test
  self.scenario.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=scenario_config,
- seed=42,
- num_env=2
+ seed=42
  )

  # Post-condition
  # Verify scenario structure exists
- self.assertIsNotNone(self.scenario.scenario)
- self.assertEqual(self.scenario.scenario.scenario_id, "scenario_1")
-
- # Verify cup_x and cup_y are resolved and are lists (due to num_env=2)
- cup_x = self.scenario.scenario.cup_x
- cup_y = self.scenario.scenario.cup_y
+ resolved, _ = self.scenario.resolve()
+ self.assertIsNotNone(resolved.scenario)
+ self.assertEqual(resolved.scenario.scenario_id, "scenario_1")

- cup_x_list = list(cup_x) if hasattr(cup_x, '__iter__') else [cup_x]
- cup_y_list = list(cup_y) if hasattr(cup_y, '__iter__') else [cup_y]
-
- self.assertEqual(len(cup_x_list), 2)
- self.assertEqual(len(cup_y_list), 2)
+ # Verify cup_x and cup_y are resolved
+ cup_x = resolved.scenario.cup_x
+ cup_y = resolved.scenario.cup_y

  # Verify values are in expected ranges
- for val in cup_x_list:
- self.assertGreaterEqual(val, 0.7)
- self.assertLessEqual(val, 1.5)
-
- for val in cup_y_list:
- self.assertGreaterEqual(val, 0.3)
- self.assertLessEqual(val, 0.7)
+ self.assertGreaterEqual(cup_x, 0.7)
+ self.assertLessEqual(cup_x, 1.5)
+ self.assertGreaterEqual(cup_y, 0.3)
+ self.assertLessEqual(cup_y, 0.7)

  def test_main_script_scenario_should_allow_both_access_methods(self):
  """Test that both attribute and dict access work as shown in __main__ script."""
@@ -682,26 +600,20 @@ class ScenarioTest(unittest.TestCase):

  # In-test
  self.scenario.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=scenario_config,
- seed=42,
- num_env=2
+ seed=42
  )

  # Post-condition
  # Both access methods should return the same value
- cup_x_attr = self.scenario.scenario.cup_x
- cup_x_dict = self.scenario["scenario"].cup_x
-
- # Convert to lists for comparison
- cup_x_attr_list = list(cup_x_attr) if hasattr(cup_x_attr, '__iter__') else [cup_x_attr]
- cup_x_dict_list = list(cup_x_dict) if hasattr(cup_x_dict, '__iter__') else [cup_x_dict]
+ resolved, _ = self.scenario.resolve()
+ cup_x_attr = resolved.scenario.cup_x
+ cup_x_dict = resolved["scenario"].cup_x

- self.assertEqual(cup_x_attr_list, cup_x_dict_list)
+ self.assertEqual(cup_x_attr, cup_x_dict)

- def test_main_script_scenario_should_regenerate_on_reset(self):
- """Test that reset regenerates values as shown in __main__ script."""
+ def test_main_script_scenario_should_regenerate_on_resolve(self):
+ """Test that resolve regenerates values as shown in __main__ script."""
  # Pre-condition
  scenario_config = {
  "scenario": {
@@ -709,58 +621,47 @@ class ScenarioTest(unittest.TestCase):
  }
  }
  self.scenario.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=scenario_config,
- seed=None, # No seed for random values
- num_env=2
+ seed=None # No seed for random values
  )

- first_cup_x = self.scenario.scenario.cup_x
- first_list = list(first_cup_x) if hasattr(first_cup_x, '__iter__') else [first_cup_x]
+ resolved1, _ = self.scenario.resolve()
+ first_cup_x = resolved1.scenario.cup_x

  # In-test
- self.scenario.reset()
+ resolved2, _ = self.scenario.resolve()
+ second_cup_x = resolved2.scenario.cup_x

  # Post-condition
- second_cup_x = self.scenario.scenario.cup_x
- second_list = list(second_cup_x) if hasattr(second_cup_x, '__iter__') else [second_cup_x]
-
- # Both should be valid lists
- self.assertEqual(len(first_list), 2)
- self.assertEqual(len(second_list), 2)
-
  # Values should be in valid range
- for val in second_list:
- self.assertGreaterEqual(val, 0.7)
- self.assertLessEqual(val, 1.5)
+ self.assertGreaterEqual(first_cup_x, 0.7)
+ self.assertLessEqual(first_cup_x, 1.5)
+ self.assertGreaterEqual(second_cup_x, 0.7)
+ self.assertLessEqual(second_cup_x, 1.5)

  def test_main_script_scenario_should_convert_to_numpy_array(self):
  """Test that scenario values can be converted to numpy arrays."""
  # Pre-condition
  scenario_config = {
  "scenario": {
- "cup_x": "${uniform: 0.7, 1.5}",
+ "cup_x": "${uniform_1d: 0.7, 1.5}",
  }
  }
  self.scenario.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=scenario_config,
- seed=42,
- num_env=2
+ seed=42
  )

  # In-test
- cup_x = self.scenario.scenario.cup_x
+ resolved, _ = self.scenario.resolve()
+ cup_x = resolved.scenario.cup_x
  np_array = np.array(cup_x)

  # Post-condition
  self.assertIsInstance(np_array, np.ndarray)
- self.assertEqual(len(np_array), 2)

  # Verify values are in expected range
- for val in np_array:
+ for val in np.atleast_1d(np_array):
  self.assertGreaterEqual(val, 0.7)
  self.assertLessEqual(val, 1.5)

@@ -775,11 +676,8 @@ class ScenarioTest(unittest.TestCase):
  }
  }
  self.scenario.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=scenario_config,
- seed=42,
- num_env=2
+ seed=42
  )

  # In-test
@@ -793,8 +691,8 @@ class ScenarioTest(unittest.TestCase):
  self.assertIn("cup_x:", yaml_str)
  self.assertIn("cup_y:", yaml_str)

- def test_main_script_scenario_should_handle_multiple_resets(self):
- """Test multiple reset calls as shown in __main__ script."""
+ def test_main_script_scenario_should_handle_multiple_resolves(self):
+ """Test multiple resolve calls as shown in __main__ script."""
  # Pre-condition
  scenario_config = {
  "scenario": {
@@ -802,34 +700,26 @@ class ScenarioTest(unittest.TestCase):
  }
  }
  self.scenario.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=scenario_config,
- seed=42,
- num_env=2
+ seed=42
  )

- first_values = list(self.scenario.scenario.cup_x)
+ resolved1, _ = self.scenario.resolve()
+ first_value = resolved1.scenario.cup_x

- # In-test - First reset
- self.scenario.reset()
- second_values = list(self.scenario.scenario.cup_x)
+ # In-test - First resolve
+ resolved2, _ = self.scenario.resolve()
+ second_value = resolved2.scenario.cup_x

- # In-test - Second reset
- self.scenario.reset()
- third_values = list(self.scenario.scenario.cup_x)
+ # In-test - Second resolve
+ resolved3, _ = self.scenario.resolve()
+ third_value = resolved3.scenario.cup_x

  # Post-condition
- # All should be valid lists of size 2
- self.assertEqual(len(first_values), 2)
- self.assertEqual(len(second_values), 2)
- self.assertEqual(len(third_values), 2)
-
  # All values should be in range
- for vals in [first_values, second_values, third_values]:
- for val in vals:
- self.assertGreaterEqual(val, 0.7)
- self.assertLessEqual(val, 1.5)
+ for val in [first_value, second_value, third_value]:
+ self.assertGreaterEqual(val, 0.7)
+ self.assertLessEqual(val, 1.5)

  def test_main_script_scenario_should_reinitialize_with_none(self):
  """Test reinitializing scenario with None as shown in __main__ script."""
@@ -840,11 +730,8 @@ class ScenarioTest(unittest.TestCase):
  }
  }
  self.scenario.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=scenario_config,
- seed=42,
- num_env=2
+ seed=42
  )

  # Verify initial scenario has content
@@ -853,8 +740,6 @@ class ScenarioTest(unittest.TestCase):

  # In-test - Reinitialize with None
  self.scenario.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=None,
  seed=42
  )
@@ -865,7 +750,7 @@ class ScenarioTest(unittest.TestCase):
  self.assertEqual(second_yaml.strip(), "{}")

  def test_main_script_scenario_should_handle_seed_consistency(self):
- """Test that same seed produces consistent results across resets."""
+ """Test that same seed produces consistent results across resolves."""
  # Pre-condition
  scenario_config = {
  "scenario": {
@@ -877,26 +762,22 @@ class ScenarioTest(unittest.TestCase):
  # Create first scenario with seed
  scenario1 = Scenario()
  scenario1.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=scenario_config,
- seed=42,
- num_env=2
+ seed=42
  )
- values1_x = list(scenario1.scenario.cup_x)
- values1_y = list(scenario1.scenario.cup_y)
+ resolved1, _ = scenario1.resolve()
+ values1_x = resolved1.scenario.cup_x
+ values1_y = resolved1.scenario.cup_y

  # Create second scenario with same seed
  scenario2 = Scenario()
  scenario2.init(
- run_id="run_id",
- episode_id="episode_id",
  scenario=scenario_config,
- seed=42,
- num_env=2
+ seed=42
  )
- values2_x = list(scenario2.scenario.cup_x)
- values2_y = list(scenario2.scenario.cup_y)
+ resolved2, _ = scenario2.resolve()
+ values2_x = resolved2.scenario.cup_x
+ values2_y = resolved2.scenario.cup_y

  # Post-condition
  self.assertEqual(values1_x, values2_x)
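
The file ends with the seed-consistency check rewritten for the new API: two Scenario instances initialised with the same config and seed resolve to identical values, now via resolve() instead of direct attribute access. A condensed version of that check, reusing the cup_x/cup_y ranges that appear elsewhere in the file (the exact config in this test is truncated in the diff):

    from humalab.scenarios.scenario import Scenario

    config = {"scenario": {"cup_x": "${uniform: 0.7, 1.5}", "cup_y": "${uniform: 0.3, 0.7}"}}

    a, b = Scenario(), Scenario()
    a.init(scenario=config, seed=42)
    b.init(scenario=config, seed=42)

    resolved_a, _ = a.resolve()
    resolved_b, _ = b.resolve()
    assert resolved_a.scenario.cup_x == resolved_b.scenario.cup_x
    assert resolved_a.scenario.cup_y == resolved_b.scenario.cup_y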