opentau 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. opentau/__init__.py +179 -0
  2. opentau/__version__.py +24 -0
  3. opentau/configs/__init__.py +19 -0
  4. opentau/configs/default.py +297 -0
  5. opentau/configs/libero.py +113 -0
  6. opentau/configs/parser.py +393 -0
  7. opentau/configs/policies.py +297 -0
  8. opentau/configs/reward.py +42 -0
  9. opentau/configs/train.py +370 -0
  10. opentau/configs/types.py +76 -0
  11. opentau/constants.py +52 -0
  12. opentau/datasets/__init__.py +84 -0
  13. opentau/datasets/backward_compatibility.py +78 -0
  14. opentau/datasets/compute_stats.py +333 -0
  15. opentau/datasets/dataset_mixture.py +460 -0
  16. opentau/datasets/factory.py +232 -0
  17. opentau/datasets/grounding/__init__.py +67 -0
  18. opentau/datasets/grounding/base.py +154 -0
  19. opentau/datasets/grounding/clevr.py +110 -0
  20. opentau/datasets/grounding/cocoqa.py +130 -0
  21. opentau/datasets/grounding/dummy.py +101 -0
  22. opentau/datasets/grounding/pixmo.py +177 -0
  23. opentau/datasets/grounding/vsr.py +141 -0
  24. opentau/datasets/image_writer.py +304 -0
  25. opentau/datasets/lerobot_dataset.py +1910 -0
  26. opentau/datasets/online_buffer.py +442 -0
  27. opentau/datasets/push_dataset_to_hub/utils.py +132 -0
  28. opentau/datasets/sampler.py +99 -0
  29. opentau/datasets/standard_data_format_mapping.py +278 -0
  30. opentau/datasets/transforms.py +330 -0
  31. opentau/datasets/utils.py +1243 -0
  32. opentau/datasets/v2/batch_convert_dataset_v1_to_v2.py +887 -0
  33. opentau/datasets/v2/convert_dataset_v1_to_v2.py +829 -0
  34. opentau/datasets/v21/_remove_language_instruction.py +109 -0
  35. opentau/datasets/v21/batch_convert_dataset_v20_to_v21.py +60 -0
  36. opentau/datasets/v21/convert_dataset_v20_to_v21.py +183 -0
  37. opentau/datasets/v21/convert_stats.py +150 -0
  38. opentau/datasets/video_utils.py +597 -0
  39. opentau/envs/__init__.py +18 -0
  40. opentau/envs/configs.py +178 -0
  41. opentau/envs/factory.py +99 -0
  42. opentau/envs/libero.py +439 -0
  43. opentau/envs/utils.py +204 -0
  44. opentau/optim/__init__.py +16 -0
  45. opentau/optim/factory.py +43 -0
  46. opentau/optim/optimizers.py +121 -0
  47. opentau/optim/schedulers.py +140 -0
  48. opentau/planner/__init__.py +82 -0
  49. opentau/planner/high_level_planner.py +366 -0
  50. opentau/planner/utils/memory.py +64 -0
  51. opentau/planner/utils/utils.py +65 -0
  52. opentau/policies/__init__.py +24 -0
  53. opentau/policies/factory.py +172 -0
  54. opentau/policies/normalize.py +315 -0
  55. opentau/policies/pi0/__init__.py +19 -0
  56. opentau/policies/pi0/configuration_pi0.py +250 -0
  57. opentau/policies/pi0/modeling_pi0.py +994 -0
  58. opentau/policies/pi0/paligemma_with_expert.py +516 -0
  59. opentau/policies/pi05/__init__.py +20 -0
  60. opentau/policies/pi05/configuration_pi05.py +231 -0
  61. opentau/policies/pi05/modeling_pi05.py +1257 -0
  62. opentau/policies/pi05/paligemma_with_expert.py +572 -0
  63. opentau/policies/pretrained.py +315 -0
  64. opentau/policies/utils.py +123 -0
  65. opentau/policies/value/__init__.py +18 -0
  66. opentau/policies/value/configuration_value.py +170 -0
  67. opentau/policies/value/modeling_value.py +512 -0
  68. opentau/policies/value/reward.py +87 -0
  69. opentau/policies/value/siglip_gemma.py +221 -0
  70. opentau/scripts/actions_mse_loss.py +89 -0
  71. opentau/scripts/bin_to_safetensors.py +116 -0
  72. opentau/scripts/compute_max_token_length.py +111 -0
  73. opentau/scripts/display_sys_info.py +90 -0
  74. opentau/scripts/download_libero_benchmarks.py +54 -0
  75. opentau/scripts/eval.py +877 -0
  76. opentau/scripts/export_to_onnx.py +180 -0
  77. opentau/scripts/fake_tensor_training.py +87 -0
  78. opentau/scripts/get_advantage_and_percentiles.py +220 -0
  79. opentau/scripts/high_level_planner_inference.py +114 -0
  80. opentau/scripts/inference.py +70 -0
  81. opentau/scripts/launch_train.py +63 -0
  82. opentau/scripts/libero_simulation_parallel.py +356 -0
  83. opentau/scripts/libero_simulation_sequential.py +122 -0
  84. opentau/scripts/nav_high_level_planner_inference.py +61 -0
  85. opentau/scripts/train.py +379 -0
  86. opentau/scripts/visualize_dataset.py +294 -0
  87. opentau/scripts/visualize_dataset_html.py +507 -0
  88. opentau/scripts/zero_to_fp32.py +760 -0
  89. opentau/utils/__init__.py +20 -0
  90. opentau/utils/accelerate_utils.py +79 -0
  91. opentau/utils/benchmark.py +98 -0
  92. opentau/utils/fake_tensor.py +81 -0
  93. opentau/utils/hub.py +209 -0
  94. opentau/utils/import_utils.py +79 -0
  95. opentau/utils/io_utils.py +137 -0
  96. opentau/utils/libero.py +214 -0
  97. opentau/utils/libero_dataset_recorder.py +460 -0
  98. opentau/utils/logging_utils.py +180 -0
  99. opentau/utils/monkey_patch.py +278 -0
  100. opentau/utils/random_utils.py +244 -0
  101. opentau/utils/train_utils.py +198 -0
  102. opentau/utils/utils.py +471 -0
  103. opentau-0.1.0.dist-info/METADATA +161 -0
  104. opentau-0.1.0.dist-info/RECORD +108 -0
  105. opentau-0.1.0.dist-info/WHEEL +5 -0
  106. opentau-0.1.0.dist-info/entry_points.txt +2 -0
  107. opentau-0.1.0.dist-info/licenses/LICENSE +508 -0
  108. opentau-0.1.0.dist-info/top_level.txt +1 -0
opentau/scripts/eval.py
@@ -0,0 +1,877 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
# Copyright 2026 Tensor Auto Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a policy on an environment by running rollouts and computing metrics."""

import concurrent.futures as cf
import datetime as dt
import json
import logging
import threading
import time
from collections import defaultdict
from collections.abc import Callable
from contextlib import nullcontext
from copy import deepcopy
from dataclasses import asdict
from functools import partial
from pathlib import Path
from pprint import pformat
from typing import TypedDict

import einops
import gymnasium as gym
import imageio
import numpy as np
import torch
from accelerate import Accelerator
from accelerate.utils import gather_object
from termcolor import colored
from torch import nn
from tqdm import trange

from opentau.configs import parser
from opentau.configs.train import TrainPipelineConfig
from opentau.envs.factory import make_envs
from opentau.envs.utils import (
    add_envs_task,
    check_env_attributes_and_types,
    close_envs,
    preprocess_observation,
)
from opentau.policies.factory import make_policy
from opentau.policies.pretrained import PreTrainedPolicy
from opentau.utils.accelerate_utils import acc_print, get_proc_accelerator, set_proc_accelerator
from opentau.utils.io_utils import write_video
from opentau.utils.libero_dataset_recorder import aggregate_task_results, consolidate_task_result
from opentau.utils.random_utils import set_seed
from opentau.utils.utils import (
    init_logging,
    inside_slurm,
    is_launched_with_accelerate,
)


def rollout(
    env: gym.vector.VectorEnv,
    policy: PreTrainedPolicy,
    cfg: TrainPipelineConfig,
    seeds: list[int] | None = None,
    return_observations: bool = False,
    render_callback: Callable[[gym.vector.VectorEnv], None] | None = None,
) -> dict:
    """Run a batched policy rollout once through a batch of environments.

    Note that all environments in the batch are run until the last environment is done. This means some
    data will probably need to be discarded (for environments that aren't the first one to be done).

    The return dictionary contains:
        (optional) "observation": A dictionary of (batch, sequence + 1, *) tensors mapped to observation
            keys. NOTE that this has an extra sequence element relative to the other keys in the
            dictionary. This is because an extra observation is included from after the environment is
            terminated or truncated.
        "action": A (batch, sequence, action_dim) tensor of actions applied based on the observations (not
            including the last observations).
        "reward": A (batch, sequence) tensor of rewards received for applying the actions.
        "success": A (batch, sequence) tensor of success conditions (the only time this can be True is upon
            environment termination/truncation).
        "done": A (batch, sequence) tensor of **cumulative** done conditions. For any given batch element,
            the first True is followed by Trues all the way to the end. This can be used for masking
            extraneous elements from the sequences above.

    Args:
        env: The batch of environments.
        policy: The policy. Must be a PyTorch nn module.
        seeds: The environments are seeded once at the start of the rollout. If provided, this argument
            specifies the seeds for each of the environments.
        return_observations: Whether to include all observations in the returned rollout data. Observations
            are returned optionally because they typically take more memory to cache. Defaults to False.
        render_callback: Optional rendering callback to be used after the environments are reset, and after
            every step.
    Returns:
        The dictionary described above.
    """
    assert isinstance(policy, nn.Module), "Policy must be a PyTorch nn module."

    acc = get_proc_accelerator()
    if acc is not None and not isinstance(policy, PreTrainedPolicy):
        policy = acc.unwrap_model(policy)

    # Reset the policy and environments.
    policy.reset()
    observation, info = env.reset(seed=seeds)
    if render_callback is not None:
        render_callback(env)

    all_observations = []
    all_actions = []
    all_rewards = []
    all_successes = []
    all_dones = []

    step = 0
    # Keep track of which environments are done.
    done = np.array([False] * env.num_envs)
    max_steps = env.call("_max_episode_steps")[0]
    progbar = trange(
        max_steps,
        desc=f"Running rollout with at most {max_steps} steps",
        disable=inside_slurm(),  # we don't want a progress bar when using slurm, since it clutters the logs
        leave=False,
    )
    check_env_attributes_and_types(env)
    successes = np.zeros((env.num_envs,), dtype=bool)
    while not np.all(done) and step < max_steps:
        # Numpy array to tensor and changing dictionary keys to OpenTau policy format.
        observation = preprocess_observation(observation, cfg=cfg)
        # Infer "task" from attributes of environments.
        # TODO: works with SyncVectorEnv but not AsyncVectorEnv
        observation = add_envs_task(env, observation)

        if return_observations:
            all_observations.append(deepcopy(observation))

        with torch.inference_mode():
            action = policy.select_action(observation)

        # Convert to CPU / numpy.
        action_numpy: np.ndarray = action.to("cpu").numpy()
        assert action_numpy.ndim == 2, "Action dimensions should be (batch, action_dim)"

        # Apply the next action.
        observation, reward, terminated, truncated, info = env.step(action_numpy)
        if render_callback is not None:
            render_callback(env)

        # Once a success, always a success.
        if "is_success" in info:
            successes = successes | info["is_success"].astype(bool)

        # Keep track of which environments are done so far.
        # Mark the episode as done if we reach the maximum step limit.
        # This ensures that the rollout always terminates cleanly at `max_steps`,
        # and allows logging/saving (e.g., videos) to be triggered consistently.
        done = terminated | truncated | done
        if step + 1 == max_steps:
            done = np.ones_like(done, dtype=bool)

        all_actions.append(torch.from_numpy(action_numpy))
        all_rewards.append(torch.from_numpy(reward))
        all_dones.append(torch.from_numpy(done))
        all_successes.append(torch.tensor(successes))

        step += 1
        running_success_rate = (
            einops.reduce(torch.stack(all_successes, dim=1), "b n -> b", "any").numpy().mean()
        )
        progbar.set_postfix({"running_success_rate": f"{running_success_rate.item() * 100:.1f}%"})
        progbar.update()

    # Track the final observation.
    if return_observations:
        observation = preprocess_observation(observation, cfg=cfg)
        observation = add_envs_task(env, observation)
        all_observations.append(deepcopy(observation))

    # Stack the sequence along the first dimension so that we have (batch, sequence, *) tensors.
    ret = {
        "action": torch.stack(all_actions, dim=1),
        "reward": torch.stack(all_rewards, dim=1),
        "success": torch.stack(all_successes, dim=1),
        "done": torch.stack(all_dones, dim=1),
    }
    if return_observations:
        stacked_observations = {}
        for key, value0 in all_observations[0].items():
            if isinstance(value0, torch.Tensor):
                stacked_observations[key] = torch.stack([obs[key] for obs in all_observations], dim=1)
            elif isinstance(value0, list):
                stacked_observations[key] = list(zip(*[obs[key] for obs in all_observations], strict=True))
            else:
                raise TypeError(
                    f"Unsupported observation type for key {key}: {type(value0)}. "
                    "Only `torch.Tensor` and `list` are supported for now."
                )
        ret["observation"] = stacked_observations

    if hasattr(policy, "use_original_modules"):
        policy.use_original_modules()

    return ret


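# --- Editor's note: the block below is an illustrative sketch, not part of the released
# eval.py. It shows one way the dictionary returned by rollout() can be consumed, assuming
# `env`, `policy`, and `cfg` have been constructed elsewhere (e.g. as in eval_main) and
# using the module's torch import.
data = rollout(env, policy, cfg, seeds=list(range(env.num_envs)))
first_done = data["done"].int().argmax(dim=1)             # (batch,) index of the first done step
steps = torch.arange(data["done"].shape[1])
valid = steps[None, :] <= first_done[:, None]             # mask out steps after each episode ended
episode_return = (data["reward"] * valid).sum(dim=1)      # (batch,) per-episode summed reward
success_rate = data["success"].any(dim=1).float().mean()  # fraction of envs that ever succeeded

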
def eval_policy(
    env: gym.vector.VectorEnv,
    policy: PreTrainedPolicy,
    n_episodes: int,
    cfg: TrainPipelineConfig,
    max_episodes_rendered: int = 0,
    videos_dir: Path | None = None,
    return_episode_data: bool = False,
    start_seed: int | None = None,
    grid_size: tuple[int, int] | None = None,
) -> dict:
    """
    Args:
        env: The batch of environments.
        policy: The policy.
        n_episodes: The number of episodes to evaluate.
        cfg: The training config.
        max_episodes_rendered: Maximum number of episodes to render into videos.
        videos_dir: Where to save rendered videos.
        return_episode_data: Whether to return episode data for online training. Incorporates the data into
            the "episodes" key of the returned dictionary.
        start_seed: The first seed to use for the first individual rollout. For all subsequent rollouts the
            seed is incremented by 1. If not provided, the environments are not manually seeded.
        grid_size: The grid size to use for rendering concatenated rollouts.
    Returns:
        Dictionary with metrics and data regarding the rollouts.
    """
    if max_episodes_rendered > 0 and not videos_dir:
        raise ValueError("If max_episodes_rendered > 0, videos_dir must be provided.")

    start = time.time()
    policy.eval()

    # Determine how many batched rollouts we need to get n_episodes. Note that if n_episodes is not evenly
    # divisible by env.num_envs we end up discarding some data in the last batch.
    n_batches = n_episodes // env.num_envs + int((n_episodes % env.num_envs) != 0)

    # Keep track of some metrics.
    sum_rewards = []
    max_rewards = []
    all_successes = []
    all_seeds = []
    all_done_indices = []
    threads = []  # for video saving threads
    n_episodes_rendered = 0  # for saving the correct number of videos

    # Callback for visualization.
    def render_frame(env: gym.vector.VectorEnv):
        # noqa: B023
        if n_episodes_rendered >= max_episodes_rendered:
            return
        n_to_render_now = min(max_episodes_rendered - n_episodes_rendered, env.num_envs)
        if isinstance(env, gym.vector.SyncVectorEnv):
            ep_frames.append(np.stack([env.envs[i].render() for i in range(n_to_render_now)]))  # noqa: B023
        elif isinstance(env, gym.vector.AsyncVectorEnv):
            # Here we must render all frames and discard any we don't need.
            ep_frames.append(np.stack(env.call("render")[:n_to_render_now]))

    if max_episodes_rendered > 0:
        video_paths: list[str] = []
        rendered_successes: list[bool] = []

    if return_episode_data:
        episode_data: dict[str, list | torch.Tensor] | None = None

    # we don't want a progress bar when using slurm, since it clutters the logs
    progbar = trange(n_batches, desc="Stepping through eval batches", disable=inside_slurm())
    for batch_ix in progbar:
        # Cache frames for rendering videos. Each item will be (b, h, w, c), and the list indexes the rollout
        # step.
        if max_episodes_rendered > 0:
            ep_frames: list[np.ndarray] = []

        if start_seed is None:
            seeds = None
        else:
            # HACK: to get different seeds per accelerator process when using distributed eval.
            acc = get_proc_accelerator()
            acc_offset = acc.process_index * 10000 if acc else 0
            seeds = range(
                start_seed + acc_offset + (batch_ix * env.num_envs),
                start_seed + acc_offset + ((batch_ix + 1) * env.num_envs),
            )
        rollout_data = rollout(
            env=env,
            policy=policy,
            cfg=cfg,
            seeds=list(seeds) if seeds else None,
            return_observations=return_episode_data,
            render_callback=render_frame if max_episodes_rendered > 0 else None,
        )
        if return_episode_data:
            if not episode_data:
                episode_data = deepcopy(rollout_data)
            else:
                for key, value in rollout_data.items():
                    if isinstance(value, torch.Tensor):
                        episode_data[key] = torch.cat([episode_data[key], value], dim=0)
                    elif isinstance(value, list):
                        episode_data[key].extend(value)

        # Figure out where in each rollout sequence the first done condition was encountered (results after
        # this won't be included).
        n_steps = rollout_data["done"].shape[1]
        # Note: this relies on a property of argmax: that it returns the first occurrence as a tiebreaker.
        done_indices = torch.argmax(rollout_data["done"].to(int), dim=1)
        all_done_indices.extend(done_indices.tolist())

        # Make a mask with shape (batch, n_steps) to mask out rollout data after the first done
        # (batch-element-wise). Note the `done_indices + 1` to make sure to keep the data from the done step.
        mask = (torch.arange(n_steps) <= einops.repeat(done_indices + 1, "b -> b s", s=n_steps)).int()
        # Extend metrics.
        batch_sum_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "sum")
        sum_rewards.extend(batch_sum_rewards.tolist())
        batch_max_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "max")
        max_rewards.extend(batch_max_rewards.tolist())
        batch_successes = einops.reduce((rollout_data["success"] * mask), "b n -> b", "any")
        all_successes.extend(batch_successes.tolist())
        if seeds:
            all_seeds.extend(seeds)
        else:
            all_seeds.extend([None] * env.num_envs)

        # Maybe render video for visualization.
        if max_episodes_rendered > 0 and len(ep_frames) > 0:
            batch_stacked_frames = np.stack(ep_frames, axis=1)  # (b, t, *)
            for stacked_frames, done_index, success in zip(
                batch_stacked_frames,
                done_indices.flatten().tolist(),
                batch_successes.tolist(),
                strict=False,
            ):
                if n_episodes_rendered >= max_episodes_rendered:
                    break

                videos_dir.mkdir(parents=True, exist_ok=True)
                video_path = videos_dir / f"eval_episode_{n_episodes_rendered}.mp4"
                video_paths.append(str(video_path))
                rendered_successes.append(success)
                thread = threading.Thread(
                    target=write_video,
                    args=(
                        str(video_path),
                        stacked_frames[: done_index + 1],  # + 1 to capture the last observation
                        env.unwrapped.metadata["render_fps"],
                    ),
                )
                thread.start()
                threads.append(thread)
                n_episodes_rendered += 1

        progbar.set_postfix(
            {"running_success_rate": f"{np.mean(all_successes[:n_episodes]).item() * 100:.1f}%"}
        )

    # Wait till all video rendering threads are done.
    for thread in threads:
        thread.join()

    # Create grid summary video if we have videos to render
    if max_episodes_rendered > 0 and len(video_paths) > 0:
        try:
            grid_summary_path = videos_dir / "grid_summary.mp4"
            create_grid_summary_video(
                video_paths=video_paths,
                success_statuses=rendered_successes,
                output_path=str(grid_summary_path),
                fps=env.unwrapped.metadata["render_fps"],
                highlight_duration=2.0,
                grid_size=grid_size,
            )
            logging.info(f"Grid summary video created: {grid_summary_path}")
        except Exception as e:
            logging.error(f"Failed to create grid summary video: {e}")

    # Compile eval info.
    info = {
        "per_episode": [
            {
                "episode_ix": i,
                "sum_reward": sum_reward,
                "max_reward": max_reward,
                "success": success,
                "seed": seed,
                "done_index": done_index,
            }
            for i, (sum_reward, max_reward, success, seed, done_index) in enumerate(
                zip(
                    sum_rewards[:n_episodes],
                    max_rewards[:n_episodes],
                    all_successes[:n_episodes],
                    all_seeds[:n_episodes],
                    all_done_indices[:n_episodes],
                    strict=True,
                )
            )
        ],
        "aggregated": {
            "avg_sum_reward": float(np.nanmean(sum_rewards[:n_episodes])),
            "avg_max_reward": float(np.nanmean(max_rewards[:n_episodes])),
            "pc_success": float(np.nanmean(all_successes[:n_episodes]) * 100),
            "eval_s": time.time() - start,
            "eval_ep_s": (time.time() - start) / n_episodes,
        },
    }

    if return_episode_data:
        info["episodes"] = episode_data

    if max_episodes_rendered > 0:
        info["video_paths"] = video_paths

    return info


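# --- Editor's note: illustrative sketch, not part of the released eval.py. Reading the
# metrics dictionary returned by eval_policy(); `env`, `policy`, and `cfg` are assumed to
# exist already.
info = eval_policy(env, policy, n_episodes=16, cfg=cfg, start_seed=0)
print(f"success rate: {info['aggregated']['pc_success']:.1f}% over {len(info['per_episode'])} episodes")
worst = min(info["per_episode"], key=lambda ep: ep["sum_reward"])
print(f"worst episode: seed={worst['seed']} sum_reward={worst['sum_reward']:.2f} done_index={worst['done_index']}")

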
def create_grid_summary_video(
    video_paths: list[str],
    success_statuses: list[bool],
    output_path: str,
    fps: float,
    highlight_duration: float = 1.0,
    grid_size: tuple[int, int] | None = None,
) -> None:
    """Create a grid summary video from individual episode videos.

    Args:
        video_paths: List of paths to individual video files
        success_statuses: List of boolean success statuses for each video
        output_path: Path where the summary video will be saved
        fps: Frames per second for the output video
        highlight_duration: Duration in seconds to show the highlighting at the end
        grid_size: Tuple of (rows, cols) for the grid. If None, will be auto-calculated as square grid.
    """
    if len(video_paths) != len(success_statuses):
        raise ValueError(
            f"Number of videos ({len(video_paths)}) must match number of success statuses ({len(success_statuses)})"
        )

    # Auto-calculate grid size if not provided
    if grid_size is None:
        # Calculate square grid size
        n_videos = len(video_paths)
        grid_rows = int(np.ceil(np.sqrt(n_videos)))
        grid_cols = int(np.ceil(n_videos / grid_rows))
        grid_size = (grid_rows, grid_cols)

    grid_rows, grid_cols = grid_size
    expected_videos = grid_rows * grid_cols

    if len(video_paths) > expected_videos:
        raise ValueError(
            f"Too many videos ({len(video_paths)}) for grid size {grid_size} (max {expected_videos})"
        )

    # Load all videos, keeping the success statuses aligned with the videos that could be read.
    videos = []
    loaded_successes = []
    max_frames = 0
    for video_path, success in zip(video_paths, success_statuses, strict=True):
        if not Path(video_path).exists():
            logging.warning(f"Video file not found: {video_path}")
            continue
        video = imageio.mimread(video_path)
        videos.append(video)
        loaded_successes.append(success)
        max_frames = max(max_frames, len(video))

    if not videos:
        logging.error("No valid videos found to create grid summary")
        return

    # Get dimensions from first video
    frame_height, frame_width = videos[0][0].shape[:2]
    grid_width = frame_width * grid_cols
    grid_height = frame_height * grid_rows

    # Create grid frames
    grid_frames = []

    for frame_idx in range(max_frames):
        # Create empty grid frame
        grid_frame = np.zeros((grid_height, grid_width, 3), dtype=np.uint8)

        # Fill grid with video frames
        for i, video in enumerate(videos):
            row = i // grid_cols
            col = i % grid_cols

            # Use last frame if video is shorter
            frame_to_use = min(frame_idx, len(video) - 1)
            frame = video[frame_to_use]

            # Ensure frame is RGB
            if len(frame.shape) == 3 and frame.shape[2] == 3:
                y_start = row * frame_height
                y_end = y_start + frame_height
                x_start = col * frame_width
                x_end = x_start + frame_width
                grid_frame[y_start:y_end, x_start:x_end] = frame

        grid_frames.append(grid_frame)

    # Add highlighting frames at the end
    highlight_frames = int(highlight_duration * fps)
    for _ in range(highlight_frames):
        # Create highlighted version of the last frame
        highlighted_frame = grid_frame.copy()

        for i, success in enumerate(loaded_successes):
            row = i // grid_cols
            col = i % grid_cols

            y_start = row * frame_height
            y_end = y_start + frame_height
            x_start = col * frame_width
            x_end = x_start + frame_width

            # Create colored overlay
            color = np.array([0, 255, 0]) if success else np.array([255, 0, 0])  # Green or Red
            overlay = np.full((frame_height, frame_width, 3), color, dtype=np.uint8)

            # Blend with original frame (50% opacity)
            highlighted_frame[y_start:y_end, x_start:x_end] = (
                0.5 * highlighted_frame[y_start:y_end, x_start:x_end] + 0.5 * overlay
            ).astype(np.uint8)

        grid_frames.append(highlighted_frame)

    # Save the grid video
    imageio.mimsave(output_path, grid_frames, fps=fps)
    logging.info(f"Grid summary video saved to: {output_path}")


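# --- Editor's note: illustrative sketch, not part of the released eval.py. The automatic
# grid sizing above, in isolation (uses the module's numpy import): 10 rendered episodes
# are laid out on a 4x3 grid, rows first.
n_videos = 10
grid_rows = int(np.ceil(np.sqrt(n_videos)))     # ceil(3.16) -> 4
grid_cols = int(np.ceil(n_videos / grid_rows))  # ceil(2.5)  -> 3
assert (grid_rows, grid_cols) == (4, 3)

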
@parser.wrap()
def eval_main(cfg: TrainPipelineConfig):
    accelerator = Accelerator()
    set_proc_accelerator(accelerator)

    init_logging(accelerator=accelerator)
    logging.info(pformat(asdict(cfg)))

    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    set_seed(cfg.seed)

    details = f"{cfg.env.type}-{cfg.env.task}-{cfg.eval.n_episodes}"
    now = f"{dt.datetime.now():%Y%m%d-%H%M%S}"
    eval_output_dir = Path(cfg.output_dir) / "post-training-eval" / f"{details}-{now}"

    logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {eval_output_dir}")

    logging.info("Making environment.")
    envs = make_envs(cfg.env, cfg, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs)

    logging.info("Making policy.")

    policy = make_policy(cfg=cfg.policy)
    policy.to(torch.bfloat16)
    policy = accelerator.prepare(policy)
    policy.eval()
    with (
        torch.no_grad(),
        torch.autocast(device_type=accelerator.device.type) if cfg.policy.use_amp else nullcontext(),
    ):
        eval_info = eval_policy_all(
            envs=envs,
            policy=policy,
            n_episodes=cfg.eval.n_episodes,
            cfg=cfg,
            max_episodes_rendered=10,
            videos_dir=eval_output_dir / "videos",
            start_seed=cfg.seed,
            max_parallel_tasks=cfg.env.max_parallel_tasks,
            return_episode_data=bool(cfg.eval.recording_root),
        )

    acc_print("Local Eval Info", eval_info)
    eval_info = gather_object([eval_info])

    if accelerator.is_main_process:
        eval_info = consolidate_eval_info(eval_info)
        with open(eval_output_dir / "eval_info.json", "w") as f:
            json.dump(eval_info, f, indent=2)
        print("Overall Aggregated Metrics:")
        print(eval_info["overall"])
        for task_group, task_group_info in eval_info["per_group"].items():
            print(f"\nAggregated Metrics for {task_group}:")
            print(task_group_info)

    # Close all vec envs
    close_envs(envs)
    accelerator.end_training()

    logging.info("End of eval")


# ---- typed payload returned by one task eval ----
class TaskMetrics(TypedDict):
    sum_rewards: list[float]
    max_rewards: list[float]
    successes: list[bool]
    video_paths: list[str]


ACC_KEYS = ("sum_rewards", "max_rewards", "successes", "video_paths")


def eval_one(
    env: gym.vector.VectorEnv,
    *,
    policy: PreTrainedPolicy,
    n_episodes: int,
    cfg: TrainPipelineConfig,
    max_episodes_rendered: int,
    videos_dir: Path | None,
    return_episode_data: bool,
    start_seed: int | None,
) -> tuple[TaskMetrics, dict]:
    """Evaluates one task_id of one suite using the provided vec env."""

    task_videos_dir = videos_dir

    task_result = eval_policy(
        env=env,
        policy=policy,
        n_episodes=n_episodes,
        cfg=cfg,
        max_episodes_rendered=max_episodes_rendered,
        videos_dir=task_videos_dir,
        return_episode_data=return_episode_data,
        start_seed=start_seed,
    )

    per_episode = task_result["per_episode"]
    return TaskMetrics(
        sum_rewards=[ep["sum_reward"] for ep in per_episode],
        max_rewards=[ep["max_reward"] for ep in per_episode],
        successes=[ep["success"] for ep in per_episode],
        video_paths=task_result.get("video_paths", []),
    ), task_result


def run_one(
    task_group: str,
    task_id: int,
    env,
    *,
    policy,
    n_episodes: int,
    cfg: TrainPipelineConfig,
    max_episodes_rendered: int,
    videos_dir: Path | None,
    return_episode_data: bool,
    start_seed: int | None,
) -> tuple[str, int, TaskMetrics, dict]:
    """
    Run eval_one for a single (task_group, task_id, env).
    Returns (task_group, task_id, task_metrics, task_result).
    This function is intentionally module-level to make it easy to test.
    """
    task_videos_dir = None
    if videos_dir is not None:
        acc = get_proc_accelerator()
        if acc is None:
            task_videos_dir = videos_dir / f"{task_group}_{task_id}"
        else:
            task_videos_dir = videos_dir / f"{task_group}_{task_id}_rank{acc.local_process_index}"
        task_videos_dir.mkdir(parents=True, exist_ok=True)

    # Call the existing eval_one (assumed to return a TaskMetrics-like dict)
    metrics, task_result = eval_one(
        env,
        policy=policy,
        n_episodes=n_episodes,
        cfg=cfg,
        max_episodes_rendered=max_episodes_rendered,
        videos_dir=task_videos_dir,
        return_episode_data=return_episode_data,
        start_seed=start_seed,
    )
    # ensure we always provide a video_paths key to simplify accumulation
    if max_episodes_rendered > 0:
        metrics.setdefault("video_paths", [])
    return task_group, task_id, metrics, task_result


# compute aggregated metrics helper (robust to lists/scalars)
def _agg_from_list(xs):
    if not xs:
        return float("nan")
    arr = np.array(list(xs), dtype=float)
    return float(np.nanmean(arr))


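# --- Editor's note: illustrative sketch, not part of the released eval.py. Behaviour of
# the _agg_from_list helper defined above.
import math

assert math.isnan(_agg_from_list([]))                      # empty input -> nan
assert _agg_from_list([1.0, 2.0, float("nan")]) == 1.5     # nan entries are ignored by nanmean
assert _agg_from_list([True, False, True, True]) == 0.75   # booleans are cast to float

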
def eval_policy_all(
    envs: dict[str, dict[int, gym.vector.VectorEnv]],
    policy,
    n_episodes: int,
    cfg: TrainPipelineConfig,
    *,
    max_episodes_rendered: int = 0,
    videos_dir: Path | None = None,
    return_episode_data: bool = False,
    start_seed: int | None = None,
    max_parallel_tasks: int = 1,
) -> dict:
    """
    Evaluate a nested `envs` dict: {task_group: {task_id: vec_env}}.
    This implementation flattens tasks, runs them sequentially or via ThreadPoolExecutor,
    accumulates per-group and overall statistics, and returns the same aggregate metrics
    schema as the single-env evaluator (avg_sum_reward / avg_max_reward / pc_success / timings)
    plus per-task infos.
    """
    start_t = time.time()

    # Flatten envs into list of (task_group, task_id, env)
    tasks = [(tg, tid, vec) for tg, group in envs.items() for tid, vec in group.items()]

    # accumulators: track metrics at both per-group level and across all groups
    group_acc: dict[str, dict[str, list]] = defaultdict(lambda: {k: [] for k in ACC_KEYS})
    overall: dict[str, list] = {k: [] for k in ACC_KEYS}
    per_task_infos: list[dict] = []

    # small inline helper to accumulate one task's metrics into accumulators
    def _accumulate_to(group: str, metrics: dict):
        # metrics is expected to contain 'sum_rewards', 'max_rewards', 'successes', and optionally
        # 'video_paths'. eval_one returns per-episode lists, but accept scalars as well to be robust.
        def _append(key, value):
            if value is None:
                return
            if isinstance(value, list):
                group_acc[group][key].extend(value)
                overall[key].extend(value)
            else:
                group_acc[group][key].append(value)
                overall[key].append(value)

        _append("sum_rewards", metrics.get("sum_rewards"))
        _append("max_rewards", metrics.get("max_rewards"))
        _append("successes", metrics.get("successes"))
        # video_paths is list-like
        paths = metrics.get("video_paths", [])
        if paths:
            group_acc[group]["video_paths"].extend(paths)
            overall["video_paths"].extend(paths)

    # Choose runner (sequential vs threaded)
    task_runner = partial(
        run_one,
        policy=policy,
        n_episodes=n_episodes,
        cfg=cfg,
        max_episodes_rendered=max_episodes_rendered,
        videos_dir=videos_dir,
        return_episode_data=return_episode_data,
        start_seed=start_seed,
    )

    task_results = []
    if max_parallel_tasks <= 1:
        # sequential path (single accumulator path on the main thread)
        # NOTE: keeping a single-threaded accumulator avoids concurrent list appends or locks
        for task_group, task_id, env in tasks:
            tg, tid, metrics, tres = task_runner(task_group, task_id, env)
            task_results.append(tres)
            _accumulate_to(tg, metrics)
            per_task_infos.append({"task_group": tg, "task_id": tid, "metrics": metrics})
    else:
        # threaded path: submit all tasks, consume completions on main thread and accumulate there
        with cf.ThreadPoolExecutor(max_workers=max_parallel_tasks) as executor:
            fut2meta = {}
            for task_group, task_id, env in tasks:
                fut = executor.submit(task_runner, task_group, task_id, env)
                fut2meta[fut] = (task_group, task_id)
            for fut in cf.as_completed(fut2meta):
                tg, tid, metrics, tres = fut.result()
                task_results.append(tres)
                _accumulate_to(tg, metrics)
                per_task_infos.append({"task_group": tg, "task_id": tid, "metrics": metrics})

    if cfg.eval.recording_root is not None:
        acc = get_proc_accelerator()
        acc_rank = acc.local_process_index if acc else 0
        recording_dir = Path(cfg.eval.recording_root) / f"rank{acc_rank}"
        logging.info(f"Consolidating Libero dataset to {recording_dir}...")
        consolidate_task_result(
            aggregate_task_results(task_results),
            output_dir=recording_dir,
            allow_overwrite=True,
        )

    # compute per-group aggregates
    groups_aggregated = {}
    for group, acc in group_acc.items():
        groups_aggregated[group] = {
            "avg_sum_reward": _agg_from_list(acc["sum_rewards"]),
            "avg_max_reward": _agg_from_list(acc["max_rewards"]),
            "pc_success": _agg_from_list(acc["successes"]) * 100 if acc["successes"] else float("nan"),
            "n_episodes": len(acc["sum_rewards"]),
            "video_paths": list(acc["video_paths"]),
        }

    # overall aggregates
    overall_agg = {
        "avg_sum_reward": _agg_from_list(overall["sum_rewards"]),
        "avg_max_reward": _agg_from_list(overall["max_rewards"]),
        "pc_success": _agg_from_list(overall["successes"]) * 100 if overall["successes"] else float("nan"),
        "n_episodes": len(overall["sum_rewards"]),
        "eval_s": time.time() - start_t,
        "eval_ep_s": (time.time() - start_t) / max(1, len(overall["sum_rewards"])),
        "video_paths": list(overall["video_paths"]),
    }

    return {
        "per_task": per_task_infos,
        "per_group": groups_aggregated,
        "overall": overall_agg,
    }


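# --- Editor's note: illustrative sketch, not part of the released eval.py. The nested
# mapping eval_policy_all() expects and the shape of its result. In eval_main this mapping
# comes from make_envs(); the group/task names and the env factory below are hypothetical.
envs = {
    "libero_spatial": {0: make_task_vec_env(0), 1: make_task_vec_env(1)},  # hypothetical helper
    "libero_object": {0: make_task_vec_env(0)},
}
result = eval_policy_all(envs, policy, n_episodes=8, cfg=cfg, max_parallel_tasks=2)
print(result["overall"]["pc_success"])                      # success percentage across all groups
print(result["per_group"]["libero_spatial"]["n_episodes"])  # episodes accumulated for one group

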
def consolidate_eval_info(eval_infos: list[dict]) -> dict:
    n_gpu_procs = len(eval_infos)
    per_tasks = [per_task for einfo in eval_infos for per_task in einfo["per_task"]]
    per_tasks.sort(key=lambda x: (x["task_group"], x["task_id"]))

    per_groups = {}
    for group in {t["task_group"] for t in per_tasks}:
        group_tasks = [t for t in per_tasks if t["task_group"] == group]
        per_groups[group] = {
            "avg_sum_reward": _agg_from_list(r for t in group_tasks for r in t["metrics"]["sum_rewards"]),
            "avg_max_reward": _agg_from_list(r for t in group_tasks for r in t["metrics"]["max_rewards"]),
            "pc_success": _agg_from_list(s for t in group_tasks for s in t["metrics"]["successes"]) * 100,
            "n_episodes": sum(1 for t in group_tasks for _ in t["metrics"]["successes"]),
            "video_paths": [p for t in group_tasks for p in t["metrics"].get("video_paths", [])],
        }

    total_time = sum(einfo["overall"]["eval_s"] for einfo in eval_infos if "overall" in einfo)
    n_episodes = sum(1 for t in per_tasks for _ in t["metrics"]["successes"])
    overall = {
        "avg_sum_reward": _agg_from_list(r for t in per_tasks for r in t["metrics"]["sum_rewards"]),
        "avg_max_reward": _agg_from_list(r for t in per_tasks for r in t["metrics"]["max_rewards"]),
        "pc_success": _agg_from_list(s for t in per_tasks for s in t["metrics"]["successes"]) * 100,
        "n_episodes": n_episodes,
        "video_paths": [p for t in per_tasks for p in t["metrics"].get("video_paths", [])],
        "eval_per_gpu_s": total_time / n_gpu_procs,
        "eval_ep_s": total_time / n_episodes,
    }
    return {
        "per_task": per_tasks,
        "per_group": per_groups,
        "overall": overall,
    }


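# --- Editor's note: illustrative sketch, not part of the released eval.py. In eval_main,
# each accelerate process produces one eval_policy_all() result; gather_object collects
# them and consolidate_eval_info() merges them on the main process. `local_eval_info` is a
# hypothetical name for the per-process result.
rank_infos = gather_object([local_eval_info])   # one dict per process after gathering
merged = consolidate_eval_info(rank_infos)
print(merged["overall"]["pc_success"], merged["overall"]["eval_per_gpu_s"])

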
def main():
    eval_main()


if __name__ == "__main__":
    if not is_launched_with_accelerate():
        raise Exception(
            "This script should be launched with accelerate. Please use `accelerate launch` to run this script."
        )
    main()