opentau 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,37 +12,268 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- """Module for patching transformers metadata lookup.
+ """Module for patching transformers

- This module monkey patches `importlib.metadata.distribution` to redirect
- 'transformers' lookups to 'opentau-transformers'. This ensures that the custom
- transformers fork is correctly recognized by libraries checking for transformers
- installation.
+ Most patches come from the branch fix/lerobot-openpi
  """

- import importlib.metadata
+ from typing import Optional, Tuple

- # Keep a reference to the original distribution function
- _orig_distribution = importlib.metadata.distribution
+ import torch
+ from torch import nn
+ from transformers.models.gemma import modeling_gemma
+ from transformers.models.gemma.configuration_gemma import GemmaConfig
+ from transformers.models.paligemma.modeling_paligemma import PaliGemmaModel

+ # Monkey patch __init__ of GemmaConfig to fix or modify its behavior as needed.

- def distribution(distribution_name):
-     """Monkey patch to redirect 'transformers' metadata lookups to 'opentau-transformers'.
+ _original_gemma_config_init = GemmaConfig.__init__

-     This function intercepts calls to `importlib.metadata.distribution`. If the
-     requested distribution is "transformers", it redirects the lookup to
-     "opentau-transformers". Otherwise, it delegates to the original implementation.
+
+ def patched_gemma_config_init(
+     self, *args, use_adarms: bool = False, adarms_cond_dim: Optional[int] = None, **kwargs
+ ):
+     """Initializes the GemmaConfig with added ADARMS support.
+
+     Args:
+         self: The GemmaConfig instance.
+         *args: Variable length argument list.
+         use_adarms: Whether to use Adaptive RMS normalization.
+         adarms_cond_dim: The dimension of the conditioning vector for ADARMS.
+         **kwargs: Arbitrary keyword arguments.
+     """
+     # Call the original init with all other arguments
+     _original_gemma_config_init(self, *args, **kwargs)
+
+     # Initialize custom attributes
+     self.use_adarms = use_adarms
+     self.adarms_cond_dim = adarms_cond_dim
+
+     # Set default for adarms_cond_dim if use_adarms is True
+     if self.use_adarms and self.adarms_cond_dim is None:
+         # hidden_size is set by _original_gemma_config_init
+         self.adarms_cond_dim = self.hidden_size
+
+
+ GemmaConfig.__init__ = patched_gemma_config_init
+
+
+ # --- Modeling Patches ---
+
+
+ def _gated_residual(x, y, gate):
+     """
+     Applies gated residual connection with optional gate parameter.
+
+     Args:
+         x: Input tensor (residual)
+         y: Output tensor to be added
+         gate: Optional gate tensor to modulate the addition
+
+     Returns:
+         x + y if gate is None, otherwise x + y * gate
+     """
+     if x is None and y is None:
+         return None
+     if x is None or y is None:
+         return x if x is not None else y
+     if gate is None:
+         return x + y
+     return x + y * gate
+
+
+ modeling_gemma._gated_residual = _gated_residual
+
+
+ class PatchedGemmaRMSNorm(nn.Module):
+     """RMS normalization with optional adaptive support (ADARMS)."""
+
+     def __init__(self, dim: int, eps: float = 1e-6, cond_dim: Optional[int] = None):
+         """Initializes the PatchedGemmaRMSNorm.
+
+         Args:
+             dim: The dimension of the input tensor.
+             eps: The epsilon value for numerical stability.
+             cond_dim: The dimension of the conditioning vector (if using ADARMS).
+         """
+         super().__init__()
+         self.eps = eps
+         self.dim = dim
+         self.cond_dim = cond_dim
+
+         # Dense layer for adaptive normalization (if cond_dim is provided)
+         if cond_dim is not None:
+             self.dense = nn.Linear(cond_dim, dim * 3, bias=True)
+             # Initialize with zeros (matches source implementation)
+             nn.init.zeros_(self.dense.weight)
+         else:
+             self.weight = nn.Parameter(torch.zeros(dim))
+             self.dense = None
+
+     def _norm(self, x: torch.Tensor) -> torch.Tensor:
+         """Applies RMS normalization.
+
+         Args:
+             x: The input tensor.
+
+         Returns:
+             The normalized tensor.
+         """
+         # Compute variance in float32 (like the source implementation)
+         var = torch.mean(torch.square(x.float()), dim=-1, keepdim=True)
+         # Compute normalization in float32
+         normed_inputs = x * torch.rsqrt(var + self.eps)
+         return normed_inputs
+
+     def forward(
+         self, x: torch.Tensor, cond: Optional[torch.Tensor] = None
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+         """Forward pass of the normalization layer.
+
+         Args:
+             x: The input tensor.
+             cond: The conditioning tensor for adaptive normalization.
+
+         Returns:
+             A tuple containing the normalized tensor and the gate tensor (if applicable).
+             If cond is None, the gate tensor will be None.
+
+         Raises:
+             ValueError: If cond dimension does not match the configured cond_dim.
+         """
+         dtype = x.dtype  # original dtype, could be half-precision
+         normed_inputs = self._norm(x)
+
+         if cond is None or self.dense is None:
+             # regular RMSNorm
+             # scale by learned parameter in float32 (matches source implementation)
+             normed_inputs = normed_inputs * (1.0 + self.weight.float())
+             return normed_inputs.to(dtype), None  # return in original dtype with None gate
+
+         # adaptive RMSNorm (if cond is provided and dense layer exists)
+         if cond.shape[-1] != self.cond_dim:
+             raise ValueError(f"Expected cond dimension {self.cond_dim}, got {cond.shape[-1]}")
+
+         modulation = self.dense(cond)
+         # Reshape modulation to broadcast properly: [batch, 1, features] for [batch, seq, features]
+         if len(x.shape) == 3:  # [batch, seq, features]
+             modulation = modulation.unsqueeze(1)
+
+         scale, shift, gate = torch.chunk(modulation, 3, dim=-1)
+
+         normed_inputs = normed_inputs * (1 + scale.to(torch.float32)) + shift.to(torch.float32)
+
+         return normed_inputs.to(dtype), gate.to(dtype)
+
+     def extra_repr(self) -> str:
+         """Returns the extra representation of the module."""
+         if hasattr(self, "weight") and self.weight is not None:
+             repr_str = f"{tuple(self.weight.shape)}, eps={self.eps}"
+         else:
+             repr_str = f"dim={self.dim}, eps={self.eps}"
+         if self.dense is not None:
+             repr_str += f", adaptive=True, cond_dim={self.cond_dim}"
+         return repr_str
+
+
+ # Apply patches
+ modeling_gemma.GemmaRMSNorm = PatchedGemmaRMSNorm
+
+
+ def patched_gemma_decoder_layer_init(self, config: GemmaConfig, layer_idx: int):
+     """Initializes a GemmaDecoderLayer with potential ADARMS support.
+
+     Args:
+         self: The GemmaDecoderLayer instance.
+         config: The configuration object.
+         layer_idx: The index of the layer.
+     """
+     modeling_gemma.GradientCheckpointingLayer.__init__(self)
+     self.hidden_size = config.hidden_size
+
+     self.self_attn = modeling_gemma.GemmaAttention(config=config, layer_idx=layer_idx)
+
+     self.mlp = modeling_gemma.GemmaMLP(config)
+
+     cond_dim = getattr(config, "adarms_cond_dim", None) if getattr(config, "use_adarms", False) else None
+     self.input_layernorm = modeling_gemma.GemmaRMSNorm(
+         config.hidden_size, eps=config.rms_norm_eps, cond_dim=cond_dim
+     )
+     self.post_attention_layernorm = modeling_gemma.GemmaRMSNorm(
+         config.hidden_size, eps=config.rms_norm_eps, cond_dim=cond_dim
+     )
+
+
+ modeling_gemma.GemmaDecoderLayer.__init__ = patched_gemma_decoder_layer_init
+
+
+ def patched_gemma_model_init(self, config: GemmaConfig):
+     """Initializes the GemmaModel with potential ADARMS support.
+
+     Args:
+         self: The GemmaModel instance.
+         config: The configuration object.
+     """
+     modeling_gemma.GemmaPreTrainedModel.__init__(self, config)
+     self.padding_idx = config.pad_token_id
+     self.vocab_size = config.vocab_size
+
+     self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+     self.layers = nn.ModuleList(
+         [modeling_gemma.GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+     )
+
+     cond_dim = getattr(config, "adarms_cond_dim", None) if getattr(config, "use_adarms", False) else None
+     self.norm = modeling_gemma.GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps, cond_dim=cond_dim)
+     self.rotary_emb = modeling_gemma.GemmaRotaryEmbedding(config=config)
+     self.gradient_checkpointing = False
+
+     # Initialize weights and apply final processing
+     self.post_init()
+
+
+ modeling_gemma.GemmaModel.__init__ = patched_gemma_model_init
+
+
+ def patched_gemma_pretrained_model_init_weights(self, module: nn.Module):
+     """Initializes the weights of the GemmaPreTrainedModel.
+
+     Args:
+         self: The GemmaPreTrainedModel instance.
+         module: The module to initialize.
+     """
+     std = self.config.initializer_range
+     if isinstance(module, nn.Linear):
+         module.weight.data.normal_(mean=0.0, std=std)
+         if module.bias is not None:
+             module.bias.data.zero_()
+     elif isinstance(module, nn.Embedding):
+         module.weight.data.normal_(mean=0.0, std=std)
+         if module.padding_idx is not None:
+             module.weight.data[module.padding_idx].zero_()
+     elif isinstance(module, modeling_gemma.GemmaRMSNorm):
+         if hasattr(module, "weight"):
+             module.weight.data.fill_(1.0)
+
+
+ modeling_gemma.GemmaPreTrainedModel._init_weights = patched_gemma_pretrained_model_init_weights
+
+
+ def patched_paligemma_model_get_image_features(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
+     """Obtains image last hidden states from the vision tower and apply multimodal projection.

      Args:
-         distribution_name: The name of the distribution to retrieve metadata for.
+         self: The PaliGemmaModel instance.
+         pixel_values: The tensors corresponding to the input images.
+             Shape: (batch_size, channels, height, width).

      Returns:
-         The distribution metadata object.
+         Image feature tensor of shape (num_images, image_length, embed_dim).
      """
-     if distribution_name == "transformers":
-         distribution_name = "opentau-transformers"
-     return _orig_distribution(distribution_name)
+     image_outputs = self.vision_tower(pixel_values)
+     selected_image_feature = image_outputs.last_hidden_state
+     image_features = self.multi_modal_projector(selected_image_feature)
+     return image_features


- # Apply the patch
- importlib.metadata.distribution = distribution
+ PaliGemmaModel.get_image_features = patched_paligemma_model_get_image_features
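
The rewritten `opentau/utils/transformers_patch.py` applies everything above as import-time side effects: the rebinding of `GemmaConfig.__init__`, `GemmaRMSNorm`, the decoder and model `__init__` methods, `_init_weights`, and `PaliGemmaModel.get_image_features` all run when the module is imported. The snippet below is not part of the package; it is a minimal sketch of exercising the added ADARMS path, assuming `torch` and `transformers==4.53.3` are installed, with illustrative sizes only.

```python
import torch
from transformers.models.gemma import modeling_gemma
from transformers.models.gemma.configuration_gemma import GemmaConfig

import opentau.utils.transformers_patch  # noqa: F401  (importing applies the monkey patches)

# The patched GemmaConfig accepts the new ADARMS keywords;
# adarms_cond_dim falls back to hidden_size when left unset.
config = GemmaConfig(hidden_size=64, use_adarms=True)
assert config.adarms_cond_dim == 64

# modeling_gemma.GemmaRMSNorm is rebound to the adaptive implementation.
norm = modeling_gemma.GemmaRMSNorm(dim=64, eps=1e-6, cond_dim=64)

x = torch.randn(2, 10, 64)    # [batch, seq, features]
cond = torch.randn(2, 64)     # one conditioning vector per batch element
normed, gate = norm(x, cond)  # adaptive path returns (normalized tensor, gate)
print(normed.shape, gate.shape)  # torch.Size([2, 10, 64]) torch.Size([2, 1, 64])
```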
@@ -1,8 +1,8 @@
  Metadata-Version: 2.4
  Name: opentau
- Version: 0.1.1
+ Version: 0.2.0
  Summary: OpenTau: Tensor's VLA Training Infrastructure for Real-World Robotics in Pytorch
- Author-email: Shuheng Liu <wish1104@icloud.com>, William Yue <williamyue37@gmail.com>, Akshay Shah <akshayhitendrashah@gmail.com>, Xingrui Gu <xingrui_gu@berkeley.edu>
+ Author-email: Shuheng Liu <wish1104@icloud.com>, William Yue <williamyue37@gmail.com>, Akshay Shah <akshayhitendrashah@gmail.com>
  License: Apache-2.0
  Project-URL: homepage, https://github.com/TensorAuto/OpenTau
  Project-URL: issues, https://github.com/TensorAuto/OpenTau/issues
@@ -41,9 +41,9 @@ Requires-Dist: pynput>=1.7.7
  Requires-Dist: pyzmq>=26.2.1
  Requires-Dist: rerun-sdk>=0.21.0
  Requires-Dist: termcolor>=2.4.0
- Requires-Dist: torch<2.8.0,>=2.7.1
+ Requires-Dist: torch>=2.7.1
  Requires-Dist: torchcodec<0.5.0,>=0.4.0; sys_platform != "win32" and (sys_platform != "linux" or (platform_machine != "aarch64" and platform_machine != "arm64" and platform_machine != "armv7l")) and (sys_platform != "darwin" or platform_machine != "x86_64")
- Requires-Dist: torchvision<0.23.0,>=0.22.1
+ Requires-Dist: torchvision>=0.22.1
  Requires-Dist: wandb>=0.16.3
  Requires-Dist: zarr>=2.17.0
  Requires-Dist: scikit-learn>=1.7.1
@@ -52,7 +52,7 @@ Requires-Dist: onnxruntime>=1.22.1; sys_platform == "darwin" or platform_machine
  Requires-Dist: onnxruntime-gpu>=1.22.0; (sys_platform == "linux" and platform_machine == "x86_64") or (sys_platform == "win32" and (platform_machine == "AMD64" or platform_machine == "x86_64"))
  Requires-Dist: onnxscript>=0.3.1
  Requires-Dist: onnx-ir>=0.1.4
- Requires-Dist: opentau-transformers==4.53.3
+ Requires-Dist: transformers==4.53.3
  Requires-Dist: scipy>=1.15.2
  Requires-Dist: pytest>=8.1.0
  Requires-Dist: pytest-cov>=5.0.0
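
The switch from `opentau-transformers==4.53.3` to the stock `transformers==4.53.3` distribution is what makes the old `importlib.metadata.distribution` redirect (removed in the module diff above) unnecessary. As an illustrative sketch, not part of the package, and assuming opentau 0.2.0 and its dependencies are installed, the change can be observed from the metadata directly:

```python
from importlib.metadata import distribution, requires

# The stock `transformers` distribution now resolves directly,
# with no redirect to `opentau-transformers`.
dist = distribution("transformers")
print(dist.metadata["Name"], dist.version)  # expected: transformers 4.53.3

# opentau's own requirement list names transformers explicitly.
print([req for req in requires("opentau") if req.startswith("transformers")])
```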
@@ -62,6 +62,10 @@ Requires-Dist: scikit-image>=0.23.2
  Requires-Dist: pandas>=2.2.2
  Requires-Dist: accelerate>=1.4.0
  Requires-Dist: deepspeed>=0.17.1
+ Requires-Dist: gymnasium[other]>=0.29
+ Requires-Dist: grpcio>=1.60.0
+ Requires-Dist: grpcio-tools>=1.60.0
+ Requires-Dist: protobuf>=4.25.0
  Provides-Extra: dev
  Requires-Dist: pre-commit>=3.7.0; extra == "dev"
  Requires-Dist: debugpy>=1.8.1; extra == "dev"
@@ -93,10 +97,11 @@ Requires-Dist: libero; extra == "libero"
  Requires-Dist: numpy<2; extra == "libero"
  Requires-Dist: gym<0.27,>=0.25; extra == "libero"
  Requires-Dist: pyopengl-accelerate==3.1.7; sys_platform == "linux" and extra == "libero"
- Requires-Dist: gymnasium[other]>=0.29; extra == "libero"
  Requires-Dist: mujoco>=3.1.6; sys_platform == "linux" and extra == "libero"
  Requires-Dist: pyopengl==3.1.7; sys_platform == "linux" and extra == "libero"
  Requires-Dist: numpy==1.26.4; sys_platform == "linux" and extra == "libero"
+ Provides-Extra: urdf
+ Requires-Dist: rerun-sdk>=0.28.2; extra == "urdf"
  Dynamic: license-file

  <p align="center">
@@ -105,6 +110,19 @@ Dynamic: license-file
  </a>
  </p>

+ <p align="center">
+ <a href="https://github.com/TensorAuto/OpenTau/actions/workflows/cpu_test.yml?query=branch%3Amain"><img src="https://github.com/TensorAuto/OpenTau/actions/workflows/cpu_test.yml/badge.svg?branch=main" alt="CPU Tests"></a>
+ <a href="https://github.com/TensorAuto/OpenTau/actions/workflows/gpu_test.yml"><img src="https://github.com/TensorAuto/OpenTau/actions/workflows/gpu_test.yml/badge.svg" alt="Nightly GPU Tests"></a>
+ <a href="https://github.com/TensorAuto/OpenTau/actions/workflows/regression_test.yml"><img src="https://github.com/TensorAuto/OpenTau/actions/workflows/regression_test.yml/badge.svg" alt="Nightly Regression Tests"></a>
+ <a href="https://opentau.readthedocs.io/en/latest/?badge=latest"><img src="https://readthedocs.org/projects/opentau/badge/?version=latest" alt="Documentation"></a>
+ <a href="https://pypi.org/project/opentau/"><img src="https://img.shields.io/pypi/v/opentau" alt="Version"></a>
+ <a href="https://pypi.org/project/opentau/"><img src="https://img.shields.io/pypi/status/opentau" alt="Status"></a>
+ <a href="https://www.python.org/downloads/"><img src="https://img.shields.io/pypi/pyversions/opentau" alt="Python versions"></a>
+ <a href="https://github.com/TensorAuto/OpenTau/blob/main/LICENSE"><img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" alt="License"></a>
+ <a href="https://hub.docker.com/r/tensorauto/opentau"><img src="https://img.shields.io/docker/v/tensorauto/opentau?label=Docker" alt="Docker"></a>
+ <a href="https://github.com/pre-commit/pre-commit"><img src="https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit" alt="pre-commit"></a>
+ </p>
+
  # OpenTau - Train VLA models with state-of-the-art techniques by Tensor

  At Tensor, we are pushing the frontier of large foundation models for physical AI. In robot learning, a vision-language-action (VLA) model is a multimodal foundation model that integrates vision, language, and action. Today, VLA represents the leading approach for embodied AI, spanning autonomous driving, robot manipulation, and navigation.
@@ -122,17 +140,19 @@ Whether you use the official OpenPi codebase or LeRobot’s reimplementation, yo

  OpenTau ($\tau$) is a tool developed by *[Tensor][1]* to bridge this gap, and we also use it internally to train our proprietary in-house models. Our goal is to help you train VLAs on any dataset while fully leveraging state-of-the-art techniques. We plan to continuously upgrade this repository to keep pace with the state of the art in the robotics community.

- | Features | OpenPi | LeRobot | **OpenTau** |
- | -------------------------------------------------------: | :---------------------: | :------------------------------: | :---------: |
- | Co-training with Heterogeneous Datasets | | | |
- | Discrete Actions Training in $\pi_{0.5}$ | | | |
- | Knowledge Insulation (KI) between VLM and Action Decoder | | | |
- | Dropout Layers in PaliGemma | ✅ (Jax) <br>❌ (PyTorch) | | |
- | Multi-Node and Multi-GPU Training | | | |
- | Fully Functioning $\pi_{0.5}$ Checkpoint | | ❌ <br> (Missing Text Embeddings) | |
- | Simulation Environments for Evaluating Models | | | ✅ |
- | $\pi^{*}_{0.6}$ style Reinforcement Learning Pipeline | || |
- | Framework | Jax / PyTorch | PyTorch | PyTorch |
+ | Features | OpenPi | LeRobot | **OpenTau** |
+ |---------------------------------------------------------:|:-----------------------:|:--------------------------------:|:-----------:|
+ | Co-training with Heterogeneous Datasets | | | |
+ | Discrete Actions Training in $\pi_{0.5}$ | | | |
+ | Knowledge Insulation (KI) between VLM and Action Decoder | | | |
+ | Dropout Layers in PaliGemma | ✅ (Jax) <br>❌ (PyTorch) | | |
+ | Multi-Node and Multi-GPU Training | | | |
+ | Fully Functioning $\pi_{0.5}$ Checkpoint | | ❌ <br> (Missing Text Embeddings) | |
+ | Visualize dataset with URDF models | | | |
+ | Simulation Environments for Evaluating Models | || |
+ | Create Validation Splits During Training || ❌ | ✅ |
+ | $\pi^{*}_{0.6}$ style Reinforcement Learning Pipeline | ❌ | ❌ | ✅ |
+ | Framework | Jax / PyTorch | PyTorch | PyTorch |

  ## Quick Start
  If you are familiar with LeRobot, getting started with OpenTau is very easy.
@@ -2,26 +2,27 @@ opentau/__init__.py,sha256=fIqOYsZsF-bitUI-4taSNke_D1YJYCehGseZNe29GG0,6756
  opentau/__version__.py,sha256=junxoss59Jz_hmg3YnzhpVk_Q5Fo6uha23P1ET81N1c,889
  opentau/constants.py,sha256=-_CbJujCp6hbBjJHgYMguCTcSAkVkmdpM4wHqZp7vRQ,2020
  opentau/configs/__init__.py,sha256=hC-KkeCfq1mtMw9WjPCZfOTxrzQWW7hAa1w8BRC_Bqw,784
- opentau/configs/default.py,sha256=MEoZyzK8olVXVHDy3FHQKK43-ULq2UQv3almymbmdCI,14387
+ opentau/configs/default.py,sha256=T5QS84VNcoFaek8Ry7BsDAvsNsV5RLu2L2y2VvO3THo,15273
+ opentau/configs/deployment.py,sha256=kem4WCkAKV6H_JUFEVyzw4h-EhQTPa6-9pY65Wr9wDQ,3106
  opentau/configs/libero.py,sha256=CrRfiCBYOw7hVqv6orH_ahNyQudj6iyqHtZM9YpdvzE,4688
  opentau/configs/parser.py,sha256=Pb7sw6yx38F31Aqw1J7wK6BRzfBA7DutywShyP_t9bY,14890
  opentau/configs/policies.py,sha256=06oUJx0B4V6krRwyjH1goTYM3RIpRozg4SSwcVJurG4,11667
  opentau/configs/reward.py,sha256=t7S8_RpEy31fAP4v_ygB-ETvaUR6OyrXmS1JNSz3cOk,1537
- opentau/configs/train.py,sha256=a-c-s2zpCUkK8n0DqBPGGaJ87UcVNF02RcLa1pH843Y,18049
+ opentau/configs/train.py,sha256=nn2QX151wI-R-qbghyMkSv1miSPvNSUtsX2Swu-HVGU,18396
  opentau/configs/types.py,sha256=DvKasR2v0ecSmozL0YD4S-64OeuDYhVBhtspxUDV5u0,2453
  opentau/datasets/__init__.py,sha256=oLfV9vfFOg7o2XIRFiN5zOf529FafIkPwqFG7iUX4gc,4248
  opentau/datasets/backward_compatibility.py,sha256=ENVQk9QDPCip6NfAxNF6Vo4VvyCWb0acV-ZxcJBsB6o,3459
  opentau/datasets/compute_stats.py,sha256=N359TDuJicLKMtxxy0JVEcUtnTOB57gL5G8e9Dq0gMQ,13069
  opentau/datasets/dataset_mixture.py,sha256=8UWjY9oKn9jEMe-e9Dy6no1p_21H0kXKv8A10Ku_8_o,19850
- opentau/datasets/factory.py,sha256=NKWpbuNBve0PsmK1midj8g1IpQapeHn-VrxCOC3X4eI,10480
+ opentau/datasets/factory.py,sha256=KVN8XEjeIdfTMohyftghG3dsM50UPjv05lL3eS5aTI4,12116
  opentau/datasets/image_writer.py,sha256=JYCkImHFYpLuE88t16cYqXqQS7EHS7g6kLWXPCJmWgw,11072
- opentau/datasets/lerobot_dataset.py,sha256=rz_3BcXqpcIzYr0NEVmkfLf1dY7vcTdo6zuV1CZkIuI,84747
+ opentau/datasets/lerobot_dataset.py,sha256=f15Sy3jWOnuPiXiqB8pGdHqv3MOBZgToJyjcA7ry0JU,84778
  opentau/datasets/online_buffer.py,sha256=x14P8tBz25s-hRlE8loFJs5CAvh65RGWeogF271hiF0,19671
  opentau/datasets/sampler.py,sha256=5g-6prsWItVjqkt1J7mA9JPNQPhDSFx3r6rA4JphP9U,4012
  opentau/datasets/standard_data_format_mapping.py,sha256=wEKilksMUjJGeIhvyLuR9qhyhtiJMK1e1AzCkbyx-l4,9667
  opentau/datasets/transforms.py,sha256=pr_8vOEDUoWu7aOUdnI0_wgetsFuie3I2UYFrcStG1k,12976
  opentau/datasets/utils.py,sha256=bZ0Q8KPZMWe9fLdrqJqslgDxI9sa8uxqPQTxEyWwDKw,45062
- opentau/datasets/video_utils.py,sha256=NY20Et6SKWLdG4EjTNdXhpPqWEFON-UccIn_P2YukSQ,21810
+ opentau/datasets/video_utils.py,sha256=AUNUKr4IrDVetfYjzZj9Uq4GKrdHrzVYTcMoF1Jlggw,21968
  opentau/datasets/grounding/__init__.py,sha256=ojvdscCIjp5EQxptFAjPgvjKGZa_Xk9LLZ2wNUebWFw,3139
  opentau/datasets/grounding/base.py,sha256=FDAn2QPQHNBB7mzD7IQ2Bz882Dt1RPasBTgskXqKbP4,5773
  opentau/datasets/grounding/clevr.py,sha256=lNZ0hr5EhQKTh-eg35zujybcAo5p8JQEn9dW8DJhOjI,3983
@@ -59,9 +60,9 @@ opentau/policies/pi0/configuration_pi0.py,sha256=94EG2QlraDsPjD0zyuGwKPqToqV_ayP
  opentau/policies/pi0/modeling_pi0.py,sha256=rz1S7hDOVEv12sN0ECGupddKwQVXMqdvm7G_OooWZLA,37442
  opentau/policies/pi0/paligemma_with_expert.py,sha256=j9P6SL7MVP11MgyJqNnsrZAlOctWmqwDaJJAx4z9F84,20724
  opentau/policies/pi05/__init__.py,sha256=VcIjZwlRW1JChRHqAK9Vz4JAIEP40RrP-W-UdyR6xk4,821
- opentau/policies/pi05/configuration_pi05.py,sha256=ucgCC3BaIC6rcnotMYbElTq7ymPZh4xDGghxCseK33M,9307
- opentau/policies/pi05/modeling_pi05.py,sha256=sF4OPGQWQ0eJevGDRZDPUItBkBC5Wp6WdpwsN4uqS14,50639
- opentau/policies/pi05/paligemma_with_expert.py,sha256=nxBfUwBt6S4WwPDkn8LW3lHFcOib4CtoZzJEq4VKlck,23328
+ opentau/policies/pi05/configuration_pi05.py,sha256=GmENtmgvI5q3gQQlZnH6RalV5msU5gwtTK_imbNH6a8,9367
+ opentau/policies/pi05/modeling_pi05.py,sha256=sBmnP2cqaVHvCGGpg624LYbun52hZs63UCeadiE4dH4,63327
+ opentau/policies/pi05/paligemma_with_expert.py,sha256=jyYkcOVMxMNYJtmbR3qZPmEoinOY1myqnq1JKmledkc,23407
  opentau/policies/value/__init__.py,sha256=wUP5vdpsvfnREqt4enfwaakzJ-ynX9sLYN14t2zEtpA,772
  opentau/policies/value/configuration_value.py,sha256=ApjrNKHxvjNlSZ71-BPvanNAJh9GzAK9W0JCiz3mMHs,5951
  opentau/policies/value/modeling_value.py,sha256=21x2EVGFlJbLasJDGTs3b5YMrrChrkuGxrYP7wLjkCY,18594
@@ -78,14 +79,16 @@ opentau/scripts/fake_tensor_training.py,sha256=y4F3CFs2jjpIJcT1wKvsrgFEebU9QFzba
  opentau/scripts/get_advantage_and_percentiles.py,sha256=JdjlADYzdS1Jc_19H6lLYMRnPlWxeckRSUQqwqb0rC4,8993
  opentau/scripts/high_level_planner_inference.py,sha256=nbXr8Hp64YGeprMTpT8kvT_NgpBlI02CUlO6Mm2Js_E,3846
  opentau/scripts/inference.py,sha256=_lp9YjPzarAnjiA8k2jBlIKZxza6PEHw--UyaqLPdNo,2110
- opentau/scripts/launch.py,sha256=cHhE_LuPnmkSkm6F-5XEgwmPDmbhVnLTVWDZ0lqfjPo,2663
- opentau/scripts/libero_simulation_parallel.py,sha256=qMee6T0EwMoAT1J2u8X4w8rsbOJYwyqD3LRAPe2Ta1g,13105
- opentau/scripts/libero_simulation_sequential.py,sha256=xFSUQEuyai20QD-pYitp-UJPGE9zlaaIu4YSO0bhYKg,4775
+ opentau/scripts/launch.py,sha256=L_KlkcJpcOsSMGlBKSmtTUyzb7q8tH4FkmuHx8lEdDI,2845
  opentau/scripts/nav_high_level_planner_inference.py,sha256=z2WHw68NWi-fJUd5TV4CrJHzxo-L7e2UliGjfOlqifM,1878
- opentau/scripts/train.py,sha256=nkvsvna5yliphp7pwVyFY6yBwCA_kmffyohRO2wjiHU,16850
- opentau/scripts/visualize_dataset.py,sha256=_xGfAXQqhjGYMi__6L7qRH2xS5XQ2-GQRXjNw3KXMlY,10109
- opentau/scripts/visualize_dataset_html.py,sha256=gEX-E5fFqBhINthf7xLMICHySvw9e3Kcf1HPRnJIyug,17979
+ opentau/scripts/train.py,sha256=dp_366gKpFeIcv2tfDkuuFGCdPyP74lkENETZpwR5m4,20547
+ opentau/scripts/visualize_dataset.py,sha256=ZfB7Qbsl3RGqu8k7n6CK6iRbhuYhYSX167tta2b01NQ,13625
  opentau/scripts/zero_to_fp32.py,sha256=Rkl1ZczytKix9vGMg0EELzdJYFqUM1yB9p3xvSaK9k8,33272
+ opentau/scripts/grpc/__init__.py,sha256=wBiZyRqF1PCpZHgqwHjSZaaeFRHLOX4ZggCXbAzngOs,799
+ opentau/scripts/grpc/client.py,sha256=PbuAb14izNAItspdvppItpv6gaycBs2_mdcvrdtAXnQ,20181
+ opentau/scripts/grpc/robot_inference_pb2.py,sha256=Se7elLBIHeoi3UIBXrn02w8rZVOg-gpBcuirt23d8Tg,4125
+ opentau/scripts/grpc/robot_inference_pb2_grpc.py,sha256=LymJbsaei6C-pp9ffz2ETCVIhq4SRh--LdK8ne0-yug,7659
+ opentau/scripts/grpc/server.py,sha256=x6BA0F0uYIXeBw4mnOXaYb2-y8zEbgMXoiRdtNvyq1g,11146
  opentau/utils/__init__.py,sha256=hIUeGPpZHf2AVf0-5C2p0BOcY0cFHCTT5yHn-SpEPwY,856
  opentau/utils/accelerate_utils.py,sha256=vXnSGo1hXCUNof-oNKLMJ_SOMjpKhpZ1gx21ObSsopI,2630
  opentau/utils/benchmark.py,sha256=jVli6gdBRMXAqNM3AIi43a0N_O1CLQMbKXsPK_e2y3s,3063
@@ -99,11 +102,11 @@ opentau/utils/logging_utils.py,sha256=zd7ypmk7aqVposPhA7Kg-PYrstapY4MsuTklsTD4r4
  opentau/utils/monkey_patch.py,sha256=cVgZ1N-NNVnlRKPA1dwO9FM4IbxR0V_Hbil6p-6knhA,9558
  opentau/utils/random_utils.py,sha256=k3Ab3Y98LozGdsBzKoP8xSsFTcnaRqUzY34BsETCrrA,9102
  opentau/utils/train_utils.py,sha256=0d7yvk8wlP-75pwB55gr095b_b1sWG5nlqdVxyH6_o0,6796
- opentau/utils/transformers_patch.py,sha256=3r51KjUQIQQLpG3RQHgA7OP4kboHsH2y5upFBCMgiKI,1781
+ opentau/utils/transformers_patch.py,sha256=rPG2Yn7GQr2gCEykhW42uOoKP_jdAMx4p3q-IUcGYDI,10045
  opentau/utils/utils.py,sha256=DrMStfjBEkw_8WVhYMnCQJNBxMeozIJ8LBSpOtMQhFM,15760
- opentau-0.1.1.dist-info/licenses/LICENSE,sha256=tl3_NkxplsgU86xSvEWnDlE1UR_JsIvGo7t4hPtsIbE,27680
- opentau-0.1.1.dist-info/METADATA,sha256=CZpUmmtkXZ9K0dNMU3F4K_xwDMwa3mo2hFq16N23PnM,10951
- opentau-0.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- opentau-0.1.1.dist-info/entry_points.txt,sha256=CmA0PWn2JepmMtBY4RDfZ48KaMCYVYF81Xjk55Jd9HM,153
- opentau-0.1.1.dist-info/top_level.txt,sha256=7_yrS4x5KSeTRr2LICTCNOZmF-1_kSOFPKHvtJPL1Dw,8
- opentau-0.1.1.dist-info/RECORD,,
+ opentau-0.2.0.dist-info/licenses/LICENSE,sha256=tl3_NkxplsgU86xSvEWnDlE1UR_JsIvGo7t4hPtsIbE,27680
+ opentau-0.2.0.dist-info/METADATA,sha256=OUN-KCCeTA1cFdlzmwRKecxepcmjpie6-AbbIkoELhI,12992
+ opentau-0.2.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ opentau-0.2.0.dist-info/entry_points.txt,sha256=NGF_MWpSKri0lvjR9WGN4pBUap8B-z21f7XMluxc1M4,208
+ opentau-0.2.0.dist-info/top_level.txt,sha256=7_yrS4x5KSeTRr2LICTCNOZmF-1_kSOFPKHvtJPL1Dw,8
+ opentau-0.2.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.9.0)
+ Generator: setuptools (80.10.2)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,4 +1,5 @@
  [console_scripts]
+ opentau-dataset-viz = opentau.scripts.launch:visualize
  opentau-eval = opentau.scripts.launch:eval
  opentau-export = opentau.scripts.launch:export
  opentau-train = opentau.scripts.launch:train
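
The 0.2.0 wheel registers one additional console script, `opentau-dataset-viz`, which resolves to `opentau.scripts.launch:visualize`. As a minimal sketch, not part of the package, that lists the registered scripts without assuming anything about their CLI flags (requires Python 3.10+ for the `group=` selector and an installed opentau 0.2.0):

```python
from importlib.metadata import entry_points

# Enumerate installed console scripts and show the opentau ones,
# including the new `opentau-dataset-viz` entry added in 0.2.0.
for ep in entry_points(group="console_scripts"):
    if ep.name.startswith("opentau-"):
        print(f"{ep.name} -> {ep.value}")
```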