opentau 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108) hide show
  1. opentau/__init__.py +179 -0
  2. opentau/__version__.py +24 -0
  3. opentau/configs/__init__.py +19 -0
  4. opentau/configs/default.py +297 -0
  5. opentau/configs/libero.py +113 -0
  6. opentau/configs/parser.py +393 -0
  7. opentau/configs/policies.py +297 -0
  8. opentau/configs/reward.py +42 -0
  9. opentau/configs/train.py +370 -0
  10. opentau/configs/types.py +76 -0
  11. opentau/constants.py +52 -0
  12. opentau/datasets/__init__.py +84 -0
  13. opentau/datasets/backward_compatibility.py +78 -0
  14. opentau/datasets/compute_stats.py +333 -0
  15. opentau/datasets/dataset_mixture.py +460 -0
  16. opentau/datasets/factory.py +232 -0
  17. opentau/datasets/grounding/__init__.py +67 -0
  18. opentau/datasets/grounding/base.py +154 -0
  19. opentau/datasets/grounding/clevr.py +110 -0
  20. opentau/datasets/grounding/cocoqa.py +130 -0
  21. opentau/datasets/grounding/dummy.py +101 -0
  22. opentau/datasets/grounding/pixmo.py +177 -0
  23. opentau/datasets/grounding/vsr.py +141 -0
  24. opentau/datasets/image_writer.py +304 -0
  25. opentau/datasets/lerobot_dataset.py +1910 -0
  26. opentau/datasets/online_buffer.py +442 -0
  27. opentau/datasets/push_dataset_to_hub/utils.py +132 -0
  28. opentau/datasets/sampler.py +99 -0
  29. opentau/datasets/standard_data_format_mapping.py +278 -0
  30. opentau/datasets/transforms.py +330 -0
  31. opentau/datasets/utils.py +1243 -0
  32. opentau/datasets/v2/batch_convert_dataset_v1_to_v2.py +887 -0
  33. opentau/datasets/v2/convert_dataset_v1_to_v2.py +829 -0
  34. opentau/datasets/v21/_remove_language_instruction.py +109 -0
  35. opentau/datasets/v21/batch_convert_dataset_v20_to_v21.py +60 -0
  36. opentau/datasets/v21/convert_dataset_v20_to_v21.py +183 -0
  37. opentau/datasets/v21/convert_stats.py +150 -0
  38. opentau/datasets/video_utils.py +597 -0
  39. opentau/envs/__init__.py +18 -0
  40. opentau/envs/configs.py +178 -0
  41. opentau/envs/factory.py +99 -0
  42. opentau/envs/libero.py +439 -0
  43. opentau/envs/utils.py +204 -0
  44. opentau/optim/__init__.py +16 -0
  45. opentau/optim/factory.py +43 -0
  46. opentau/optim/optimizers.py +121 -0
  47. opentau/optim/schedulers.py +140 -0
  48. opentau/planner/__init__.py +82 -0
  49. opentau/planner/high_level_planner.py +366 -0
  50. opentau/planner/utils/memory.py +64 -0
  51. opentau/planner/utils/utils.py +65 -0
  52. opentau/policies/__init__.py +24 -0
  53. opentau/policies/factory.py +172 -0
  54. opentau/policies/normalize.py +315 -0
  55. opentau/policies/pi0/__init__.py +19 -0
  56. opentau/policies/pi0/configuration_pi0.py +250 -0
  57. opentau/policies/pi0/modeling_pi0.py +994 -0
  58. opentau/policies/pi0/paligemma_with_expert.py +516 -0
  59. opentau/policies/pi05/__init__.py +20 -0
  60. opentau/policies/pi05/configuration_pi05.py +231 -0
  61. opentau/policies/pi05/modeling_pi05.py +1257 -0
  62. opentau/policies/pi05/paligemma_with_expert.py +572 -0
  63. opentau/policies/pretrained.py +315 -0
  64. opentau/policies/utils.py +123 -0
  65. opentau/policies/value/__init__.py +18 -0
  66. opentau/policies/value/configuration_value.py +170 -0
  67. opentau/policies/value/modeling_value.py +512 -0
  68. opentau/policies/value/reward.py +87 -0
  69. opentau/policies/value/siglip_gemma.py +221 -0
  70. opentau/scripts/actions_mse_loss.py +89 -0
  71. opentau/scripts/bin_to_safetensors.py +116 -0
  72. opentau/scripts/compute_max_token_length.py +111 -0
  73. opentau/scripts/display_sys_info.py +90 -0
  74. opentau/scripts/download_libero_benchmarks.py +54 -0
  75. opentau/scripts/eval.py +877 -0
  76. opentau/scripts/export_to_onnx.py +180 -0
  77. opentau/scripts/fake_tensor_training.py +87 -0
  78. opentau/scripts/get_advantage_and_percentiles.py +220 -0
  79. opentau/scripts/high_level_planner_inference.py +114 -0
  80. opentau/scripts/inference.py +70 -0
  81. opentau/scripts/launch_train.py +63 -0
  82. opentau/scripts/libero_simulation_parallel.py +356 -0
  83. opentau/scripts/libero_simulation_sequential.py +122 -0
  84. opentau/scripts/nav_high_level_planner_inference.py +61 -0
  85. opentau/scripts/train.py +379 -0
  86. opentau/scripts/visualize_dataset.py +294 -0
  87. opentau/scripts/visualize_dataset_html.py +507 -0
  88. opentau/scripts/zero_to_fp32.py +760 -0
  89. opentau/utils/__init__.py +20 -0
  90. opentau/utils/accelerate_utils.py +79 -0
  91. opentau/utils/benchmark.py +98 -0
  92. opentau/utils/fake_tensor.py +81 -0
  93. opentau/utils/hub.py +209 -0
  94. opentau/utils/import_utils.py +79 -0
  95. opentau/utils/io_utils.py +137 -0
  96. opentau/utils/libero.py +214 -0
  97. opentau/utils/libero_dataset_recorder.py +460 -0
  98. opentau/utils/logging_utils.py +180 -0
  99. opentau/utils/monkey_patch.py +278 -0
  100. opentau/utils/random_utils.py +244 -0
  101. opentau/utils/train_utils.py +198 -0
  102. opentau/utils/utils.py +471 -0
  103. opentau-0.1.0.dist-info/METADATA +161 -0
  104. opentau-0.1.0.dist-info/RECORD +108 -0
  105. opentau-0.1.0.dist-info/WHEEL +5 -0
  106. opentau-0.1.0.dist-info/entry_points.txt +2 -0
  107. opentau-0.1.0.dist-info/licenses/LICENSE +508 -0
  108. opentau-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,278 @@
1
+ # Copyright 2026 Tensor Auto Inc. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Standard data format mapping for dataset feature names and loss types.
16
+
17
+ This module provides mappings between standard feature names used internally
18
+ by OpenTau and dataset-specific feature names used in various robot learning
19
+ and vision-language datasets. It also maps datasets to their appropriate loss
20
+ types for training.
21
+
22
+ The standard format uses canonical names like "camera0", "camera1", "state",
23
+ "actions", "prompt", and "response", while different datasets may use
24
+ various naming conventions (e.g., "observation.images.image",
25
+ "observation.state", "action", "task", etc.). These mappings enable the
26
+ codebase to work with multiple datasets without requiring dataset-specific
27
+ code paths.
28
+
29
+ Key Features:
30
+ - Feature name standardization: Maps dataset-specific feature names to
31
+ standard format names for consistent processing across datasets.
32
+ - Multi-camera support: Handles datasets with varying numbers of camera
33
+ views, mapping them to standard camera0, camera1, etc. names.
34
+ - Loss type specification: Maps datasets to appropriate loss functions
35
+ (MSE for continuous actions, CE for discrete classification tasks).
36
+
37
+ Constants:
38
+
39
+ DATA_FEATURES_NAME_MAPPING
40
+ Dictionary mapping dataset repository IDs to feature name dictionaries.
41
+ Each inner dictionary maps standard feature names (keys) to
42
+ dataset-specific feature names (values). Standard feature names include:
43
+
44
+ - "camera0", "camera1", ...: Camera/image observations
45
+ - "state": Robot state observations
46
+ - "actions": Action outputs
47
+ - "prompt": Task descriptions or prompts
48
+ - "response": Expected responses or labels
49
+
50
+ LOSS_TYPE_MAPPING
51
+ Dictionary mapping dataset repository IDs to loss type strings. Valid
52
+ values are:
53
+
54
+ - "MSE": Mean Squared Error (typically for continuous robotic actions)
55
+ - "CE": Cross Entropy (typically for discrete classification tasks
56
+ like VQA)
57
+
58
+ Example:
59
+ Access feature name mapping for a dataset:
60
+ >>> mapping = DATA_FEATURES_NAME_MAPPING["lerobot/aloha_mobile_cabinet"]
61
+ >>> mapping["camera0"] # Returns "observation.images.cam_right_wrist"
62
+ >>> mapping["actions"] # Returns "action"
63
+
64
+ Access loss type for a dataset:
65
+ >>> loss_type = LOSS_TYPE_MAPPING["lerobot/aloha_mobile_cabinet"]
66
+ >>> loss_type # Returns "MSE"
67
+ """
68
+
69
def _grounding_features() -> dict[str, str]:
    """Feature mapping shared by the VQA-style grounding datasets."""
    return {
        "camera0": "image",
        "state": "state",
        "actions": "actions",
        "prompt": "prompt",
        "response": "postfix",
    }


def _raw_libero_features() -> dict[str, str]:
    """Feature mapping shared by the raw (non-LeRobot-prefixed) LIBERO exports."""
    return {
        "camera0": "image",
        "camera1": "wrist_image",
        "state": "state",
        "actions": "actions",
        "prompt": "task",
        "response": "response",
    }


def _top_main_cv_features() -> dict[str, str]:
    """Feature mapping shared by the local three-camera (top/main/cv) setups."""
    return {
        "camera0": "observation.images.top",
        "camera1": "observation.images.main",
        "camera2": "observation.images.cv",
        "state": "observation.state",
        "actions": "action",
        "prompt": "task",
        "response": "response",
    }


def _with_local_camera_aliases(features: dict[str, str]) -> dict[str, str]:
    """Duplicate every ``cameraN`` entry under a ``local_cameraN`` alias.

    The aliases are inserted immediately after the camera entries so the key
    order stays camera*, local_camera*, then the remaining features.
    """
    cameras = {k: v for k, v in features.items() if k.startswith("camera")}
    others = {k: v for k, v in features.items() if not k.startswith("camera")}
    aliases = {f"local_{k}": v for k, v in cameras.items()}
    return {**cameras, **aliases, **others}


# Maps dataset repository IDs to {standard feature name -> dataset feature
# name}. Standard names are "camera0", "camera1", ... for image streams plus
# "state", "actions", "prompt" and "response".
DATA_FEATURES_NAME_MAPPING = {
    "ML-GOD/mt-button-press": {
        "camera0": "observation.image",
        "state": "observation.robot_state",
        "actions": "action",
        "prompt": "task",
        "response": "response",
    },
    "ML-GOD/libero_spatial_no_noops_1.0.0_lerobot": {
        "camera0": "observation.images.image",
        "camera1": "observation.images.wrist_image",
        "state": "observation.state",
        "actions": "action",
        "prompt": "task",
        "response": "response",
    },
    "ML-GOD/libero": _raw_libero_features(),
    "physical-intelligence/libero": _raw_libero_features(),
    "danaaubakirova/koch_test": {
        "camera0": "observation.images.laptop",
        "camera1": "observation.images.phone",
        "state": "observation.state",
        "actions": "action",
        "prompt": "task",
        "response": "response",
    },
    "lerobot/droid_100": {
        "camera0": "observation.images.exterior_image_1_left",
        "camera1": "observation.images.exterior_image_2_left",
        "camera2": "observation.images.wrist_image_left",
        "state": "observation.state",
        "actions": "action",
        "prompt": "task",
        "response": "response",
    },
    "lerobot/aloha_mobile_cabinet": {
        "camera0": "observation.images.cam_right_wrist",
        "camera1": "observation.images.cam_high",
        "camera2": "observation.images.cam_left_wrist",
        "state": "observation.state",
        "actions": "action",
        "prompt": "task",
        "response": "response",
    },
    "autox/agibot-sample": {
        "camera0": "observation.images.head_left_fisheye",
        "camera1": "observation.images.head_right_fisheye",
        "camera2": "observation.images.top_head",
        "camera3": "observation.images.hand_left",
        "camera4": "observation.images.hand_right",
        "camera5": "observation.images.head_center_fisheye",
        "camera6": "observation.images.back_left_fisheye",
        "camera7": "observation.images.back_right_fisheye",
        "camera8": "observation.images.cam_top_depth",
        "state": "observation.state",
        "actions": "action",
        "prompt": "task",
        "response": "response",
    },
    "bi-so100-block-manipulation": _with_local_camera_aliases(_top_main_cv_features()),
    "cube-on-cylinder": _top_main_cv_features(),
    "cylinder-on-cube": _top_main_cv_features(),
    "l-shape-on-cross-shape": _top_main_cv_features(),
    "lerobot/svla_so101_pickplace": _with_local_camera_aliases(
        {
            "camera0": "observation.images.up",
            "camera1": "observation.images.side",
            "state": "observation.state",
            "actions": "action",
            "prompt": "task",
            "response": "response",
        }
    ),
    "lerobot/svla_so100_pickplace": {
        "camera0": "observation.images.top",
        "camera1": "observation.images.wrist",
        "state": "observation.state",
        "actions": "action",
        "prompt": "task",
        "response": "response",
    },
    "lerobot/svla_so100_stacking": {
        "camera0": "observation.images.top",
        "camera1": "observation.images.wrist",
        "state": "observation.state",
        "actions": "action",
        "prompt": "task",
        "response": "response",
    },
    "pixmo": _grounding_features(),
    "dummy": _grounding_features(),
    "vsr": _grounding_features(),
    "clevr": _grounding_features(),
    "cocoqa": _grounding_features(),
    "lerobot_dummy": {
        "camera0": "camera0",
        "state": "state",
        "actions": "actions",
        "prompt": "task",
        "response": "response",
    },
}
250
+
251
+ """
252
+ Use "MSE" for mean squared error and "CE" for cross entropy.
253
+ Usually robotic data with actions will have an MSE loss while
254
+ VQA tasks will have a CE loss.
255
+ """
256
+ LOSS_TYPE_MAPPING = {
257
+ "ML-GOD/mt-button-press": "MSE",
258
+ "ML-GOD/libero_spatial_no_noops_1.0.0_lerobot": "MSE",
259
+ "ML-GOD/libero": "MSE",
260
+ "physical-intelligence/libero": "MSE",
261
+ "danaaubakirova/koch_test": "MSE",
262
+ "lerobot/droid_100": "MSE",
263
+ "lerobot/aloha_mobile_cabinet": "MSE",
264
+ "autox/agibot-sample": "MSE",
265
+ "bi-so100-block-manipulation": "MSE",
266
+ "cube-on-cylinder": "MSE",
267
+ "cylinder-on-cube": "MSE",
268
+ "l-shape-on-cross-shape": "MSE",
269
+ "lerobot/svla_so101_pickplace": "MSE",
270
+ "lerobot/svla_so100_pickplace": "MSE",
271
+ "lerobot/svla_so100_stacking": "MSE",
272
+ "pixmo": "CE",
273
+ "dummy": "CE",
274
+ "vsr": "CE",
275
+ "clevr": "CE",
276
+ "cocoqa": "CE",
277
+ "lerobot_dummy": "MSE",
278
+ }
@@ -0,0 +1,330 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ # Copyright 2026 Tensor Auto Inc. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """Image transformation utilities for data augmentation.
18
+
19
+ This module provides configurable image transformation pipelines for data
20
+ augmentation during training. It extends torchvision.transforms.v2 with custom
21
+ transforms and a flexible configuration system that supports weighted random
22
+ sampling of transform subsets.
23
+
24
+ The module implements a probabilistic augmentation strategy where a random
25
+ subset of available transforms is applied to each image, with configurable
26
+ weights controlling the sampling probability. This approach provides more
27
+ diverse augmentations compared to applying all transforms deterministically.
28
+
29
+ Key Features:
30
+ - Random subset sampling: Applies a random subset of N transforms from a
31
+ larger pool, controlled by configurable weights.
32
+ - Custom transforms: Includes SharpnessJitter for more diverse sharpness
33
+ augmentation compared to standard torchvision transforms.
34
+ - Configurable pipeline: Dataclass-based configuration system for easy
35
+ customization of transform parameters and weights.
36
+ - Flexible ordering: Option to apply transforms in random order or
37
+ deterministic order.
38
+ - Torchvision v2 integration: Built on top of torchvision.transforms.v2
39
+ for modern transform API support.
40
+
41
+ Classes:
42
+ RandomSubsetApply: Transform container that applies a random subset of
43
+ transforms with weighted sampling and optional random ordering.
44
+ SharpnessJitter: Custom transform for randomly jittering image sharpness
45
+ with uniform distribution sampling.
46
+ ImageTransformConfig: Dataclass for configuring individual transform
47
+ parameters (weight, type, kwargs).
48
+ ImageTransformsConfig: Dataclass for configuring the overall transform
49
+ pipeline (enable flag, max transforms, random order, transform list).
50
+ ImageTransforms: Main transform class that composes transforms based on
51
+ configuration.
52
+
53
+ Functions:
54
+ make_transform_from_config: Factory function to create transform instances
55
+ from ImageTransformConfig.
56
+
57
+ Example:
58
+ Create and use image transforms:
59
+ >>> config = ImageTransformsConfig(
60
+ ... enable=True,
61
+ ... max_num_transforms=3,
62
+ ... random_order=True
63
+ ... )
64
+ >>> transforms = ImageTransforms(config)
65
+ >>> augmented_image = transforms(image_tensor)
66
+ """
67
+
68
+ import collections
69
+ from dataclasses import dataclass, field
70
+ from typing import Any, Callable, Sequence
71
+
72
+ import torch
73
+ from torchvision.transforms import v2
74
+ from torchvision.transforms.v2 import Transform
75
+ from torchvision.transforms.v2 import functional as F # noqa: N812
76
+
77
+
78
class RandomSubsetApply(Transform):
    """Apply a randomly sampled subset of the given transformations.

    On every forward pass, ``n_subset`` transforms are drawn without
    replacement from ``transforms`` according to the multinomial weights
    ``p`` and applied in sequence.

    Args:
        transforms: list of transformations.
        p: sampling weights, one per transform. They are normalized if they
            do not sum to 1; ``None`` (default) gives every transform equal
            probability.
        n_subset: number of transformations to draw per call. ``None`` means
            all of them. Must lie in [1, len(transforms)].
        random_order: if True, apply the drawn transforms in sampling order
            rather than their original list order.
    """

    def __init__(
        self,
        transforms: Sequence[Callable],
        p: list[float] | None = None,
        n_subset: int | None = None,
        random_order: bool = False,
    ) -> None:
        super().__init__()
        if not isinstance(transforms, Sequence):
            raise TypeError("Argument transforms should be a sequence of callables")

        num_transforms = len(transforms)
        if p is None:
            # Equal weight for every transform.
            p = [1] * num_transforms
        elif len(p) != num_transforms:
            raise ValueError(
                f"Length of p doesn't match the number of transforms: {len(p)} != {len(transforms)}"
            )

        if n_subset is None:
            n_subset = num_transforms
        elif not isinstance(n_subset, int):
            raise TypeError("n_subset should be an int or None")
        elif not (1 <= n_subset <= num_transforms):
            raise ValueError(f"n_subset should be in the interval [1, {len(transforms)}]")

        self.transforms = transforms
        # Normalize the weights so they form a probability distribution.
        total = sum(p)
        self.p = [weight / total for weight in p]
        self.n_subset = n_subset
        self.random_order = random_order

        # Set on each forward pass; handy for inspecting what was applied.
        self.selected_transforms = None

    def forward(self, *inputs: Any) -> Any:
        needs_unpacking = len(inputs) > 1

        # Draw n_subset distinct indices according to the weights.
        indices = torch.multinomial(torch.tensor(self.p), self.n_subset)
        if not self.random_order:
            # Restore the original list order of the drawn transforms.
            indices = indices.sort().values

        self.selected_transforms = [self.transforms[idx] for idx in indices]

        for chosen in self.selected_transforms:
            outputs = chosen(*inputs)
            inputs = outputs if needs_unpacking else (outputs,)

        return outputs

    def extra_repr(self) -> str:
        fields = (
            f"transforms={self.transforms}",
            f"p={self.p}",
            f"n_subset={self.n_subset}",
            f"random_order={self.random_order}",
        )
        return ", ".join(fields)
145
+
146
+
147
class SharpnessJitter(Transform):
    """Randomly change the sharpness of an image or video.

    Similar to a v2.RandomAdjustSharpness with p=1 and a sharpness_factor sampled randomly.
    While v2.RandomAdjustSharpness applies — with a given probability — a fixed sharpness_factor to an image,
    SharpnessJitter applies a random sharpness_factor each time. This is to have a more diverse set of
    augmentations as a result.

    A sharpness_factor of 0 gives a blurred image, 1 gives the original image while 2 increases the sharpness
    by a factor of 2.

    If the input is a :class:`torch.Tensor`,
    it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        sharpness: How much to jitter sharpness. sharpness_factor is chosen uniformly from
            [max(0, 1 - sharpness), 1 + sharpness] or the given
            [min, max]. Should be non negative numbers.
    """

    def __init__(self, sharpness: float | Sequence[float]) -> None:
        super().__init__()
        self.sharpness = self._check_input(sharpness)

    def _check_input(self, sharpness: float | Sequence[float]) -> tuple[float, float]:
        """Validate ``sharpness`` and normalize it to a (min, max) tuple.

        Args:
            sharpness: A single non-negative number or a (min, max) pair.

        Returns:
            The sampling range as a ``(min, max)`` tuple of floats.

        Raises:
            ValueError: If a single number is negative, or the resulting range
                does not satisfy 0 <= min <= max.
            TypeError: If ``sharpness`` is neither a number nor a length-2
                sequence.
        """
        if isinstance(sharpness, (int, float)):
            if sharpness < 0:
                raise ValueError("If sharpness is a single number, it must be non negative.")
            sharpness = [1.0 - sharpness, 1.0 + sharpness]
            # Clamp the lower bound: sharpness factors below 0 are invalid.
            sharpness[0] = max(sharpness[0], 0.0)
        elif isinstance(sharpness, collections.abc.Sequence) and len(sharpness) == 2:
            sharpness = [float(v) for v in sharpness]
        else:
            raise TypeError(f"{sharpness=} should be a single number or a sequence with length 2.")

        if not 0.0 <= sharpness[0] <= sharpness[1]:
            # Bug fix: the original message misspelled "sharpness" ("sharpnesss").
            raise ValueError(f"sharpness values should be between (0., inf), but got {sharpness}.")

        return float(sharpness[0]), float(sharpness[1])

    def make_params(self, flat_inputs: list[Any]) -> dict[str, Any]:
        """Generate random parameters for sharpness jitter.

        Args:
            flat_inputs: List of input tensors.

        Returns:
            Dictionary containing 'sharpness_factor' sampled uniformly from
            the configured sharpness range.
        """
        sharpness_factor = torch.empty(1).uniform_(self.sharpness[0], self.sharpness[1]).item()
        return {"sharpness_factor": sharpness_factor}

    def transform(self, inpt: Any, params: dict[str, Any]) -> Any:
        """Apply sharpness adjustment to input.

        Args:
            inpt: Input image or video tensor.
            params: Dictionary containing 'sharpness_factor' from make_params.

        Returns:
            Transformed image or video with adjusted sharpness.
        """
        sharpness_factor = params["sharpness_factor"]
        return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor)
212
+
213
+
214
@dataclass
class ImageTransformConfig:
    """Configuration for a single image transform.

    Attributes:
        weight: Multinomial sampling probability (without replacement) for
            this transform; weights are normalized if they do not sum to 1.
        type: Name of the transform class — either a class available under
            torchvision.transforms.v2 or a custom transform defined in this
            module.
        kwargs: Keyword arguments forwarded to the transform; for jitter-style
            transforms these are the lower and upper bounds sampled from a
            uniform distribution when the transform is applied.
    """

    weight: float = 1.0
    type: str = "Identity"
    kwargs: dict[str, Any] = field(default_factory=dict)
230
+
231
+
232
@dataclass
class ImageTransformsConfig:
    """Configuration for the image augmentation pipeline.

    All transforms rely on standard torchvision.transforms.v2; their visual
    effect on images is illustrated at
    https://pytorch.org/vision/0.18/auto_examples/transforms/plot_transforms_illustrations.html
    A custom RandomSubsetApply container samples among them.
    """

    # Set to True to enable the transforms during training.
    enable: bool = False
    # Maximum number of transforms (sampled from `tfs` below) applied to each
    # frame; an integer in [1, number_of_available_transforms].
    max_num_transforms: int = 3
    # Transforms run in Torchvision's suggested order (the order of `tfs`)
    # unless this is True, in which case the order is randomized.
    random_order: bool = False
    # Default pool: color jitter on brightness/contrast/saturation/hue plus
    # sharpness jitter, all equally weighted.
    tfs: dict[str, ImageTransformConfig] = field(
        default_factory=lambda: {
            name: ImageTransformConfig(weight=1.0, type=tf_type, kwargs=tf_kwargs)
            for name, tf_type, tf_kwargs in (
                ("brightness", "ColorJitter", {"brightness": (0.8, 1.2)}),
                ("contrast", "ColorJitter", {"contrast": (0.8, 1.2)}),
                ("saturation", "ColorJitter", {"saturation": (0.5, 1.5)}),
                ("hue", "ColorJitter", {"hue": (-0.05, 0.05)}),
                ("sharpness", "SharpnessJitter", {"sharpness": (0.5, 1.5)}),
            )
        }
    )
278
+
279
+
280
def make_transform_from_config(cfg: ImageTransformConfig) -> Transform:
    """Create a transform instance from an ImageTransformConfig.

    Args:
        cfg: Configuration object specifying the transform type and parameters.

    Returns:
        Transform instance (Identity, ColorJitter, or SharpnessJitter).

    Raises:
        ValueError: If the transform type is not recognized.
    """
    # Dispatch table: transform type name -> constructor.
    factories: dict[str, Callable[..., Transform]] = {
        "Identity": v2.Identity,
        "ColorJitter": v2.ColorJitter,
        "SharpnessJitter": SharpnessJitter,
    }
    if cfg.type not in factories:
        raise ValueError(f"Transform '{cfg.type}' is not valid.")
    return factories[cfg.type](**cfg.kwargs)
300
+
301
+
302
class ImageTransforms(Transform):
    """Compose image transforms according to an ImageTransformsConfig."""

    def __init__(self, cfg: ImageTransformsConfig) -> None:
        super().__init__()
        self._cfg = cfg

        self.weights = []
        self.transforms = {}
        # Keep only transforms with a strictly positive sampling weight.
        for tf_name, tf_cfg in cfg.tfs.items():
            if tf_cfg.weight > 0.0:
                self.transforms[tf_name] = make_transform_from_config(tf_cfg)
                self.weights.append(tf_cfg.weight)

        n_subset = min(len(self.transforms), cfg.max_num_transforms)
        if cfg.enable and n_subset > 0:
            # Sample a weighted subset of the configured transforms per call.
            self.tf = RandomSubsetApply(
                transforms=list(self.transforms.values()),
                p=self.weights,
                n_subset=n_subset,
                random_order=cfg.random_order,
            )
        else:
            # Augmentation disabled (or nothing to sample): pass-through.
            self.tf = v2.Identity()

    def forward(self, *inputs: Any) -> Any:
        return self.tf(*inputs)