returnn-1.20250416.172956-py3-none-any.whl → returnn-1.20250418.115249-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of returnn might be problematic.
- returnn/PKG-INFO +1 -1
- returnn/_setup_info_generated.py +2 -2
- returnn/datasets/generating.py +2 -2
- returnn/datasets/postprocessing.py +4 -3
- returnn/frontend/audio/specaugment.py +3 -1
- returnn/frontend/dropout.py +1 -1
- returnn/frontend/normalization.py +3 -2
- returnn/frontend/parametrizations.py +2 -2
- returnn/frontend/rec.py +1 -1
- returnn/frontend/reduce.py +5 -1
- returnn/frontend/run_ctx.py +53 -12
- returnn/torch/frontend/_backend.py +2 -2
- {returnn-1.20250416.172956.dist-info → returnn-1.20250418.115249.dist-info}/METADATA +1 -1
- {returnn-1.20250416.172956.dist-info → returnn-1.20250418.115249.dist-info}/RECORD +17 -17
- {returnn-1.20250416.172956.dist-info → returnn-1.20250418.115249.dist-info}/LICENSE +0 -0
- {returnn-1.20250416.172956.dist-info → returnn-1.20250418.115249.dist-info}/WHEEL +0 -0
- {returnn-1.20250416.172956.dist-info → returnn-1.20250418.115249.dist-info}/top_level.txt +0 -0
returnn/PKG-INFO
CHANGED
returnn/_setup_info_generated.py
CHANGED
@@ -1,2 +1,2 @@
-version = '1.20250416.172956'
-long_version = '1.
+version = '1.20250418.115249'
+long_version = '1.20250418.115249+git.3cef372'
returnn/datasets/generating.py
CHANGED
@@ -792,10 +792,10 @@ class DummyDataset(GeneratingDataset):
         i1 = seq_idx
         i2 = i1 + seq_len * self.num_inputs
         features = numpy.array(
-            [((i % self.input_max_value) + self.input_shift) * self.input_scale for i in range(i1, i2)]
+            [((i % self.input_max_value) + self.input_shift) * self.input_scale for i in range(i1, i2)], dtype="float32"
         ).reshape((seq_len, self.num_inputs))
         i1, i2 = i2, i2 + seq_len
-        targets = numpy.array([i % self.num_outputs["classes"][0] for i in range(i1, i2)])
+        targets = numpy.array([i % self.num_outputs["classes"][0] for i in range(i1, i2)], dtype="int32")
         return DatasetSeq(seq_idx=seq_idx, features=features, targets=targets)
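The only functional change here is pinning explicit dtypes. A minimal illustration (not from the package; on a typical 64-bit platform, numpy would otherwise infer int64/float64 from the values):

import numpy

inferred = numpy.array([1, 2, 3])               # dtype typically int64 on 64-bit systems
pinned = numpy.array([1, 2, 3], dtype="int32")  # matches the dtype now pinned for targets
print(inferred.dtype, pinned.dtype)             # e.g.: int64 int32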
returnn/datasets/postprocessing.py
CHANGED

@@ -141,13 +141,14 @@ class PostprocessingDataset(CachedDataset2):
         self._map_seq_stream = map_seq_stream
         if map_seq_stream_preserves_num_seqs is None and map_seq_stream is not None:
             map_seq_stream_preserves_num_seqs = getattr(map_seq_stream, "preserves_num_seqs", None)
-
+        assert map_seq_stream_preserves_num_seqs is None or isinstance(map_seq_stream_preserves_num_seqs, bool)
+        self._map_seq_stream_preserves_num_seqs = map_seq_stream_preserves_num_seqs
         self._map_outputs = map_outputs
         self._rng = RandomState(self._get_random_seed_for_epoch(0))
         self._seq_list_for_validation: Optional[List[str]] = None

         self._dataset = init_dataset(self._dataset_def, parent_dataset=self)
-        if self._map_seq_stream is None or self._map_seq_stream_preserves_num_seqs:
+        if self._map_seq_stream is None or self._map_seq_stream_preserves_num_seqs is True:
             # if the stream mapper is set, the num_seqs may change and the estimation is less accurate
             self._estimated_num_seqs = self._dataset.estimated_num_seqs
         self._data_iter: Optional[Iterator[Tuple[int, TensorDict]]] = None

@@ -210,7 +211,7 @@ class PostprocessingDataset(CachedDataset2):
         self._data_iter = enumerate(self._build_mapping_iter())
         self._data_iter_produced_num_seqs = 0
         self._seq_list_for_validation = seq_list
-        if self._map_seq_stream is None or self._map_seq_stream_preserves_num_seqs:
+        if self._map_seq_stream is None or self._map_seq_stream_preserves_num_seqs is True:
             # If we don't have an iterable mapper (or the user explicitly specifies this),
             # we know the number of segments exactly equals the number of segments in the wrapped dataset
             try:
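For context, the constructor reads `preserves_num_seqs` off the stream mapper via `getattr`, so a user-defined mapper can opt in; the new assert requires the attribute, if present, to be a real bool, and num_seqs is now only forwarded when it is literally True. A hedged sketch (the mapper below is invented for illustration):

def jitter_seqs(seqs):  # hypothetical map_seq_stream callable
    for seq in seqs:
        # ... transform each sequence; one seq in, one seq out ...
        yield seq

jitter_seqs.preserves_num_seqs = True  # must be a bool (or left unset)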
returnn/frontend/audio/specaugment.py
CHANGED

@@ -67,7 +67,9 @@ def specaugment(
         )
         return x_masked

-    return rf.cond(
+    return rf.cond(
+        rf.get_run_ctx().is_train_flag_enabled(func=specaugment) | (not only_on_train), _mask_branch, lambda: x
+    )


 def random_mask(
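Note the bitwise `|` rather than Python's `or`: per the run_ctx.py docstrings below, the train flag can be a scalar Tensor in a graph-based backend, and `or` would force it through `__bool__`, while `|` keeps the logical-or symbolic. Illustrative only:

flag = rf.get_run_ctx().is_train_flag_enabled(func=specaugment)  # bool, or scalar Tensor in a graph backend
cond = flag | (not only_on_train)  # stays a Tensor expression if flag is one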
returnn/frontend/dropout.py
CHANGED
@@ -60,7 +60,7 @@ def dropout(
         return _dropout(source, keep_prob, noise_dims=noise_dims)

     return rf.cond(
-        pred=rf.get_run_ctx().
+        pred=rf.get_run_ctx().is_train_flag_enabled(func=dropout),
         true_fn=lambda: _dropout(source, keep_prob, noise_dims=noise_dims),
         false_fn=lambda: source,
     )
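With the per-function train flags introduced in run_ctx.py below, this gating can now be overridden for dropout alone. A hedged usage sketch (`model` and `x` are placeholders):

import returnn.frontend as rf

with rf.get_run_ctx().train_flag_ctx(False, func=rf.dropout):
    y = model(x)  # rf.dropout sees the flag as False; other train-flag users are unaffected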
returnn/frontend/normalization.py
CHANGED

@@ -226,8 +226,9 @@ class BatchNorm(rf.Module):

         if use_mask:
             # Generic implementation which supports masking.
-
-
+            train_flag = rf.get_run_ctx().is_train_flag_enabled(func=BatchNorm.__call__)
+            use_current_batch_stats = self.running_mean is None or train_flag
+            update_running_stats = self.running_mean is not None and train_flag
             need_current_batch_stats = rf.opt_logical_or(use_current_batch_stats, update_running_stats)

             mean_cur_batch, variance_cur_batch = rf.cond(
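Spelled out, the two derived flags cover three regimes: with running statistics present and the train flag set, the layer normalizes with the current batch statistics and also updates the running ones; with running statistics present but the flag unset, it uses the running statistics and leaves them untouched; without running statistics (`self.running_mean is None`), it always uses the current batch statistics and never updates anything.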
returnn/frontend/parametrizations.py
CHANGED

@@ -48,7 +48,7 @@ class WeightDropout:
             # on_forward=True because we already checked for train_flag
             return rf.dropout(param, drop_prob=self.drop_prob, on_forward=True)

-        return rf.cond(rf.get_run_ctx().
+        return rf.cond(rf.get_run_ctx().is_train_flag_enabled(func=WeightDropout.__call__), _on_train, lambda: param)


 def weight_noise(module: rf.Module, param_name: str, *, std: float) -> rf.Module:

@@ -84,4 +84,4 @@ class WeightNoise:
             noise = rf.random_normal(param.dims, dtype=param.dtype, stddev=self.std)
             return param + noise

-        return rf.cond(rf.get_run_ctx().
+        return rf.cond(rf.get_run_ctx().is_train_flag_enabled(func=WeightNoise.__call__), _on_train, lambda: param)
returnn/frontend/rec.py
CHANGED
@@ -218,7 +218,7 @@ def _zoneout(*, prev: Tensor, cur: Tensor, factor: float, out_dim: Dim, dropout_
     if factor == 0.0:
         return cur
     return rf.cond(
-        rf.get_run_ctx().
+        rf.get_run_ctx().is_train_flag_enabled(func=ZoneoutLSTM.__call__),
         lambda: (1 - factor) * rf.dropout(cur - prev, factor, axis=dropout_broadcast and out_dim) + prev,
         lambda: (1 - factor) * cur + factor * prev,
     )
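For context: in the training branch, the `(1 - factor)` prefactor cancels the `1/(1 - factor)` rescaling that `rf.dropout` applies to kept values, so each unit either takes exactly `cur` or, where the update `cur - prev` was dropped (probability `factor`), keeps `prev`. The inference branch replaces that stochastic choice with its expectation, `(1 - factor) * cur + factor * prev`.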
returnn/frontend/reduce.py
CHANGED
@@ -251,7 +251,11 @@ class RunningMean(rf.Module):
             x_ = rf.reduce_mean(x, axis=[d for d in x.dims if d not in self.shape])
             self.mean.assign_add(self.alpha * (x_ - self.mean))

-        rf.cond(
+        rf.cond(
+            (not self.update_only_in_train) or rf.get_run_ctx().is_train_flag_enabled(func=RunningMean.__call__),
+            _update_running_stats,
+            lambda: None,
+        )
         return self.mean
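Note that the update happens by side effect: the `assign_add` in the context lines above (presumably inside the local `_update_running_stats`) mutates `self.mean`, so neither branch of the `rf.cond` returns a value (`lambda: None`); the condition only decides whether the side effect runs at all when `update_only_in_train` is set.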
returnn/frontend/run_ctx.py
CHANGED
@@ -7,7 +7,8 @@ or forwarding loop.
 """

 from __future__ import annotations
-from typing import Optional, Union, Any, Sequence, Dict
+from typing import Optional, Union, Any, Callable, Sequence, Dict, List
+from types import FunctionType
 from dataclasses import dataclass
 from contextlib import contextmanager
 from returnn.tensor import Tensor, Dim, TensorDict, batch_dim

@@ -101,7 +102,7 @@ class RunCtx:
         - "forward_step", for mark_as_output
         """
         self._stage = stage
-        self.
+        self._train_flags_stack: List[Dict[Optional[FunctionType], Union[Tensor, bool]]] = [{None: train_flag}]
         self._step = step
         self._epoch = epoch
         self.losses = {}  # type: Dict[str, Loss]

@@ -121,14 +122,17 @@ class RunCtx:
     @property
     def train_flag(self) -> Union[bool, Tensor]:
         """
-        :return:
-            and we are supposed to use dropout and similar mechanisms.
-            In a graph-based backend, this can be dynamic.
+        :return: ``is_train_flag_enabled(func=None)``. See :func:`is_train_flag_enabled`.
         """
-        return self.
+        return self.is_train_flag_enabled(func=None)

     @contextmanager
-    def train_flag_ctx(
+    def train_flag_ctx(
+        self,
+        train_flag: Union[bool, Tensor],
+        *,
+        func: Optional[Union[Sequence[Union[FunctionType, Callable]], FunctionType, Callable]] = None,
+    ):
         """
         Context manager to temporarily set the train_flag.

@@ -137,14 +141,51 @@ class RunCtx:
         with rf.get_run_ctx().train_flag_ctx(False):
             ...

-        :param train_flag: whether we are in training mode
-
-
-
+        :param train_flag: whether we are in training mode.
+            In a graph-based backend, this can be dynamic (scalar Tensor, not just bool).
+        :param func: if given, the train flag is only enabled/disabled for this specific function(s)
+            (e.g. ``rf.dropout`` or ``rf.BatchNorm.__call__``).
+            (See https://github.com/rwth-i6/returnn/issues/1712 for some discussion.)
+            (Note: We expect a Python function, not just any general Callable. But typing seems to get this wrong.)
+        """
+        old_train_flags = self._train_flags_stack[-1]
+        new_train_flags = old_train_flags.copy()
+        if func is None:
+            new_train_flags[None] = train_flag
+        elif isinstance(func, FunctionType):
+            new_train_flags[func] = train_flag
+        elif isinstance(func, (list, tuple)):
+            for f in func:
+                if not isinstance(f, FunctionType):
+                    raise TypeError(f"Expected function, got {type(f)}")
+                new_train_flags[f] = train_flag
+        else:
+            raise TypeError(f"Expected function or sequence of functions, got {type(func)}")
+        self._train_flags_stack.append(new_train_flags)
         try:
             yield
         finally:
-
+            last = self._train_flags_stack.pop(-1)
+            assert last is new_train_flags
+            assert len(self._train_flags_stack) >= 1
+
+    def is_train_flag_enabled(self, *, func: Optional[Union[FunctionType, Callable]]) -> Union[bool, Tensor]:
+        """
+        :param func: function for which we want to check the train flag
+            (e.g. ``rf.dropout`` or ``rf.BatchNorm.__call__``),
+            or None for the global fallback.
+            (See https://github.com/rwth-i6/returnn/issues/1712 for some discussion.)
+        :return: Whether the train flag is enabled, either for the specific function, or globally.
+            Training is usually when the model is updated,
+            and we are supposed to use dropout and similar mechanisms.
+            This is either for the specified function, or globally.
+            In a graph-based backend, this can also be dynamic (scalar Tensor, not just bool).
+        """
+        train_flags = self._train_flags_stack[-1]
+        if func in train_flags:
+            return train_flags[func]
+        assert isinstance(func, FunctionType)
+        return train_flags[None]  # global fallback. this should always be defined, see __init__

     @property
     def step(self) -> Union[int, Tensor]:
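Putting the new API together, a hedged usage sketch (flag values chosen for illustration, not from the package):

import returnn.frontend as rf

ctx = rf.get_run_ctx()

# Unchanged: the global flag.
print(ctx.train_flag)  # same as ctx.is_train_flag_enabled(func=None)

# Unchanged: scope the flag globally.
with ctx.train_flag_ctx(False):
    print(ctx.is_train_flag_enabled(func=rf.dropout))  # no per-func entry -> global fallback: False

# New: scope the flag to specific functions only.
with ctx.train_flag_ctx(False, func=[rf.dropout, rf.BatchNorm.__call__]):
    print(ctx.is_train_flag_enabled(func=rf.dropout))        # False, from the per-func entry
    print(ctx.is_train_flag_enabled(func=rf.LSTM.__call__))  # falls back to the global flag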
returnn/torch/frontend/_backend.py
CHANGED

@@ -1859,7 +1859,7 @@ class TorchBackend(Backend[torch.Tensor]):
             bias=beta.raw_tensor if affine else None,
             # training: means whether we should use the current batch statistics
             # + update the running statistics (if given)
-            training=rf.get_run_ctx().
+            training=rf.get_run_ctx().is_train_flag_enabled(func=rf.BatchNorm.__call__) or (running_mean is None),
             momentum=momentum,
             eps=epsilon,
         )

@@ -2236,7 +2236,7 @@ class TorchBackend(Backend[torch.Tensor]):
             has_biases=has_biases,
             num_layers=1,
             dropout=0.0,
-            train=rf.get_run_ctx().
+            train=rf.get_run_ctx().is_train_flag_enabled(func=rf.LSTM.__call__),
             bidirectional=False,
         )
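Unlike the `|` in specaugment.py, plain `or` works here: the PyTorch backend runs eagerly, so the flag is a plain bool, and `running_mean is None` forces `training=True` since there are no running statistics to fall back on.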
{returnn-1.20250416.172956.dist-info → returnn-1.20250418.115249.dist-info}/RECORD
CHANGED

@@ -1,9 +1,9 @@
-returnn/PKG-INFO,sha256=
+returnn/PKG-INFO,sha256=vWxYPYgsvU3GDUCQEiHY5h2C09lxsFvZ-GUR6jfYUxM,5215
 returnn/__init__.py,sha256=biBtRsM0WZ406vShaeH-9WFoqJ8XwTbn6g0EeFJ7l8E,1012
 returnn/__main__.py,sha256=qBFbuB1yN3adgVM5pXt2-Yq9vorjRNchNPL8kDKx44M,31752
 returnn/__old_mod_loader__.py,sha256=nvsNY-xELdS_IPNkv66Q9Rmvg4dbGW0-EBRDcCmctos,7654
 returnn/__setup__.py,sha256=22kQn2fh11iPM0hLb2Fy5sLmoU1JGvmDxXRYuRgQkwU,4659
-returnn/_setup_info_generated.py,sha256=
+returnn/_setup_info_generated.py,sha256=oEWWbm7JJwkhmRryz4mvP5IIrbCPVx8zczj4izvepn0,77
 returnn/config.py,sha256=3tmKhB6FnQZaNdtcYsiB61JnEY--iZ2qmJ4yq0b6tE0,29140
 returnn/forward_iface.py,sha256=A_OJiaXsX4MlXQRzST86ylyxSUZbC402PQL1REcqHjM,911
 returnn/learning_rate_control.py,sha256=ZvWryAn_tv9DhV8sh1LV3eE34Yltl3On3mYZAG4hR9s,34684

@@ -18,7 +18,7 @@ returnn/datasets/bundle_file.py,sha256=KQNrS1MSf-4_idlK0c0KFwON-f5sEK0sWU15WpoMY
 returnn/datasets/cached.py,sha256=DIRdWrxBmsZG8O_9eVxBO5mcdo4f5KU-Xb-4wVz59Io,25418
 returnn/datasets/cached2.py,sha256=_6pza3IG68JexaExhj1ld3fP6pE7T-G804driJ9Z_qo,12141
 returnn/datasets/distrib_files.py,sha256=_UlcrnaU1rA9v6D3H3X4dPhcA--09fNeVnWs9VNo0yg,27656
-returnn/datasets/generating.py,sha256=
+returnn/datasets/generating.py,sha256=E_6KpnSu8ChqG3pb4VTChWDsBTonIwFFAj53SI9NSow,99846
 returnn/datasets/hdf.py,sha256=yqzr-nzqlt02QZoW2uFowKT19gd5e-9mJpHCKSQxW8o,67643
 returnn/datasets/lm.py,sha256=5hSdBgmgTP0IzO2p-JjiWtny0Zb0M20goXtjlw4JVR4,99206
 returnn/datasets/map.py,sha256=kOBJVZmwDhLsOplzDNByIfa0NRSUaMo2Lsy36lBvxrM,10907

@@ -26,7 +26,7 @@ returnn/datasets/meta.py,sha256=0wQzRzjShLSYNFoGo_MdR5IT8arxHr9gFjUlEqb2rbY,9496
 returnn/datasets/multi_proc.py,sha256=aVjsLt2qjHnHOrEYCgIPCwNYE-f1fiGP6eZ8NGAr3A4,22583
 returnn/datasets/normalization_data.py,sha256=wOHrbO3612uWXpzLHHxksDw0qeVmQ42w7byBL9QMh9Q,14618
 returnn/datasets/numpy_dump.py,sha256=wl8bKIKAlff2HPJPtuu5wBg3TLOf16d2wLVB4lLAwTM,5158
-returnn/datasets/postprocessing.py,sha256=
+returnn/datasets/postprocessing.py,sha256=Ug2fvzbutnwJSvErIK2Ft-bd0pz79ZBmEg12pLEo1f0,23514
 returnn/datasets/raw_wav.py,sha256=M7eTHp4CTtLQf3yPTiJY-mSJYgZNxkGV9IFN9J1dq_4,9144
 returnn/datasets/sprint.py,sha256=YhhdNbBTuL_HCc3asgK3o6vgq5h5nMPH5nBFvsuwVjA,55464
 returnn/datasets/stereo.py,sha256=PkowC91bZWihIYuIZgyGgPcNwgq5jBvyxxu1nER-VhM,17633

@@ -91,7 +91,7 @@ returnn/frontend/control_flow_ctx.py,sha256=v17CsNwRnZYe8GdMtGJt2ftibfxMCGK1i0l-
 returnn/frontend/conv.py,sha256=Q0q90-uu9d6qV-v8_DlFGxpZtc6FjfXVpfkkXmv1Alk,31959
 returnn/frontend/device.py,sha256=K7Y1qoQcO4GIHgLkPLQWK-GVT8gKL8GwyQrmPo8LgBE,1438
 returnn/frontend/dims.py,sha256=aH5FQ_m0xMD6Rj-BUWGx8lB-HkCuwZfMBf6mZbGGW5E,12611
-returnn/frontend/dropout.py,sha256=
+returnn/frontend/dropout.py,sha256=bH0keqKcBzkC_SPlQoir9HPxTtgoVCp61YD5ZvEOuA4,5031
 returnn/frontend/dtype.py,sha256=Ooc5BrcNrTp6XShuFEV9g5V6-niuy4ImP_Lt_Qgq3jE,1886
 returnn/frontend/gradient.py,sha256=G-Qv4gKGHYEeB92Zwco9ao4qjd6umZPUzQC4J-fbYWo,4033
 returnn/frontend/graph.py,sha256=PIv901WZ1rfTV0QGkyzBv6UxfWk9NsLGxdoJ5x9-8Xg,1818

@@ -105,15 +105,15 @@ returnn/frontend/math_.py,sha256=KlJxdIib8ENlid7cc4lcwHv5e21tzTjTEV8VgEDAijo,169
 returnn/frontend/matmul.py,sha256=3QaGiZtSs9PriT40T7Vc3KnYKPgYSN4tCZytYeq9qMA,1945
 returnn/frontend/module.py,sha256=219rh5mE0CD0-NdxXLsKyhv3BNtOI9jSyiI1Rb8MOyU,10700
 returnn/frontend/nested.py,sha256=Hm4GT5ZI1OyWpYxv_SP5jlBztJsjGVMgtvKJnvQYa00,15068
-returnn/frontend/normalization.py,sha256
+returnn/frontend/normalization.py,sha256=-lYJ9IWcheOQu1gXJehSOA76qgVtxd1C07Jqps6Qg1o,14116
 returnn/frontend/parameter.py,sha256=w6SN-uv87OyeWBt90_3UBbK0h6sftSOCxkqXPg76caY,10375
-returnn/frontend/parametrizations.py,sha256=
+returnn/frontend/parametrizations.py,sha256=ptNgBw5IiPXVpB3QGse7AGAhdXp8X1rCqYUl2Mae8aI,2876
 returnn/frontend/parametrize.py,sha256=VhgTEP7ehON950Q4bkCy8rvg9641moEKAXn0XzomK6E,7216
 returnn/frontend/piecewise_linear.py,sha256=TdL6wzop8P1dcIZwkEbJFvSUZSI1cbhS3XKzlWQkEVI,1964
 returnn/frontend/rand.py,sha256=Levgf5VtOOBKDSgz0869Jf3VW4BWxYZuRXsa_fOxNI4,12969
-returnn/frontend/rec.py,sha256=
-returnn/frontend/reduce.py,sha256
-returnn/frontend/run_ctx.py,sha256=
+returnn/frontend/rec.py,sha256=6YSsSG7fdtfvvg24vmexSg8R2aVCcKHBdGLh-Mgn9Co,8037
+returnn/frontend/reduce.py,sha256=xvxN_h3LsMJdmT0IbW4nOf8qFhckuAniIhD9PalO6j0,10305
+returnn/frontend/run_ctx.py,sha256=WxKmBpkb2b34AQbnQ9YIRYvNoy3Fa0yJIezNCWLpeWU,23785
 returnn/frontend/signal.py,sha256=XgOBL1iy-cJgulePH5HRPAwp2cScy60q4RItr7xzvGc,4412
 returnn/frontend/state.py,sha256=EePdrx6PtWL4mJ2XZmGlh5dl4nq6G9wZpqP4hdDEzfY,2935
 returnn/frontend/stepwise_scheduler.py,sha256=fMOTR7npGCDXrXDmSQ4VwmudoHEbY3Yr-QGyjFdQJSc,927

@@ -129,7 +129,7 @@ returnn/frontend/_native/tensor_ops.cpp,sha256=bYtwwn_NeJfAEHWYPEJlkoLDKt9baZ3RA
 returnn/frontend/_native/tensor_ops.hpp,sha256=dDqvUejRNHjItnmOP5aHyAQbAmXmXoDVXSe3tveEU8A,3732
 returnn/frontend/audio/__init__.py,sha256=8mahwucBje8qHKw0bOvoySlvvD0rFKxviSvcAHSjiJY,67
 returnn/frontend/audio/mel.py,sha256=VZdxf2mTLzLOXsLRzCvaad712Zf0c2iwdthrzeVfgxk,7885
-returnn/frontend/audio/specaugment.py,sha256=
+returnn/frontend/audio/specaugment.py,sha256=w7YPEJ6zhCaG5AAaDd-HxsKwa_2vA7wFqHrEjxiUVPI,5841
 returnn/frontend/conversions/__init__.py,sha256=7plsDxWVYhASa-3qmqbdzSI34A9ujUH2iMkL3eRD0TI,84
 returnn/frontend/conversions/espnet_e_branchformer.py,sha256=Mmp3G6nySy0CqeHa-um-RAuUSnFH1DKNjBbqQB_Pomo,9018
 returnn/frontend/conversions/hf_llama.py,sha256=1WQOhQyUWwkAznaRqK2zpThP8XZbaomkaE8qMG_bZPY,9662

@@ -216,7 +216,7 @@ returnn/torch/data/queued_data_iter.py,sha256=PoOsGHdHVZjTmcyfq_ZOw--P6hyfTdmAWI
 returnn/torch/data/returnn_dataset_wrapper.py,sha256=2CaDapzrlqahANuq-nyVAtv5ENHuM8A7okORwYJDisg,8006
 returnn/torch/data/tensor_utils.py,sha256=-Teqi--LLbt6q_5mDRdoHZHmPgSdC83W706ukif_YiU,1284
 returnn/torch/frontend/__init__.py,sha256=AA48HZnC17ASuKA0EWy8loZ-Bib_yUtqF4T1wYvjst4,62
-returnn/torch/frontend/_backend.py,sha256=
+returnn/torch/frontend/_backend.py,sha256=xX3xGe-TtGy_VnTyQyhDD3lfZ55gZajgxE9khOyQ4HE,101844
 returnn/torch/frontend/_rand.py,sha256=1JgIkV2XmpgJD86zXZ-NCAe-QuoP2swr6NaS1oz3Qa8,1830
 returnn/torch/frontend/bridge.py,sha256=Z2_UW8AagezC7zsXDc5PKcd8G9WwisV7j9SWGHU0m4U,7840
 returnn/torch/frontend/raw_ops.py,sha256=lF0h-KtYYsdaaqQADylVZp9qzPskOOXA4MfmYDyx5IU,296

@@ -253,8 +253,8 @@ returnn/util/sig_proc.py,sha256=Tjz0VOAVyqu2qDCF5HZ1JjALjcFsHcNkcd96WgZeKfE,7265
 returnn/util/task_system.py,sha256=y4sMVXQ25Qd2z0rx03uOlXlkE-jbCYC1Sjfn-XlraVU,26003
 returnn/util/train_proc_manager.py,sha256=Pjht28k6uz6BNQ47uW6Gf880iyq5q4wx7P_K2tmoAM8,3266
 returnn/util/watch_memory.py,sha256=BR5P2kvBN6UI81cE0_1WAA6Hd1SByLbBaiDxvLhPOew,4213
-returnn-1.
-returnn-1.
-returnn-1.
-returnn-1.
-returnn-1.
+returnn-1.20250418.115249.dist-info/LICENSE,sha256=ywBD_U2aD4vpuoIgNAsjIGBYydl0tVKll3De0Z8s77c,11041
+returnn-1.20250418.115249.dist-info/METADATA,sha256=vWxYPYgsvU3GDUCQEiHY5h2C09lxsFvZ-GUR6jfYUxM,5215
+returnn-1.20250418.115249.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+returnn-1.20250418.115249.dist-info/top_level.txt,sha256=Lsn4WZc5Pbfk0-xDQOgnFCxOoqxL4CyeM3N1TFbJncw,8
+returnn-1.20250418.115249.dist-info/RECORD,,
{returnn-1.20250416.172956.dist-info → returnn-1.20250418.115249.dist-info}/LICENSE
File without changes
{returnn-1.20250416.172956.dist-info → returnn-1.20250418.115249.dist-info}/WHEEL
File without changes
{returnn-1.20250416.172956.dist-info → returnn-1.20250418.115249.dist-info}/top_level.txt
File without changes