returnn 1.20250416.134058-py3-none-any.whl → 1.20250417.191918-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

returnn/PKG-INFO CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: returnn
-Version: 1.20250416.134058
+Version: 1.20250417.191918
 Summary: The RWTH extensible training framework for universal recurrent neural networks
 Home-page: https://github.com/rwth-i6/returnn/
 Author: Albert Zeyer
returnn/_setup_info_generated.py CHANGED
@@ -1,2 +1,2 @@
-version = '1.20250416.134058'
-long_version = '1.20250416.134058+git.ead6299'
+version = '1.20250417.191918'
+long_version = '1.20250417.191918+git.4ce0802'
returnn/datasets/hdf.py CHANGED
@@ -1201,6 +1201,7 @@ class SimpleHDFWriter:
         shape = [None] * ndim  # type: typing.List[typing.Optional[int]]
         if ndim >= 2:
             shape[-1] = dim
+        assert all(shape[1:]), f"{self} extra {data_key!r} supports only dyn dim in first axis, got shape {shape!r}"
         if dtype == "string":
             # noinspection PyUnresolvedReferences
             dtype = h5py.special_dtype(vlen=str)
@@ -1237,10 +1238,15 @@ class SimpleHDFWriter:
             self._datasets[name] = self._file.create_dataset(
                 name, raw_data.shape, raw_data.dtype, maxshape=tuple(None for _ in raw_data.shape)
             )
+            expected_shape = (raw_data.shape[0],) + self._datasets[name].shape[1:]
         else:
             old_shape = self._datasets[name].shape
             self._datasets[name].resize(old_shape[0] + raw_data.shape[0], axis=0)
+            expected_shape = (raw_data.shape[0],) + old_shape[1:]
         # append raw data to dataset
+        assert (
+            expected_shape == raw_data.shape
+        ), f"{self} insert: shape mismatch: expected {expected_shape}, got {raw_data.shape}"
         self._datasets[name][self._file.attrs["numTimesteps"] :] = raw_data
         self._file.attrs["numTimesteps"] += raw_data.shape[0]
         self._file.attrs["numSeqs"] += 1
@@ -1286,13 +1292,17 @@ class SimpleHDFWriter:
                 self._seq_lengths[seq_idx, data_key_idx_0 + 1] = self._extra_num_time_steps[data_key_]

         self._extra_num_time_steps[data_key] += raw_data.shape[0]
-        self._datasets[data_key].resize(self._extra_num_time_steps[data_key], axis=0)
+        hdf_data = self._datasets[data_key]
+        hdf_data.resize(self._extra_num_time_steps[data_key], axis=0)

         data_key_idx = sorted(self._prepared_extra).index(data_key) + 1
         self._seq_lengths[seq_idx, data_key_idx] = raw_data.shape[0]

         offset = self._extra_num_time_steps[data_key] - raw_data.shape[0]
-        hdf_data = self._datasets[data_key]
+        expected_shape = (raw_data.shape[0],) + hdf_data.shape[1:]
+        assert (
+            expected_shape == raw_data.shape
+        ), f"{self} insert other {data_key!r}: shape mismatch: expected {expected_shape}, got {raw_data.shape}"
         hdf_data[offset:] = raw_data

     def insert_batch(self, inputs, seq_len, seq_tag, extra=None):
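For illustration, the invariant behind the new asserts as a standalone sketch in plain numpy (the dataset and batch shapes here are made up): appended data must match the existing dataset's trailing static dims exactly; only the first (time) axis may grow.

import numpy

hdf_dataset_shape = (100, 40)                # existing HDF dataset: (time, dim)
raw_data = numpy.zeros((10, 40), "float32")  # batch of frames to append

# same check as the new assert in SimpleHDFWriter above
expected_shape = (raw_data.shape[0],) + hdf_dataset_shape[1:]
assert expected_shape == raw_data.shape, f"shape mismatch: expected {expected_shape}, got {raw_data.shape}"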
returnn/frontend/audio/specaugment.py CHANGED
@@ -67,7 +67,9 @@ def specaugment(
         )
         return x_masked

-    return rf.cond(rf.get_run_ctx().train_flag | (not only_on_train), _mask_branch, lambda: x)
+    return rf.cond(
+        rf.get_run_ctx().is_train_flag_enabled(func=specaugment) | (not only_on_train), _mask_branch, lambda: x
+    )


 def random_mask(
returnn/frontend/dropout.py CHANGED
@@ -60,7 +60,7 @@ def dropout(
         return _dropout(source, keep_prob, noise_dims=noise_dims)

     return rf.cond(
-        pred=rf.get_run_ctx().train_flag,
+        pred=rf.get_run_ctx().is_train_flag_enabled(func=dropout),
         true_fn=lambda: _dropout(source, keep_prob, noise_dims=noise_dims),
         false_fn=lambda: source,
     )
returnn/frontend/normalization.py CHANGED
@@ -226,8 +226,9 @@ class BatchNorm(rf.Module):

         if use_mask:
             # Generic implementation which supports masking.
-            use_current_batch_stats = self.running_mean is None or rf.get_run_ctx().train_flag
-            update_running_stats = self.running_mean is not None and rf.get_run_ctx().train_flag
+            train_flag = rf.get_run_ctx().is_train_flag_enabled(func=BatchNorm.__call__)
+            use_current_batch_stats = self.running_mean is None or train_flag
+            update_running_stats = self.running_mean is not None and train_flag
             need_current_batch_stats = rf.opt_logical_or(use_current_batch_stats, update_running_stats)

             mean_cur_batch, variance_cur_batch = rf.cond(
returnn/frontend/parametrizations.py CHANGED
@@ -48,7 +48,7 @@ class WeightDropout:
             # on_forward=True because we already checked for train_flag
             return rf.dropout(param, drop_prob=self.drop_prob, on_forward=True)

-        return rf.cond(rf.get_run_ctx().train_flag, _on_train, lambda: param)
+        return rf.cond(rf.get_run_ctx().is_train_flag_enabled(func=WeightDropout.__call__), _on_train, lambda: param)


 def weight_noise(module: rf.Module, param_name: str, *, std: float) -> rf.Module:
@@ -84,4 +84,4 @@ class WeightNoise:
             noise = rf.random_normal(param.dims, dtype=param.dtype, stddev=self.std)
             return param + noise

-        return rf.cond(rf.get_run_ctx().train_flag, _on_train, lambda: param)
+        return rf.cond(rf.get_run_ctx().is_train_flag_enabled(func=WeightNoise.__call__), _on_train, lambda: param)
returnn/frontend/rec.py CHANGED
@@ -218,7 +218,7 @@ def _zoneout(*, prev: Tensor, cur: Tensor, factor: float, out_dim: Dim, dropout_
     if factor == 0.0:
         return cur
     return rf.cond(
-        rf.get_run_ctx().train_flag,
+        rf.get_run_ctx().is_train_flag_enabled(func=ZoneoutLSTM.__call__),
         lambda: (1 - factor) * rf.dropout(cur - prev, factor, axis=dropout_broadcast and out_dim) + prev,
         lambda: (1 - factor) * cur + factor * prev,
     )
returnn/frontend/reduce.py CHANGED
@@ -251,7 +251,11 @@ class RunningMean(rf.Module):
             x_ = rf.reduce_mean(x, axis=[d for d in x.dims if d not in self.shape])
             self.mean.assign_add(self.alpha * (x_ - self.mean))

-        rf.cond((not self.update_only_in_train) or rf.get_run_ctx().train_flag, _update_running_stats, lambda: None)
+        rf.cond(
+            (not self.update_only_in_train) or rf.get_run_ctx().is_train_flag_enabled(func=RunningMean.__call__),
+            _update_running_stats,
+            lambda: None,
+        )
         return self.mean
returnn/frontend/run_ctx.py CHANGED
@@ -7,7 +7,8 @@ or forwarding loop.
 """

 from __future__ import annotations
-from typing import Optional, Union, Any, Sequence, Dict
+from typing import Optional, Union, Any, Callable, Sequence, Dict, List
+from types import FunctionType
 from dataclasses import dataclass
 from contextlib import contextmanager
 from returnn.tensor import Tensor, Dim, TensorDict, batch_dim
@@ -101,7 +102,7 @@ class RunCtx:
         - "forward_step", for mark_as_output
         """
         self._stage = stage
-        self._train_flag = train_flag
+        self._train_flags_stack: List[Dict[Optional[FunctionType], Union[Tensor, bool]]] = [{None: train_flag}]
         self._step = step
         self._epoch = epoch
         self.losses = {}  # type: Dict[str, Loss]
@@ -121,14 +122,17 @@ class RunCtx:
     @property
     def train_flag(self) -> Union[bool, Tensor]:
         """
-        :return: whether we are in training mode, i.e. the model is updated,
-            and we are supposed to use dropout and similar mechanisms.
-            In a graph-based backend, this can be dynamic.
+        :return: ``is_train_flag_enabled(func=None)``. See :func:`is_train_flag_enabled`.
         """
-        return self._train_flag
+        return self.is_train_flag_enabled(func=None)

     @contextmanager
-    def train_flag_ctx(self, train_flag: Union[bool, Tensor]):
+    def train_flag_ctx(
+        self,
+        train_flag: Union[bool, Tensor],
+        *,
+        func: Optional[Union[Sequence[Union[FunctionType, Callable]], FunctionType, Callable]] = None,
+    ):
         """
         Context manager to temporarily set the train_flag.
@@ -137,14 +141,51 @@ class RunCtx:
             with rf.get_run_ctx().train_flag_ctx(False):
                 ...

-        :param train_flag: whether we are in training mode
-        """
-        old_train_flag = self.train_flag
-        self._train_flag = train_flag
+        :param train_flag: whether we are in training mode.
+            In a graph-based backend, this can be dynamic (scalar Tensor, not just bool).
+        :param func: if given, the train flag is only enabled/disabled for this specific function(s)
+            (e.g. ``rf.dropout`` or ``rf.BatchNorm.__call__``).
+            (See https://github.com/rwth-i6/returnn/issues/1712 for some discussion.)
+            (Note: We expect a Python function, not just any general Callable. But typing seems to get this wrong.)
+        """
+        old_train_flags = self._train_flags_stack[-1]
+        new_train_flags = old_train_flags.copy()
+        if func is None:
+            new_train_flags[None] = train_flag
+        elif isinstance(func, FunctionType):
+            new_train_flags[func] = train_flag
+        elif isinstance(func, (list, tuple)):
+            for f in func:
+                if not isinstance(f, FunctionType):
+                    raise TypeError(f"Expected function, got {type(f)}")
+                new_train_flags[f] = train_flag
+        else:
+            raise TypeError(f"Expected function or sequence of functions, got {type(func)}")
+        self._train_flags_stack.append(new_train_flags)
         try:
             yield
         finally:
-            self._train_flag = old_train_flag
+            last = self._train_flags_stack.pop(-1)
+            assert last is new_train_flags
+            assert len(self._train_flags_stack) >= 1
+
+    def is_train_flag_enabled(self, *, func: Optional[Union[FunctionType, Callable]]) -> Union[bool, Tensor]:
+        """
+        :param func: function for which we want to check the train flag
+            (e.g. ``rf.dropout`` or ``rf.BatchNorm.__call__``),
+            or None for the global fallback.
+            (See https://github.com/rwth-i6/returnn/issues/1712 for some discussion.)
+        :return: Whether the train flag is enabled, either for the specific function, or globally.
+            Training is usually when the model is updated,
+            and we are supposed to use dropout and similar mechanisms.
+            This is either for the specified function, or globally.
+            In a graph-based backend, this can also be dynamic (scalar Tensor, not just bool).
+        """
+        train_flags = self._train_flags_stack[-1]
+        if func in train_flags:
+            return train_flags[func]
+        assert isinstance(func, FunctionType)
+        return train_flags[None]  # global fallback. this should always be defined, see __init__

     @property
     def step(self) -> Union[int, Tensor]:
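The new API in practice, as a minimal sketch: it assumes the usual "import returnn.frontend as rf" convention and an already initialized run context, and only uses the functions named in the docstrings above (rf.dropout, rf.BatchNorm.__call__).

import returnn.frontend as rf

ctx = rf.get_run_ctx()

# The global flag behaves as before (now the func=None entry of the stack):
train = ctx.train_flag  # == ctx.is_train_flag_enabled(func=None)

# Disable train behavior for rf.dropout only; everything else
# (specaugment, batch norm, ...) keeps following the global flag:
with ctx.train_flag_ctx(False, func=rf.dropout):
    assert ctx.is_train_flag_enabled(func=rf.dropout) is False

# A list/tuple of functions is accepted as well, and contexts nest;
# the previous per-function flags are restored on exit:
with ctx.train_flag_ctx(False, func=[rf.dropout, rf.BatchNorm.__call__]):
    with ctx.train_flag_ctx(True, func=rf.dropout):
        assert ctx.is_train_flag_enabled(func=rf.dropout) is True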
returnn/torch/frontend/_backend.py CHANGED
@@ -1859,7 +1859,7 @@ class TorchBackend(Backend[torch.Tensor]):
             bias=beta.raw_tensor if affine else None,
             # training: means whether we should use the current batch statistics
             # + update the running statistics (if given)
-            training=rf.get_run_ctx().train_flag or (running_mean is None),
+            training=rf.get_run_ctx().is_train_flag_enabled(func=rf.BatchNorm.__call__) or (running_mean is None),
             momentum=momentum,
             eps=epsilon,
         )
@@ -2236,7 +2236,7 @@ class TorchBackend(Backend[torch.Tensor]):
             has_biases=has_biases,
             num_layers=1,
             dropout=0.0,
-            train=rf.get_run_ctx().train_flag,
+            train=rf.get_run_ctx().is_train_flag_enabled(func=rf.LSTM.__call__),
             bidirectional=False,
         )
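Since the torch backend keys its lookups on rf.BatchNorm.__call__ and rf.LSTM.__call__, the per-function flag can, for example, freeze batch-norm running statistics during training while the rest of the network stays in train mode. A hedged sketch (model and x are hypothetical placeholders):

import returnn.frontend as rf

def forward_frozen_bn(model, x):
    # Batch norm sees train_flag=False: running stats are used, not updated.
    # Dropout, LSTM, etc. still follow the global train flag.
    with rf.get_run_ctx().train_flag_ctx(False, func=rf.BatchNorm.__call__):
        return model(x)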
returnn-1.20250417.191918.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: returnn
-Version: 1.20250416.134058
+Version: 1.20250417.191918
 Summary: The RWTH extensible training framework for universal recurrent neural networks
 Home-page: https://github.com/rwth-i6/returnn/
 Author: Albert Zeyer
returnn-1.20250417.191918.dist-info/RECORD CHANGED
@@ -1,9 +1,9 @@
-returnn/PKG-INFO,sha256=7DsJhNRMx120PgvkCadLKZw80JtB_JVeD_oBNG4z2Gk,5215
+returnn/PKG-INFO,sha256=F-f1DEzHygaHesMySEkH2_u39GXt9OERAslZKyZmcmI,5215
 returnn/__init__.py,sha256=biBtRsM0WZ406vShaeH-9WFoqJ8XwTbn6g0EeFJ7l8E,1012
 returnn/__main__.py,sha256=qBFbuB1yN3adgVM5pXt2-Yq9vorjRNchNPL8kDKx44M,31752
 returnn/__old_mod_loader__.py,sha256=nvsNY-xELdS_IPNkv66Q9Rmvg4dbGW0-EBRDcCmctos,7654
 returnn/__setup__.py,sha256=22kQn2fh11iPM0hLb2Fy5sLmoU1JGvmDxXRYuRgQkwU,4659
-returnn/_setup_info_generated.py,sha256=DgiJfR1KjQflhw91humh9dADdAZGYBaAZ8_d4ianWiI,77
+returnn/_setup_info_generated.py,sha256=bEaq0TU9FCMkk-5FMiUwVz_fxtRp8y5ZMan1RswtRTU,77
 returnn/config.py,sha256=3tmKhB6FnQZaNdtcYsiB61JnEY--iZ2qmJ4yq0b6tE0,29140
 returnn/forward_iface.py,sha256=A_OJiaXsX4MlXQRzST86ylyxSUZbC402PQL1REcqHjM,911
 returnn/learning_rate_control.py,sha256=ZvWryAn_tv9DhV8sh1LV3eE34Yltl3On3mYZAG4hR9s,34684
@@ -19,7 +19,7 @@ returnn/datasets/cached.py,sha256=DIRdWrxBmsZG8O_9eVxBO5mcdo4f5KU-Xb-4wVz59Io,25
 returnn/datasets/cached2.py,sha256=_6pza3IG68JexaExhj1ld3fP6pE7T-G804driJ9Z_qo,12141
 returnn/datasets/distrib_files.py,sha256=_UlcrnaU1rA9v6D3H3X4dPhcA--09fNeVnWs9VNo0yg,27656
 returnn/datasets/generating.py,sha256=O1fs9dhk1Um2E3ZeOTfDHS5FlwvqFImfGcMlJP-xAQM,99814
-returnn/datasets/hdf.py,sha256=shif0aQqWWNJ0b6YnycpPjIVNsxjLrA41Y66-_SluGI,66993
+returnn/datasets/hdf.py,sha256=yqzr-nzqlt02QZoW2uFowKT19gd5e-9mJpHCKSQxW8o,67643
 returnn/datasets/lm.py,sha256=5hSdBgmgTP0IzO2p-JjiWtny0Zb0M20goXtjlw4JVR4,99206
 returnn/datasets/map.py,sha256=kOBJVZmwDhLsOplzDNByIfa0NRSUaMo2Lsy36lBvxrM,10907
 returnn/datasets/meta.py,sha256=0wQzRzjShLSYNFoGo_MdR5IT8arxHr9gFjUlEqb2rbY,94969
@@ -91,7 +91,7 @@ returnn/frontend/control_flow_ctx.py,sha256=v17CsNwRnZYe8GdMtGJt2ftibfxMCGK1i0l-
 returnn/frontend/conv.py,sha256=Q0q90-uu9d6qV-v8_DlFGxpZtc6FjfXVpfkkXmv1Alk,31959
 returnn/frontend/device.py,sha256=K7Y1qoQcO4GIHgLkPLQWK-GVT8gKL8GwyQrmPo8LgBE,1438
 returnn/frontend/dims.py,sha256=aH5FQ_m0xMD6Rj-BUWGx8lB-HkCuwZfMBf6mZbGGW5E,12611
-returnn/frontend/dropout.py,sha256=rsx3p5b0NblBfXXSQZTQFJ8jUUS3fj4Qzc39iffBMCA,5006
+returnn/frontend/dropout.py,sha256=bH0keqKcBzkC_SPlQoir9HPxTtgoVCp61YD5ZvEOuA4,5031
 returnn/frontend/dtype.py,sha256=Ooc5BrcNrTp6XShuFEV9g5V6-niuy4ImP_Lt_Qgq3jE,1886
 returnn/frontend/gradient.py,sha256=G-Qv4gKGHYEeB92Zwco9ao4qjd6umZPUzQC4J-fbYWo,4033
 returnn/frontend/graph.py,sha256=PIv901WZ1rfTV0QGkyzBv6UxfWk9NsLGxdoJ5x9-8Xg,1818
@@ -105,15 +105,15 @@ returnn/frontend/math_.py,sha256=KlJxdIib8ENlid7cc4lcwHv5e21tzTjTEV8VgEDAijo,169
 returnn/frontend/matmul.py,sha256=3QaGiZtSs9PriT40T7Vc3KnYKPgYSN4tCZytYeq9qMA,1945
 returnn/frontend/module.py,sha256=219rh5mE0CD0-NdxXLsKyhv3BNtOI9jSyiI1Rb8MOyU,10700
 returnn/frontend/nested.py,sha256=Hm4GT5ZI1OyWpYxv_SP5jlBztJsjGVMgtvKJnvQYa00,15068
-returnn/frontend/normalization.py,sha256=QIjXYg0C8BD2g_1lAkVO4Cara729uHC_bsQh99VsWeI,14061
+returnn/frontend/normalization.py,sha256=-lYJ9IWcheOQu1gXJehSOA76qgVtxd1C07Jqps6Qg1o,14116
 returnn/frontend/parameter.py,sha256=w6SN-uv87OyeWBt90_3UBbK0h6sftSOCxkqXPg76caY,10375
-returnn/frontend/parametrizations.py,sha256=hVbOlgm1pQAmZnAnNxq8Tk23rykr_iy3-6R1H6CwlMA,2798
+returnn/frontend/parametrizations.py,sha256=ptNgBw5IiPXVpB3QGse7AGAhdXp8X1rCqYUl2Mae8aI,2876
 returnn/frontend/parametrize.py,sha256=VhgTEP7ehON950Q4bkCy8rvg9641moEKAXn0XzomK6E,7216
 returnn/frontend/piecewise_linear.py,sha256=TdL6wzop8P1dcIZwkEbJFvSUZSI1cbhS3XKzlWQkEVI,1964
 returnn/frontend/rand.py,sha256=Levgf5VtOOBKDSgz0869Jf3VW4BWxYZuRXsa_fOxNI4,12969
-returnn/frontend/rec.py,sha256=la-VXR_hzvwNzpAgn4Okl-yDx3F4gOW-81EKm-jAAlg,7999
-returnn/frontend/reduce.py,sha256=-Zt-OH6Zbtb9uR6YEzurCyrowH-anIXvuga6Pla2V70,10220
-returnn/frontend/run_ctx.py,sha256=ItcZwuFItkZjYWrg715L1Za2Xg7__MQCrRCAwBeTUxA,21411
+returnn/frontend/rec.py,sha256=6YSsSG7fdtfvvg24vmexSg8R2aVCcKHBdGLh-Mgn9Co,8037
+returnn/frontend/reduce.py,sha256=xvxN_h3LsMJdmT0IbW4nOf8qFhckuAniIhD9PalO6j0,10305
+returnn/frontend/run_ctx.py,sha256=WxKmBpkb2b34AQbnQ9YIRYvNoy3Fa0yJIezNCWLpeWU,23785
 returnn/frontend/signal.py,sha256=XgOBL1iy-cJgulePH5HRPAwp2cScy60q4RItr7xzvGc,4412
 returnn/frontend/state.py,sha256=EePdrx6PtWL4mJ2XZmGlh5dl4nq6G9wZpqP4hdDEzfY,2935
 returnn/frontend/stepwise_scheduler.py,sha256=fMOTR7npGCDXrXDmSQ4VwmudoHEbY3Yr-QGyjFdQJSc,927
@@ -129,7 +129,7 @@ returnn/frontend/_native/tensor_ops.cpp,sha256=bYtwwn_NeJfAEHWYPEJlkoLDKt9baZ3RA
 returnn/frontend/_native/tensor_ops.hpp,sha256=dDqvUejRNHjItnmOP5aHyAQbAmXmXoDVXSe3tveEU8A,3732
 returnn/frontend/audio/__init__.py,sha256=8mahwucBje8qHKw0bOvoySlvvD0rFKxviSvcAHSjiJY,67
 returnn/frontend/audio/mel.py,sha256=VZdxf2mTLzLOXsLRzCvaad712Zf0c2iwdthrzeVfgxk,7885
-returnn/frontend/audio/specaugment.py,sha256=nw8PepKPPwmI13-QyGBm45QjnoY4I_FEjA-_X6KIwzM,5798
+returnn/frontend/audio/specaugment.py,sha256=w7YPEJ6zhCaG5AAaDd-HxsKwa_2vA7wFqHrEjxiUVPI,5841
 returnn/frontend/conversions/__init__.py,sha256=7plsDxWVYhASa-3qmqbdzSI34A9ujUH2iMkL3eRD0TI,84
 returnn/frontend/conversions/espnet_e_branchformer.py,sha256=Mmp3G6nySy0CqeHa-um-RAuUSnFH1DKNjBbqQB_Pomo,9018
 returnn/frontend/conversions/hf_llama.py,sha256=1WQOhQyUWwkAznaRqK2zpThP8XZbaomkaE8qMG_bZPY,9662
@@ -216,7 +216,7 @@ returnn/torch/data/queued_data_iter.py,sha256=PoOsGHdHVZjTmcyfq_ZOw--P6hyfTdmAWI
 returnn/torch/data/returnn_dataset_wrapper.py,sha256=2CaDapzrlqahANuq-nyVAtv5ENHuM8A7okORwYJDisg,8006
 returnn/torch/data/tensor_utils.py,sha256=-Teqi--LLbt6q_5mDRdoHZHmPgSdC83W706ukif_YiU,1284
 returnn/torch/frontend/__init__.py,sha256=AA48HZnC17ASuKA0EWy8loZ-Bib_yUtqF4T1wYvjst4,62
-returnn/torch/frontend/_backend.py,sha256=SKxxpIM0rXEcZ92p-Um5thfC7vmDoZmda13SMAXVYL0,101771
+returnn/torch/frontend/_backend.py,sha256=xX3xGe-TtGy_VnTyQyhDD3lfZ55gZajgxE9khOyQ4HE,101844
 returnn/torch/frontend/_rand.py,sha256=1JgIkV2XmpgJD86zXZ-NCAe-QuoP2swr6NaS1oz3Qa8,1830
 returnn/torch/frontend/bridge.py,sha256=Z2_UW8AagezC7zsXDc5PKcd8G9WwisV7j9SWGHU0m4U,7840
 returnn/torch/frontend/raw_ops.py,sha256=lF0h-KtYYsdaaqQADylVZp9qzPskOOXA4MfmYDyx5IU,296
@@ -253,8 +253,8 @@ returnn/util/sig_proc.py,sha256=Tjz0VOAVyqu2qDCF5HZ1JjALjcFsHcNkcd96WgZeKfE,7265
 returnn/util/task_system.py,sha256=y4sMVXQ25Qd2z0rx03uOlXlkE-jbCYC1Sjfn-XlraVU,26003
 returnn/util/train_proc_manager.py,sha256=Pjht28k6uz6BNQ47uW6Gf880iyq5q4wx7P_K2tmoAM8,3266
 returnn/util/watch_memory.py,sha256=BR5P2kvBN6UI81cE0_1WAA6Hd1SByLbBaiDxvLhPOew,4213
-returnn-1.20250416.134058.dist-info/LICENSE,sha256=ywBD_U2aD4vpuoIgNAsjIGBYydl0tVKll3De0Z8s77c,11041
-returnn-1.20250416.134058.dist-info/METADATA,sha256=7DsJhNRMx120PgvkCadLKZw80JtB_JVeD_oBNG4z2Gk,5215
-returnn-1.20250416.134058.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
-returnn-1.20250416.134058.dist-info/top_level.txt,sha256=Lsn4WZc5Pbfk0-xDQOgnFCxOoqxL4CyeM3N1TFbJncw,8
-returnn-1.20250416.134058.dist-info/RECORD,,
+returnn-1.20250417.191918.dist-info/LICENSE,sha256=ywBD_U2aD4vpuoIgNAsjIGBYydl0tVKll3De0Z8s77c,11041
+returnn-1.20250417.191918.dist-info/METADATA,sha256=F-f1DEzHygaHesMySEkH2_u39GXt9OERAslZKyZmcmI,5215
+returnn-1.20250417.191918.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+returnn-1.20250417.191918.dist-info/top_level.txt,sha256=Lsn4WZc5Pbfk0-xDQOgnFCxOoqxL4CyeM3N1TFbJncw,8
+returnn-1.20250417.191918.dist-info/RECORD,,