returnn 1.20251027.224345-py3-none-any.whl → 1.20260109.93428-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
This release has been flagged as potentially problematic.
- returnn/PKG-INFO +2 -2
- returnn/_setup_info_generated.py +2 -2
- returnn/config.py +1 -1
- returnn/datasets/lm.py +20 -0
- returnn/datasets/meta.py +93 -43
- returnn/datasets/postprocessing.py +597 -108
- returnn/datasets/util/vocabulary.py +90 -0
- returnn/frontend/array_.py +46 -0
- returnn/frontend/attention.py +54 -20
- returnn/frontend/conv.py +273 -54
- returnn/frontend/device.py +14 -1
- returnn/frontend/encoder/conformer.py +20 -0
- returnn/frontend/encoder/transformer.py +2 -0
- returnn/frontend/loss.py +40 -1
- returnn/frontend/math_.py +54 -14
- returnn/native_op.cpp +80 -0
- returnn/sprint/cache.py +12 -13
- returnn/tensor/utils.py +7 -4
- returnn/tf/frontend_layers/_backend.py +4 -3
- returnn/tf/layers/basic.py +15 -39
- returnn/tf/native_op.py +11 -58
- returnn/tf/network.py +1 -1
- returnn/tf/util/basic.py +19 -0
- returnn/torch/engine.py +37 -3
- returnn/torch/frontend/_backend.py +135 -13
- returnn/torch/frontend/bridge.py +61 -0
- returnn/torch/util/exception_helper.py +7 -1
- returnn/util/basic.py +3 -6
- returnn/util/better_exchook.py +4 -0
- returnn/util/debug.py +11 -2
- returnn/util/file_cache.py +15 -1
- returnn/util/task_system.py +1 -1
- {returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/METADATA +2 -2
- {returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/RECORD +37 -37
- {returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/LICENSE +0 -0
- {returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/WHEEL +0 -0
- {returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/top_level.txt +0 -0
(Removed lines whose content was not captured by this view are shown as "-…".)

returnn/torch/frontend/_backend.py
CHANGED
@@ -1166,20 +1166,29 @@ class TorchBackend(Backend[torch.Tensor]):
         if start is None:
             start = 0
         if isinstance(size, Dim):
+            assert end is None
             size = size.get_dim_value()
         elif isinstance(size, Tensor):
+            assert end is None
             assert size.dims == ()  # scalar
             size = size.raw_tensor
-…
-…
-…
-        else:
+        elif isinstance(size, int):
+            pass
+        elif size is None:
             if isinstance(end, Tensor):
                 assert end.dims == ()
                 end = end.raw_tensor
-…
+            elif isinstance(end, int):
+                if end < 0:
+                    end += axis.get_dim_value()
+            elif end is None:
                 end = axis.get_dim_value()
-…
+            else:
+                raise TypeError(f"slice: unsupported type for end: {type(end)}")
+            size = end - start
+        else:
+            raise TypeError(f"slice: unsupported type for size: {type(size)}")
+        out.raw_tensor = torch.narrow(source.raw_tensor, dim=axis_int, start=start, length=size)
         return out

     @staticmethod
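The rewritten dispatch now explicitly accepts int, Tensor, Dim, or None for size, and int, Tensor, or None for end (including negative int ends), instead of silently assuming one case. A minimal standalone sketch of the same normalization around torch.narrow, with plain ints and torch tensors standing in for RETURNN's Dim/Tensor wrappers (the name narrow_slice is hypothetical):

import torch

def narrow_slice(source, axis_int, start=None, end=None, size=None):
    # Mirrors the normalization in the diff, with ints standing in for Dim/Tensor.
    if start is None:
        start = 0
    if size is None:
        if end is None:
            end = source.shape[axis_int]
        elif end < 0:
            end += source.shape[axis_int]  # negative end counts from the back
        size = end - start
    return torch.narrow(source, dim=axis_int, start=start, length=size)

x = torch.arange(10)
assert narrow_slice(x, 0, start=2, end=-3).tolist() == [2, 3, 4, 5, 6]
assert narrow_slice(x, 0, start=1, size=4).tolist() == [1, 2, 3, 4]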
@@ -1352,12 +1361,24 @@
         a_dims = a.dims
         b_dims = b.dims

-…
-…
-…
-…
-…
-…
+        if not all(dim in a_dims for dim in reduce) or not all(dim in b_dims for dim in reduce):
+            # revert to the generic einsum implementation
+            assert all(dim in a_dims + b_dims for dim in reduce), "Some reduce Dims not in a or b."
+            result_dims = [dim for dim in a_dims if dim not in reduce] + [
+                dim for dim in b_dims if dim not in reduce and dim not in a_dims
+            ]
+            map_to_letter = {}
+            for dim in a_dims + b_dims:
+                if dim not in map_to_letter:
+                    map_to_letter[dim] = chr(97 + len(map_to_letter))  # 'a', 'b', 'c', ...
+            a_subscript = "".join(map_to_letter[dim] for dim in a_dims)
+            b_subscript = "".join(map_to_letter[dim] for dim in b_dims)
+            out_subscript = "".join(map_to_letter[dim] for dim in result_dims)
+            raw_result = torch.einsum(f"{a_subscript},{b_subscript}->{out_subscript}", a.raw_tensor, b.raw_tensor)
+            result_tensor = Tensor(
+                "einsum", dims=result_dims, raw_tensor=raw_result, dtype=TorchBackend.get_dtype_name_raw(raw_result)
+            )
+            return result_tensor

         if len(reduce) > 1:
             reduce = list(reduce)
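The new branch builds classic einsum subscript strings by assigning one letter per distinct dim, in order of appearance. A standalone sketch of that mapping, with string keys standing in for Dim objects (matmul_generic is a hypothetical name; the real code only takes this path when some reduce dim is absent from a or b):

import torch

def matmul_generic(a, b, a_dims, b_dims, reduce):
    # Assign one subscript letter per distinct dim, in order of appearance.
    letters = {}
    for d in list(a_dims) + list(b_dims):
        letters.setdefault(d, chr(97 + len(letters)))  # 'a', 'b', 'c', ...
    out_dims = [d for d in a_dims if d not in reduce] + [
        d for d in b_dims if d not in reduce and d not in a_dims
    ]
    eq = "{},{}->{}".format(
        "".join(letters[d] for d in a_dims),
        "".join(letters[d] for d in b_dims),
        "".join(letters[d] for d in out_dims),
    )
    return torch.einsum(eq, a, b), out_dims

a = torch.randn(2, 3, 4)  # dims: batch, time, feat
b = torch.randn(2, 4)     # dims: batch, feat
out, dims = matmul_generic(a, b, ["batch", "time", "feat"], ["batch", "feat"], reduce=["feat"])
assert out.shape == (2, 3) and dims == ["batch", "time"]  # equation here: "abc,ac->ab"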
@@ -1767,6 +1788,9 @@
         remaining_dims = [d for d in tensor.dims if d not in mask.dims]
         tensor_templ_dims = tuple(dims) + tuple(remaining_dims)
         in_raw = tensor.copy_compatible_to_dims_raw(tensor_templ_dims)
+        if any(in_raw.shape[i] == 1 < d.get_dim_value() for i, d in enumerate(dims)):
+            # unbroadcast
+            in_raw = in_raw.expand([d.get_dim_value() for d in tensor_templ_dims])
         if mask.raw_tensor.device.type == "meta":
             # This is not supported, but also, we would anyway not know the out shape.
             # However, instead of erroring, just assume some dummy mask.
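The added expand handles the case where in_raw still carries a broadcast dim of size 1 where the mask expects the full size; boolean indexing in torch needs materialized sizes. A small illustration:

import torch

mask = torch.tensor([[True, False], [True, True]])  # shape (2, 2)
t = torch.zeros(1, 2)  # leading dim of size 1, meant to broadcast against the mask
# t[mask] would fail: boolean index shape (2, 2) does not match t's shape (1, 2).
expanded = t.expand(2, 2)
print(expanded[mask].shape)  # torch.Size([3]): one element per True entry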
@@ -1920,7 +1944,7 @@
         if not out_spatial_dims:
             out_spatial_dims = rf.make_conv_out_spatial_dims(
                 in_spatial_dims=in_spatial_dims,
-                filter_size=…
+                filter_size=filter_size,
                 strides=strides or 1,
                 dilation_rate=dilation_rate or 1,
                 padding=padding,
@@ -2033,6 +2057,104 @@
         out.feature_dim = out_dim
         return out, out_spatial_dims

+    # noinspection PyShadowingBuiltins
+    @staticmethod
+    def transposed_conv(
+        source: Tensor,
+        *,
+        in_dim: Dim,
+        out_dim: Dim,
+        in_spatial_dims: Sequence[Dim],
+        out_spatial_dims: Optional[Sequence[Dim]] = None,
+        filter: Tensor,
+        filter_size: Sequence[Dim],
+        padding: str,
+        remove_padding: Union[Sequence[int], int] = 0,
+        output_padding: Optional[Union[Sequence[Optional[int]], int]] = None,
+        strides: Optional[Sequence[int]] = None,
+        bias: Optional[Tensor] = None,
+    ) -> Tuple[Tensor, Sequence[Dim]]:
+        """transposed convolution"""
+        if not out_spatial_dims:
+            out_spatial_dims = rf.make_transposed_conv_out_spatial_dims(
+                in_spatial_dims=in_spatial_dims,
+                filter_size=filter_size,
+                strides=strides,
+                padding=padding,
+                output_padding=output_padding,
+            )
+        assert remove_padding == 0  # not implemented yet otherwise...
+        if strides is None:
+            strides = [fs.dimension for fs in filter_size]
+        filter_dims = (in_dim, out_dim) + tuple(filter_size)
+        filter = filter.copy_transpose(filter_dims)
+        batch_dims = [d for d in source.dims if d not in (in_dim,) + tuple(in_spatial_dims)]
+        # Torch conv expects (N,C,<spatial dims>) as shape.
+        source = source.copy_transpose(batch_dims + [in_dim] + list(in_spatial_dims))
+        if len(batch_dims) == 1:
+            src_raw = source.raw_tensor
+        else:
+            src_raw = torch.reshape(
+                source.raw_tensor,
+                # potentially merge batch dims all together
+                [-1, in_dim.get_dim_value()] + [d.get_dim_value() for d in in_spatial_dims],
+            )
+        if padding == "same":
+            raise NotImplementedError("transposed_conv with padding='same' not implemented")
+        if padding == "valid":
+            padding_val = 0
+        else:
+            raise ValueError(f"invalid padding {padding!r}, expected 'same' or 'valid'")
+        if len(filter_size) == 1:
+            out_raw = torch.nn.functional.conv_transpose1d(
+                src_raw,
+                weight=filter.raw_tensor,
+                bias=bias.raw_tensor if bias is not None else None,
+                stride=strides,
+                padding=padding_val,
+                output_padding=output_padding or 0,
+            )
+        elif len(filter_size) == 2:
+            out_raw = torch.nn.functional.conv_transpose2d(
+                src_raw,
+                weight=filter.raw_tensor,
+                bias=bias.raw_tensor if bias is not None else None,
+                stride=strides,
+                padding=padding_val,
+                output_padding=output_padding or 0,
+            )
+        elif len(filter_size) == 3:
+            out_raw = torch.nn.functional.conv_transpose3d(
+                src_raw,
+                weight=filter.raw_tensor,
+                bias=bias.raw_tensor if bias is not None else None,
+                stride=strides,
+                padding=padding_val,
+                output_padding=output_padding or 0,
+            )
+        else:
+            raise ValueError(f"invalid number of filter dims {filter_size}, expected 1, 2, or 3")
+        if remove_padding:
+            if isinstance(remove_padding, int):
+                remove_padding = [remove_padding] * len(out_spatial_dims)
+            assert len(remove_padding) == len(out_spatial_dims)
+            slices = [slice(None)] * out_raw.ndim
+            for i, pad in enumerate(remove_padding):
+                if pad > 0:
+                    slices[2 + i] = slice(0, -pad)
+            out_raw = out_raw[tuple(slices)]
+        out = Tensor(
+            "transposed_conv",
+            dims=batch_dims + [out_dim] + list(out_spatial_dims),
+            dtype=TorchBackend.get_dtype_name_raw(out_raw),
+        )
+        if len(batch_dims) == 1:
+            out.raw_tensor = out_raw
+        else:
+            out.raw_tensor = torch.reshape(out_raw, [d.get_dim_value() for d in out.dims])
+        out.feature_dim = out_dim
+        return out, out_spatial_dims
+
     @staticmethod
     def pool(
         source: Tensor,
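As a shape sanity check for the new method: with padding='valid' (padding_val = 0) and no output padding, conv_transpose produces (in - 1) * stride + filter elements per spatial dim. A minimal raw-torch sketch:

import torch

n, c_in, c_out, t_in = 1, 4, 8, 10
filt, stride = 3, 2
x = torch.randn(n, c_in, t_in)
# Weight layout for conv_transpose1d is (in_channels, out_channels, kernel).
w = torch.randn(c_in, c_out, filt)
y = torch.nn.functional.conv_transpose1d(x, w, stride=stride, padding=0)
assert y.shape == (n, c_out, (t_in - 1) * stride + filt)  # (1, 8, 21)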
returnn/torch/frontend/bridge.py
CHANGED
@@ -136,6 +136,15 @@ class RFModuleAsPTModule(torch.nn.Module):
     def _get_name(self):
         return self._rf_module.__class__.__name__ + "[RF→PT]"

+    def __repr__(self) -> str:
+        """
+        Return a custom repr for Sequential/ModuleList that compresses repeated module representations if possible,
+        otherwise fallback to default behavior.
+        """
+        if _can_use_compact_repr(self):
+            return _repr_compact(self)
+        return super().__repr__()
+
     @property
     def rf_module(self) -> rf.Module:
         """RF module"""
@@ -193,3 +202,55 @@ class RFModuleAsPTModule(torch.nn.Module):
             # See similar logic in torch.nn.Module._apply.
             pt_param = torch.nn.Parameter(tensor, tensor.requires_grad)
             rf_param.raw_tensor = pt_param
+
+
+def _can_use_compact_repr(self: RFModuleAsPTModule) -> bool:
+    return list(self._modules.keys()) == [str(i) for i in range(len(self._modules))]
+
+
+def _repr_compact(self: RFModuleAsPTModule) -> str:
+    """
+    Return a custom repr for Sequential/ModuleList that compresses repeated module representations.
+    Code copied and adapted from torch.nn.ModuleList.__repr__.
+    """
+    list_of_reprs = [repr(item) for item in self._modules.values()]
+    if len(list_of_reprs) == 0:
+        return self._get_name() + "()"
+
+    start_end_indices = [[0, 0]]
+    repeated_blocks = [list_of_reprs[0]]
+    for i, r in enumerate(list_of_reprs[1:], 1):
+        if r == repeated_blocks[-1]:
+            start_end_indices[-1][1] += 1
+            continue
+
+        start_end_indices.append([i, i])
+        repeated_blocks.append(r)
+
+    lines = []
+    main_str = self._get_name() + "("
+    for (start_id, end_id), b in zip(start_end_indices, repeated_blocks):
+        local_repr = f"({start_id}): {b}"  # default repr
+
+        if start_id != end_id:
+            n = end_id - start_id + 1
+            local_repr = f"({start_id}-{end_id}): {n} x {b}"
+
+        local_repr = _add_indent(local_repr, 2)
+        lines.append(local_repr)
+
+    main_str += "\n  " + "\n  ".join(lines) + "\n"
+    main_str += ")"
+    return main_str
+
+
+def _add_indent(s_: str, num_spaces: int) -> str:
+    s = s_.split("\n")
+    # don't do anything for single-line stuff
+    if len(s) == 1:
+        return s_
+    first = s.pop(0)
+    s = [(num_spaces * " ") + line for line in s]
+    s = "\n".join(s)
+    s = first + "\n" + s
+    return s
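The compression itself is the same run-length grouping of equal reprs that torch.nn.ModuleList.__repr__ performs in recent PyTorch versions. Distilled to plain Python:

reprs = ["Linear(8, 8)"] * 3 + ["Dropout(0.1)"]
start_end, blocks = [[0, 0]], [reprs[0]]
for i, r in enumerate(reprs[1:], 1):
    if r == blocks[-1]:
        start_end[-1][1] += 1  # extend the current run of identical reprs
    else:
        start_end.append([i, i])
        blocks.append(r)
for (s, e), b in zip(start_end, blocks):
    print(f"({s}): {b}" if s == e else f"({s}-{e}): {e - s + 1} x {b}")
# (0-2): 3 x Linear(8, 8)
# (3): Dropout(0.1)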
returnn/torch/util/exception_helper.py
CHANGED
@@ -71,7 +71,13 @@ def help_on_torch_exception(
     if not count_frames:
         exc_ext.append("(No module call frames.)")

-    if …
+    if (
+        # KeyError formatting would be wrong, showing `KeyError: "enc_spatial_dim\n\nStep idx: 0\..."`
+        not isinstance(exc, KeyError)
+        and len(exc.args) == 1
+        and isinstance(exc.args[0], str)
+        and not always_direct_print
+    ):
         exc.args = ("\n".join([exc.args[0], ""] + exc_ext),)
     else:
         for msg in exc_ext:
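The KeyError exclusion exists because KeyError renders its argument via repr(), so appending multi-line text would display as one quoted string with literal \n escapes. For illustration:

try:
    raise KeyError("enc_spatial_dim\n\nStep idx: 0")
except KeyError as exc:
    print(exc)  # 'enc_spatial_dim\n\nStep idx: 0' -- repr(), all on one line

try:
    raise ValueError("some error\n\nStep idx: 0")
except ValueError as exc:
    print(exc)  # printed with real line breaks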
returnn/util/basic.py
CHANGED
@@ -365,12 +365,9 @@ def get_checkpoint_filepattern(filepath):
     :return: CheckpointLoader compatible filepattern
     :rtype: str
     """
-    if filepath.endswith(".meta"):
-        return filepath[: -len(".meta")]
-    elif filepath.endswith(".index"):
-        return filepath[: -len(".index")]
-    elif filepath.endswith(".pt"):
-        return filepath[: -len(".pt")]
+    for ext in [".meta", ".index", ".pt"]:
+        if filepath.endswith(ext):
+            return filepath[: -len(ext)]
     return filepath

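Usage stays the same; the loop just deduplicates the three suffix cases:

def get_checkpoint_filepattern(filepath):
    for ext in [".meta", ".index", ".pt"]:
        if filepath.endswith(ext):
            return filepath[: -len(ext)]
    return filepath

assert get_checkpoint_filepattern("model.005.index") == "model.005"
assert get_checkpoint_filepattern("model.005.pt") == "model.005"
assert get_checkpoint_filepattern("model.005") == "model.005"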
returnn/util/better_exchook.py
CHANGED
@@ -1093,6 +1093,7 @@ def format_tb(
     with_color=None,
     with_vars=None,
     clear_frames=True,
+    colorize=None,
 ):
     """
     Formats a traceback into a list of strings, each corresponding to one frame.
@@ -1110,11 +1111,14 @@
         That will potentially fix some mem leaks regarding locals, so it can be important.
         Also see https://github.com/python/cpython/issues/113939.
         However, any further access to frame locals will not work (e.g., if you want to use a debugger afterward).
+    :param colorize: for compat with Python >=3.13, currently ignored
     :return: list of strings, each corresponding to one frame in the traceback.
         Each string contains the file name, line number, function name, source code line, maybe relevant variables,
         etc., and a final newline.
     :rtype: list[str]
     """
+    if colorize is not None and with_color is None:
+        with_color = colorize
     color = Color(enable=with_color)
     output = _OutputLinesCollector(color=color)

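For background: Python 3.13's traceback machinery passes a colorize keyword to formatting hooks, so drop-in replacements like this one need to accept it. The shim maps it onto the existing with_color flag when that was not given explicitly; the pattern in isolation (resolve_color_flag is a hypothetical name):

def resolve_color_flag(with_color=None, colorize=None):
    # Map Python >= 3.13's `colorize` onto the older `with_color` flag,
    # unless `with_color` was set explicitly by the caller.
    if colorize is not None and with_color is None:
        with_color = colorize
    return with_color

assert resolve_color_flag(colorize=True) is True
assert resolve_color_flag(with_color=False, colorize=True) is False
assert resolve_color_flag() is None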
returnn/util/debug.py
CHANGED
@@ -704,7 +704,7 @@ def check_py_traces_rf_to_pt_equal(
     """
     import random
     import torch
-    from returnn.tensor import …
+    from returnn.tensor import Dim
    import returnn.frontend as rf

    # noinspection PyProtectedMember
@@ -715,9 +715,18 @@ def check_py_traces_rf_to_pt_equal(
     def _get_entry(trace, func, i, name, j):
         return trace[func][i][name][j]

+    def _get_entry_attr(trace, func, i, name, j):
+        name, attr = name.split(".", 1)
+        obj = trace[func][i][name][j]
+        return eval(f"{name}.{attr}", {name: obj})
+
     def _resolve_dim(dim: Union[Dim, str]) -> Dim:
         if isinstance(dim, Dim):
             return dim
+        elif isinstance(dim, str) and "." in dim:
+            dim = _get_entry_attr(trace_rf, *check_rf[:2], dim, -1)
+            assert isinstance(dim, Dim)
+            return dim
         elif isinstance(dim, str):
             dim = _get_entry(trace_rf, *check_rf[:2], dim, -1)
             assert isinstance(dim, Dim)
@@ -763,7 +772,7 @@ def check_py_traces_rf_to_pt_equal(
             if len(indices) > 5:
                 msgs.append(" non-matching ...")
             non_matching.append("\n".join(msgs_prefix + msgs))
-            print(…
+            print(" mismatch!")
             for msg in msgs:
                 print(msg)

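_get_entry_attr lets a trace key such as "out.dims[0]" name an attribute path on a traced object, resolved by eval with only that object in scope. The same trick in isolation (get_attr_path is a hypothetical name):

def get_attr_path(obj, name, attr_path):
    # Evaluate e.g. "out.dims[0]" with `out` bound to obj and nothing else in scope.
    return eval(f"{name}.{attr_path}", {name: obj})

class Demo:
    dims = (2, 3)

assert get_attr_path(Demo, "out", "dims[0]") == 2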
returnn/util/file_cache.py
CHANGED
@@ -426,7 +426,21 @@ class FileCache:
         orig_mtime_ns = os.stat(src_filename).st_mtime_ns
         FileInfo(mtime_ns=orig_mtime_ns).save(info_file_name)

-        _copy_with_prealloc(src_filename, dst_tmp_filename)
+        try:
+            _copy_with_prealloc(src_filename, dst_tmp_filename)
+        except Exception:
+            # Cleanup if it was created already.
+            # That avoids some of the ambiguity of the existence of the .copy file.
+            # https://github.com/rwth-i6/returnn/issues/1785
+            try:
+                os.remove(dst_tmp_filename)
+            except FileNotFoundError:
+                pass
+            try:
+                os.remove(info_file_name)
+            except FileNotFoundError:  # not really expected here, but safe to ignore
+                pass
+            raise
         os.rename(dst_tmp_filename, dst_filename)

     @staticmethod
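The fix follows the usual copy-then-clean-up-on-failure pattern: if writing the temp .copy file fails, remove whatever partial state exists and re-raise, so a later cache lookup never sees a half-written file. Distilled (copy_atomic is a hypothetical name):

import os
import shutil

def copy_atomic(src, dst):
    tmp = dst + ".copy"
    try:
        shutil.copyfile(src, tmp)
    except Exception:
        try:
            os.remove(tmp)  # drop the partial file; ignore if it never got created
        except FileNotFoundError:
            pass
        raise
    os.rename(tmp, dst)  # only rename once the copy fully succeeded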
returnn/util/task_system.py
CHANGED
@@ -671,7 +671,7 @@ class Pickler(_BasePickler):
                 return
             # For some reason, Numpy fromstring/tostring is faster than Numpy loads/dumps.
             self.save(make_numpy_ndarray_fromstring)
-            self.save((obj.tostring(), str(obj.dtype), obj.shape))
+            self.save((obj.tobytes(), str(obj.dtype), obj.shape))
             self.write(pickle.REDUCE)

         dispatch[numpy.ndarray] = save_ndarray
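Context: ndarray.tostring was a long-deprecated alias of tobytes and was removed in NumPy 2.0, hence the swap. The serialized triple round-trips like this:

import numpy

a = numpy.arange(6, dtype="int32").reshape(2, 3)
payload = (a.tobytes(), str(a.dtype), a.shape)  # what the pickler saves
b = numpy.frombuffer(payload[0], dtype=payload[1]).reshape(payload[2])
assert (a == b).all()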
{returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: returnn
-Version: 1.20251027.224345
+Version: 1.20260109.93428
 Summary: The RWTH extensible training framework for universal recurrent neural networks
 Home-page: https://github.com/rwth-i6/returnn/
 Author: Albert Zeyer
@@ -36,7 +36,7 @@ Welcome to RETURNN
 `RETURNN paper 2018 <https://arxiv.org/abs/1805.05225>`_.

 RETURNN - RWTH extensible training framework for universal recurrent neural networks,
-is a …
+is a PyTorch/TensorFlow-based implementation of modern recurrent neural network architectures.
 It is optimized for fast and reliable training of recurrent neural networks in a multi-GPU environment.

 The high-level features and goals of RETURNN are:
{returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
-returnn/PKG-INFO,sha256=…
+returnn/PKG-INFO,sha256=8G2OFR-V5IlE98f0vmLneA27jg9-B7eN973G7vJpj0I,5215
 returnn/__init__.py,sha256=biBtRsM0WZ406vShaeH-9WFoqJ8XwTbn6g0EeFJ7l8E,1012
 returnn/__main__.py,sha256=lHyZcu_0yc9f7Vf_Kfdy9PmeU0T76XVXnpalHi5WKro,31740
 returnn/__old_mod_loader__.py,sha256=nvsNY-xELdS_IPNkv66Q9Rmvg4dbGW0-EBRDcCmctos,7654
 returnn/__setup__.py,sha256=22kQn2fh11iPM0hLb2Fy5sLmoU1JGvmDxXRYuRgQkwU,4659
-returnn/_setup_info_generated.py,sha256=…
-returnn/config.py,sha256=…
+returnn/_setup_info_generated.py,sha256=RHjC4xFQRTza5prANYrhwttWMqAEZoLiKwzMpCmll80,77
+returnn/config.py,sha256=JK8EjDsUdyY2c90s0KY1rLD1kesVfz6vRT0gxy_AQ5I,29142
 returnn/forward_iface.py,sha256=A_OJiaXsX4MlXQRzST86ylyxSUZbC402PQL1REcqHjM,911
 returnn/learning_rate_control.py,sha256=ZvWryAn_tv9DhV8sh1LV3eE34Yltl3On3mYZAG4hR9s,34684
 returnn/log.py,sha256=WoTDv4XDovgvgXa7iiav-nA8pb25lOEzndbnVrDLfUo,12319
-returnn/native_op.cpp,sha256=…
+returnn/native_op.cpp,sha256=itGDV05Nqg59g37qXAt-Z4c-rX7eDRQFY9efDPjaVlg,38613
 returnn/native_op.py,sha256=4_NnvfNxsM8GE_FsD6yOg6PZegqIdtJ3Sl1GdBWmFvg,244424
 returnn/pretrain.py,sha256=MHiXJZqkQFmDVyaYsGpd_Acv20wxl7Pr6s6qJzAT2FI,22648
 returnn/datasets/__init__.py,sha256=PvDlfDOaaopIeUIt0OSvHD2eHZkdkyE-sjMXf35EH5U,390
@@ -21,13 +21,13 @@ returnn/datasets/distrib_files.py,sha256=48edqdf7YpnPJ-TOis3Mz5U9A2DSxfiYT1HCMSt
 returnn/datasets/generating.py,sha256=o9-JZ2s5QKssux6GcSaM3oivf_PE6nhSOeytRyGB7pQ,99574
 returnn/datasets/hdf.py,sha256=v5sjBenURR9Z-g7AQ9tsL84yDSye5RtbLpym3M6HSDE,67833
 returnn/datasets/huggingface.py,sha256=ls9WMR6gUcMgGksl80g0An1az5Xjya_V3ojbbbsZqrU,20047
-returnn/datasets/lm.py,sha256=…
+returnn/datasets/lm.py,sha256=CXl_g-Z28RWlBTzx35uC4r_GCwOP05LIsUp0iSi6JG4,100652
 returnn/datasets/map.py,sha256=kOBJVZmwDhLsOplzDNByIfa0NRSUaMo2Lsy36lBvxrM,10907
-returnn/datasets/meta.py,sha256=…
+returnn/datasets/meta.py,sha256=hTtfwINIxP2S4JQ5IQXzvTh2MixwxzeF06pPTW36yl0,101456
 returnn/datasets/multi_proc.py,sha256=BClXq0fActi1XQa4vcMhHmhYF0Q-fnnDzlIlbBM6_DM,22614
 returnn/datasets/normalization_data.py,sha256=J3njQCMvWAbIAVPepO2L_Xdau9eWYB7Zyd6STeGzTbc,14615
 returnn/datasets/numpy_dump.py,sha256=wl8bKIKAlff2HPJPtuu5wBg3TLOf16d2wLVB4lLAwTM,5158
-returnn/datasets/postprocessing.py,sha256=…
+returnn/datasets/postprocessing.py,sha256=Yy7rSzpB8z6PkUTZsPE_AN9Di8FPdNn617JTCV7L-VI,42453
 returnn/datasets/raw_wav.py,sha256=M7eTHp4CTtLQf3yPTiJY-mSJYgZNxkGV9IFN9J1dq_4,9144
 returnn/datasets/sprint.py,sha256=JAs5dOmdteSOwA7YQcTF9KaTCtGfRjiyJUZClSr85pY,55502
 returnn/datasets/stereo.py,sha256=PkowC91bZWihIYuIZgyGgPcNwgq5jBvyxxu1nER-VhM,17633
@@ -35,7 +35,7 @@ returnn/datasets/text_dict.py,sha256=xOWwZc5xGVg2Ic1Ezbm-uEcj_i7ajxqRfPn_TihQbzc
 returnn/datasets/util/__init__.py,sha256=rEKhSD6fyhDiQF-x7dUQMwa29JZu72SDm7mYcCcLghY,52
 returnn/datasets/util/feature_extraction.py,sha256=axtXDb9wcNpOmyhmW3WJUj5xda29TKkKvOcGGvq7ExA,23923
 returnn/datasets/util/strings.py,sha256=pP8pmXhArkssYqmPOLuxEG9gsko891ZxrWiai86qbLE,412
-returnn/datasets/util/vocabulary.py,sha256=…
+returnn/datasets/util/vocabulary.py,sha256=994cHmRI3Yy8mHL79oCMrJITRIS9su11V2zizF5__Fo,31389
 returnn/engine/__init__.py,sha256=br7hpn8i_hIBi2uTQfnN3BF9g5DREYa_mQi0_Nvlu6o,228
 returnn/engine/base.py,sha256=0n4FtB_B2H3W_9KdoLr0P7YPER-hVkbk69pwFqsqmqw,18467
 returnn/engine/batch.py,sha256=amXW8mGspuSQjo00JdisE2eOLy5Ij1weWWzkE-lXSJM,9912
@@ -81,16 +81,16 @@ returnn/frontend/_cache.py,sha256=Uao2xzfvVaKABk1fkxcpXzxKIGJaI9FwwlTvvoNUstk,85
 returnn/frontend/_numpy_backend.py,sha256=fZjks7p3dgxVZ6tSDazTTgBxNjJqXjfqgw_7mA7rDEE,9066
 returnn/frontend/_random_journal.py,sha256=_ktP_mjgx8vtQQGX_DofdhewJj0aPiczefTWeemPkmo,5457
 returnn/frontend/_utils.py,sha256=uVQldGHyYKIyhSEmumJ04ix5eP5tjZw4CEC0w6-zhyQ,12074
-returnn/frontend/array_.py,sha256=…
-returnn/frontend/attention.py,sha256=…
+returnn/frontend/array_.py,sha256=bZwTgNkMsGiSP6TVgI7bxY6zZMjcs9TVsHlajYrHUoA,56791
+returnn/frontend/attention.py,sha256=bFD9Ei6GxSi-BC1OfueDyTIE-51a3dKKZOWdSIbz7l8,46633
 returnn/frontend/backend.py,sha256=iQ9w4xl8Ea7bgpb0VUaCKq50rV5Bl2E5J8Rhd-oqD_c,883
 returnn/frontend/build_from_dict.py,sha256=rfWa2rjjhIR_kIQED_nMrygrQBunS6unegzWTLVbC98,3017
 returnn/frontend/cond.py,sha256=gh6wg0aSbAJQfKRv4BQAu-EfPWtWPLFjgc8IaPPFmwg,1023
 returnn/frontend/const.py,sha256=A5fP9w6Akv56d89pPvdoZaXvC9ZTYcexepnS9O2clOc,3945
 returnn/frontend/container.py,sha256=wF3OlQN7WlOVmmdapUth_Unha3DVf6h1B7okBJAuJDA,8011
 returnn/frontend/control_flow_ctx.py,sha256=v17CsNwRnZYe8GdMtGJt2ftibfxMCGK1i0l-GX5ILu0,699
-returnn/frontend/conv.py,sha256=…
-returnn/frontend/device.py,sha256=…
+returnn/frontend/conv.py,sha256=RbVyFGspn40VNT1B-KWWaDBBUhd7VFhKTN-V_SrwPlU,39514
+returnn/frontend/device.py,sha256=gX7zPRZrnhjMgpgg6aACtE7Lg6qYlzerYbeCTiRhxhw,1665
 returnn/frontend/dims.py,sha256=_HDU-Kxn3pApicFkm0F4Fs-ZAuF1gKXG8rroQHCFQQI,13073
 returnn/frontend/dropout.py,sha256=TjqZCKDIOBeHr14-NCemOm9m3p84LxQuPH1DvRAYg88,5028
 returnn/frontend/dtype.py,sha256=Ooc5BrcNrTp6XShuFEV9g5V6-niuy4ImP_Lt_Qgq3jE,1886
@@ -101,8 +101,8 @@ returnn/frontend/init.py,sha256=bVB7bpghaY8DI_HL0mkB_9z95onWnIX2zlW4hlMYnRw,7494
 returnn/frontend/label_smoothing.py,sha256=lxmaowNr61sCMzMewqHhu1r0CcklYfhLXlFnBu8DeAU,5676
 returnn/frontend/linear.py,sha256=xRUjnkD3MTWDezSaYATBYJQ2fa1RhKMNrTuhC54hhVs,2252
 returnn/frontend/loop.py,sha256=t-z6ke1X03I2aPUEqLYmVZWyMzfW3IedFvKUGc-TCX8,16160
-returnn/frontend/loss.py,sha256=…
-returnn/frontend/math_.py,sha256=…
+returnn/frontend/loss.py,sha256=aSKzjhjIikeNJqzcUBBlaBTXILuAoW6wrmsExGtJJBY,8572
+returnn/frontend/math_.py,sha256=wIWYtjcIEV_QXNJiNT1lYsVRQdNLxtBbTYiAPr_OR3Y,18442
 returnn/frontend/matmul.py,sha256=xkueyxzSDz8MsYaWxPSjmV2Yy-tcaiOQDXbFt1IQM2A,1944
 returnn/frontend/module.py,sha256=nt35I9xyHuH42qobLHGUFoNI5-mVieAtA36SqK6NhpY,11065
 returnn/frontend/nested.py,sha256=PKsKWHwE2SI19DjZ9vRI8q4-ywIGMK3-TTUuqdXrVlM,15592
@@ -139,16 +139,16 @@ returnn/frontend/decoder/__init__.py,sha256=A-koKyPVlXp_V_2bk6GKZ1Xfv4rYIcfxGMXQ
 returnn/frontend/decoder/transformer.py,sha256=64Z1IY_WcDuj8Ti73BGwbT_grrEpxBl5mIsBZkqJzHQ,24650
 returnn/frontend/encoder/__init__.py,sha256=0QGLlujRIKx3zBREeShza_-xhGIxj73zbd7t-g1m-ho,17
 returnn/frontend/encoder/base.py,sha256=A759EwCYAmSi-kzXz1vaTjR2l59TvNGQlzaNdp3UOKs,2109
-returnn/frontend/encoder/conformer.py,sha256=…
+returnn/frontend/encoder/conformer.py,sha256=I1OeaU2P7lm-N_ODS_P4BVQaplJR4Ies1Yd7Lr9mdFw,22225
 returnn/frontend/encoder/conformer_v2.py,sha256=vAYdT8m2Zzg3IIZZafeccClFHU1_c9T-EgBOsHadQPA,7701
 returnn/frontend/encoder/e_branchformer.py,sha256=SZdhpb90FaQdpzgvSOtFPLbLCa0NdycbB5Z4vMoY4TM,12279
-returnn/frontend/encoder/transformer.py,sha256=…
+returnn/frontend/encoder/transformer.py,sha256=0-ku9A8r_w3USQd0aAQ0fdPvFILNWcGaGZ7g3SE-Xjo,11656
 returnn/import_/__init__.py,sha256=L2dKxWCcn0fz_7H7OS-zw5i5Yrljjjh_d61dEcFP_JY,243
 returnn/import_/common.py,sha256=0cmvyd7NtMLH55IskEoSDtkcMwChxLhauV2UZ4mK68I,8148
 returnn/import_/git.py,sha256=IXBVOybQAHf5OlMfVY6oZ-7eiDYPG0OR7MyDJKcVHSM,13961
 returnn/import_/import_.py,sha256=q_NQRbfK5TsALakUxixE0SCqDccfGh6wkquCmJ-3s6w,798
 returnn/sprint/__init__.py,sha256=bKRS04_tJdp-z6Rmv4Fm3hGD9M3UVRcXtvcS8VG12KA,64
-returnn/sprint/cache.py,sha256=…
+returnn/sprint/cache.py,sha256=9QxVIUoDXo2WGkenof7vuh8vqBXShf0Dr4twiLsygaQ,34976
 returnn/sprint/control.py,sha256=FFjpvoktLgp4ETxaMVKrwiegnAQLW5lD_VF4OG3ttUw,31133
 returnn/sprint/error_signals.py,sha256=FvXscTpbLWKRKsK3slPT6QfU5FytLUNblB7Ogup_L7k,23348
 returnn/sprint/extern_interface.py,sha256=l-v1X-Yg0UpTFe7Y3c4FwWOqpSNuv9Oy5EzqlKWUMlE,12055
@@ -164,7 +164,7 @@ returnn/tensor/dim.py,sha256=652DlcSe6o6l5OyY5xt9Yigij_Xry-ToG9AemMX3roY,4208
 returnn/tensor/marked_dim.py,sha256=Ae2hQIb5QixRU2gDhQEm0tmYt8TmomWoGERB414jR8o,1884
 returnn/tensor/tensor.py,sha256=IIHbDu0D_aX8U4LKTm5ThD_fuoGhn98B9EyvVBsPJ3E,9083
 returnn/tensor/tensor_dict.py,sha256=-20YPbXfRDE9WurkfQM-Mw6H8ouaBGL_90SDmK0b4cw,7534
-returnn/tensor/utils.py,sha256=…
+returnn/tensor/utils.py,sha256=GoA4J7Cm8Q1e-NjkGEvCPMOqa4-KdCUjyGLnGTAPlDk,9957
 returnn/tf/__init__.py,sha256=X4g2LFCFTl0uiybMRkfBY8AYkgMa6HX0vVxxTk0nMiE,88
 returnn/tf/compat.py,sha256=NkAkdlR37m2d9qh3i33sIfEGilOaFBeCofAQpQwnZpY,1632
 returnn/tf/data_pipeline.py,sha256=iNkNHv5PiGcudlajG8eO336rPD3hya5kWMDrjhWa4jA,36632
@@ -172,13 +172,13 @@ returnn/tf/distributed.py,sha256=PCLspuNg4XP4ZX3Q444IlohUJEy0Dc8rp8YlmDqVbEc,151
 returnn/tf/engine.py,sha256=nhAMSEVUIf6Onm8jaRkT2CuY5XbOV5CEeWyOMUk67kY,146610
 returnn/tf/horovod.py,sha256=Dpv_3wZxB8q8Gqk6xah4iJ4vKGKWWg1-7PPhpSMPlec,5404
 returnn/tf/hyper_param_tuning.py,sha256=IfVRYYz-oSwOa2E7-vwh-pnWL4j-StHHbSYv7VbvcPE,31619
-returnn/tf/native_op.py,sha256=…
-returnn/tf/network.py,sha256=…
+returnn/tf/native_op.py,sha256=LuwPj-0lMBDZeP9q79cI1FuIYudnMavUlKsTQWMrkl4,77382
+returnn/tf/network.py,sha256=ZBo5qXOZHBJLjv2E8y9APeiRIpz5KEQdc6GN3rl6LBM,224668
 returnn/tf/sprint.py,sha256=Yqjh0-6sCWHpdDPQCzHKx7TwQCOjJyjfd0KHtnYdd-8,5471
 returnn/tf/updater.py,sha256=RcvoGnjBcObbLfLHH_mDRSY2lTeLyNoAFsZpHUiIgRY,72036
 returnn/tf/frontend_layers/README.md,sha256=P4vVl_EK-4jT55m40mq-K4Nr9yFY0tJR5fmDzTHSDFE,1096
 returnn/tf/frontend_layers/__init__.py,sha256=MGUn7rv6fOefbtkX-5pq6fC1T6Y5h0oh1uOPSEcv1_I,506
-returnn/tf/frontend_layers/_backend.py,sha256=…
+returnn/tf/frontend_layers/_backend.py,sha256=_YYQ-lV2srx-DwKIiaKacYkINOisPIstaPzzC7eRsaY,47652
 returnn/tf/frontend_layers/_utils.py,sha256=ijByaDOqPDod5mZC9EoTkt8PHBEODXHsWbkwDOF9XW4,4205
 returnn/tf/frontend_layers/cond.py,sha256=bGd_g2tzpKXO218Xk-so59vFPJF-jF_ZvoZIU-1qBzw,14832
 returnn/tf/frontend_layers/config_entry_points.py,sha256=t01RWOiaZohzuqPXX-MLV0P5yCOfE0dz-9dZ77_pK4c,5751
@@ -194,13 +194,13 @@ returnn/tf/frontend_low_level/__init__.py,sha256=34469k3KzMUIGowxReOZnbf6WdTjxY7
 returnn/tf/frontend_low_level/_backend.py,sha256=Hv838I2eyOP2qVNWs5DJxseyxUbAET2lm0ZZcbW_CsE,24991
 returnn/tf/layers/__init__.py,sha256=Ngu-X84nWFgz7ndDu88DqoZ-5lUMMTQWH4g7N8pSoCg,72
 returnn/tf/layers/base.py,sha256=sUxEfh6WxaHWHG7O3cfxB6gG6YpEHkFKUJVayKvTBSI,152968
-returnn/tf/layers/basic.py,sha256=…
+returnn/tf/layers/basic.py,sha256=jKzfRhBJgt5_tgIATde2kdza5u3aCqACx7BFyClngno,614277
 returnn/tf/layers/rec.py,sha256=3f6M_5aAMPvx7aAHdPV3VSFRHf7tjpp8lrXSzmk1I5c,548435
 returnn/tf/layers/segmental_model.py,sha256=wUyDZGr-eTVIIQWcsHLML0wtOxuWn_NFKOIrUKQcvoI,21515
 returnn/tf/layers/signal_processing.py,sha256=vRlkN7k7otk9_Qdv0qr_l6V0VT5Q6dO2MxwZWb2HH2M,52693
 returnn/tf/layers/variable.py,sha256=G1dIEoq0iQsXp-uOAUPTaBKHSOQfx7Sn-spD8MRv0HM,11446
 returnn/tf/util/__init__.py,sha256=mEg5jNVbQBLO2TGwO4Ff2F5qQN5_Zg4hAAQfX5taeec,92
-returnn/tf/util/basic.py,sha256=…
+returnn/tf/util/basic.py,sha256=ezK-XBKQcscVKmCL43wsieiUZntHUVOZwhDZtVItOqg,304088
 returnn/tf/util/data.py,sha256=AlSa0r_IaXtjKG1q1vxUybFazpjt4lUX8LYq0STJv-w,29471
 returnn/tf/util/gradient_checkpoint.py,sha256=_1NGAmNZ5NiGhFYVRWvBV5yejt-EZWbbvxNWHbESp5Q,7426
 returnn/tf/util/ken_lm.py,sha256=R60UAoywriuDIeQ2Hk3Vm_waf2Hxxc88ofzEw6X6Sd4,17313
@@ -208,7 +208,7 @@ returnn/tf/util/open_fst.py,sha256=sZRDw4TbxvhGqpGdUJWy1ebvlZm4_RPhygpRw9uLAOQ,1
 returnn/torch/README.md,sha256=jzJ2FpOHW02vxN69yKaV97C9LI-hmvjBglKfdZXIDdc,85
 returnn/torch/__init__.py,sha256=MHEUyNHB20Vy89uKAqZoj6FxJKF1Gq3HW-i6ra1pNcI,24
 returnn/torch/distributed.py,sha256=_lyJR71HIoCHpMi5GztGM7YwrX54Am8zSkjnDkE1Lbk,7524
-returnn/torch/engine.py,sha256=…
+returnn/torch/engine.py,sha256=XaJhVpF181sf8M1iXAs3u0zr37VVUG3SW81-DIZgg3g,81280
 returnn/torch/updater.py,sha256=nNd1mBPQyvIB096BEFi0KKmRI-U3jnRETzb743p2B9c,32064
 returnn/torch/data/__init__.py,sha256=6cLNEi8KoGI12PF6akN7mI_mtjlx-0hcQAfMYoExwik,132
 returnn/torch/data/extern_data.py,sha256=5al706ZaYtHWLp5VH2vS-rW69YXP3NHyOFRKY0WY714,7810
@@ -217,9 +217,9 @@ returnn/torch/data/queued_data_iter.py,sha256=PoOsGHdHVZjTmcyfq_ZOw--P6hyfTdmAWI
 returnn/torch/data/returnn_dataset_wrapper.py,sha256=fMahf05G0SPYm6HxSQpVm8JhsIHons-i1Ce4aQv4IjM,8332
 returnn/torch/data/tensor_utils.py,sha256=-Teqi--LLbt6q_5mDRdoHZHmPgSdC83W706ukif_YiU,1284
 returnn/torch/frontend/__init__.py,sha256=AA48HZnC17ASuKA0EWy8loZ-Bib_yUtqF4T1wYvjst4,62
-returnn/torch/frontend/_backend.py,sha256=…
+returnn/torch/frontend/_backend.py,sha256=8EBRGN0jY5rl9Z5-wd4kvoDesssWcVDVXNl25-bG8cA,108882
 returnn/torch/frontend/_rand.py,sha256=1JgIkV2XmpgJD86zXZ-NCAe-QuoP2swr6NaS1oz3Qa8,1830
-returnn/torch/frontend/bridge.py,sha256=…
+returnn/torch/frontend/bridge.py,sha256=RBtAIlYWn_AC-GaHWperrOncPjMLWAOrU30pWk2789A,9775
 returnn/torch/frontend/raw_ops.py,sha256=lF0h-KtYYsdaaqQADylVZp9qzPskOOXA4MfmYDyx5IU,296
 returnn/torch/optim/README.md,sha256=0iH5FiKb7iDrVK5n8V6yCh4ciCFG2YSbyh7lPneT5ik,360
 returnn/torch/optim/__init__.py,sha256=yxdbnOkXAHzZ_t6cHi6zn5x_DQNlLZJ-KxZByHTIg1U,29
@@ -229,18 +229,18 @@ returnn/torch/util/__init__.py,sha256=AOXYUjzPm0XrzFJCPAXo9Jj_FvqD1XH3FfKtho80Vl
 returnn/torch/util/array_.py,sha256=ell3VZvn01SLtF9Pw2fvPzFNO-XDQ7tSB9VCrVSKmSA,2556
 returnn/torch/util/debug_inf_nan.py,sha256=fmzSSTJJyLf7i5yDWRHLeDI0gxvadeqLE8RxMuSHx_4,6398
 returnn/torch/util/diagnose_gpu.py,sha256=_yswLmwR8Q2rCsv2jI5FUQNBT__453jBmiWYwazdu20,6808
-returnn/torch/util/exception_helper.py,sha256=…
+returnn/torch/util/exception_helper.py,sha256=54IzlsXYp6E_rEEWIpgppkFid9stb-2PZVRU8d5mFNE,4497
 returnn/torch/util/gradient_checkpoint.py,sha256=iLy-FB65DC8O6LxzmMvFjnSdpIVpko87ppIvRKAbtpQ,27995
 returnn/torch/util/module.py,sha256=MXHIrF9Isu575DDJIa81212ULKwdqu1oOLxDVZecVSk,1693
 returnn/torch/util/scaled_gradient.py,sha256=C5e79mpqtxdtw08OTSy413TSBSlOertRisc-ioiFIaU,3191
 returnn/util/__init__.py,sha256=UIG1qw4idqhW71BV60ha7h9PktxvEVcBIu0lYRossK8,336
-returnn/util/basic.py,sha256=…
-returnn/util/better_exchook.py,sha256=…
+returnn/util/basic.py,sha256=rFeg3XwjNcNDbBgjkhisStbjTFA8CEfIrdwHjfdkJKw,143212
+returnn/util/better_exchook.py,sha256=hOKazwv2q2-d0XMfxkJXMbLZyNTtraV3jPHplFcrMsg,71014
 returnn/util/bpe.py,sha256=LWFhICZsEOnMwNws0lybPNzKRX6rSr8yKCvP65vjl9Y,19656
 returnn/util/collect_outputs_dict.py,sha256=CjpsftoMgmvyE4wNKTO6F-QQ_44QHXcOZIXMUMQVZ-8,2637
-returnn/util/debug.py,sha256=…
+returnn/util/debug.py,sha256=0ED4etMKG9lVqU0HPKEiCK-HoS8hBgnQza444QCE6ec,28576
 returnn/util/debug_helpers.py,sha256=0EINLK4uLtoSt5_kHs1M2NIFpMd0S7i4c4rx90U4fJk,2914
-returnn/util/file_cache.py,sha256=…
+returnn/util/file_cache.py,sha256=8xE4zMQi38g7ZIGwNohd13_CgjzpIs18ILxFCKttzxE,29439
 returnn/util/fsa.py,sha256=k2lJ8tyf_g44Xk1EPVLwDwpP4spoMTqIigDVOWocQHY,59177
 returnn/util/literal_py_to_pickle.py,sha256=3dnjWPeeiDT2xp4bRDgIf9yddx7b1AG7mOKEn_jiSl8,2173
 returnn/util/lru_cache.py,sha256=7Q5H3a8b07E8e1iB7PA9jCpRnxMJZOFS2KO07cy0gqk,11446
@@ -252,11 +252,11 @@ returnn/util/py-to-pickle.cpp,sha256=ByU4cwy5MGEihaoYiRo1sSsJfYn10_riDwVqSHRLwp8
 returnn/util/py_ext_mod_compiler.py,sha256=I1w9laIPqJbQGb2lFp-3llBjORS-217ZGIbPCp6PIes,1708
 returnn/util/result_with_reason.py,sha256=6jS7caYrZADrb8o-CpQnTfskZb3fTNMcKU-JlnIh6Kg,359
 returnn/util/sig_proc.py,sha256=Tjz0VOAVyqu2qDCF5HZ1JjALjcFsHcNkcd96WgZeKfE,7265
-returnn/util/task_system.py,sha256=…
+returnn/util/task_system.py,sha256=7Dz7Nvi_1-o5pDv9OZYdAnlJw6OSvgbYUmQ72P0Fgkw,26002
 returnn/util/train_proc_manager.py,sha256=Pjht28k6uz6BNQ47uW6Gf880iyq5q4wx7P_K2tmoAM8,3266
 returnn/util/watch_memory.py,sha256=BR5P2kvBN6UI81cE0_1WAA6Hd1SByLbBaiDxvLhPOew,4213
-returnn-1.20251027.224345.dist-info/LICENSE,sha256=…
-returnn-1.20251027.224345.dist-info/METADATA,sha256=…
-returnn-1.20251027.224345.dist-info/WHEEL,sha256=…
-returnn-1.20251027.224345.dist-info/top_level.txt,sha256=…
-returnn-1.20251027.224345.dist-info/RECORD,,
+returnn-1.20260109.93428.dist-info/LICENSE,sha256=ywBD_U2aD4vpuoIgNAsjIGBYydl0tVKll3De0Z8s77c,11041
+returnn-1.20260109.93428.dist-info/METADATA,sha256=8G2OFR-V5IlE98f0vmLneA27jg9-B7eN973G7vJpj0I,5215
+returnn-1.20260109.93428.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+returnn-1.20260109.93428.dist-info/top_level.txt,sha256=Lsn4WZc5Pbfk0-xDQOgnFCxOoqxL4CyeM3N1TFbJncw,8
+returnn-1.20260109.93428.dist-info/RECORD,,
{returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/LICENSE
File without changes
{returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/WHEEL
File without changes
{returnn-1.20251027.224345.dist-info → returnn-1.20260109.93428.dist-info}/top_level.txt
File without changes