returnn 1.20250226.104737__py3-none-any.whl → 1.20250226.115259__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of returnn might be problematic. Click here for more details.
- returnn/PKG-INFO +1 -1
- returnn/_setup_info_generated.py +2 -2
- returnn/tensor/_dim_extra.py +4 -4
- returnn/torch/util/debug_inf_nan.py +17 -13
- {returnn-1.20250226.104737.dist-info → returnn-1.20250226.115259.dist-info}/METADATA +1 -1
- {returnn-1.20250226.104737.dist-info → returnn-1.20250226.115259.dist-info}/RECORD +9 -9
- {returnn-1.20250226.104737.dist-info → returnn-1.20250226.115259.dist-info}/LICENSE +0 -0
- {returnn-1.20250226.104737.dist-info → returnn-1.20250226.115259.dist-info}/WHEEL +0 -0
- {returnn-1.20250226.104737.dist-info → returnn-1.20250226.115259.dist-info}/top_level.txt +0 -0
returnn/PKG-INFO
CHANGED
returnn/_setup_info_generated.py
CHANGED
|
@@ -1,2 +1,2 @@
-version = '1.20250226.[truncated in this extraction]
-long_version = '1.20250226.[truncated in this extraction]
+version = '1.20250226.115259'
+long_version = '1.20250226.115259+git.0d32534'
returnn/tensor/_dim_extra.py
CHANGED
|
@@ -390,15 +390,15 @@ class _DimMixin:
(indentation reconstructed; the original table layout lost it)
         if dim_extra:
             dim_extra.cache_dyn_size_ext_dev.clear()
             dim_extra.cache_seq_mask.clear()
+            if dim.dyn_size_ext is not None or dim.dimension is None:
+                dim_extra.cache_dim_math.clear()
+            else:
+                dim_extra.cache_dim_math.clear_dynamic()
         if only_self:
             return
         if dim_extra:
             # Any dims via dim math could also contain raw tensors,
             # so iterate through them.
-            if dim.dyn_size_ext is not None or dim.dimension is None:
-                dim_extra.cache_dim_math.clear()
-            else:
-                dim_extra.cache_dim_math.clear_dynamic()
             queue += dim_extra.cache_dim_math.values()
         if dim_extra.same_as:
             queue.append(dim_extra.same_as)
(The following hunks belong to returnn/torch/util/debug_inf_nan.py, per the file list above; the file header was lost in this extraction.)

@@ -39,6 +39,7 @@ from io import TextIOBase
 import traceback
 from types import FrameType
 import torch
+import tree

 # noinspection PyProtectedMember
 from torch.utils._python_dispatch import TorchDispatchMode
@@ -96,6 +97,7 @@ def debug_inf_nan(

 # For efficiency, and to be less spammy
 _TraceFuncNameBlacklist = {
+    "aten::empty.memory_format",
     "aten::zeros_like",
     "aten::ones_like",
     "aten::full",
@@ -113,6 +115,7 @@ _TraceFuncNameBlacklist = {
     "aten::split_with_sizes",
     "aten::slice.Tensor",
     "aten::select.int",
+    "aten::max_pool2d_with_indices",
 }
@@ -140,19 +143,20 @@ class _TraceOps(TorchDispatchMode):
(indentation of the added lines reconstructed; the 13 removed lines were truncated by this extraction — only the final fragment "self." survives)
         if self.report_every_op_call:
             print(f"--- op {func.name()}", file=self.file)
         out = func(*args, **kwargs)
-        [13 removed lines not captured in this extraction; last begins "self."]
+        for out_ in tree.flatten(out):
+            if isinstance(out_, torch.Tensor):
+                with no_python_dispatcher():
+                    got_nan_inf_t = torch.stack([torch.isnan(out_).any(), torch.isinf(out_).any()]).cpu()
+                got_nan = got_nan_inf_t[0].item()
+                got_inf = got_nan_inf_t[1].item()
+                if got_nan or got_inf:
+                    s = "/".join([s_ for s_, b in [("nan", got_nan), ("inf", got_inf)] if b])
+                    print(f"--> {s} in {func}: {out_}", file=self.file)
+                    traceback.print_list(
+                        _extract_stack_up_to(skip_top_num_frames=1, root_frame=self.root_frame), file=self.file
+                    )
+                    if self.stop_reporting_after_first_inf_nan:
+                        self.enabled = False
         return out
@@ -1,9 +1,9 @@
|
|
|
1
|
-
returnn/PKG-INFO,sha256=
|
|
1
|
+
returnn/PKG-INFO,sha256=2Ws--V5aicc3WJ-I6OrqPbbrvVNTH3Cnno6L7yeIyKY,5215
|
|
2
2
|
returnn/__init__.py,sha256=biBtRsM0WZ406vShaeH-9WFoqJ8XwTbn6g0EeFJ7l8E,1012
|
|
3
3
|
returnn/__main__.py,sha256=qBFbuB1yN3adgVM5pXt2-Yq9vorjRNchNPL8kDKx44M,31752
|
|
4
4
|
returnn/__old_mod_loader__.py,sha256=nvsNY-xELdS_IPNkv66Q9Rmvg4dbGW0-EBRDcCmctos,7654
|
|
5
5
|
returnn/__setup__.py,sha256=22kQn2fh11iPM0hLb2Fy5sLmoU1JGvmDxXRYuRgQkwU,4659
|
|
6
|
-
returnn/_setup_info_generated.py,sha256=
|
|
6
|
+
returnn/_setup_info_generated.py,sha256=3ur2a8rg2h6MJe2vAo7Tq4axfkV1GYJMcaQdnsmshb8,77
|
|
7
7
|
returnn/config.py,sha256=3tmKhB6FnQZaNdtcYsiB61JnEY--iZ2qmJ4yq0b6tE0,29140
|
|
8
8
|
returnn/forward_iface.py,sha256=A_OJiaXsX4MlXQRzST86ylyxSUZbC402PQL1REcqHjM,911
|
|
9
9
|
returnn/learning_rate_control.py,sha256=ZvWryAn_tv9DhV8sh1LV3eE34Yltl3On3mYZAG4hR9s,34684
|
|
@@ -154,7 +154,7 @@ returnn/sprint/extern_interface.py,sha256=l-v1X-Yg0UpTFe7Y3c4FwWOqpSNuv9Oy5EzqlK
|
|
|
154
154
|
returnn/sprint/interface.py,sha256=_IGNQlOFcJcwsSeVkKcM-y8g2NDJv07jFhii47KfWtg,36490
|
|
155
155
|
returnn/tensor/README.md,sha256=X6BqcRLrPLPnwF9yR69uqIFrMnNluj9pBkOPHwNgzuo,501
|
|
156
156
|
returnn/tensor/__init__.py,sha256=on6j5PEOQpck50UcsR4nJzJSDmoVy34z1Oq4efv6Ax0,154
|
|
157
|
-
returnn/tensor/_dim_extra.py,sha256=
|
|
157
|
+
returnn/tensor/_dim_extra.py,sha256=kL_nnGNaRpKIQLlvCo6TJ35WynS_jIssNZusFmtOAE0,122551
|
|
158
158
|
returnn/tensor/_tensor_extra.py,sha256=v8oacDyrNMlDTRF0XR0LcU04snr5I1D9_yidw1ZWKk4,164859
|
|
159
159
|
returnn/tensor/_tensor_mixin_base.py,sha256=H5z86I0NejxrSgMH1c5oXQzBqS6L9HpvP4y7oegBaSc,643
|
|
160
160
|
returnn/tensor/_tensor_op_overloads.py,sha256=kVVcnYtcZdW7Vjj78V1Im_yVX2M2r6dUTgeiAQZ37X0,5449
|
|
@@ -226,7 +226,7 @@ returnn/torch/optim/lion.py,sha256=jV_qfwyyO5HAgqW94caap-ALkVjU688RpRgkZyLNZ5Y,5
|
|
|
226
226
|
returnn/torch/util/README.md,sha256=AW-6ueWhgcwDcm57md6sm227QXNkvLnlRLwaH7NlS-w,193
|
|
227
227
|
returnn/torch/util/__init__.py,sha256=AOXYUjzPm0XrzFJCPAXo9Jj_FvqD1XH3FfKtho80Vl8,26
|
|
228
228
|
returnn/torch/util/array_.py,sha256=ell3VZvn01SLtF9Pw2fvPzFNO-XDQ7tSB9VCrVSKmSA,2556
|
|
229
|
-
returnn/torch/util/debug_inf_nan.py,sha256=
|
|
229
|
+
returnn/torch/util/debug_inf_nan.py,sha256=pXAHwgyn1aimLjD-XUblY2syBRCK0J20ioWgpvWfHvg,6400
|
|
230
230
|
returnn/torch/util/diagnose_gpu.py,sha256=PYMmSk7iQ-jC3RXKKNXlYx1Q744C0LXqz0SB6ympwQg,5844
|
|
231
231
|
returnn/torch/util/exception_helper.py,sha256=4e7YEf9D42aAUEkM3uSjnOxpNEYgtyPSpNV0-1L6PSU,4319
|
|
232
232
|
returnn/torch/util/gradient_checkpoint.py,sha256=iLy-FB65DC8O6LxzmMvFjnSdpIVpko87ppIvRKAbtpQ,27995
|
|
@@ -253,8 +253,8 @@ returnn/util/sig_proc.py,sha256=Tjz0VOAVyqu2qDCF5HZ1JjALjcFsHcNkcd96WgZeKfE,7265
|
|
|
253
253
|
returnn/util/task_system.py,sha256=y4sMVXQ25Qd2z0rx03uOlXlkE-jbCYC1Sjfn-XlraVU,26003
|
|
254
254
|
returnn/util/train_proc_manager.py,sha256=Pjht28k6uz6BNQ47uW6Gf880iyq5q4wx7P_K2tmoAM8,3266
|
|
255
255
|
returnn/util/watch_memory.py,sha256=BR5P2kvBN6UI81cE0_1WAA6Hd1SByLbBaiDxvLhPOew,4213
|
|
256
|
-
returnn-1.20250226.
|
|
257
|
-
returnn-1.20250226.
|
|
258
|
-
returnn-1.20250226.
|
|
259
|
-
returnn-1.20250226.
|
|
260
|
-
returnn-1.20250226.
|
|
256
|
+
returnn-1.20250226.115259.dist-info/LICENSE,sha256=ywBD_U2aD4vpuoIgNAsjIGBYydl0tVKll3De0Z8s77c,11041
|
|
257
|
+
returnn-1.20250226.115259.dist-info/METADATA,sha256=2Ws--V5aicc3WJ-I6OrqPbbrvVNTH3Cnno6L7yeIyKY,5215
|
|
258
|
+
returnn-1.20250226.115259.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
|
|
259
|
+
returnn-1.20250226.115259.dist-info/top_level.txt,sha256=Lsn4WZc5Pbfk0-xDQOgnFCxOoqxL4CyeM3N1TFbJncw,8
|
|
260
|
+
returnn-1.20250226.115259.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|