returnn 1.20231018.70522.tar.gz → 1.20231018.94805.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (432)
  1. {returnn-1.20231018.70522/returnn.egg-info → returnn-1.20231018.94805}/PKG-INFO +1 -1
  2. returnn-1.20231018.94805/_setup_info_generated.py +2 -0
  3. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_backend.py +7 -4
  4. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/module.cpp +1 -1
  5. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/module.hpp +7 -7
  6. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/tensor_ops.cpp +78 -101
  7. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/tensor_ops.hpp +3 -2
  8. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_utils.py +4 -8
  9. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/loop.py +0 -4
  10. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/tensor_array.py +0 -4
  11. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/engine.py +2 -2
  12. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/frontend/_backend.py +12 -5
  13. {returnn-1.20231018.70522 → returnn-1.20231018.94805/returnn.egg-info}/PKG-INFO +1 -1
  14. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_torch_internal_frontend.py +0 -18
  15. returnn-1.20231018.70522/_setup_info_generated.py +0 -2
  16. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/.editorconfig +0 -0
  17. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/.gitignore +0 -0
  18. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/.gitmodules +0 -0
  19. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/.kateconfig +0 -0
  20. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/CHANGELOG.md +0 -0
  21. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/CODEOWNERS +0 -0
  22. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/CONTRIBUTING.md +0 -0
  23. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/LICENSE +0 -0
  24. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/MANIFEST.in +0 -0
  25. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/README.rst +0 -0
  26. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/__init__.py +0 -0
  27. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/12AX.cluster_map +0 -0
  28. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/_setup_returnn_env.py +0 -0
  29. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-fwd.config +0 -0
  30. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-horovod-mpi.py +0 -0
  31. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-horovod-mpi.py.sh +0 -0
  32. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-horovod-mpi.sh +0 -0
  33. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-hyper-param-tuning.config +0 -0
  34. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-iter-dataset.py +0 -0
  35. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-list-devices.py +0 -0
  36. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-lua-torch-layer.config +0 -0
  37. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-pretrain.config +0 -0
  38. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-record-and-push-to-webserver.py +0 -0
  39. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-returnn-as-framework.py +0 -0
  40. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-rf-pt-benchmark.py +0 -0
  41. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-rf.config +0 -0
  42. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-rhn-enwik8.config +0 -0
  43. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-sprint-interface.py +0 -0
  44. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-att-copy.config +0 -0
  45. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-attention.config +0 -0
  46. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-chunking-blstm.12ax.config +0 -0
  47. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-contribrnn-lstm.12ax.config +0 -0
  48. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-enc-dec.config +0 -0
  49. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-hard-att-copy.config +0 -0
  50. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-lstm-benchmark.py +0 -0
  51. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-maxgradnorm-lstm.12ax.config +0 -0
  52. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-native-lstm-lowmem.12ax.config +0 -0
  53. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-native-lstm.12ax.config +0 -0
  54. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-native-lstm2.12ax.config +0 -0
  55. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-native-lstm2.12ax.tuned.config +0 -0
  56. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-neural-transducer.12ax.config +0 -0
  57. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-rec-explicit-lstm.config +0 -0
  58. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-rec-explicit-rnn.config +0 -0
  59. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-rec-self-att.config +0 -0
  60. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-search-compiled-graph.py +0 -0
  61. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-tf-vanilla-lstm.12ax.config +0 -0
  62. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-timit-lstm-ctc.config +0 -0
  63. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-torch.config +0 -0
  64. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo-upd-mult-model.lstm.12ax.config +0 -0
  65. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/demo.sh +0 -0
  66. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/IAM_lines/a01-000u-00.png +0 -0
  67. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/IAM_lines/a01-007-04.png +0 -0
  68. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/IAM_lines/a01-007-06.png +0 -0
  69. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/README.txt +0 -0
  70. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/chars.txt +0 -0
  71. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/config_demo +0 -0
  72. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/config_fwd +0 -0
  73. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/config_real +0 -0
  74. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/create_IAM_dataset.py +0 -0
  75. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/decode.py +0 -0
  76. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/features/raw/demo.h5 +0 -0
  77. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/go.sh +0 -0
  78. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/lines.txt +0 -0
  79. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/split/eval.txt +0 -0
  80. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/split/train.txt +0 -0
  81. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/IAM/split/valid.txt +0 -0
  82. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/README.md +0 -0
  83. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/artificial/create_test_h5.py +0 -0
  84. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/artificial/forwardconfig +0 -0
  85. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/artificial/go.sh +0 -0
  86. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/artificial/trainconfig +0 -0
  87. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/artificial_rgb/create_test_h5.py +0 -0
  88. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/artificial_rgb/forwardconfig +0 -0
  89. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/artificial_rgb/go.sh +0 -0
  90. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/demos/mdlstm/artificial_rgb/trainconfig +0 -0
  91. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/pyproject.toml +0 -0
  92. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/requirements.txt +0 -0
  93. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/__init__.py +0 -0
  94. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/__main__.py +0 -0
  95. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/__old_mod_loader__.py +0 -0
  96. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/__setup__.py +0 -0
  97. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/config.py +0 -0
  98. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/__init__.py +0 -0
  99. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/audio.py +0 -0
  100. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/basic.py +0 -0
  101. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/bundle_file.py +0 -0
  102. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/cached.py +0 -0
  103. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/cached2.py +0 -0
  104. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/generating.py +0 -0
  105. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/hdf.py +0 -0
  106. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/lm.py +0 -0
  107. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/map.py +0 -0
  108. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/meta.py +0 -0
  109. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/multi_proc.py +0 -0
  110. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/normalization_data.py +0 -0
  111. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/numpy_dump.py +0 -0
  112. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/raw_wav.py +0 -0
  113. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/sprint.py +0 -0
  114. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/stereo.py +0 -0
  115. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/util/__init__.py +0 -0
  116. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/util/feature_extraction.py +0 -0
  117. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/util/strings.py +0 -0
  118. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/datasets/util/vocabulary.py +0 -0
  119. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/engine/__init__.py +0 -0
  120. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/engine/base.py +0 -0
  121. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/engine/batch.py +0 -0
  122. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/__init__.py +0 -0
  123. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/__main__.py +0 -0
  124. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/.git +0 -0
  125. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/.gitignore +0 -0
  126. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/LICENSE +0 -0
  127. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/README.md +0 -0
  128. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/aligner.gif +0 -0
  129. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/check.png +0 -0
  130. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/core.cu +0 -0
  131. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/core.h +0 -0
  132. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/core_cpu.cpp +0 -0
  133. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/LICENSE +0 -0
  134. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/MANIFEST.in +0 -0
  135. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/README.md +0 -0
  136. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/binding.cpp +0 -0
  137. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.cu +0 -0
  138. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.h +0 -0
  139. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/requirements.txt +0 -0
  140. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/setup.py +0 -0
  141. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/__init__.py +0 -0
  142. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/test.py +0 -0
  143. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/ref_rna.py +0 -0
  144. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/setup.py +0 -0
  145. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op.cc +0 -0
  146. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op_kernel_tmpl.h +0 -0
  147. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/warp_rna/__init__.py +0 -0
  148. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/WarpRna/warp-rna/test.cpp +0 -0
  149. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/__init__.py +0 -0
  150. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/graph_editor/README.md +0 -0
  151. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/graph_editor/__init__.py +0 -0
  152. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/graph_editor/edit.py +0 -0
  153. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/graph_editor/reroute.py +0 -0
  154. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/graph_editor/select.py +0 -0
  155. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/graph_editor/subgraph.py +0 -0
  156. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/graph_editor/transform.py +0 -0
  157. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/extern/graph_editor/util.py +0 -0
  158. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/forward_iface.py +0 -0
  159. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/__init__.py +0 -0
  160. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/__init__.py +0 -0
  161. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/backend.cpp +0 -0
  162. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/backend.hpp +0 -0
  163. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/py_utils.hpp +0 -0
  164. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_numpy_backend.py +0 -0
  165. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_random_journal.py +0 -0
  166. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/array_.py +0 -0
  167. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/attention.py +0 -0
  168. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/audio/__init__.py +0 -0
  169. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/audio/mel.py +0 -0
  170. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/audio/specaugment.py +0 -0
  171. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/backend.py +0 -0
  172. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/cond.py +0 -0
  173. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/const.py +0 -0
  174. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/container.py +0 -0
  175. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/control_flow_ctx.py +0 -0
  176. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/conv.py +0 -0
  177. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/device.py +0 -0
  178. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/dims.py +0 -0
  179. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/dropout.py +0 -0
  180. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/dtype.py +0 -0
  181. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/encoder/__init__.py +0 -0
  182. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/encoder/base.py +0 -0
  183. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/encoder/conformer.py +0 -0
  184. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/gradient.py +0 -0
  185. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/graph.py +0 -0
  186. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/init.py +0 -0
  187. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/label_smoothing.py +0 -0
  188. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/linear.py +0 -0
  189. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/loss.py +0 -0
  190. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/math_.py +0 -0
  191. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/matmul.py +0 -0
  192. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/module.py +0 -0
  193. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/normalization.py +0 -0
  194. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/parameter.py +0 -0
  195. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/rand.py +0 -0
  196. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/rec.py +0 -0
  197. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/reduce.py +0 -0
  198. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/run_ctx.py +0 -0
  199. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/signal.py +0 -0
  200. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/state.py +0 -0
  201. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/types.py +0 -0
  202. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/import_/__init__.py +0 -0
  203. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/import_/common.py +0 -0
  204. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/import_/git.py +0 -0
  205. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/import_/import_.py +0 -0
  206. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/learning_rate_control.py +0 -0
  207. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/log.py +0 -0
  208. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/native_op.cpp +0 -0
  209. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/native_op.py +0 -0
  210. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/pretrain.py +0 -0
  211. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/sprint/__init__.py +0 -0
  212. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/sprint/cache.py +0 -0
  213. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/sprint/control.py +0 -0
  214. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/sprint/error_signals.py +0 -0
  215. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/sprint/extern_interface.py +0 -0
  216. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/sprint/interface.py +0 -0
  217. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/README.md +0 -0
  218. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/__init__.py +0 -0
  219. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/_dim_extra.py +0 -0
  220. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/_tensor_extra.py +0 -0
  221. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/_tensor_mixin_base.py +0 -0
  222. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/_tensor_op_overloads.py +0 -0
  223. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/control_flow_ctx.py +0 -0
  224. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/dim.py +0 -0
  225. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/marked_dim.py +0 -0
  226. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/tensor.py +0 -0
  227. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/tensor_dict.py +0 -0
  228. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tensor/utils.py +0 -0
  229. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/__init__.py +0 -0
  230. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/compat.py +0 -0
  231. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/data_pipeline.py +0 -0
  232. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/distributed.py +0 -0
  233. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/engine.py +0 -0
  234. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/README.md +0 -0
  235. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/__init__.py +0 -0
  236. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/_backend.py +0 -0
  237. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/_utils.py +0 -0
  238. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/cond.py +0 -0
  239. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/config_entry_points.py +0 -0
  240. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/debug_eager_mode.py +0 -0
  241. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/dims.py +0 -0
  242. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/layer.py +0 -0
  243. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/loop.py +0 -0
  244. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/make_layer.py +0 -0
  245. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/masked_computation.py +0 -0
  246. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/parameter_assign.py +0 -0
  247. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_layers/prev_tensor_ref.py +0 -0
  248. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_low_level/__init__.py +0 -0
  249. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/frontend_low_level/_backend.py +0 -0
  250. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/horovod.py +0 -0
  251. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/hyper_param_tuning.py +0 -0
  252. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/layers/__init__.py +0 -0
  253. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/layers/base.py +0 -0
  254. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/layers/basic.py +0 -0
  255. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/layers/rec.py +0 -0
  256. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/layers/segmental_model.py +0 -0
  257. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/layers/signal_processing.py +0 -0
  258. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/layers/variable.py +0 -0
  259. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/native_op.py +0 -0
  260. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/network.py +0 -0
  261. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/sprint.py +0 -0
  262. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/updater.py +0 -0
  263. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/util/__init__.py +0 -0
  264. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/util/basic.py +0 -0
  265. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/util/data.py +0 -0
  266. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/util/gradient_checkpoint.py +0 -0
  267. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/util/ken_lm.py +0 -0
  268. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/tf/util/open_fst.py +0 -0
  269. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/README.md +0 -0
  270. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/__init__.py +0 -0
  271. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/data/__init__.py +0 -0
  272. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/data/extern_data.py +0 -0
  273. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/data/pipeline.py +0 -0
  274. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/data/returnn_dataset_wrapper.py +0 -0
  275. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/data/tensor_utils.py +0 -0
  276. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/distributed.py +0 -0
  277. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/frontend/__init__.py +0 -0
  278. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/frontend/_rand.py +0 -0
  279. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/frontend/bridge.py +0 -0
  280. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/frontend/raw_ops.py +0 -0
  281. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/functional/README.md +0 -0
  282. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/functional/__init__.py +0 -0
  283. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/functional/scaled_gradient.py +0 -0
  284. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/torch/updater.py +0 -0
  285. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/__init__.py +0 -0
  286. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/basic.py +0 -0
  287. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/better_exchook.py +0 -0
  288. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/bpe.py +0 -0
  289. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/debug.py +0 -0
  290. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/debug_helpers.py +0 -0
  291. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/fsa.py +0 -0
  292. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/literal_py_to_pickle.py +0 -0
  293. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/math.py +0 -0
  294. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/native_code_compiler.py +0 -0
  295. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/pprint.py +0 -0
  296. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/py-to-pickle.cpp +0 -0
  297. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/py_compat.py +0 -0
  298. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/py_ext_mod_compiler.py +0 -0
  299. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/result_with_reason.py +0 -0
  300. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/sig_proc.py +0 -0
  301. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/util/task_system.py +0 -0
  302. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn.egg-info/SOURCES.txt +0 -0
  303. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn.egg-info/dependency_links.txt +0 -0
  304. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn.egg-info/top_level.txt +0 -0
  305. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/rnn.py +0 -0
  306. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/setup.cfg +0 -0
  307. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/setup.py +0 -0
  308. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/DummySprintExec.py +0 -0
  309. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm-inspection-profile.xml +0 -0
  310. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/.gitignore +0 -0
  311. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/.name +0 -0
  312. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/codeStyleSettings.xml +0 -0
  313. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/codeStyles/Project.xml +0 -0
  314. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/codeStyles/codeStyleConfig.xml +0 -0
  315. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/inspectionProfiles/Project_Default.xml +0 -0
  316. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/inspectionProfiles/profiles_settings.xml +0 -0
  317. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/misc.xml +0 -0
  318. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/modules.xml +0 -0
  319. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/returnn.iml +0 -0
  320. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/PyCharm.idea/scopes/scope_settings.xml +0 -0
  321. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/_set_num_threads1.py +0 -0
  322. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/_setup_returnn_env.py +0 -0
  323. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/_setup_test_env.py +0 -0
  324. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/bpe-unicode-demo.codes +0 -0
  325. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/bpe-unicode-demo.vocab +0 -0
  326. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/lexicon_opt.fst +0 -0
  327. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/lexicon_opt.isyms +0 -0
  328. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/lexicon_opt.jpg +0 -0
  329. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/lexicon_opt.osyms +0 -0
  330. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/lint_common.py +0 -0
  331. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/pycharm-inspect.py +0 -0
  332. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/pylint.py +0 -0
  333. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/returnn-as-framework.py +0 -0
  334. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/rf_utils.py +0 -0
  335. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/spelling.dic +0 -0
  336. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_Config.py +0 -0
  337. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_Dataset.py +0 -0
  338. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_Fsa.py +0 -0
  339. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_GeneratingDataset.py +0 -0
  340. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_HDFDataset.py +0 -0
  341. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_LearningRateControl.py +0 -0
  342. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_Log.py +0 -0
  343. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_MultiProcDataset.py +0 -0
  344. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_Pretrain.py +0 -0
  345. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_ResNet.py +0 -0
  346. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_SprintDataset.py +0 -0
  347. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_SprintInterface.py +0 -0
  348. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TFEngine.py +0 -0
  349. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TFNativeOp.py +0 -0
  350. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TFNetworkLayer.py +0 -0
  351. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TFNetworkRecLayer.py +0 -0
  352. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TFNetworkSigProcLayer.py +0 -0
  353. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TFUpdater.py +0 -0
  354. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TFUtil.py +0 -0
  355. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TF_determinism.py +0 -0
  356. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TaskSystem.py +0 -0
  357. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TaskSystem_SharedMem.py +0 -0
  358. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_TranslationDataset.py +0 -0
  359. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_Util.py +0 -0
  360. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_demos.py +0 -0
  361. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_fork_exec.py +0 -0
  362. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_hdf_dump.py +0 -0
  363. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_array.py +0 -0
  364. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_attention.py +0 -0
  365. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_base.py +0 -0
  366. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_cond.py +0 -0
  367. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_const.py +0 -0
  368. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_container.py +0 -0
  369. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_conv.py +0 -0
  370. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_encoder_conformer.py +0 -0
  371. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_gradient.py +0 -0
  372. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_label_smoothing.py +0 -0
  373. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_loop.py +0 -0
  374. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_math.py +0 -0
  375. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_normalization.py +0 -0
  376. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_rec.py +0 -0
  377. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_reduce.py +0 -0
  378. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_rf_signal.py +0 -0
  379. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_tensor.py +0 -0
  380. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_tools.py +0 -0
  381. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_torch_dataset.py +0 -0
  382. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_torch_engine.py +0 -0
  383. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tests/test_torch_frontend.py +0 -0
  384. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/_setup_returnn_env.py +0 -0
  385. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/analyze-dataset-batches.py +0 -0
  386. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/bliss-collect-seq-lens.py +0 -0
  387. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/bliss-dump-text.py +0 -0
  388. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/bliss-get-segment-names.py +0 -0
  389. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/bliss-to-ogg-zip.py +0 -0
  390. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/bpe-create-lexicon.py +0 -0
  391. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/calculate-word-error-rate.py +0 -0
  392. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/cleanup-old-models.py +0 -0
  393. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/collect-orth-symbols.py +0 -0
  394. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/collect-words.py +0 -0
  395. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/compile_native_op.py +0 -0
  396. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/compile_tf_graph.py +0 -0
  397. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/debug-dump-search-scores.py +0 -0
  398. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/debug-plot-search-scores.py +0 -0
  399. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/dump-dataset-raw-strings.py +0 -0
  400. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/dump-dataset.py +0 -0
  401. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/dump-forward-stats.py +0 -0
  402. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/dump-forward.py +0 -0
  403. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/dump-network-json.py +0 -0
  404. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/dump-pickle.py +0 -0
  405. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/extract_state_tying_from_dataset.py +0 -0
  406. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/get-attention-weights.py +0 -0
  407. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/get-best-model-epoch.py +0 -0
  408. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/hdf_dump.py +0 -0
  409. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/hdf_dump_translation_dataset.py +0 -0
  410. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/import-blocks-mt-model.py +0 -0
  411. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/import-t2t-mt-model.py +0 -0
  412. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/.gitignore +0 -0
  413. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/Makefile +0 -0
  414. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/README.md +0 -0
  415. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/example/README.md +0 -0
  416. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/example/libs_list +0 -0
  417. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.config +0 -0
  418. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.keep_over_epoch.lstm2.config +0 -0
  419. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/example/rescore_lattice.sh +0 -0
  420. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/example/state_vars_list +0 -0
  421. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/example/tensor_names_list +0 -0
  422. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/file.h +0 -0
  423. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/htklatticerescorer.cc +0 -0
  424. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/htklatticerescorer.h +0 -0
  425. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/main.cc +0 -0
  426. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/rescorer.h +0 -0
  427. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/vocabulary.cc +0 -0
  428. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/lattice_rescorer/vocabulary.h +0 -0
  429. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/tf_avg_checkpoints.py +0 -0
  430. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/tf_inspect_checkpoint.py +0 -0
  431. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/tf_inspect_summary_log.py +0 -0
  432. {returnn-1.20231018.70522 → returnn-1.20231018.94805}/tools/torch_export_to_onnx.py +0 -0

{returnn-1.20231018.70522/returnn.egg-info → returnn-1.20231018.94805}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: returnn
-Version: 1.20231018.70522
+Version: 1.20231018.94805
 Summary: The RWTH extensible training framework for universal recurrent neural networks
 Home-page: https://github.com/rwth-i6/returnn/
 Author: Albert Zeyer

returnn-1.20231018.94805/_setup_info_generated.py (new file)

@@ -0,0 +1,2 @@
+version = '1.20231018.094805'
+long_version = '1.20231018.094805+git.74e0e94'

{returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_backend.py

@@ -791,11 +791,13 @@ class Backend(Generic[T]):
             a,
             b,
             name=kind,
-            res_dtype="bool",
+            copy_sparse_dim=False,
             allow_broadcast_all_sources=allow_broadcast_all_sources,
             dim_order=dim_order,
         )
-        out.raw_tensor = cls.compare_raw(a_raw, kind, b_raw)
+        out_raw = cls.compare_raw(a_raw, kind, b_raw)
+        out.dtype = cls.get_dtype_name_raw(out_raw)
+        out.raw_tensor = out_raw
         return out

     @classmethod
@@ -816,11 +818,12 @@ class Backend(Generic[T]):
             a,
             b,
             name=kind,
-            res_dtype=None,
             allow_broadcast_all_sources=allow_broadcast_all_sources,
             dim_order=dim_order,
         )
-        out.raw_tensor = cls.combine_raw(a_raw, kind, b_raw)
+        out_raw = cls.combine_raw(a_raw, kind, b_raw)
+        out.dtype = cls.get_dtype_name_raw(out_raw)
+        out.raw_tensor = out_raw
         return out

     @staticmethod

{returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/module.cpp

@@ -377,7 +377,7 @@ bool PyModuleState::_torchTensorDTypesInit() {
     PyObjectScopedRef mod = PyImport_ImportModule("torch");
     if(!mod) return false;

-    int i = 0;
+    unsigned int i = 0;
     #define AddDType(dtype_) \
         assert(i < sizeof(_torchTensorDTypes)/sizeof(_torchTensorDTypes[0])); \
         _torchTensorDTypes[i].dtype = PyObject_GetAttrString(mod, #dtype_); \

{returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/module.hpp

@@ -112,17 +112,17 @@ public:

    int pyTraverse(visitproc visit, void *arg) {
        Py_VISIT(_notSpecified);
-       for(int i = 0; i < sizeof(_rawTensorTypes)/sizeof(_rawTensorTypes[0]); ++i)
+       for(unsigned int i = 0; i < sizeof(_rawTensorTypes)/sizeof(_rawTensorTypes[0]); ++i)
            Py_VISIT(_rawTensorTypes[i]);
        Py_VISIT(_tensorType);
        Py_VISIT(_dimType);
        Py_VISIT(_globalBackend);
        Py_VISIT(_backendTensorTypeDispatchTable);
-       for(int i = 0; i < NumBackendsWithCachedOps * NumTOps; ++i)
+       for(unsigned int i = 0; i < NumBackendsWithCachedOps * NumTOps; ++i)
            Py_VISIT(_cachedOps[i]);
        Py_VISIT(_torchTensorType);
        Py_VISIT(_torchBackend);
-       for(int i = 0; i < sizeof(_torchTensorDTypes)/sizeof(_torchTensorDTypes[0]); ++i) {
+       for(unsigned int i = 0; i < sizeof(_torchTensorDTypes)/sizeof(_torchTensorDTypes[0]); ++i) {
            Py_VISIT(_torchTensorDTypes[i].dtype);
            Py_VISIT(_torchTensorDTypes[i].name);
        }
@@ -133,17 +133,17 @@ public:
    int pyClear() {
        Py_CLEAR(_notSpecified);
        _rawTensorTypesLen = 0;
-       for(int i = 0; i < sizeof(_rawTensorTypes)/sizeof(_rawTensorTypes[0]); ++i)
+       for(unsigned int i = 0; i < sizeof(_rawTensorTypes)/sizeof(_rawTensorTypes[0]); ++i)
            Py_CLEAR(_rawTensorTypes[i]);
        Py_CLEAR(_tensorType);
        Py_CLEAR(_dimType);
        Py_CLEAR(_globalBackend);
        Py_CLEAR(_backendTensorTypeDispatchTable);
-       for(int i = 0; i < NumBackendsWithCachedOps * NumTOps; ++i)
+       for(unsigned int i = 0; i < NumBackendsWithCachedOps * NumTOps; ++i)
            Py_CLEAR(_cachedOps[i]);
        Py_CLEAR(_torchTensorType);
        Py_CLEAR(_torchBackend);
-       for(int i = 0; i < sizeof(_torchTensorDTypes)/sizeof(_torchTensorDTypes[0]); ++i) {
+       for(unsigned int i = 0; i < sizeof(_torchTensorDTypes)/sizeof(_torchTensorDTypes[0]); ++i) {
            Py_CLEAR(_torchTensorDTypes[i].dtype);
            Py_CLEAR(_torchTensorDTypes[i].name);
        }
@@ -176,7 +176,7 @@ public:
            return NULL;
        }
    }
-   for(int i = 0; i < sizeof(_torchTensorDTypes)/sizeof(_torchTensorDTypes[0]); ++i) {
+   for(unsigned int i = 0; i < sizeof(_torchTensorDTypes)/sizeof(_torchTensorDTypes[0]); ++i) {
        if(_torchTensorDTypes[i].dtype == dtype)
            return _torchTensorDTypes[i].name;
    }
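
Note: every int → unsigned int change in module.cpp and module.hpp above is the same fix: the loop bound sizeof(arr)/sizeof(arr[0]) has type size_t, which is unsigned, so comparing it against a signed int index draws -Wsign-compare warnings. A minimal standalone sketch of the pattern (hypothetical array, not RETURNN code):

    #include <cstdio>

    static void* items[8];

    int main() {
        // sizeof(items)/sizeof(items[0]) is a size_t (unsigned), so a signed
        // `int i` in the condition below would mix signed and unsigned
        // operands and warn under -Wall -Wsign-compare.
        for(unsigned int i = 0; i < sizeof(items)/sizeof(items[0]); ++i)
            std::printf("slot %u: %p\n", i, items[i]);
        return 0;
    }

The NumBackendsWithCachedOps * NumTOps loops get the same treatment, keeping the index type consistent across these loops.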

{returnn-1.20231018.70522 → returnn-1.20231018.94805}/returnn/frontend/_native/tensor_ops.cpp

@@ -18,8 +18,8 @@ PyObject* tensorCopy(
 {
     PyObjectScopedRef rawTensor = PyObject_GetAttrString(tensor, "_raw_tensor");
     if(rawTensor == Py_None)
-        return tensorCopyTemplate(modState, tensor, name, NULL);
-    PyObjectScopedRef res = tensorCopyTemplate(modState, tensor, name, NULL);
+        return tensorCopyTemplate(modState, tensor, name);
+    PyObjectScopedRef res = tensorCopyTemplate(modState, tensor, name);
     if(!res) return NULL;
     if(PyObject_SetAttrString(res, "_raw_tensor", rawTensor) < 0) return NULL;
     return res.release();
@@ -64,7 +64,7 @@ PyObject* tensorCopyTemplate(
     PyModuleState* modState,
     PyObject* tensor,
     const char* name,
-    const char* dtype)
+    PyObject* dtype)
 {
     PyObjectScopedRef version = PyObject_GetAttrString(tensor, "version");
     if(!version) return NULL;
@@ -99,8 +99,10 @@ PyObject* tensorCopyTemplate(
         if(!name_) return NULL;
         if(PyDict_SetItemString(kwargs, "name", name_) < 0) return NULL;
     }
-    {
-        PyObjectScopedRef dtype_ = dtype ? PyUnicode_FromString(dtype) : PyObject_GetAttrString(tensor, "dtype");
+    if(dtype && dtype != Py_None) {
+        if(PyDict_SetItemString(kwargs, "dtype", dtype) < 0) return NULL;
+    } else {
+        PyObjectScopedRef dtype_ = PyObject_GetAttrString(tensor, "dtype");
         if(!dtype_) return NULL;
         if(PyDict_SetItemString(kwargs, "dtype", dtype_) < 0) return NULL;
     }
@@ -147,17 +149,22 @@ PyObject* tensorCopyTemplateSimple(
     PyModuleState* modState,
     PyObject* tensor,
     const char* name_,
-    const char* dtype_)
+    PyObject* dtype,
+    bool copySparseDim)
 {
     PyObjectScopedRef name = name_ ? PyUnicode_FromString(name_) : PyObject_GetAttrString(tensor, "name");
     if(!name) return NULL;
-    PyObjectScopedRef dtype = dtype_ ? PyUnicode_FromString(dtype_) : PyObject_GetAttrString(tensor, "dtype");
-    if(!dtype) return NULL;
+    PyObjectScopedRef dtype_;
+    if(!dtype) {
+        dtype_ = PyObject_GetAttrString(tensor, "dtype");
+        if(!dtype_) return NULL;
+        dtype = dtype_.get();
+    }
     PyObjectScopedRef dims = PyObject_GetAttrString(tensor, "_dims");
     if(!dims) return NULL;

     PyObjectScopedRef res = PyObject_CallFunctionObjArgs(
-        modState->tensorType(), name.get(), dims.get(), dtype.get(), NULL);
+        modState->tensorType(), name.get(), dims.get(), dtype, NULL);
     if(!res) return NULL;

     {
@@ -167,7 +174,7 @@ PyObject* tensorCopyTemplateSimple(
         if(PyObject_SetAttrString(res, "_feature_dim_axis", feature_dim_axis) < 0)
             return NULL;
     }
-    if(!dtype_) {
+    if(copySparseDim) {
         PyObjectScopedRef sparse_dim = PyObject_GetAttrString(tensor, "sparse_dim");
         if(!sparse_dim) return NULL;
         if(sparse_dim != Py_None)
@@ -300,8 +307,8 @@ PyObject* pyTensorCopyTemplate(PyObject *self, PyObject *args, PyObject *kwargs)
     static const char *kwlist[] = { "tensor", "name", "dtype", NULL };
     PyObject* tensor;
     const char* name = NULL;
-    const char* dtype = NULL;
-    if(!PyArg_ParseTupleAndKeywords(args, kwargs, "O|z$z:tensor_copy_template",
+    PyObject* dtype = NULL;
+    if(!PyArg_ParseTupleAndKeywords(args, kwargs, "O|z$O:tensor_copy_template",
        (char**) kwlist, &tensor, &name, &dtype))
        return NULL;

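Note: the "z" → "O" change in the PyArg_ParseTupleAndKeywords format string is what widens the dtype argument of tensor_copy_template from a C string to an arbitrary Python object: "z" converts the argument to const char* (accepting None as NULL), while "O" hands over a borrowed PyObject* unchanged. A standalone sketch of the two conversions (hypothetical demo function, not the actual module):

    #include <Python.h>

    static PyObject* demo(PyObject* self, PyObject* args, PyObject* kwargs) {
        static const char* kwlist[] = { "tensor", "name", "dtype", NULL };
        PyObject* tensor;          // "O": borrowed reference, any Python object
        const char* name = NULL;   // "z": C string, or NULL when None is passed
        PyObject* dtype = NULL;    // "O" after "$": keyword-only object argument
        if(!PyArg_ParseTupleAndKeywords(args, kwargs, "O|z$O:demo",
                (char**) kwlist, &tensor, &name, &dtype))
            return NULL;
        // dtype can now be forwarded to Python-level calls as-is,
        // e.g. a str, or whatever object the backend's get-dtype op returned.
        Py_RETURN_NONE;
    }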
@@ -389,9 +396,9 @@ static bool _isMatchingDimTagsAndRawShape(PyObject* dimTags, PyObject* rawShape,
 }

 static bool _checkTensorRawTensorAssignForBackendWithCachedOps(
-    PyModuleState* modState, BackendWithCachedOps backendId, const char* funcName, PyObject* tensor, PyObject* rawTensor
+    PyModuleState* modState, BackendWithCachedOps backendId, const char* funcName, PyObject* tensor, PyObject* rawTensor, bool checkDtype = true
 ) {
-    {
+    if(checkDtype) {
         PyObject* getDTypeOp = modState->cachedOp(TOp_GetDType, backendId);
         if(!getDTypeOp) return false;
         PyObjectScopedRef dtype = PyObject_GetAttrString(tensor, "dtype");
@@ -528,8 +535,8 @@ static bool _getPermutationSupersetToSubset(const char* funcName, ASeqT subset,
             ++count;
         }
         else if(candidates.size() > 1) {
-            size_t maxMatchPriorityIdx;
-            long maxMatchPriority;
+            size_t maxMatchPriorityIdx = 0;
+            long maxMatchPriority = -1;
             int countSameMatchPriority = 0;
             for(size_t j = 0; j < candidates.size(); ++j) {
                 PyObject* dim_ = subset.getItem(candidates[j]);
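
Note: the two added initializers fix a potential use of uninitialized locals: if the candidate loop's first comparison reads maxMatchPriority before any branch has assigned it, the behavior is undefined. With the sentinels (index 0, priority -1), the first real candidate always wins the comparison. A reduced sketch of the selection pattern (hypothetical priorities, not the actual dim-matching logic):

    #include <cstddef>
    #include <vector>

    // Return the index of the highest-priority candidate. The sentinels make
    // the first comparison well-defined even before any candidate is seen.
    static std::size_t bestCandidate(const std::vector<long>& priorities) {
        std::size_t maxMatchPriorityIdx = 0;
        long maxMatchPriority = -1;
        for(std::size_t j = 0; j < priorities.size(); ++j) {
            if(priorities[j] > maxMatchPriority) {
                maxMatchPriority = priorities[j];
                maxMatchPriorityIdx = j;
            }
        }
        return maxMatchPriorityIdx;
    }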
@@ -667,7 +674,7 @@ template<bool bIsSubset>
667
674
  static PyObject* _compareOrCombine_subsetDims(
668
675
  PyModuleState* modState,
669
676
  const char* rawOpName, bool resultIsBool,
670
- PyObject* permuteOp, PyObject* reshapeOp, PyObject* getShapeOp, PyObject* rawOp,
677
+ PyObject* permuteOp, PyObject* reshapeOp, PyObject* getShapeOp, PyObject* getDtypeOp, PyObject* rawOp,
671
678
  PyObject* a, PyObject* b,
672
679
  PyObject* aRawTensor, PyObject* bRawTensor,
673
680
  PyTupleOrListStaticRef<true> aDims, PyTupleOrListStaticRef<true> bDims,
@@ -681,12 +688,14 @@ static PyObject* _compareOrCombine_subsetDims(
681
688
  rawTensorExt = _permuteAndExtend(rawOpName, permuteOp, reshapeOp, getShapeOp, a, aDims, aRawTensor, bDims, outPermutation);
682
689
 
683
690
  // Now create the result.
684
- PyObjectScopedRef res = tensorCopyTemplateSimple(modState, bIsSubset ? a : b, rawOpName, resultIsBool ? "bool" : NULL);
685
- if(!res) return NULL;
686
691
  PyObjectScopedRef resRawTensor = PyObject_CallFunctionObjArgs(
687
692
  rawOp, bIsSubset ? aRawTensor : rawTensorExt.get(), bIsSubset ? rawTensorExt.get() : bRawTensor, NULL);
688
693
  if(!resRawTensor) return NULL;
689
- if(PyObject_SetAttrString(res, "raw_tensor", resRawTensor) < 0) return NULL;
694
+ PyObjectScopedRef dtype = PyObject_CallFunctionObjArgs(getDtypeOp, resRawTensor.get(), NULL);
695
+ if(!dtype) return NULL;
696
+ PyObjectScopedRef res = tensorCopyTemplateSimple(modState, bIsSubset ? a : b, rawOpName, dtype, !resultIsBool);
697
+ if(!res) return NULL;
698
+ if(PyObject_SetAttrString(res, "_raw_tensor", resRawTensor) < 0) return NULL;
690
699
  return res.release();
691
700
  }
692
701
 
@@ -956,7 +965,7 @@ static PyObject* tensorCopyCompatibleToDims(const char* funcName, PyModuleState*
956
965
  }
957
966
 
958
967
  if(outRawTensor)
959
- if(PyObject_SetAttrString(outTensor, "raw_tensor", outRawTensor) < 0)
968
+ if(PyObject_SetAttrString(outTensor, "_raw_tensor", outRawTensor) < 0)
960
969
  return NULL;
961
970
 
962
971
  if(versionInt == 1) {
@@ -1010,12 +1019,12 @@ static PyObject* compareOrCombine(
1010
1019
  bool resultIsBool,
1011
1020
  PyModuleState* modState,
1012
1021
  const char* rawOpName,
1013
- PyObject* rawOp, PyObject* permuteOp, PyObject* reshapeOp, PyObject* getShapeOp, PyObject* convertToTensorLikeOp,
1022
+ PyObject* rawOp, PyObject* permuteOp, PyObject* reshapeOp, PyObject* getShapeOp, PyObject* getDtypeOp, PyObject* convertToTensorLikeOp,
1014
1023
  bool needConvertToTensor,
1015
1024
  bool allowBroadcastAllSources,
1016
1025
  PyObject* dimOrder
1017
1026
  ) {
1018
- if(!rawOp || !permuteOp || !reshapeOp || !getShapeOp || !convertToTensorLikeOp) return NULL;
1027
+ if(!rawOp || !permuteOp || !reshapeOp || !getShapeOp || !getDtypeOp || !convertToTensorLikeOp) return NULL;
1019
1028
 
1020
1029
  {
1021
1030
  int a_is_tensor = PyObject_IsInstance(a, modState->tensorType());
@@ -1032,8 +1041,6 @@ static PyObject* compareOrCombine(
1032
1041
  return NULL;
1033
1042
  }
1034
1043
  // assume the non-Tensor obj is is scalar
1035
- PyObjectScopedRef res = tensorCopyTemplateSimple(modState, a_is_tensor ? a : b, rawOpName, resultIsBool ? "bool" : NULL);
1036
- if(!res) return NULL;
1037
1044
  PyObjectScopedRef aRawTensor, bRawTensor;
1038
1045
  if(a_is_tensor) {
1039
1046
  assert(a_is_tensor && !b_is_tensor);
@@ -1056,7 +1063,11 @@ static PyObject* compareOrCombine(
1056
1063
  PyObjectScopedRef resRawTensor = PyObject_CallFunctionObjArgs(
1057
1064
  rawOp, aRawTensor ? aRawTensor.get() : a, bRawTensor.get() ? bRawTensor.get() : b, NULL);
1058
1065
  if(!resRawTensor) return NULL;
1059
- if(PyObject_SetAttrString(res, "raw_tensor", resRawTensor) < 0) return NULL;
1066
+ PyObjectScopedRef dtype = PyObject_CallFunctionObjArgs(getDtypeOp, resRawTensor.get(), NULL);
1067
+ if(!dtype) return NULL;
1068
+ PyObjectScopedRef res = tensorCopyTemplateSimple(modState, a_is_tensor ? a : b, rawOpName, dtype, !resultIsBool);
1069
+ if(!res) return NULL;
1070
+ if(PyObject_SetAttrString(res, "_raw_tensor", resRawTensor) < 0) return NULL;
1060
1071
  return res.release();
1061
1072
  }
1062
1073
  if(!a_is_tensor && !b_is_tensor) {
@@ -1066,31 +1077,6 @@ static PyObject* compareOrCombine(
  // both are Tensor
  }

- if(!resultIsBool) {
- PyObjectScopedRef aDtype = PyObject_GetAttrString(a, "dtype");
- if(!aDtype) return NULL;
- if(!PyUnicode_Check(aDtype)) {
- PyErr_Format(
- PyExc_TypeError,
- "compareOrCombine: a.dtype did not return a string, from dtype %R", aDtype.get());
- return NULL;
- }
- PyObjectScopedRef bDtype = PyObject_GetAttrString(b, "dtype");
- if(!bDtype) return NULL;
- if(!PyUnicode_Check(bDtype)) {
- PyErr_Format(
- PyExc_TypeError,
- "compareOrCombine: b.dtype did not return a string, from dtype %R", bDtype.get());
- return NULL;
- }
- if(PyUnicode_Compare(aDtype, bDtype) != 0) {
- PyErr_Format(
- PyExc_ValueError,
- "compareOrCombine: a.dtype != b.dtype, from a.dtype %R and b.dtype %R", aDtype.get(), bDtype.get());
- return NULL;
- }
- }
-
  PyObjectScopedRef aDims = PyObject_GetAttrString(a, "_dims");
  if(!aDims) return NULL;
  if(!PyTuple_Check(aDims)) {
@@ -1125,31 +1111,37 @@ static PyObject* compareOrCombine(

  // first very fast path check, check exact identity of dims
  if(_isSameSeqFast(aDimsSeq, bDimsSeq) && (dimOrder == Py_None || _isSameSeqFast(aDimsSeq, dimOrderSeq))) {
- PyObjectScopedRef res = tensorCopyTemplateSimple(modState, a, rawOpName, resultIsBool ? "bool" : NULL);
- if(!res) return NULL;
  PyObjectScopedRef resRawTensor = PyObject_CallFunctionObjArgs(rawOp, aRawTensor.get(), bRawTensor.get(), NULL);
  if(!resRawTensor) return NULL;
- if(PyObject_SetAttrString(res, "raw_tensor", resRawTensor) < 0) return NULL;
+ PyObjectScopedRef dtype = PyObject_CallFunctionObjArgs(getDtypeOp, resRawTensor.get(), NULL);
+ if(!dtype) return NULL;
+ PyObjectScopedRef res = tensorCopyTemplateSimple(modState, a, rawOpName, dtype, !resultIsBool);
+ if(!res) return NULL;
+ if(PyObject_SetAttrString(res, "_raw_tensor", resRawTensor) < 0) return NULL;
  return res.release();
  }

  // check b is scalar
  if(bDimsSeq.size() == 0 && (dimOrder == Py_None || _isSameSeqFast(aDimsSeq, dimOrderSeq))) {
- PyObjectScopedRef res = tensorCopyTemplateSimple(modState, a, rawOpName, resultIsBool ? "bool" : NULL);
- if(!res) return NULL;
  PyObjectScopedRef resRawTensor = PyObject_CallFunctionObjArgs(rawOp, aRawTensor.get(), bRawTensor.get(), NULL);
  if(!resRawTensor) return NULL;
- if(PyObject_SetAttrString(res, "raw_tensor", resRawTensor) < 0) return NULL;
+ PyObjectScopedRef dtype = PyObject_CallFunctionObjArgs(getDtypeOp, resRawTensor.get(), NULL);
+ if(!dtype) return NULL;
+ PyObjectScopedRef res = tensorCopyTemplateSimple(modState, a, rawOpName, dtype, !resultIsBool);
+ if(!res) return NULL;
+ if(PyObject_SetAttrString(res, "_raw_tensor", resRawTensor) < 0) return NULL;
  return res.release();
  }

  // check a is scalar
  if(aDimsSeq.size() == 0 && (dimOrder == Py_None || _isSameSeqFast(bDimsSeq, dimOrderSeq))) {
- PyObjectScopedRef res = tensorCopyTemplateSimple(modState, b, rawOpName, resultIsBool ? "bool" : NULL);
- if(!res) return NULL;
  PyObjectScopedRef resRawTensor = PyObject_CallFunctionObjArgs(rawOp, aRawTensor.get(), bRawTensor.get(), NULL);
  if(!resRawTensor) return NULL;
- if(PyObject_SetAttrString(res, "raw_tensor", resRawTensor) < 0) return NULL;
+ PyObjectScopedRef dtype = PyObject_CallFunctionObjArgs(getDtypeOp, resRawTensor.get(), NULL);
+ if(!dtype) return NULL;
+ PyObjectScopedRef res = tensorCopyTemplateSimple(modState, b, rawOpName, dtype, !resultIsBool);
+ if(!res) return NULL;
+ if(PyObject_SetAttrString(res, "_raw_tensor", resRawTensor) < 0) return NULL;
  return res.release();
  }

@@ -1159,7 +1151,7 @@ static PyObject* compareOrCombine(
  if(_isSeqSubsetFast(bDimsSeq, aDimsSeq, outPermutation) && (dimOrder == Py_None || _isSameSeqFast(aDimsSeq, dimOrderSeq)))
  return _compareOrCombine_subsetDims<true>(
  modState, rawOpName, resultIsBool,
- permuteOp, reshapeOp, getShapeOp, rawOp,
+ permuteOp, reshapeOp, getShapeOp, getDtypeOp, rawOp,
  a, b,
  aRawTensor, bRawTensor,
  aDimsSeq, bDimsSeq,
@@ -1172,7 +1164,7 @@ static PyObject* compareOrCombine(
  if(_isSeqSubsetFast(aDimsSeq, bDimsSeq, outPermutation) && (dimOrder == Py_None || _isSameSeqFast(bDimsSeq, dimOrderSeq)))
  return _compareOrCombine_subsetDims<false>(
  modState, rawOpName, resultIsBool,
- permuteOp, reshapeOp, getShapeOp, rawOp,
+ permuteOp, reshapeOp, getShapeOp, getDtypeOp, rawOp,
  a, b,
  aRawTensor, bRawTensor,
  aDimsSeq, bDimsSeq,
@@ -1185,7 +1177,7 @@ static PyObject* compareOrCombine(
  if(_isSeqSubsetReorderFast(bDimsSeq, aDimsSeq, outPermutation) && (dimOrder == Py_None || _isSameSeqFast(aDimsSeq, dimOrderSeq)))
  return _compareOrCombine_subsetDims<true>(
  modState, rawOpName, resultIsBool,
- permuteOp, reshapeOp, getShapeOp, rawOp,
+ permuteOp, reshapeOp, getShapeOp, getDtypeOp, rawOp,
  a, b,
  aRawTensor, bRawTensor,
  aDimsSeq, bDimsSeq,
@@ -1198,7 +1190,7 @@ static PyObject* compareOrCombine(
  if(_isSeqSubsetReorderFast(aDimsSeq, bDimsSeq, outPermutation) && (dimOrder == Py_None || _isSameSeqFast(bDimsSeq, dimOrderSeq)))
  return _compareOrCombine_subsetDims<false>(
  modState, rawOpName, resultIsBool,
- permuteOp, reshapeOp, getShapeOp, rawOp,
+ permuteOp, reshapeOp, getShapeOp, getDtypeOp, rawOp,
  a, b,
  aRawTensor, bRawTensor,
  aDimsSeq, bDimsSeq,
@@ -1306,17 +1298,7 @@ static PyObject* compareOrCombine(
  PyList_SET_ITEM(allDims.get(), i, outDims[i]);
  }

- PyObjectScopedRef res;
- {
- PyObjectScopedRef name = PyUnicode_FromString(rawOpName);
- if(!name) return NULL;
- PyObjectScopedRef dtype = resultIsBool ? PyUnicode_InternFromString("bool") : PyObject_GetAttrString(a, "dtype");
- if(!dtype) return NULL;
- res = PyObject_CallFunctionObjArgs(
- modState->tensorType(), name.get(), allDims.get(), dtype.get(), NULL);
- if(!res) return NULL;
- }
-
+ PyObjectScopedRef resRawTensor;
  {
  std::vector<int> outPermutation;
  PyObjectScopedRef aRawTensorExt = _permuteAndExtend(
@@ -1330,10 +1312,21 @@ static PyObject* compareOrCombine(
  b, bDimsSeq, bRawTensor,
  allDimsSeq, outPermutation);
  if(!bRawTensorExt) return NULL;
- PyObjectScopedRef resRawTensor = PyObject_CallFunctionObjArgs(
+ resRawTensor = PyObject_CallFunctionObjArgs(
  rawOp, aRawTensorExt.get(), bRawTensorExt.get(), NULL);
  if(!resRawTensor) return NULL;
- if(PyObject_SetAttrString(res, "raw_tensor", resRawTensor) < 0) return NULL;
+ }
+
+ PyObjectScopedRef res;
+ {
+ PyObjectScopedRef name = PyUnicode_FromString(rawOpName);
+ if(!name) return NULL;
+ PyObjectScopedRef dtype = PyObject_CallFunctionObjArgs(getDtypeOp, resRawTensor.get(), NULL);
+ if(!dtype) return NULL;
+ res = PyObject_CallFunctionObjArgs(
+ modState->tensorType(), name.get(), allDims.get(), dtype.get(), NULL);
+ if(!res) return NULL;
+ if(PyObject_SetAttrString(res, "_raw_tensor", resRawTensor) < 0) return NULL;
  }

  {
@@ -1389,6 +1382,7 @@ static PyObject* compareOrCombineViaCached(
  modState->cachedOp(TOp_Permute, backendId),
  modState->cachedOp(TOp_Reshape, backendId),
  modState->cachedOp(TOp_GetShape, backendId),
+ modState->cachedOp(TOp_GetDType, backendId),
  modState->cachedOp(TOp_ConvertToTensorLike, backendId),
  needConvertToTensor,
  allowBroadcastAllSources,
@@ -1701,31 +1695,14 @@ static PyObject* _tensorUnaryFunc(PyModuleState* modState, PyObject* tensor) {
  if(!func) return NULL;
  PyObjectScopedRef resRawTensor = PyObject_CallFunctionObjArgs(func, rawTensor.get(), NULL);
  if(!resRawTensor) return NULL;
- PyObjectScopedRef dtype;
- const char* dtypeStr = NULL;
- if(op == TOp_Abs) {
- /*
- In case of abs() on a complex tensor, the result is a float tensor.
- In principle, the logic should be like::
- if in_dtype.startswith("complex"):
- num_bits = int(out.dtype[len("complex") :])
- out_dtype = f"float{num_bits // 2}"
- For simplicity, we just take over whatever dtype the result has.
- */
- PyObject* getDtypeOp = modState->cachedOp(TOp_GetDType, BWCO_Torch);
- if(!getDtypeOp) return NULL;
- dtype = PyObject_CallFunctionObjArgs(getDtypeOp, resRawTensor.get(), NULL);
- if(!dtype) return NULL;
- if(!PyUnicode_Check(dtype)) {
- PyErr_Format(PyExc_TypeError, "tensor_abs: expected dtype to be str, got %R", dtype.get());
- return NULL;
- }
- dtypeStr = PyUnicode_AsUTF8(dtype);
- if(!dtypeStr) return NULL;
- }
- PyObjectScopedRef res = tensorCopyTemplateSimple(modState, tensor, rawOpName(op), dtypeStr);
+ PyObject* getDtypeOp = modState->cachedOp(TOp_GetDType, BWCO_Torch);
+ if(!getDtypeOp) return NULL;
+ // Just take over the result dtype. In case of abs(), it might change, but maybe also in other cases.
+ PyObjectScopedRef dtype = PyObject_CallFunctionObjArgs(getDtypeOp, resRawTensor.get(), NULL);
+ if(!dtype) return NULL;
+ PyObjectScopedRef res = tensorCopyTemplateSimple(modState, tensor, rawOpName(op), dtype);
  if(!res) return NULL;
- if(!_checkTensorRawTensorAssignForBackendWithCachedOps(modState, BWCO_Torch, rawOpName(op), res, resRawTensor))
+ if(!_checkTensorRawTensorAssignForBackendWithCachedOps(modState, BWCO_Torch, rawOpName(op), res, resRawTensor, false))
  return NULL;
  if(PyObject_SetAttrString(res, "_raw_tensor", resRawTensor) < 0) return NULL;
  return res.release();
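
Net effect of the tensor_ops.cpp changes above: the up-front a.dtype == b.dtype check is gone, the raw op runs first, and the result dtype is queried from the raw result via the cached TOp_GetDType op (with the sparse dim only copied for non-bool results). A minimal Python sketch of that flow, assuming a PyTorch raw backend; combine_sketch is a hypothetical helper, and the str(...).replace(...) conversion is presumably what the dtype op resolves to::

    import torch

    def combine_sketch(a_raw: torch.Tensor, b_raw: torch.Tensor, raw_op):
        # Run the raw op first; any dtype promotion (or bool result) happens here.
        res_raw = raw_op(a_raw, b_raw)
        # Read the dtype off the raw result instead of assuming a_raw's dtype.
        dtype_name = str(res_raw.dtype).replace("torch.", "")
        return res_raw, dtype_name

    # int32 + float32 promotes to float32; eq yields bool, with no up-front check.
    print(combine_sketch(torch.tensor(1, dtype=torch.int32), torch.tensor(2.0), torch.add)[1])  # float32
    print(combine_sketch(torch.tensor(1.0), torch.tensor(2.0), torch.eq)[1])  # bool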
@@ -8,8 +8,9 @@ class PyModuleState;
  // generic

  PyObject* tensorCopy(PyModuleState* modState, PyObject* tensor, const char* name = NULL);
- PyObject* tensorCopyTemplate(PyModuleState* modState, PyObject* tensor, const char* name = NULL, const char* dtype = NULL);
- PyObject* tensorCopyTemplateSimple(PyModuleState* modState, PyObject* tensor, const char* name_ = NULL, const char* dtype_ = NULL);
+ PyObject* tensorCopyTemplate(PyModuleState* modState, PyObject* tensor, const char* name = NULL, PyObject* dtype = NULL);
+ PyObject* tensorCopyTemplateSimple(
+ PyModuleState* modState, PyObject* tensor, const char* name_ = NULL, PyObject* dtype = NULL, bool copySparseDim = true);

  // exported Python functions {

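The header now takes dtype as a PyObject* (the dtype string obtained from the raw result) and adds copySparseDim, which the call sites above pass as !resultIsBool so that boolean compare results do not inherit a sparse dim. A hedged Python analog of that contract; ToyTensor and copy_template_simple are illustrative stand-ins, not RETURNN API::

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class ToyTensor:
        name: str
        dtype: str
        sparse_dim: Optional[str] = None

    def copy_template_simple(t: ToyTensor, name=None, dtype=None, copy_sparse_dim=True):
        out = ToyTensor(name=name or t.name, dtype=dtype or t.dtype)
        if copy_sparse_dim:  # skipped for bool compare results
            out.sparse_dim = t.sparse_dim
        return out

    labels = ToyTensor("labels", "int32", sparse_dim="classes")
    mask = copy_template_simple(labels, name="eq", dtype="bool", copy_sparse_dim=False)
    assert mask.dtype == "bool" and mask.sparse_dim is None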
@@ -58,7 +58,7 @@ def bin_op_out_template(
  b: Union[Tensor[T], int, float, numpy.number],
  *,
  name: str,
- res_dtype: Optional[str],
+ copy_sparse_dim: bool = True,
  allow_broadcast_all_sources: Optional[bool] = None,
  dim_order: Optional[Sequence[Dim]] = None,
  allow_scalar: bool = True,
@@ -70,7 +70,7 @@ def bin_op_out_template(
  :param a:
  :param b:
  :param name: for returned Tensor. no other functionality
- :param res_dtype: if not given, infer from a and b
+ :param copy_sparse_dim: whether to copy over the sparse_dim from a/b to the result
  :param allow_broadcast_all_sources: if True, it is allowed that neither a nor b has all dims of the result.
  Not needed when out_dims is specified explicitly.
  :param dim_order: defines the order of the resulting dims. if None, it is automatically inferred from a and b.
@@ -93,10 +93,6 @@ def bin_op_out_template(
  # sanity checks
  # noinspection PyProtectedMember
  assert a._raw_backend == b._raw_backend, "Cannot combine tensors from two different frontends, e.g. TF and PT"
- if res_dtype is None:
- assert (
- a.dtype == b.dtype
- ), f"For now only operations with Tensors of the same dtypes are supported, got {a} and {b}"
  all_dims = []
  for dim in a.dims + b.dims:
  if dim in all_dims:
@@ -125,9 +121,9 @@ def bin_op_out_template(
  raise TypeError(f"invalid type for allow_broadcast_all_sources: {type(allow_broadcast_all_sources)}")
  if dim_order:
  all_dims.sort(key=lambda d: dim_order.index(d) if d in dim_order else len(dim_order))
- out = Tensor(name, dims=all_dims, dtype=res_dtype or src_dtype)
+ out = Tensor(name, dims=all_dims, dtype=src_dtype)
  out.feature_dim = res_feature_dim(a, b)
- if not res_dtype:
+ if copy_sparse_dim:
  out.sparse_dim = res_sparse_dim(a, b)
  if not allow_scalar or a.dims:
  a_raw = a.copy_compatible_to_dims_raw(all_dims)
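
With res_dtype removed, bin_op_out_template always builds the template with the source dtype, and the actual result dtype is fixed up from the raw op result by the callers shown elsewhere in this diff. The caller-visible consequence: mixed-dtype operands no longer trip an assertion. A hedged usage sketch, assuming the torch backend is active; exact Tensor/Dim construction details may vary::

    import torch
    from returnn.tensor import Tensor, Dim

    time_dim = Dim(7, name="time")
    a = Tensor("a", dims=[time_dim], dtype="int32",
               raw_tensor=torch.arange(7, dtype=torch.int32))
    c = a * 0.5  # previously asserted a.dtype == b.dtype; now just runs
    print(c.dtype)  # "float32", taken from the raw torch result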
@@ -236,10 +236,6 @@ def _check_matching_loop_var_templates(loop_var_templates: S, loop_vars: S):
  f"loop var {path} template {template} does not match var {x}, "
  f"different dims (no matter the order) {template.dims} vs {x.dims}"
  )
- assert template.dtype == x.dtype, (
- f"loop var {path} template {template} does not match var {x}, "
- f"different dtype {template.dtype} vs {x.dtype}"
- )
  assert template.sparse_dim == x.sparse_dim, (
  f"loop var {path} template {template} does not match var {x}, "
  f"different sparse_dim {template.sparse_dim} vs {x.sparse_dim}"
@@ -94,10 +94,6 @@ class TensorArray:
  f"TensorArray push_back: template {self.tensor_template} does not match tensor {tensor},"
  f" dims different, {self.tensor_template.dims} vs {tensor.dims}"
  )
- assert tensor.dtype == self.tensor_template.dtype, (
- f"TensorArray push_back: template {self.tensor_template} does not match tensor {tensor},"
- f" dtype different, {self.tensor_template.dtype} vs {tensor.dtype}"
- )
  assert tensor.sparse_dim == self.tensor_template.sparse_dim, (
  f"TensorArray push_back: template {self.tensor_template} does not match tensor {tensor},"
  f" sparse_dim different, {self.tensor_template.sparse_dim} vs {tensor.sparse_dim}"
@@ -95,7 +95,7 @@ class Engine(EngineBase):
  self._torch_distributed_class = torch_distributed.get("class", None)
  self._torch_distributed_options = torch_distributed.get("options", None)

- amp_options = self.config.typed_value("torch_amp")
+ amp_options = self.config.opt_typed_value("torch_amp")
  grad_scaler_opts = self.config.typed_value("grad_scaler", NotSpecified)
  if amp_options is not None:
  self._use_autocast = True
@@ -284,7 +284,7 @@ class Engine(EngineBase):
  step_idx % self._accum_grad_multiple_step
  ) != (self._accum_grad_multiple_step - 1) else nullcontext():
  if self._grad_scaler is not None:
- self._grad_scaler.scale(total_loss).backward()
+ self._grad_scaler.scale(total_loss.raw_tensor).backward()
  else:
  total_loss.raw_tensor.backward()

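The engine fix unwraps the RETURNN Tensor before scaling: GradScaler.scale() expects a torch.Tensor, consistent with the unscaled branch, which already used total_loss.raw_tensor. A minimal runnable sketch of the scale-then-backward flow; enabled=False keeps it CPU-safe, and the loss is a toy stand-in, not the engine's::

    import torch

    scaler = torch.cuda.amp.GradScaler(enabled=False)  # no-op scaling, runs without CUDA
    w = torch.ones(1, requires_grad=True)
    loss = (w * 2.0).sum()         # stand-in for total_loss.raw_tensor
    scaler.scale(loss).backward()  # scale() wants a torch.Tensor, not a Tensor wrapper
    print(w.grad)                  # tensor([2.])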
@@ -494,7 +494,9 @@ class TorchBackend(Backend[torch.Tensor]):
  mask = tensor.get_sequence_mask_broadcast(axis=axis)
  inf_value = get_global_inf_value()
  tensor.raw_tensor = torch.where(mask, tensor.raw_tensor, -inf_value)
- out.raw_tensor = torch.softmax(tensor.raw_tensor, dim=tensor.dims.index(axis))
+ out_raw = torch.softmax(tensor.raw_tensor, dim=tensor.dims.index(axis))
+ out.dtype = TorchBackend.get_dtype_name_raw(out_raw)
+ out.raw_tensor = out_raw
  return out

  @staticmethod
@@ -511,7 +513,9 @@ class TorchBackend(Backend[torch.Tensor]):
  mask = tensor.get_sequence_mask_broadcast(axis=axis)
  inf_value = get_global_inf_value()
  tensor.raw_tensor = torch.where(mask, tensor.raw_tensor, -inf_value)
- out.raw_tensor = torch.log_softmax(tensor.raw_tensor, dim=tensor.dims.index(axis))
+ out_raw = torch.log_softmax(tensor.raw_tensor, dim=tensor.dims.index(axis))
+ out.dtype = TorchBackend.get_dtype_name_raw(out_raw)
+ out.raw_tensor = out_raw
  return out

  @staticmethod
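
Both softmax variants now re-read out.dtype from the raw result before assigning it. The likely motivation (an assumption here): under torch.autocast on CUDA, softmax and log_softmax compute in float32 even for reduced-precision inputs, so the pre-built template dtype can be stale. Guarded illustration::

    import torch

    if torch.cuda.is_available():
        x = torch.randn(4, device="cuda", dtype=torch.float16)
        with torch.autocast(device_type="cuda"):
            y = torch.log_softmax(x, dim=-1)
        print(y.dtype)  # torch.float32, differing from the float16 input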
@@ -955,7 +959,6 @@ class TorchBackend(Backend[torch.Tensor]):

  if use_mask and any(dim.dyn_size_ext for dim in reduce):
  raise NotImplementedError("masking in matmul reduce not yet implemented")
- assert a.dtype == b.dtype, f"matmul: dtypes do not match: {a} vs {b}"

  a_dims = a.dims
  b_dims = b.dims
@@ -1041,7 +1044,9 @@ class TorchBackend(Backend[torch.Tensor]):
  b_unique_dims = [b_dims[i] for i in b_unique_axes]
  result_dims = common_dims + a_unique_dims + b_unique_dims

- result_tensor = Tensor(name="dot", dims=result_dims, raw_tensor=raw_result, dtype=a.dtype)
+ result_tensor = Tensor(
+ name="dot", dims=result_dims, raw_tensor=raw_result, dtype=TorchBackend.get_dtype_name_raw(raw_result)
+ )

  return result_tensor

@@ -1561,7 +1566,9 @@ class TorchBackend(Backend[torch.Tensor]):
  )
  else:
  raise ValueError(f"invalid number of filter dims {filter_size}, expected 1, 2, or 3")
- out = Tensor("conv", dims=batch_dims + [out_dim] + list(out_spatial_dims), dtype=source.dtype)
+ out = Tensor(
+ "conv", dims=batch_dims + [out_dim] + list(out_spatial_dims), dtype=TorchBackend.get_dtype_name_raw(out_raw)
+ )
  if len(batch_dims) == 1:
  out.raw_tensor = out_raw
  else:
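
matmul and conv get the same treatment, and here the dtype change is easy to reproduce even on CPU: under autocast, matmul on float32 inputs returns a reduced-precision tensor, so the old dtype=a.dtype would mislabel the result. Plain-torch illustration; the str(...).replace(...) mirrors what get_dtype_name_raw presumably does::

    import torch

    a = torch.randn(2, 3)
    b = torch.randn(3, 4)
    with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
        raw_result = torch.matmul(a, b)
    print(raw_result.dtype)                             # torch.bfloat16, not float32
    print(str(raw_result.dtype).replace("torch.", ""))  # "bfloat16"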
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: returnn
- Version: 1.20231018.70522
+ Version: 1.20231018.94805
  Summary: The RWTH extensible training framework for universal recurrent neural networks
  Home-page: https://github.com/rwth-i6/returnn/
  Author: Albert Zeyer