returnn 1.20240613.164354__tar.gz → 1.20240617.171744__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of returnn might be problematic.

Files changed (448)
  1. {returnn-1.20240613.164354/returnn.egg-info → returnn-1.20240617.171744}/PKG-INFO +1 -1
  2. returnn-1.20240617.171744/_setup_info_generated.py +2 -0
  3. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/distrib_files.py +1 -1
  4. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/__init__.py +1 -0
  5. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_backend.py +48 -4
  6. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_numpy_backend.py +3 -3
  7. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/array_.py +43 -0
  8. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/math_.py +18 -0
  9. returnn-1.20240617.171744/returnn/frontend/piecewise_linear.py +55 -0
  10. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/_dim_extra.py +40 -35
  11. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/_backend.py +2 -2
  12. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_low_level/_backend.py +3 -3
  13. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/frontend/_backend.py +59 -3
  14. returnn-1.20240617.171744/returnn/util/math.py +28 -0
  15. {returnn-1.20240613.164354 → returnn-1.20240617.171744/returnn.egg-info}/PKG-INFO +1 -1
  16. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn.egg-info/SOURCES.txt +2 -0
  17. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_Util.py +20 -0
  18. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_array.py +45 -0
  19. returnn-1.20240617.171744/tests/test_rf_piecewise_linear.py +49 -0
  20. returnn-1.20240613.164354/_setup_info_generated.py +0 -2
  21. returnn-1.20240613.164354/returnn/util/math.py +0 -11
  22. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/.editorconfig +0 -0
  23. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/.gitignore +0 -0
  24. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/.gitmodules +0 -0
  25. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/.kateconfig +0 -0
  26. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/CHANGELOG.md +0 -0
  27. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/CODEOWNERS +0 -0
  28. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/CONTRIBUTING.md +0 -0
  29. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/LICENSE +0 -0
  30. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/MANIFEST.in +0 -0
  31. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/README.rst +0 -0
  32. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/__init__.py +0 -0
  33. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/12AX.cluster_map +0 -0
  34. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/_setup_returnn_env.py +0 -0
  35. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-fwd.config +0 -0
  36. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-horovod-mpi.py +0 -0
  37. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-horovod-mpi.py.sh +0 -0
  38. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-horovod-mpi.sh +0 -0
  39. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-hyper-param-tuning.config +0 -0
  40. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-iter-dataset.py +0 -0
  41. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-list-devices.py +0 -0
  42. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-lua-torch-layer.config +0 -0
  43. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-pretrain.config +0 -0
  44. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-record-and-push-to-webserver.py +0 -0
  45. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-returnn-as-framework.py +0 -0
  46. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-rf-pt-benchmark.py +0 -0
  47. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-rf.config +0 -0
  48. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-rhn-enwik8.config +0 -0
  49. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-sprint-interface.py +0 -0
  50. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-att-copy.config +0 -0
  51. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-attention.config +0 -0
  52. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-chunking-blstm.12ax.config +0 -0
  53. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-contribrnn-lstm.12ax.config +0 -0
  54. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-enc-dec.config +0 -0
  55. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-hard-att-copy.config +0 -0
  56. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-lstm-benchmark.py +0 -0
  57. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-maxgradnorm-lstm.12ax.config +0 -0
  58. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-native-lstm-lowmem.12ax.config +0 -0
  59. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-native-lstm.12ax.config +0 -0
  60. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-native-lstm2.12ax.config +0 -0
  61. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-native-lstm2.12ax.tuned.config +0 -0
  62. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-neural-transducer.12ax.config +0 -0
  63. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-rec-explicit-lstm.config +0 -0
  64. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-rec-explicit-rnn.config +0 -0
  65. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-rec-self-att.config +0 -0
  66. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-search-compiled-graph.py +0 -0
  67. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-tf-vanilla-lstm.12ax.config +0 -0
  68. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-timit-lstm-ctc.config +0 -0
  69. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-torch.config +0 -0
  70. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo-upd-mult-model.lstm.12ax.config +0 -0
  71. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/demo.sh +0 -0
  72. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/IAM_lines/a01-000u-00.png +0 -0
  73. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/IAM_lines/a01-007-04.png +0 -0
  74. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/IAM_lines/a01-007-06.png +0 -0
  75. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/README.txt +0 -0
  76. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/chars.txt +0 -0
  77. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/config_demo +0 -0
  78. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/config_fwd +0 -0
  79. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/config_real +0 -0
  80. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/create_IAM_dataset.py +0 -0
  81. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/decode.py +0 -0
  82. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/features/raw/demo.h5 +0 -0
  83. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/go.sh +0 -0
  84. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/lines.txt +0 -0
  85. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/split/eval.txt +0 -0
  86. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/split/train.txt +0 -0
  87. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/IAM/split/valid.txt +0 -0
  88. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/README.md +0 -0
  89. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/artificial/create_test_h5.py +0 -0
  90. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/artificial/forwardconfig +0 -0
  91. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/artificial/go.sh +0 -0
  92. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/artificial/trainconfig +0 -0
  93. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/artificial_rgb/create_test_h5.py +0 -0
  94. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/artificial_rgb/forwardconfig +0 -0
  95. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/artificial_rgb/go.sh +0 -0
  96. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/demos/mdlstm/artificial_rgb/trainconfig +0 -0
  97. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/pyproject.toml +0 -0
  98. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/requirements.txt +0 -0
  99. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/__init__.py +0 -0
  100. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/__main__.py +0 -0
  101. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/__old_mod_loader__.py +0 -0
  102. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/__setup__.py +0 -0
  103. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/config.py +0 -0
  104. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/__init__.py +0 -0
  105. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/audio.py +0 -0
  106. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/basic.py +0 -0
  107. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/bundle_file.py +0 -0
  108. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/cached.py +0 -0
  109. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/cached2.py +0 -0
  110. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/generating.py +0 -0
  111. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/hdf.py +0 -0
  112. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/lm.py +0 -0
  113. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/map.py +0 -0
  114. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/meta.py +0 -0
  115. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/multi_proc.py +0 -0
  116. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/normalization_data.py +0 -0
  117. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/numpy_dump.py +0 -0
  118. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/raw_wav.py +0 -0
  119. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/sprint.py +0 -0
  120. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/stereo.py +0 -0
  121. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/util/__init__.py +0 -0
  122. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/util/feature_extraction.py +0 -0
  123. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/util/strings.py +0 -0
  124. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/datasets/util/vocabulary.py +0 -0
  125. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/engine/__init__.py +0 -0
  126. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/engine/base.py +0 -0
  127. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/engine/batch.py +0 -0
  128. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/__init__.py +0 -0
  129. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/__main__.py +0 -0
  130. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/.git +0 -0
  131. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/.gitignore +0 -0
  132. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/LICENSE +0 -0
  133. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/README.md +0 -0
  134. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/aligner.gif +0 -0
  135. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/check.png +0 -0
  136. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/core.cu +0 -0
  137. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/core.h +0 -0
  138. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/core_cpu.cpp +0 -0
  139. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/LICENSE +0 -0
  140. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/MANIFEST.in +0 -0
  141. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/README.md +0 -0
  142. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/binding.cpp +0 -0
  143. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.cu +0 -0
  144. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.h +0 -0
  145. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/requirements.txt +0 -0
  146. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/setup.py +0 -0
  147. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/__init__.py +0 -0
  148. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/test.py +0 -0
  149. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/ref_rna.py +0 -0
  150. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/setup.py +0 -0
  151. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op.cc +0 -0
  152. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op_kernel_tmpl.h +0 -0
  153. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/warp_rna/__init__.py +0 -0
  154. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/WarpRna/warp-rna/test.cpp +0 -0
  155. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/__init__.py +0 -0
  156. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/graph_editor/README.md +0 -0
  157. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/graph_editor/__init__.py +0 -0
  158. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/graph_editor/edit.py +0 -0
  159. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/graph_editor/reroute.py +0 -0
  160. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/graph_editor/select.py +0 -0
  161. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/graph_editor/subgraph.py +0 -0
  162. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/graph_editor/transform.py +0 -0
  163. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/extern/graph_editor/util.py +0 -0
  164. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/forward_iface.py +0 -0
  165. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_native/__init__.py +0 -0
  166. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_native/backend.cpp +0 -0
  167. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_native/backend.hpp +0 -0
  168. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_native/module.cpp +0 -0
  169. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_native/module.hpp +0 -0
  170. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_native/py_utils.hpp +0 -0
  171. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_native/tensor_ops.cpp +0 -0
  172. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_native/tensor_ops.hpp +0 -0
  173. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_random_journal.py +0 -0
  174. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/_utils.py +0 -0
  175. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/attention.py +0 -0
  176. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/audio/__init__.py +0 -0
  177. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/audio/mel.py +0 -0
  178. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/audio/specaugment.py +0 -0
  179. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/backend.py +0 -0
  180. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/cond.py +0 -0
  181. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/const.py +0 -0
  182. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/container.py +0 -0
  183. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/control_flow_ctx.py +0 -0
  184. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/conv.py +0 -0
  185. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/decoder/__init__.py +0 -0
  186. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/decoder/transformer.py +0 -0
  187. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/device.py +0 -0
  188. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/dims.py +0 -0
  189. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/dropout.py +0 -0
  190. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/dtype.py +0 -0
  191. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/encoder/__init__.py +0 -0
  192. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/encoder/base.py +0 -0
  193. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/encoder/conformer.py +0 -0
  194. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/gradient.py +0 -0
  195. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/graph.py +0 -0
  196. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/hooks.py +0 -0
  197. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/init.py +0 -0
  198. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/label_smoothing.py +0 -0
  199. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/linear.py +0 -0
  200. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/loop.py +0 -0
  201. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/loss.py +0 -0
  202. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/matmul.py +0 -0
  203. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/module.py +0 -0
  204. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/normalization.py +0 -0
  205. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/parameter.py +0 -0
  206. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/rand.py +0 -0
  207. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/rec.py +0 -0
  208. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/reduce.py +0 -0
  209. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/run_ctx.py +0 -0
  210. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/signal.py +0 -0
  211. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/state.py +0 -0
  212. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/tensor_array.py +0 -0
  213. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/frontend/types.py +0 -0
  214. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/import_/__init__.py +0 -0
  215. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/import_/common.py +0 -0
  216. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/import_/git.py +0 -0
  217. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/import_/import_.py +0 -0
  218. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/learning_rate_control.py +0 -0
  219. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/log.py +0 -0
  220. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/native_op.cpp +0 -0
  221. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/native_op.py +0 -0
  222. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/pretrain.py +0 -0
  223. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/sprint/__init__.py +0 -0
  224. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/sprint/cache.py +0 -0
  225. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/sprint/control.py +0 -0
  226. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/sprint/error_signals.py +0 -0
  227. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/sprint/extern_interface.py +0 -0
  228. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/sprint/interface.py +0 -0
  229. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/README.md +0 -0
  230. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/__init__.py +0 -0
  231. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/_tensor_extra.py +0 -0
  232. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/_tensor_mixin_base.py +0 -0
  233. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/_tensor_op_overloads.py +0 -0
  234. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/control_flow_ctx.py +0 -0
  235. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/dim.py +0 -0
  236. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/marked_dim.py +0 -0
  237. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/tensor.py +0 -0
  238. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/tensor_dict.py +0 -0
  239. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tensor/utils.py +0 -0
  240. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/__init__.py +0 -0
  241. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/compat.py +0 -0
  242. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/data_pipeline.py +0 -0
  243. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/distributed.py +0 -0
  244. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/engine.py +0 -0
  245. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/README.md +0 -0
  246. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/__init__.py +0 -0
  247. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/_utils.py +0 -0
  248. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/cond.py +0 -0
  249. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/config_entry_points.py +0 -0
  250. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/debug_eager_mode.py +0 -0
  251. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/dims.py +0 -0
  252. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/layer.py +0 -0
  253. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/loop.py +0 -0
  254. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/make_layer.py +0 -0
  255. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/masked_computation.py +0 -0
  256. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/parameter_assign.py +0 -0
  257. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_layers/prev_tensor_ref.py +0 -0
  258. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/frontend_low_level/__init__.py +0 -0
  259. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/horovod.py +0 -0
  260. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/hyper_param_tuning.py +0 -0
  261. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/layers/__init__.py +0 -0
  262. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/layers/base.py +0 -0
  263. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/layers/basic.py +0 -0
  264. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/layers/rec.py +0 -0
  265. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/layers/segmental_model.py +0 -0
  266. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/layers/signal_processing.py +0 -0
  267. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/layers/variable.py +0 -0
  268. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/native_op.py +0 -0
  269. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/network.py +0 -0
  270. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/sprint.py +0 -0
  271. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/updater.py +0 -0
  272. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/util/__init__.py +0 -0
  273. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/util/basic.py +0 -0
  274. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/util/data.py +0 -0
  275. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/util/gradient_checkpoint.py +0 -0
  276. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/util/ken_lm.py +0 -0
  277. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/tf/util/open_fst.py +0 -0
  278. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/README.md +0 -0
  279. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/__init__.py +0 -0
  280. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/data/__init__.py +0 -0
  281. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/data/extern_data.py +0 -0
  282. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/data/pipeline.py +0 -0
  283. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/data/queued_data_iter.py +0 -0
  284. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/data/returnn_dataset_wrapper.py +0 -0
  285. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/data/tensor_utils.py +0 -0
  286. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/distributed.py +0 -0
  287. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/engine.py +0 -0
  288. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/frontend/__init__.py +0 -0
  289. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/frontend/_rand.py +0 -0
  290. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/frontend/bridge.py +0 -0
  291. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/frontend/raw_ops.py +0 -0
  292. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/updater.py +0 -0
  293. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/util/README.md +0 -0
  294. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/util/__init__.py +0 -0
  295. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/util/diagnose_gpu.py +0 -0
  296. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/torch/util/scaled_gradient.py +0 -0
  297. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/__init__.py +0 -0
  298. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/basic.py +0 -0
  299. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/better_exchook.py +0 -0
  300. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/bpe.py +0 -0
  301. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/debug.py +0 -0
  302. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/debug_helpers.py +0 -0
  303. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/file_cache.py +0 -0
  304. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/fsa.py +0 -0
  305. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/literal_py_to_pickle.py +0 -0
  306. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/multi_proc_non_daemonic_spawn.py +0 -0
  307. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/native_code_compiler.py +0 -0
  308. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/pprint.py +0 -0
  309. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/py-to-pickle.cpp +0 -0
  310. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/py_compat.py +0 -0
  311. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/py_ext_mod_compiler.py +0 -0
  312. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/result_with_reason.py +0 -0
  313. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/sig_proc.py +0 -0
  314. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/task_system.py +0 -0
  315. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/train_proc_manager.py +0 -0
  316. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn/util/watch_memory.py +0 -0
  317. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn.egg-info/dependency_links.txt +0 -0
  318. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/returnn.egg-info/top_level.txt +0 -0
  319. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/rnn.py +0 -0
  320. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/setup.cfg +0 -0
  321. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/setup.py +0 -0
  322. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/DummySprintExec.py +0 -0
  323. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm-inspection-profile.xml +0 -0
  324. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/.gitignore +0 -0
  325. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/.name +0 -0
  326. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/codeStyleSettings.xml +0 -0
  327. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/codeStyles/Project.xml +0 -0
  328. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/codeStyles/codeStyleConfig.xml +0 -0
  329. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/inspectionProfiles/Project_Default.xml +0 -0
  330. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/inspectionProfiles/profiles_settings.xml +0 -0
  331. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/misc.xml +0 -0
  332. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/modules.xml +0 -0
  333. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/returnn.iml +0 -0
  334. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/PyCharm.idea/scopes/scope_settings.xml +0 -0
  335. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/_set_num_threads1.py +0 -0
  336. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/_setup_returnn_env.py +0 -0
  337. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/_setup_test_env.py +0 -0
  338. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/bpe-unicode-demo.codes +0 -0
  339. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/bpe-unicode-demo.vocab +0 -0
  340. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/lexicon_opt.fst +0 -0
  341. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/lexicon_opt.isyms +0 -0
  342. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/lexicon_opt.jpg +0 -0
  343. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/lexicon_opt.osyms +0 -0
  344. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/lint_common.py +0 -0
  345. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/pycharm-inspect.py +0 -0
  346. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/pylint.py +0 -0
  347. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/returnn-as-framework.py +0 -0
  348. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/rf_utils.py +0 -0
  349. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/spelling.dic +0 -0
  350. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_Config.py +0 -0
  351. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_Dataset.py +0 -0
  352. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_Fsa.py +0 -0
  353. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_GeneratingDataset.py +0 -0
  354. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_HDFDataset.py +0 -0
  355. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_LearningRateControl.py +0 -0
  356. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_Log.py +0 -0
  357. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_MultiProcDataset.py +0 -0
  358. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_Pretrain.py +0 -0
  359. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_ResNet.py +0 -0
  360. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_SprintDataset.py +0 -0
  361. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_SprintInterface.py +0 -0
  362. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TFEngine.py +0 -0
  363. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TFNativeOp.py +0 -0
  364. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TFNetworkLayer.py +0 -0
  365. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TFNetworkRecLayer.py +0 -0
  366. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TFNetworkSigProcLayer.py +0 -0
  367. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TFUpdater.py +0 -0
  368. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TFUtil.py +0 -0
  369. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TF_determinism.py +0 -0
  370. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TaskSystem.py +0 -0
  371. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TaskSystem_SharedMem.py +0 -0
  372. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_TranslationDataset.py +0 -0
  373. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_demos.py +0 -0
  374. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_fork_exec.py +0 -0
  375. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_hdf_dump.py +0 -0
  376. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_attention.py +0 -0
  377. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_base.py +0 -0
  378. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_cond.py +0 -0
  379. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_const.py +0 -0
  380. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_container.py +0 -0
  381. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_conv.py +0 -0
  382. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_encoder_conformer.py +0 -0
  383. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_gradient.py +0 -0
  384. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_label_smoothing.py +0 -0
  385. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_loop.py +0 -0
  386. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_math.py +0 -0
  387. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_normalization.py +0 -0
  388. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_rec.py +0 -0
  389. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_reduce.py +0 -0
  390. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_rf_signal.py +0 -0
  391. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_tensor.py +0 -0
  392. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_tools.py +0 -0
  393. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_torch_dataset.py +0 -0
  394. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_torch_engine.py +0 -0
  395. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_torch_frontend.py +0 -0
  396. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tests/test_torch_internal_frontend.py +0 -0
  397. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/_setup_returnn_env.py +0 -0
  398. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/analyze-dataset-batches.py +0 -0
  399. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/bliss-collect-seq-lens.py +0 -0
  400. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/bliss-dump-text.py +0 -0
  401. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/bliss-get-segment-names.py +0 -0
  402. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/bliss-to-ogg-zip.py +0 -0
  403. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/bpe-create-lexicon.py +0 -0
  404. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/calculate-word-error-rate.py +0 -0
  405. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/cleanup-old-models.py +0 -0
  406. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/collect-orth-symbols.py +0 -0
  407. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/collect-words.py +0 -0
  408. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/compile_native_op.py +0 -0
  409. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/compile_tf_graph.py +0 -0
  410. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/debug-dump-search-scores.py +0 -0
  411. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/debug-plot-search-scores.py +0 -0
  412. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/dump-dataset-raw-strings.py +0 -0
  413. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/dump-dataset.py +0 -0
  414. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/dump-forward-stats.py +0 -0
  415. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/dump-forward.py +0 -0
  416. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/dump-network-json.py +0 -0
  417. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/dump-pickle.py +0 -0
  418. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/extract_state_tying_from_dataset.py +0 -0
  419. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/get-attention-weights.py +0 -0
  420. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/get-best-model-epoch.py +0 -0
  421. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/hdf_dump.py +0 -0
  422. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/hdf_dump_translation_dataset.py +0 -0
  423. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/import-blocks-mt-model.py +0 -0
  424. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/import-t2t-mt-model.py +0 -0
  425. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/.gitignore +0 -0
  426. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/Makefile +0 -0
  427. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/README.md +0 -0
  428. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/example/README.md +0 -0
  429. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/example/libs_list +0 -0
  430. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.config +0 -0
  431. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.keep_over_epoch.lstm2.config +0 -0
  432. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/example/rescore_lattice.sh +0 -0
  433. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/example/state_vars_list +0 -0
  434. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/example/tensor_names_list +0 -0
  435. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/file.h +0 -0
  436. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/htklatticerescorer.cc +0 -0
  437. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/htklatticerescorer.h +0 -0
  438. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/main.cc +0 -0
  439. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/rescorer.h +0 -0
  440. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/vocabulary.cc +0 -0
  441. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/lattice_rescorer/vocabulary.h +0 -0
  442. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/tf_avg_checkpoints.py +0 -0
  443. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/tf_inspect_checkpoint.py +0 -0
  444. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/tf_inspect_summary_log.py +0 -0
  445. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/torch_avg_checkpoints.py +0 -0
  446. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/torch_export_to_onnx.py +0 -0
  447. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/torch_inspect_checkpoint.py +0 -0
  448. {returnn-1.20240613.164354 → returnn-1.20240617.171744}/tools/torch_inspect_checkpoint_and_opt.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: returnn
-Version: 1.20240613.164354
+Version: 1.20240617.171744
 Summary: The RWTH extensible training framework for universal recurrent neural networks
 Home-page: https://github.com/rwth-i6/returnn/
 Author: Albert Zeyer
_setup_info_generated.py (new file)
@@ -0,0 +1,2 @@
+version = '1.20240617.171744'
+long_version = '1.20240617.171744+git.ef21456'
returnn/datasets/distrib_files.py
@@ -301,7 +301,7 @@ class DistributeFilesDataset(CachedDataset2):
         if "seq_ordering" not in dataset_dict and "seq_order_control_dataset" not in dataset_dict:
             raise ValueError(
                 f"{self}: sub dataset should have explicit seq_ordering "
-                "(or seq_order_control_dataset for MetaDataset), got: {dataset_dict}"
+                f"(or seq_order_control_dataset for MetaDataset), got: {dataset_dict}"
             )
         self._lazy_init_file_cache_proc()
         dataset_dict, exit_hook = self._file_cache.handle_cached_files_in_config(dataset_dict)
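
The change above only adds the missing f prefix on the second string literal; without it, Python keeps the placeholder verbatim instead of interpolating the dict into the error message. A minimal sketch of the difference, with a hypothetical value rather than RETURNN code:

    dataset_dict = {"class": "MetaDataset"}
    # Missing f prefix: the placeholder stays literal in the message.
    print("got: {dataset_dict}")    # -> got: {dataset_dict}
    # With the f prefix the dict is interpolated as intended.
    print(f"got: {dataset_dict}")   # -> got: {'class': 'MetaDataset'}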
returnn/frontend/__init__.py
@@ -40,6 +40,7 @@ from .math_ import *
 from .matmul import *
 from .normalization import *
 from .parameter import *
+from .piecewise_linear import *
 from .rand import *
 from .rec import *
 from .reduce import *
returnn/frontend/_backend.py
@@ -135,14 +135,14 @@ class Backend(Generic[T]):
         raise NotImplementedError

     @staticmethod
-    def get_shape_raw(raw_tensor: T) -> Union[T, Tuple[Union[int, T]]]:
+    def get_shape_raw(raw_tensor: T) -> Union[T, Tuple[Union[int, T], ...]]:
         """
         :return: shape of raw tensor
         """
         raise NotImplementedError

     @staticmethod
-    def get_shape_tuple_raw(raw_tensor: T) -> Tuple[Union[int, T]]:
+    def get_shape_tuple_raw(raw_tensor: T) -> Tuple[Union[int, T], ...]:
         """
         :return: shape of raw tensor. assumes that ndim is known.
         In eager frameworks, all dims are int.
@@ -150,7 +150,7 @@ class Backend(Generic[T]):
         raise NotImplementedError

     @staticmethod
-    def get_known_shape_raw(raw_tensor: T) -> Tuple[Optional[int]]:
+    def get_known_shape_raw(raw_tensor: T) -> Tuple[Optional[int], ...]:
         """
         :return: shape of raw tensor, int for static known, None otherwise. assumes that ndim is known.
         This will not create any ops.
@@ -159,7 +159,7 @@ class Backend(Generic[T]):
         raise NotImplementedError

     @staticmethod
-    def set_known_shape_raw(raw_tensor: T, shape: Tuple[Optional[int]]) -> None:
+    def set_known_shape_raw(raw_tensor: T, shape: Tuple[Optional[int], ...]) -> None:
         """
         Sets the known shape of the raw tensor.
         This is only supported in graph-based frameworks,
@@ -506,6 +506,15 @@ class Backend(Generic[T]):
         """
         raise NotImplementedError

+    @staticmethod
+    def stack(sources: Sequence[Tensor], *, out_dim: Dim) -> Tensor:
+        """
+        :param sources:
+        :param out_dim:
+        :return: stacked tensor
+        """
+        raise NotImplementedError
+
     # Restrict the possible activation function names,
     # to not get unexpected behavior,
     # or unwanted incompatibilities.
@@ -953,6 +962,22 @@ class Backend(Generic[T]):
         """where"""
         raise NotImplementedError

+    @staticmethod
+    def search_sorted(
+        sorted_seq: Tensor, values: Tensor, *, axis: Dim, side: str = "left", out_dtype: str = "int32"
+    ) -> Tensor:
+        """
+        :param sorted_seq: [SharedDims...,axis], sequence of numbers, sorted low to high in the given axis.
+        :param values: [SharedDims...,OtherDims...], sequence of numbers to search for in ``sorted_seq``.
+        :param axis:
+        :param side: "left" or "right"
+        :param out_dtype:
+        :return: [SharedDims...,OtherDims...] -> axis, indices in axis in ``sorted_seq`` such that
+            sorted_seq[i-1] < value <= sorted_seq[i] if side=="left",
+            sorted_seq[i-1] <= value < sorted_seq[i] if side=="right".
+        """
+        raise NotImplementedError
+
     @staticmethod
     def clip_by_value(
         x: Tensor,
@@ -964,6 +989,25 @@ class Backend(Generic[T]):
         """clip by value"""
         raise NotImplementedError

+    @staticmethod
+    def lerp(
+        start: Tensor, end: Tensor, weight: Union[float, Tensor], *, allow_broadcast_all_sources: bool = False
+    ) -> Tensor:
+        """
+        Linear interpolation between start and end.
+        (Some backends might provide an optimized version of this.)
+
+        :param start:
+        :param end:
+        :param weight: scalar or tensor
+        :param allow_broadcast_all_sources:
+        :return: start + weight * (end - start)
+        """
+        # Default implementation.
+        if not allow_broadcast_all_sources:
+            return start + weight * (end - start)
+        return rf.combine_bc(start, "+", rf.combine_bc(weight, "*", rf.combine_bc(end, "-", start)))
+
     @staticmethod
     def matmul(a: Tensor[T], b: Tensor[T], *, reduce: Union[Dim, Sequence[Dim]], use_mask: bool = True) -> Tensor[T]:
         """
returnn/frontend/_numpy_backend.py
@@ -52,14 +52,14 @@ class NumpyBackend(Backend[numpy.ndarray]):
         return raw_tensor.ndim

     @staticmethod
-    def get_shape_raw(raw_tensor: numpy.ndarray) -> Tuple[int]:
+    def get_shape_raw(raw_tensor: numpy.ndarray) -> Tuple[int, ...]:
         """
         :return: shape of raw tensor
         """
         return raw_tensor.shape

     @staticmethod
-    def get_shape_tuple_raw(raw_tensor: numpy.ndarray) -> Tuple[int]:
+    def get_shape_tuple_raw(raw_tensor: numpy.ndarray) -> Tuple[int, ...]:
         """
         :return: shape of raw tensor. assumes that ndim is known.
         In eager frameworks, all dims are int.
@@ -67,7 +67,7 @@ class NumpyBackend(Backend[numpy.ndarray]):
         return raw_tensor.shape

     @staticmethod
-    def get_known_shape_raw(raw_tensor: numpy.ndarray) -> Tuple[int]:
+    def get_known_shape_raw(raw_tensor: numpy.ndarray) -> Tuple[int, ...]:
         """
         :return: shape of raw tensor, int for static known, None otherwise. assumes that ndim is known.
         This will not create any ops.
returnn/frontend/array_.py
@@ -29,6 +29,7 @@ __all__ = [
     "concat_features",
     "pad",
     "cum_concat_step",
+    "stack",
     "masked_select",
     "masked_scatter",
     "sequence_mask",
@@ -39,6 +40,7 @@ __all__ = [
     "shift_right",
     "reverse_sequence",
     "where",
+    "search_sorted",
     "sparse_to_dense",
     "one_hot",
 ]
@@ -352,6 +354,13 @@ def concat(
 ) -> Tuple[Tensor, Dim]:
     """
     Concatenates multiple sources in the specified dimension.
+
+    Also see :func:`stack`.
+
+    :param sources: list of (tensor, dim) pairs. dim is the axis to concatenate on.
+    :param allow_broadcast: if True, the sources can have different dims, and the result will be broadcasted.
+    :param out_dim: reuse existing dim for the resulting concatenated dim, if given
+    :return: concatenated tensor, out_dim
     """
     assert sources
     if not allow_broadcast:
@@ -490,6 +499,23 @@ def cum_concat_step(
     )


+def stack(sources: Sequence[Tensor], *, out_dim: Optional[Dim] = None) -> Tuple[Tensor, Dim]:
+    """
+    Stack the sources in a new dimension.
+    All sources must have the same shape.
+
+    :param sources:
+    :param out_dim: if given, use this as the new dim
+    :return: stacked tensor, out_dim
+    """
+    if not sources:
+        raise ValueError("no sources to stack")
+    if not out_dim:
+        out_dim = Dim(len(sources), name="stack")
+    # noinspection PyProtectedMember
+    return sources[0]._raw_backend.stack(sources, out_dim=out_dim), out_dim
+
+
 def masked_select(
     tensor: Tensor, *, mask: Tensor, dims: Sequence[Dim], out_dim: Optional[Dim] = None
 ) -> Tuple[Tensor, Dim]:
@@ -749,6 +775,23 @@ def where(
     return cond._raw_backend.where(cond, true_, false_, allow_broadcast_all_sources=allow_broadcast_all_sources)


+def search_sorted(
+    sorted_seq: Tensor, values: Tensor, *, axis: Dim, side: str = "left", out_dtype: str = "int32"
+) -> Tensor:
+    """
+    :param sorted_seq: [SharedDims...,axis], sequence of numbers, sorted low to high in the given axis.
+    :param values: [SharedDims...,OtherDims...], sequence of numbers to search for in ``sorted_seq``.
+    :param axis:
+    :param side: "left" or "right"
+    :param out_dtype:
+    :return: [SharedDims...,OtherDims...] -> axis, indices in axis in ``sorted_seq`` such that
+        sorted_seq[i-1] < value <= sorted_seq[i] if side=="left",
+        sorted_seq[i-1] <= value < sorted_seq[i] if side=="right".
+    """
+    # noinspection PyProtectedMember
+    return sorted_seq._raw_backend.search_sorted(sorted_seq, values, axis=axis, side=side, out_dtype=out_dtype)
+
+
 def sparse_to_dense(
     labels: Union[Tensor, rf.RawTensorTypes],
     *,
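
A short usage sketch for the new rf.stack, based only on the signature and docstring in the hunk above; the dim size and names are made-up illustration values, and it assumes a RETURNN frontend backend has been selected:

    import returnn.frontend as rf
    from returnn.tensor import Dim

    feat_dim = Dim(4, name="feat")  # hypothetical feature dim
    a = rf.zeros([feat_dim])
    b = rf.ones([feat_dim])
    # Stack two same-shaped tensors along a new dim of size 2.
    stacked, stack_dim = rf.stack([a, b])
    print(stacked.dims)  # expected to contain stack_dim (size 2) and feat_dim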
returnn/frontend/math_.py
@@ -66,6 +66,7 @@ __all__ = [
     "softmax",
     "log_softmax",
     "gating",
+    "lerp",
 ]


@@ -567,3 +568,20 @@ def gating(

     a, b = rf.split(x, axis=axis, out_dims=[out_dim, out_dim])
     return act_func(a) * gate_func(b), out_dim
+
+
+def lerp(
+    start: Tensor, end: Tensor, weight: Union[float, Tensor], *, allow_broadcast_all_sources: bool = False
+) -> Tensor:
+    """
+    Linear interpolation between start and end.
+    (Some backends might provide an optimized version of this.)
+
+    :param start:
+    :param end:
+    :param weight: scalar or tensor
+    :param allow_broadcast_all_sources:
+    :return: start + weight * (end - start)
+    """
+    # noinspection PyProtectedMember
+    return start._raw_backend.lerp(start, end, weight, allow_broadcast_all_sources=allow_broadcast_all_sources)
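
A quick numeric check of the formula documented for rf.lerp, start + weight * (end - start), here with plain floats rather than RETURNN tensors:

    def lerp_scalar(start, end, weight):
        # Same formula as documented for rf.lerp.
        return start + weight * (end - start)

    print(lerp_scalar(0.0, 10.0, 0.25))  # -> 2.5
    print(lerp_scalar(0.0, 10.0, 1.0))   # -> 10.0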
@@ -0,0 +1,55 @@
+"""
+Piecewise linear function
+"""
+
+from __future__ import annotations
+from typing import Union, Dict
+import numpy as np
+from returnn.tensor import Tensor, Dim
+import returnn.frontend as rf
+
+
+__all__ = ["PiecewiseLinear"]
+
+
+class PiecewiseLinear(rf.Module):
+    """
+    Piecewise linear function.
+    """
+
+    def __init__(self, points: Dict[Union[int, float], Union[float, Tensor]]):
+        """
+        :param points: dict of key -> value pairs.
+        """
+        super().__init__()
+        if not points:
+            raise ValueError(f"{self}: points must not be empty")
+        self._points_sorted = sorted(points.items())
+        self.points_dim = Dim(len(self._points_sorted), name="pcw_schd_pieces")
+        # Note: Use rf.Parameter to work around deepcopy issue. https://github.com/rwth-i6/returnn/issues/1541
+        self._keys = rf.Parameter(
+            rf.convert_to_tensor(
+                np.array([k for k, _ in self._points_sorted], dtype=rf.get_default_float_dtype()),
+                dims=[self.points_dim],
+            ),
+            auxiliary=True,
+        )
+        self._values = rf.Parameter(
+            rf.stack([rf.convert_to_tensor(v) for _, v in self._points_sorted], out_dim=self.points_dim)[0],
+            auxiliary=True,
+        )
+
+    def __call__(self, x: Tensor) -> Tensor:
+        """
+        :param x: (x_dims...) -> value in keys
+        :return: y: (x_dims...,y_dims...) -> value in values
+        """
+        index = rf.search_sorted(self._keys, x, axis=self.points_dim)
+        index = rf.clip_by_value(index, 1, self.points_dim.dimension - 1)
+        x_start = rf.gather(self._keys, indices=index - 1)
+        x_end = rf.gather(self._keys, indices=index)
+        x_frac = (x - x_start) / (x_end - x_start)
+        x_frac = rf.clip_by_value(x_frac, 0.0, 1.0)
+        y_start = rf.gather(self._values, indices=index - 1)
+        y_end = rf.gather(self._values, indices=index)
+        return rf.lerp(y_start, y_end, x_frac)
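Note: the new ``PiecewiseLinear`` module combines ``rf.search_sorted``, ``rf.gather`` and ``rf.lerp``; inputs below the first key or above the last key are clamped to the boundary values, since both the index and the interpolation fraction are clipped. A usage sketch, assuming the class is re-exported as ``rf.PiecewiseLinear`` via returnn/frontend/__init__.py (the points below are invented, e.g. a value schedule over a step counter):

    import returnn.frontend as rf

    schedule = rf.PiecewiseLinear({0: 0.0, 10000: 1.0, 20000: 0.5})
    step = rf.convert_to_tensor(5000.0)  # scalar tensor
    value = schedule(step)  # scalar tensor, here 0.5 (halfway between the first two points)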
@@ -4,7 +4,7 @@ or just rarely used attribs, such that we can save memory for the common case.
 """
 
 from __future__ import annotations
-from typing import TYPE_CHECKING, Optional, Union, Tuple, Sequence, Dict, List, Callable
+from typing import TYPE_CHECKING, Optional, Union, Any, Tuple, Sequence, Dict, List, Set, Callable
 import operator
 
 from returnn.util.basic import Entity
@@ -1286,15 +1286,15 @@ class _DimMixin:
         self: Dim,
         other: Dim,
         *,
-        ignore_feature_dim=False,
-        allow_same_feature_dim=False,
-        allow_same_spatial_dim=None,
-        treat_feature_as_spatial=False,
-        broadcast_matches=False,
-        unknown_spatial_matches=False,
-        undefined_matches=False,
-        derived_matches=False,
-        allow_old_behavior=False,
+        ignore_feature_dim: bool = False,
+        allow_same_feature_dim: bool = False,
+        allow_same_spatial_dim: Optional[bool] = None,
+        treat_feature_as_spatial: bool = False,
+        broadcast_matches: bool = False,
+        unknown_spatial_matches: bool = False,
+        undefined_matches: bool = False,
+        derived_matches: bool = False,
+        allow_old_behavior: bool = False,
     ) -> bool:
         """
         Compares self to other for equality.
@@ -1307,16 +1307,16 @@ class _DimMixin:
         and might potentially change in the future.
         https://github.com/rwth-i6/returnn/issues/634
 
-        :param Dim other:
-        :param bool ignore_feature_dim:
-        :param bool allow_same_feature_dim:
-        :param bool|None allow_same_spatial_dim:
-        :param bool treat_feature_as_spatial:
-        :param bool broadcast_matches:
-        :param bool unknown_spatial_matches:
-        :param bool undefined_matches:
-        :param bool derived_matches:
-        :param bool allow_old_behavior: useful e.g. for find_matching_dim_map
+        :param other:
+        :param ignore_feature_dim:
+        :param allow_same_feature_dim:
+        :param allow_same_spatial_dim:
+        :param treat_feature_as_spatial:
+        :param broadcast_matches:
+        :param unknown_spatial_matches:
+        :param undefined_matches:
+        :param derived_matches:
+        :param allow_old_behavior: useful e.g. for find_matching_dim_map
         """
         if self is other:  # first some fast path check
             return True
@@ -1820,12 +1820,13 @@ class _DimMixin:
         self._make_extra().copy_same_as = other
 
     @classmethod
-    def get_existing_tag_from_collection(cls, other, tags, is_equal_opts=None):
+    def get_existing_tag_from_collection(
+        cls, other: Dim, tags: Union[Sequence[Dim], Set[Dim]], is_equal_opts: Optional[Dict[str, Any]] = None
+    ) -> Optional[Dim]:
         """
-        :param Dim other:
-        :param list[Dim]|tuple[Dim]|set[Dim] tags:
-        :param dict[str]|None is_equal_opts: passed to Dim.is_equal
-        :rtype: Dim|None
+        :param other:
+        :param tags:
+        :param is_equal_opts: passed to Dim.is_equal
         """
         if is_equal_opts is None:
             is_equal_opts = {}
@@ -1842,13 +1843,17 @@ class _DimMixin:
         return None
 
     @classmethod
-    def get_all_dimension_tags(cls, data_list, is_equal_opts=None, unique_separate_axes=True):
-        """
-        :param list[_t.Tensor] data_list:
-        :param dict[str]|None is_equal_opts: passed to Dim.is_equal
-        :param bool unique_separate_axes: e.g. data_list=[Data with shape (B,5,5,10)] results in 4 dim tags, not 3.
+    def get_all_dimension_tags(
+        cls,
+        data_list: List[_t.Tensor],
+        is_equal_opts: Optional[Dict[str, Any]] = None,
+        unique_separate_axes: bool = True,
+    ) -> Tuple[List[Dim], util.DictRefKeys[_t.Tensor, List[Dim]]]:
+        """
+        :param data_list:
+        :param is_equal_opts: passed to Dim.is_equal
+        :param unique_separate_axes: e.g. data_list=[Data with shape (B,5,5,10)] results in 4 dim tags, not 3.
         :return: list of dimension tags, dict for data -> list of dimension tags (for each axis)
-        :rtype: (list[Dim], util.DictRefKeys[_t.Tensor, list[Dim]])
         """
         tags = []
         data_axes_dict = util.DictRefKeys()  # type: util.DictRefKeys[_t.Tensor, List[Dim]]
@@ -2323,11 +2328,11 @@ class Op:
     Op on :class:`Dim` which results in a derived :class:`Dim`.
     """
 
-    def __init__(self, kind, inputs, attribs=None):
+    def __init__(self, kind: str, inputs: List[Dim], attribs: Optional[Dict[str, Any]] = None):
         """
-        :param str kind: "add", "sub", "mul", "ceildiv"
-        :param list[Dim] inputs:
-        :param dict[str]|None attribs:
+        :param kind: "add", "sub", "mul", "ceildiv"
+        :param inputs:
+        :param attribs:
         """
         self.kind = kind
         self.inputs = inputs
@@ -151,12 +151,12 @@ class ReturnnLayersBackend(Backend[Layer]):
         raise NotImplementedError
 
     @staticmethod
-    def get_shape_tuple_raw(raw_tensor: Layer) -> Tuple[Union[int, Layer]]:
+    def get_shape_tuple_raw(raw_tensor: Layer) -> Tuple[Union[int, Layer], ...]:
         """shape"""
         raise NotImplementedError
 
     @staticmethod
-    def get_known_shape_raw(raw_tensor: Layer) -> Tuple[Optional[int]]:
+    def get_known_shape_raw(raw_tensor: Layer) -> Tuple[Optional[int], ...]:
         """known shape"""
         return raw_tensor.tensor.batch_shape
 
@@ -72,7 +72,7 @@ class TFBackend(Backend[tf.Tensor]):
         return tf.shape(raw_tensor)
 
     @staticmethod
-    def get_shape_tuple_raw(raw_tensor: tf.Tensor) -> Tuple[Union[int, tf.Tensor]]:
+    def get_shape_tuple_raw(raw_tensor: tf.Tensor) -> Tuple[Union[int, tf.Tensor], ...]:
         """
         :return: shape of raw tensor. assumes that ndim is known
         """
@@ -87,14 +87,14 @@ class TFBackend(Backend[tf.Tensor]):
         return tuple(shape)
 
     @staticmethod
-    def get_known_shape_raw(raw_tensor: tf.Tensor) -> Tuple[Optional[int]]:
+    def get_known_shape_raw(raw_tensor: tf.Tensor) -> Tuple[Optional[int], ...]:
         """
         :return: shape of raw tensor, int for static known, None otherwise. assumes that ndim is known.
         """
         return tuple(raw_tensor.shape.as_list())
 
     @staticmethod
-    def set_known_shape_raw(raw_tensor: tf.Tensor, shape: Tuple[Optional[int]]) -> None:
+    def set_known_shape_raw(raw_tensor: tf.Tensor, shape: Tuple[Optional[int], ...]) -> None:
         """
         wrap tf.Tensor.set_shape
         """
@@ -109,19 +109,19 @@ class TorchBackend(Backend[torch.Tensor]):
         return raw_tensor.dim()
 
     @staticmethod
-    def get_shape_raw(raw_tensor: torch.Tensor) -> Tuple[int]:
+    def get_shape_raw(raw_tensor: torch.Tensor) -> Tuple[int, ...]:
         """shape"""
         return tuple(raw_tensor.shape)
 
     @staticmethod
-    def get_shape_tuple_raw(raw_tensor: torch.Tensor) -> Tuple[int]:
+    def get_shape_tuple_raw(raw_tensor: torch.Tensor) -> Tuple[int, ...]:
         """
         :return: shape of raw tensor
         """
         return tuple(raw_tensor.shape)
 
     @staticmethod
-    def get_known_shape_raw(raw_tensor: torch.Tensor) -> Tuple[Optional[int]]:
+    def get_known_shape_raw(raw_tensor: torch.Tensor) -> Tuple[Optional[int], ...]:
         """
         :return: shape of raw tensor; here for PyTorch the full shape is always known
         """
@@ -501,6 +501,14 @@ class TorchBackend(Backend[torch.Tensor]):
         out.raw_tensor = torch.cat((prev_accum.raw_tensor, source_raw), dim=prev_accum.get_axis_from_description(axis))
         return out
 
+    @staticmethod
+    def stack(sources: Sequence[Tensor], *, out_dim: Dim) -> Tensor:
+        """stack"""
+        out_dims = (out_dim,) + sources[0].dims
+        out = Tensor("stack", dims=out_dims, dtype=sources[0].dtype, sparse_dim=sources[0].sparse_dim)
+        out.raw_tensor = torch.stack([s.copy_compatible_to_dims_raw(out_dims[1:]) for s in sources], dim=0)
+        return out
+
     @staticmethod
     def activation_raw(raw_tensor: torch.Tensor, func: str) -> torch.Tensor:
         """
@@ -1094,6 +1102,38 @@ class TorchBackend(Backend[torch.Tensor]):
         out.raw_tensor = torch.where(cond_bc_raw, true_bc_raw, false_bc_raw)
         return out
 
+    @staticmethod
+    def search_sorted(
+        sorted_seq: Tensor, values: Tensor, *, axis: Dim, side: str = "left", out_dtype: str = "int32"
+    ) -> Tensor:
+        """search sorted"""
+        if out_dtype == "int32":
+            out_int32 = True
+        elif out_dtype == "int64":
+            out_int32 = False
+        else:
+            raise NotImplementedError(f"search_sorted: out_dtype {out_dtype} not supported")
+        if axis not in sorted_seq.dims:
+            raise ValueError(f"search_sorted: axis {axis} not in sorted_seqs {sorted_seq}")
+        if axis.need_masking():
+            raise NotImplementedError(f"search_sorted: dynamic axis {axis} not supported")
+        sorted_seq_dims = [dim for dim in sorted_seq.dims if dim != axis] + [axis]
+        for dim in sorted_seq_dims[:-1]:
+            if dim not in values.dims:
+                raise ValueError(f"search_sorted: dim {dim} in sorted_seq {sorted_seq} but not in values {values}")
+        values_rem_dims = [dim for dim in values.dims if dim not in sorted_seq_dims[:-1]]
+        values_dims = sorted_seq_dims[:-1] + values_rem_dims
+        sorted_seq_raw: torch.Tensor = sorted_seq.copy_compatible_to_dims_raw(sorted_seq_dims)
+        values_raw: torch.Tensor = values.copy_compatible_to_dims_raw(values_dims)
+        if len(values_rem_dims) != 1:
+            values_raw = values_raw.reshape(values_raw.shape[: len(sorted_seq_dims[:-1])] + (-1,))
+        out = Tensor("search_sorted", dims=sorted_seq_dims[:-1] + values_rem_dims, dtype=out_dtype, sparse_dim=axis)
+        out_raw = torch.searchsorted(sorted_seq_raw, values_raw, side=side, out_int32=out_int32)
+        if len(values_rem_dims) != 1:
+            out_raw = out_raw.reshape([dim.get_dim_value() for dim in out.dims])
+        out.raw_tensor = out_raw
+        return out
+
     @staticmethod
     def clip_by_value(
         x: Tensor,
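Note: the backend implementation moves the searched ``axis`` to the last position (``torch.searchsorted`` searches along the last axis), flattens the remaining value dims into one where needed, and marks the result as sparse over ``axis``. The underlying PyTorch call in isolation (example values invented):

    import torch

    sorted_seq = torch.tensor([0.0, 1.0, 2.0, 3.0])
    values = torch.tensor([0.5, 1.0, 2.7])
    idx = torch.searchsorted(sorted_seq, values, side="left", out_int32=True)
    # idx == tensor([1, 1, 3], dtype=torch.int32)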
@@ -1119,6 +1159,22 @@ class TorchBackend(Backend[torch.Tensor]):
         out.raw_tensor = torch.clamp(x_bc_raw, min_bc_raw, max_bc_raw)
         return out
 
+    @staticmethod
+    def lerp(
+        start: Tensor, end: Tensor, weight: Union[float, Tensor], *, allow_broadcast_all_sources: bool = False
+    ) -> Tensor:
+        """lerp"""
+        weight = rf.convert_to_tensor(weight, _backend=TorchBackend, device=start.device)
+        out = Tensor.get_common_data(
+            [start, end, weight], allow_broadcast_all_sources=allow_broadcast_all_sources, name="lerp"
+        )
+        out.raw_tensor = torch.lerp(
+            start.copy_compatible_to_dims_raw(out.dims),
+            end.copy_compatible_to_dims_raw(out.dims),
+            weight.copy_compatible_to_dims_raw(out.dims),
+        )
+        return out
+
     @staticmethod
     def matmul(a: _TT, b: _TT, *, reduce: Union[Dim, Sequence[Dim]], use_mask: bool = True) -> _TT:
         """
@@ -0,0 +1,28 @@
+"""
+Some mathematical functions, in pure NumPy.
+"""
+
+from __future__ import annotations
+from typing import Union, Dict
+import numpy
+
+
+def next_power_of_two(n: int) -> int:
+    """next power of two, >= n"""
+    return 2 ** (int(n - 1).bit_length())
+
+
+class PiecewiseLinear:
+    """
+    Piecewise linear function.
+    """
+
+    def __init__(self, values: Dict[Union[int, float], Union[int, float]]):
+        self._sorted_items = sorted(values.items())
+        self._sorted_keys = numpy.array([x for x, _ in self._sorted_items])
+        self._sorted_values = numpy.array([y for _, y in self._sorted_items])
+
+    def __call__(self, x: Union[int, float]) -> Union[int, float]:
+        steps = self._sorted_keys
+        values = self._sorted_values
+        return numpy.interp(x, steps, values)
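Note: this pure-NumPy ``PiecewiseLinear`` wraps ``numpy.interp``, which interpolates linearly between the given points and clamps to the boundary values outside their range. A quick sketch using the same points as the test added in tests/test_Util.py further below:

    from returnn.util.math import PiecewiseLinear

    f = PiecewiseLinear({1: 2, 3: 4, 5: 1})
    assert f(2) == 3.0 and f(4) == 2.5  # linear interpolation between neighboring points
    assert f(0) == 2.0 and f(6) == 1.0  # clamped outside the key range [1, 5]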
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: returnn
-Version: 1.20240613.164354
+Version: 1.20240617.171744
 Summary: The RWTH extensible training framework for universal recurrent neural networks
 Home-page: https://github.com/rwth-i6/returnn/
 Author: Albert Zeyer
@@ -184,6 +184,7 @@ returnn/frontend/matmul.py
 returnn/frontend/module.py
 returnn/frontend/normalization.py
 returnn/frontend/parameter.py
+returnn/frontend/piecewise_linear.py
 returnn/frontend/rand.py
 returnn/frontend/rec.py
 returnn/frontend/reduce.py
@@ -368,6 +369,7 @@ tests/test_rf_label_smoothing.py
 tests/test_rf_loop.py
 tests/test_rf_math.py
 tests/test_rf_normalization.py
+tests/test_rf_piecewise_linear.py
 tests/test_rf_rec.py
 tests/test_rf_reduce.py
 tests/test_rf_signal.py
@@ -96,6 +96,26 @@ def test_slice_pad_zeros():
     assert_equal(list(slice_pad_zeros(np.array([1, 2, 3, 4]), begin=2, end=6)), [3, 4, 0, 0])
 
 
+def test_math_PiecewiseLinear():
+    from returnn.util.math import PiecewiseLinear
+
+    eps = 1e-5
+    f = PiecewiseLinear({1: 2, 3: 4, 5: 1})
+    assert_equal(f(0), 2)
+    assert_equal(f(1 - eps), 2)
+    assert_equal(f(1), 2)
+    assert_almost_equal(f(1 + eps), 2, decimal=4)
+    assert_equal(f(2), 3)
+    assert_almost_equal(f(3 - eps), 4, decimal=4)
+    assert_equal(f(3), 4)
+    assert_almost_equal(f(3 + eps), 4, decimal=4)
+    assert_equal(f(4), 2.5)
+    assert_almost_equal(f(5 - eps), 1, decimal=4)
+    assert_equal(f(5), 1)
+    assert_equal(f(5 + eps), 1)
+    assert_equal(f(6), 1)
+
+
 def test_parse_orthography_into_symbols():
     assert_equal(list("hi"), parse_orthography_into_symbols("hi"))
     assert_equal(list(" hello "), parse_orthography_into_symbols(" hello "))