returnn 1.20240610.120719__tar.gz → 1.20240613.164354__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of returnn might be problematic.

Files changed (445)
  1. {returnn-1.20240610.120719/returnn.egg-info → returnn-1.20240613.164354}/PKG-INFO +1 -1
  2. returnn-1.20240613.164354/_setup_info_generated.py +2 -0
  3. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/basic.py +1 -1
  4. returnn-1.20240610.120719/returnn/datasets/concat_files.py → returnn-1.20240613.164354/returnn/datasets/distrib_files.py +13 -11
  5. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/dims.py +34 -7
  6. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/normalization.py +23 -5
  7. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/_tensor_extra.py +13 -10
  8. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/frontend/_backend.py +7 -17
  9. {returnn-1.20240610.120719 → returnn-1.20240613.164354/returnn.egg-info}/PKG-INFO +1 -1
  10. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn.egg-info/SOURCES.txt +1 -1
  11. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_Dataset.py +8 -8
  12. returnn-1.20240610.120719/_setup_info_generated.py +0 -2
  13. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/.editorconfig +0 -0
  14. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/.gitignore +0 -0
  15. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/.gitmodules +0 -0
  16. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/.kateconfig +0 -0
  17. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/CHANGELOG.md +0 -0
  18. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/CODEOWNERS +0 -0
  19. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/CONTRIBUTING.md +0 -0
  20. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/LICENSE +0 -0
  21. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/MANIFEST.in +0 -0
  22. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/README.rst +0 -0
  23. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/__init__.py +0 -0
  24. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/12AX.cluster_map +0 -0
  25. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/_setup_returnn_env.py +0 -0
  26. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-fwd.config +0 -0
  27. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-horovod-mpi.py +0 -0
  28. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-horovod-mpi.py.sh +0 -0
  29. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-horovod-mpi.sh +0 -0
  30. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-hyper-param-tuning.config +0 -0
  31. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-iter-dataset.py +0 -0
  32. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-list-devices.py +0 -0
  33. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-lua-torch-layer.config +0 -0
  34. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-pretrain.config +0 -0
  35. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-record-and-push-to-webserver.py +0 -0
  36. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-returnn-as-framework.py +0 -0
  37. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-rf-pt-benchmark.py +0 -0
  38. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-rf.config +0 -0
  39. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-rhn-enwik8.config +0 -0
  40. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-sprint-interface.py +0 -0
  41. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-att-copy.config +0 -0
  42. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-attention.config +0 -0
  43. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-chunking-blstm.12ax.config +0 -0
  44. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-contribrnn-lstm.12ax.config +0 -0
  45. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-enc-dec.config +0 -0
  46. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-hard-att-copy.config +0 -0
  47. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-lstm-benchmark.py +0 -0
  48. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-maxgradnorm-lstm.12ax.config +0 -0
  49. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-native-lstm-lowmem.12ax.config +0 -0
  50. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-native-lstm.12ax.config +0 -0
  51. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-native-lstm2.12ax.config +0 -0
  52. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-native-lstm2.12ax.tuned.config +0 -0
  53. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-neural-transducer.12ax.config +0 -0
  54. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-rec-explicit-lstm.config +0 -0
  55. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-rec-explicit-rnn.config +0 -0
  56. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-rec-self-att.config +0 -0
  57. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-search-compiled-graph.py +0 -0
  58. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-tf-vanilla-lstm.12ax.config +0 -0
  59. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-timit-lstm-ctc.config +0 -0
  60. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-torch.config +0 -0
  61. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo-upd-mult-model.lstm.12ax.config +0 -0
  62. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/demo.sh +0 -0
  63. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/IAM_lines/a01-000u-00.png +0 -0
  64. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/IAM_lines/a01-007-04.png +0 -0
  65. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/IAM_lines/a01-007-06.png +0 -0
  66. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/README.txt +0 -0
  67. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/chars.txt +0 -0
  68. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/config_demo +0 -0
  69. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/config_fwd +0 -0
  70. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/config_real +0 -0
  71. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/create_IAM_dataset.py +0 -0
  72. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/decode.py +0 -0
  73. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/features/raw/demo.h5 +0 -0
  74. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/go.sh +0 -0
  75. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/lines.txt +0 -0
  76. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/split/eval.txt +0 -0
  77. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/split/train.txt +0 -0
  78. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/IAM/split/valid.txt +0 -0
  79. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/README.md +0 -0
  80. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/artificial/create_test_h5.py +0 -0
  81. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/artificial/forwardconfig +0 -0
  82. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/artificial/go.sh +0 -0
  83. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/artificial/trainconfig +0 -0
  84. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/artificial_rgb/create_test_h5.py +0 -0
  85. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/artificial_rgb/forwardconfig +0 -0
  86. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/artificial_rgb/go.sh +0 -0
  87. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/demos/mdlstm/artificial_rgb/trainconfig +0 -0
  88. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/pyproject.toml +0 -0
  89. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/requirements.txt +0 -0
  90. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/__init__.py +0 -0
  91. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/__main__.py +0 -0
  92. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/__old_mod_loader__.py +0 -0
  93. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/__setup__.py +0 -0
  94. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/config.py +0 -0
  95. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/__init__.py +0 -0
  96. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/audio.py +0 -0
  97. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/bundle_file.py +0 -0
  98. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/cached.py +0 -0
  99. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/cached2.py +0 -0
  100. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/generating.py +0 -0
  101. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/hdf.py +0 -0
  102. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/lm.py +0 -0
  103. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/map.py +0 -0
  104. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/meta.py +0 -0
  105. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/multi_proc.py +0 -0
  106. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/normalization_data.py +0 -0
  107. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/numpy_dump.py +0 -0
  108. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/raw_wav.py +0 -0
  109. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/sprint.py +0 -0
  110. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/stereo.py +0 -0
  111. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/util/__init__.py +0 -0
  112. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/util/feature_extraction.py +0 -0
  113. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/util/strings.py +0 -0
  114. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/datasets/util/vocabulary.py +0 -0
  115. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/engine/__init__.py +0 -0
  116. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/engine/base.py +0 -0
  117. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/engine/batch.py +0 -0
  118. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/__init__.py +0 -0
  119. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/__main__.py +0 -0
  120. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/.git +0 -0
  121. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/.gitignore +0 -0
  122. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/LICENSE +0 -0
  123. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/README.md +0 -0
  124. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/aligner.gif +0 -0
  125. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/check.png +0 -0
  126. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/core.cu +0 -0
  127. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/core.h +0 -0
  128. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/core_cpu.cpp +0 -0
  129. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/LICENSE +0 -0
  130. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/MANIFEST.in +0 -0
  131. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/README.md +0 -0
  132. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/binding.cpp +0 -0
  133. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.cu +0 -0
  134. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.h +0 -0
  135. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/requirements.txt +0 -0
  136. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/setup.py +0 -0
  137. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/__init__.py +0 -0
  138. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/test.py +0 -0
  139. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/ref_rna.py +0 -0
  140. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/setup.py +0 -0
  141. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op.cc +0 -0
  142. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op_kernel_tmpl.h +0 -0
  143. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/warp_rna/__init__.py +0 -0
  144. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/WarpRna/warp-rna/test.cpp +0 -0
  145. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/__init__.py +0 -0
  146. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/graph_editor/README.md +0 -0
  147. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/graph_editor/__init__.py +0 -0
  148. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/graph_editor/edit.py +0 -0
  149. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/graph_editor/reroute.py +0 -0
  150. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/graph_editor/select.py +0 -0
  151. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/graph_editor/subgraph.py +0 -0
  152. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/graph_editor/transform.py +0 -0
  153. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/extern/graph_editor/util.py +0 -0
  154. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/forward_iface.py +0 -0
  155. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/__init__.py +0 -0
  156. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_backend.py +0 -0
  157. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_native/__init__.py +0 -0
  158. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_native/backend.cpp +0 -0
  159. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_native/backend.hpp +0 -0
  160. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_native/module.cpp +0 -0
  161. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_native/module.hpp +0 -0
  162. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_native/py_utils.hpp +0 -0
  163. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_native/tensor_ops.cpp +0 -0
  164. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_native/tensor_ops.hpp +0 -0
  165. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_numpy_backend.py +0 -0
  166. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_random_journal.py +0 -0
  167. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/_utils.py +0 -0
  168. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/array_.py +0 -0
  169. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/attention.py +0 -0
  170. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/audio/__init__.py +0 -0
  171. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/audio/mel.py +0 -0
  172. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/audio/specaugment.py +0 -0
  173. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/backend.py +0 -0
  174. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/cond.py +0 -0
  175. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/const.py +0 -0
  176. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/container.py +0 -0
  177. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/control_flow_ctx.py +0 -0
  178. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/conv.py +0 -0
  179. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/decoder/__init__.py +0 -0
  180. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/decoder/transformer.py +0 -0
  181. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/device.py +0 -0
  182. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/dropout.py +0 -0
  183. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/dtype.py +0 -0
  184. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/encoder/__init__.py +0 -0
  185. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/encoder/base.py +0 -0
  186. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/encoder/conformer.py +0 -0
  187. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/gradient.py +0 -0
  188. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/graph.py +0 -0
  189. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/hooks.py +0 -0
  190. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/init.py +0 -0
  191. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/label_smoothing.py +0 -0
  192. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/linear.py +0 -0
  193. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/loop.py +0 -0
  194. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/loss.py +0 -0
  195. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/math_.py +0 -0
  196. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/matmul.py +0 -0
  197. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/module.py +0 -0
  198. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/parameter.py +0 -0
  199. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/rand.py +0 -0
  200. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/rec.py +0 -0
  201. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/reduce.py +0 -0
  202. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/run_ctx.py +0 -0
  203. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/signal.py +0 -0
  204. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/state.py +0 -0
  205. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/tensor_array.py +0 -0
  206. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/frontend/types.py +0 -0
  207. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/import_/__init__.py +0 -0
  208. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/import_/common.py +0 -0
  209. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/import_/git.py +0 -0
  210. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/import_/import_.py +0 -0
  211. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/learning_rate_control.py +0 -0
  212. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/log.py +0 -0
  213. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/native_op.cpp +0 -0
  214. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/native_op.py +0 -0
  215. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/pretrain.py +0 -0
  216. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/sprint/__init__.py +0 -0
  217. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/sprint/cache.py +0 -0
  218. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/sprint/control.py +0 -0
  219. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/sprint/error_signals.py +0 -0
  220. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/sprint/extern_interface.py +0 -0
  221. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/sprint/interface.py +0 -0
  222. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/README.md +0 -0
  223. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/__init__.py +0 -0
  224. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/_dim_extra.py +0 -0
  225. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/_tensor_mixin_base.py +0 -0
  226. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/_tensor_op_overloads.py +0 -0
  227. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/control_flow_ctx.py +0 -0
  228. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/dim.py +0 -0
  229. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/marked_dim.py +0 -0
  230. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/tensor.py +0 -0
  231. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/tensor_dict.py +0 -0
  232. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tensor/utils.py +0 -0
  233. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/__init__.py +0 -0
  234. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/compat.py +0 -0
  235. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/data_pipeline.py +0 -0
  236. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/distributed.py +0 -0
  237. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/engine.py +0 -0
  238. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/README.md +0 -0
  239. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/__init__.py +0 -0
  240. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/_backend.py +0 -0
  241. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/_utils.py +0 -0
  242. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/cond.py +0 -0
  243. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/config_entry_points.py +0 -0
  244. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/debug_eager_mode.py +0 -0
  245. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/dims.py +0 -0
  246. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/layer.py +0 -0
  247. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/loop.py +0 -0
  248. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/make_layer.py +0 -0
  249. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/masked_computation.py +0 -0
  250. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/parameter_assign.py +0 -0
  251. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_layers/prev_tensor_ref.py +0 -0
  252. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_low_level/__init__.py +0 -0
  253. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/frontend_low_level/_backend.py +0 -0
  254. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/horovod.py +0 -0
  255. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/hyper_param_tuning.py +0 -0
  256. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/layers/__init__.py +0 -0
  257. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/layers/base.py +0 -0
  258. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/layers/basic.py +0 -0
  259. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/layers/rec.py +0 -0
  260. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/layers/segmental_model.py +0 -0
  261. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/layers/signal_processing.py +0 -0
  262. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/layers/variable.py +0 -0
  263. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/native_op.py +0 -0
  264. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/network.py +0 -0
  265. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/sprint.py +0 -0
  266. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/updater.py +0 -0
  267. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/util/__init__.py +0 -0
  268. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/util/basic.py +0 -0
  269. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/util/data.py +0 -0
  270. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/util/gradient_checkpoint.py +0 -0
  271. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/util/ken_lm.py +0 -0
  272. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/tf/util/open_fst.py +0 -0
  273. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/README.md +0 -0
  274. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/__init__.py +0 -0
  275. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/data/__init__.py +0 -0
  276. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/data/extern_data.py +0 -0
  277. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/data/pipeline.py +0 -0
  278. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/data/queued_data_iter.py +0 -0
  279. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/data/returnn_dataset_wrapper.py +0 -0
  280. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/data/tensor_utils.py +0 -0
  281. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/distributed.py +0 -0
  282. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/engine.py +0 -0
  283. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/frontend/__init__.py +0 -0
  284. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/frontend/_rand.py +0 -0
  285. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/frontend/bridge.py +0 -0
  286. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/frontend/raw_ops.py +0 -0
  287. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/updater.py +0 -0
  288. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/util/README.md +0 -0
  289. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/util/__init__.py +0 -0
  290. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/util/diagnose_gpu.py +0 -0
  291. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/torch/util/scaled_gradient.py +0 -0
  292. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/__init__.py +0 -0
  293. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/basic.py +0 -0
  294. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/better_exchook.py +0 -0
  295. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/bpe.py +0 -0
  296. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/debug.py +0 -0
  297. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/debug_helpers.py +0 -0
  298. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/file_cache.py +0 -0
  299. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/fsa.py +0 -0
  300. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/literal_py_to_pickle.py +0 -0
  301. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/math.py +0 -0
  302. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/multi_proc_non_daemonic_spawn.py +0 -0
  303. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/native_code_compiler.py +0 -0
  304. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/pprint.py +0 -0
  305. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/py-to-pickle.cpp +0 -0
  306. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/py_compat.py +0 -0
  307. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/py_ext_mod_compiler.py +0 -0
  308. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/result_with_reason.py +0 -0
  309. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/sig_proc.py +0 -0
  310. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/task_system.py +0 -0
  311. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/train_proc_manager.py +0 -0
  312. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn/util/watch_memory.py +0 -0
  313. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn.egg-info/dependency_links.txt +0 -0
  314. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/returnn.egg-info/top_level.txt +0 -0
  315. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/rnn.py +0 -0
  316. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/setup.cfg +0 -0
  317. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/setup.py +0 -0
  318. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/DummySprintExec.py +0 -0
  319. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm-inspection-profile.xml +0 -0
  320. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/.gitignore +0 -0
  321. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/.name +0 -0
  322. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/codeStyleSettings.xml +0 -0
  323. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/codeStyles/Project.xml +0 -0
  324. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/codeStyles/codeStyleConfig.xml +0 -0
  325. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/inspectionProfiles/Project_Default.xml +0 -0
  326. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/inspectionProfiles/profiles_settings.xml +0 -0
  327. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/misc.xml +0 -0
  328. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/modules.xml +0 -0
  329. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/returnn.iml +0 -0
  330. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/PyCharm.idea/scopes/scope_settings.xml +0 -0
  331. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/_set_num_threads1.py +0 -0
  332. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/_setup_returnn_env.py +0 -0
  333. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/_setup_test_env.py +0 -0
  334. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/bpe-unicode-demo.codes +0 -0
  335. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/bpe-unicode-demo.vocab +0 -0
  336. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/lexicon_opt.fst +0 -0
  337. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/lexicon_opt.isyms +0 -0
  338. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/lexicon_opt.jpg +0 -0
  339. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/lexicon_opt.osyms +0 -0
  340. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/lint_common.py +0 -0
  341. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/pycharm-inspect.py +0 -0
  342. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/pylint.py +0 -0
  343. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/returnn-as-framework.py +0 -0
  344. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/rf_utils.py +0 -0
  345. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/spelling.dic +0 -0
  346. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_Config.py +0 -0
  347. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_Fsa.py +0 -0
  348. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_GeneratingDataset.py +0 -0
  349. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_HDFDataset.py +0 -0
  350. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_LearningRateControl.py +0 -0
  351. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_Log.py +0 -0
  352. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_MultiProcDataset.py +0 -0
  353. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_Pretrain.py +0 -0
  354. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_ResNet.py +0 -0
  355. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_SprintDataset.py +0 -0
  356. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_SprintInterface.py +0 -0
  357. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TFEngine.py +0 -0
  358. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TFNativeOp.py +0 -0
  359. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TFNetworkLayer.py +0 -0
  360. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TFNetworkRecLayer.py +0 -0
  361. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TFNetworkSigProcLayer.py +0 -0
  362. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TFUpdater.py +0 -0
  363. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TFUtil.py +0 -0
  364. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TF_determinism.py +0 -0
  365. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TaskSystem.py +0 -0
  366. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TaskSystem_SharedMem.py +0 -0
  367. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_TranslationDataset.py +0 -0
  368. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_Util.py +0 -0
  369. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_demos.py +0 -0
  370. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_fork_exec.py +0 -0
  371. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_hdf_dump.py +0 -0
  372. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_array.py +0 -0
  373. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_attention.py +0 -0
  374. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_base.py +0 -0
  375. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_cond.py +0 -0
  376. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_const.py +0 -0
  377. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_container.py +0 -0
  378. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_conv.py +0 -0
  379. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_encoder_conformer.py +0 -0
  380. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_gradient.py +0 -0
  381. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_label_smoothing.py +0 -0
  382. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_loop.py +0 -0
  383. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_math.py +0 -0
  384. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_normalization.py +0 -0
  385. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_rec.py +0 -0
  386. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_reduce.py +0 -0
  387. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_rf_signal.py +0 -0
  388. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_tensor.py +0 -0
  389. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_tools.py +0 -0
  390. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_torch_dataset.py +0 -0
  391. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_torch_engine.py +0 -0
  392. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_torch_frontend.py +0 -0
  393. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tests/test_torch_internal_frontend.py +0 -0
  394. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/_setup_returnn_env.py +0 -0
  395. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/analyze-dataset-batches.py +0 -0
  396. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/bliss-collect-seq-lens.py +0 -0
  397. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/bliss-dump-text.py +0 -0
  398. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/bliss-get-segment-names.py +0 -0
  399. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/bliss-to-ogg-zip.py +0 -0
  400. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/bpe-create-lexicon.py +0 -0
  401. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/calculate-word-error-rate.py +0 -0
  402. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/cleanup-old-models.py +0 -0
  403. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/collect-orth-symbols.py +0 -0
  404. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/collect-words.py +0 -0
  405. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/compile_native_op.py +0 -0
  406. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/compile_tf_graph.py +0 -0
  407. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/debug-dump-search-scores.py +0 -0
  408. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/debug-plot-search-scores.py +0 -0
  409. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/dump-dataset-raw-strings.py +0 -0
  410. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/dump-dataset.py +0 -0
  411. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/dump-forward-stats.py +0 -0
  412. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/dump-forward.py +0 -0
  413. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/dump-network-json.py +0 -0
  414. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/dump-pickle.py +0 -0
  415. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/extract_state_tying_from_dataset.py +0 -0
  416. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/get-attention-weights.py +0 -0
  417. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/get-best-model-epoch.py +0 -0
  418. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/hdf_dump.py +0 -0
  419. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/hdf_dump_translation_dataset.py +0 -0
  420. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/import-blocks-mt-model.py +0 -0
  421. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/import-t2t-mt-model.py +0 -0
  422. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/.gitignore +0 -0
  423. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/Makefile +0 -0
  424. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/README.md +0 -0
  425. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/example/README.md +0 -0
  426. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/example/libs_list +0 -0
  427. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.config +0 -0
  428. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.keep_over_epoch.lstm2.config +0 -0
  429. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/example/rescore_lattice.sh +0 -0
  430. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/example/state_vars_list +0 -0
  431. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/example/tensor_names_list +0 -0
  432. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/file.h +0 -0
  433. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/htklatticerescorer.cc +0 -0
  434. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/htklatticerescorer.h +0 -0
  435. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/main.cc +0 -0
  436. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/rescorer.h +0 -0
  437. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/vocabulary.cc +0 -0
  438. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/lattice_rescorer/vocabulary.h +0 -0
  439. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/tf_avg_checkpoints.py +0 -0
  440. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/tf_inspect_checkpoint.py +0 -0
  441. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/tf_inspect_summary_log.py +0 -0
  442. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/torch_avg_checkpoints.py +0 -0
  443. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/torch_export_to_onnx.py +0 -0
  444. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/torch_inspect_checkpoint.py +0 -0
  445. {returnn-1.20240610.120719 → returnn-1.20240613.164354}/tools/torch_inspect_checkpoint_and_opt.py +0 -0
returnn-1.20240613.164354/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: returnn
- Version: 1.20240610.120719
+ Version: 1.20240613.164354
  Summary: The RWTH extensible training framework for universal recurrent neural networks
  Home-page: https://github.com/rwth-i6/returnn/
  Author: Albert Zeyer
returnn-1.20240613.164354/_setup_info_generated.py
@@ -0,0 +1,2 @@
+ version = '1.20240613.164354'
+ long_version = '1.20240613.164354+git.f006453'
returnn/datasets/basic.py
@@ -1367,7 +1367,7 @@ def get_dataset_class(name: Union[str, Type[Dataset]]) -> Optional[Type[Dataset]
  "raw_wav",
  "map",
  "multi_proc",
- "concat_files",
+ "distrib_files",
  ]
  for mod_name in mod_names:
  mod = import_module("returnn.datasets.%s" % mod_name)
returnn/datasets/concat_files.py → returnn/datasets/distrib_files.py
@@ -1,5 +1,5 @@
  """
- :class:`ConcatFilesDataset `
+ :class:`DistributeFilesDataset`

  https://github.com/rwth-i6/returnn/issues/1519
  """
@@ -23,19 +23,21 @@ from multiprocessing.connection import Connection as mpConnection
  _mp = NonDaemonicSpawnContext(process_pre_init_func=SubProcCopyGlobalConfigPreInitFunc())


- __all__ = ["ConcatFilesDataset"]
+ __all__ = ["DistributeFilesDataset"]

  Filename = str
  FileTree = Union[Filename, Tuple["FileTree", ...], Dict[Any, "FileTree"], List["FileTree"]]


- class ConcatFilesDataset(CachedDataset2):
+ class DistributeFilesDataset(CachedDataset2):
  """
- This is similar to :class:`ConcatDataset`, but instead of concatenating datasets,
- we distribute files over subepochs,
- and then create a sub dataset for every sub epoch
- for a given subset of the files
- via the given ``get_sub_epoch_dataset``.
+ Dataset that distributes files over subepochs and then creates a
+ sub dataset for every sub epoch for a given (random) subset of the files.
+ The sub dataset is user-defined via a function ``get_sub_epoch_dataset``.
+ Thus, this dataset wraps the sub datasets.
+
+ It is conceptually very similar to :class:`ConcatDataset` in the sense
+ that it concatenates all the sub datasets together to form one larger dataset.

  This scheme allows to shuffle over the files,
  which makes shuffling much more efficient over a large dataset
@@ -70,7 +72,7 @@ class ConcatFilesDataset(CachedDataset2):
  }

  train = {
- "class": "ConcatFilesDataset",
+ "class": "DistributeFilesDataset",
  "files": [
  "/nfs/big_data_1.hdf",
  ...
@@ -105,7 +107,7 @@ class ConcatFilesDataset(CachedDataset2):
  }

  train = {
- "class": "ConcatFilesDataset",
+ "class": "DistributeFilesDataset",
  "files": [
  ("/nfs/alignment_1.hdf", "/nfs/features_1.hdf"),
  ...
@@ -321,7 +323,7 @@ class ConcatFilesDataset(CachedDataset2):
  # [[1,1], [78], [120], []] or [[1,1,78], [120], [], []].
  # Or consider [5,5]+[10]*7, partition_epoch=5, which has avg size 16.
  # A simple algorithm could end up with [[5,5,10], [10,10], [10,10], [10,10], []].
- # See test_ConcatFilesDataset_get_files_per_sub_epochs for some test cases.
+ # See test_DistributeFilesDataset_get_files_per_sub_epochs for some test cases.
  assert len(files_order) >= partition_epoch
  files_per_sub_epochs = [[] for _ in range(partition_epoch)]
  assert len(files_per_sub_epochs) == partition_epoch
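The config examples in the docstring hunks above are truncated. A minimal sketch of how the renamed dataset is wired up, combining the ``files`` list from the docstring with the ``get_sub_epoch_dataset`` callback shape used in tests/test_Dataset.py below; the file paths and the partition_epoch value here are made up for illustration:

    def get_sub_epoch_dataset(files_subepoch):
        # Build the dataset dict for one sub-epoch from its subset of files
        # (same callback shape as _get_sub_epoch_dataset in the tests below).
        return {"class": "HDFDataset", "files": files_subepoch, "seq_ordering": "default"}

    train = {
        "class": "DistributeFilesDataset",
        "files": ["/nfs/big_data_1.hdf", "/nfs/big_data_2.hdf"],  # hypothetical paths
        "get_sub_epoch_dataset": get_sub_epoch_dataset,
        "partition_epoch": 2,  # files are split into 2 sub-epochs per full epoch
    }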
returnn/frontend/dims.py
@@ -2,7 +2,6 @@
  Utilities for dimension tags, dimensions, axes.
  """

-
  from __future__ import annotations
  from typing import Optional, Union, TypeVar, Sequence, Tuple
  from returnn.tensor import Tensor, Dim
@@ -18,6 +17,7 @@ __all__ = [
  "replace_dim",
  "dim_match_priority_when_needed",
  "num_elements_of_shape",
+ "masked_fraction_of_shape",
  ]


@@ -122,11 +122,19 @@ def dim_match_priority_when_needed(dim: Dim, *other_dims: Dim) -> Dim:
  return dim


- def num_elements_of_shape(dims: Sequence[Dim]) -> Union[int, Tensor]:
+ def num_elements_of_shape(dims: Union[Dim, Sequence[Dim]], *, use_mask: bool = True) -> Union[int, Tensor]:
  """
  :param dims:
+ :param use_mask:
  :return: num elements of a tensor of shape dims, properly considering masking
  """
+ if isinstance(dims, Dim):
+ dims = [dims]
+ if not use_mask:
+ n = 1
+ for dim in dims:
+ n *= dim.get_dim_value_tensor()
+ return n
  if all(dim.is_static() for dim in dims):
  n = 1
  for dim in dims:
@@ -135,7 +143,7 @@ def num_elements_of_shape(dims: Sequence[Dim]) -> Union[int, Tensor]:

  n = 1
  dims = list(dims)
- dims.sort(key=lambda dim: -dim.dyn_size_ext.batch_ndim if dim.dyn_size_ext else 0)
+ dims.sort(key=lambda dim__: -dim__.dyn_size_ext.batch_ndim if dim__.dyn_size_ext else 0)
  while dims:
  dim = dims.pop(0)
  if dim.is_static():
@@ -144,10 +152,29 @@ def num_elements_of_shape(dims: Sequence[Dim]) -> Union[int, Tensor]:
  # E.g. dyn_size_ext is shape [B], and self has shape [B,T].
  # Due to the sorting of dims above, dims will be [T,B], and we will first process T.
  # We want to sum over dyn_size_ext, but then we need to remove the other dims it covers.
+ dims_to_reduce = []
  for dim_ in dim.dyn_size_ext.dims:
- assert dim_ in dims # num elements not really well-defined then
- assert not dim_.need_masking() # not implemented
- dims.remove(dim_)
- n_ = rf.reduce_sum(dim.dyn_size_ext, axis=dim.dyn_size_ext.dims)
+ if dim_ in dims:
+ assert not dim_.need_masking() # not implemented
+ dims.remove(dim_)
+ dims_to_reduce.append(dim_)
+ n_ = rf.reduce_sum(dim.dyn_size_ext, axis=dims_to_reduce) if dims_to_reduce else dim.dyn_size_ext
  n *= n_
  return n
+
+
+ def masked_fraction_of_shape(dims: Union[Dim, Sequence[Dim]], *, inverse: bool = False) -> Union[int, float, Tensor]:
+ """
+ :param dims:
+ :param inverse: if True, return the inverse of the fraction
+ :return: :func:`num_elements_of_shape`(dims) / prod(dims) if not inverse else prod(dims) / num_elements
+ """
+ if isinstance(dims, Dim):
+ dims = [dims]
+ if not any(dim.need_masking() for dim in dims):
+ return 1
+ num_elems_masked = num_elements_of_shape(dims)
+ num_elems_total = 1
+ for dim in dims:
+ num_elems_total *= dim.get_dim_value_tensor()
+ return (num_elems_masked / num_elems_total) if not inverse else (num_elems_total / num_elems_masked)
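To make the semantics of the new ``use_mask`` argument and of ``masked_fraction_of_shape`` concrete, a small worked example with assumed sizes (a batch of 2 sequences with lengths [3, 5], padded to time length 5):

    # Assumed sizes, not from the diff: dims = [batch, time], batch = 2, seq lengths = [3, 5].
    masked_count = 3 + 5                       # num_elements_of_shape([batch, time])                  -> 8
    padded_count = 2 * 5                       # num_elements_of_shape([batch, time], use_mask=False)  -> 10
    fraction = masked_count / padded_count     # masked_fraction_of_shape([batch, time])               -> 0.8
    inverse = padded_count / masked_count      # masked_fraction_of_shape([batch, time], inverse=True) -> 1.25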
returnn/frontend/normalization.py
@@ -2,7 +2,6 @@
  Normalization functions such as batch norm
  """

-
  from __future__ import annotations
  from typing import Optional, Sequence, Union, Tuple
  from returnn.tensor import Tensor, Dim
@@ -12,15 +11,34 @@ import returnn.frontend as rf
  __all__ = ["moments", "LayerNorm", "BatchNorm", "normalize", "Normalize"]


- def moments(x: Tensor, axis: Union[Dim, Sequence[Dim]]) -> Tuple[Tensor, Tensor]:
+ def moments(
+ x: Tensor, axis: Union[Dim, Sequence[Dim]], *, use_mask: bool = True, correction: Union[int, float, Tensor] = 0
+ ) -> Tuple[Tensor, Tensor]:
  """
  :param x: input
- :param axis: the axis to be reduced, to calculate statistics over
- :return: mean, variance. it has the same shape as the input with the axis removed
+ :param axis: the axis (or axes) to be reduced, to calculate statistics over
+ :param use_mask: whether to use a mask for dynamic spatial dims in the reduction
+ :param correction:
+ The variance will be estimated by ``sum((x - mean)**2) / (n-correction)``
+ where ``n`` is the number of elements in the axis (or the axes)
+ (with ``use_mask=True``, taking masking into account, using :func:`num_elements_of_shape`).
+ The default ``correction=0`` will return the biased variance estimation.
+ ``correction=1`` is the `Bessel correction <https://en.wikipedia.org/wiki/Bessel%27s_correction>`__
+ and will return the unbiased variance estimation.
+ In PyTorch, there was an argument ``unbiased`` for this, but this changed recently to ``correction``
+ (`PyTorch issue #61492 <https://github.com/pytorch/pytorch/issues/61492>`__,
+ `Python Array API Standard
+ <https://data-apis.org/array-api/latest/API_specification/generated/array_api.var.html>`__).
+ In PyTorch, the default is ``correction=1``, which is the unbiased variance estimation,
+ while in most other frameworks, the default is ``correction=0``, which is the biased variance estimation.
+ :return: tuple (mean, variance). it has the same shape as the input with the axis removed
  """
  mean = rf.reduce_mean(x, axis=axis)
  # stop_gradient does not change the gradient here
- variance = rf.reduce_mean(rf.squared_difference(x, rf.stop_gradient(mean)), axis=axis)
+ variance = rf.reduce_mean(rf.squared_difference(x, rf.stop_gradient(mean)), axis=axis, use_mask=use_mask)
+ if isinstance(correction, Tensor) or correction != 0:
+ n = rf.num_elements_of_shape(axis, use_mask=use_mask)
+ variance *= n / (n - correction)
  return mean, variance
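A short usage sketch of the extended ``moments`` signature; ``x`` and ``time_dim`` are assumed to be an existing Tensor and Dim, only the signature and the rescaling rule come from this diff:

    mean, var = rf.moments(x, axis=time_dim)                          # biased estimate, correction=0 (default)
    mean, var_unbiased = rf.moments(x, axis=time_dim, correction=1)   # Bessel-corrected / unbiased estimate
    # Per the docstring above, with correction=1 the biased variance is rescaled by n / (n - 1),
    # where n = rf.num_elements_of_shape(time_dim), i.e. the masked element count.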
returnn/tensor/_tensor_extra.py
@@ -1262,13 +1262,15 @@ class _TensorMixin(_TensorMixinBase):
  raw_tensor = backend.transpose_raw(raw_tensor, [p for p in out_permutation if p >= 0])
  raw_tensor = backend.reshape_raw(raw_tensor, [raw_shape[p] if p >= 0 else 1 for p in out_permutation])
  out_dims = [
- dims[i]
- if p >= 0
- else Dim(
- kind=dims[i].kind,
- description="%s_bc_dim1" % (dims[i].description or "unnamed"),
- dimension=1,
- auto_generated=True,
+ (
+ dims[i]
+ if p >= 0
+ else Dim(
+ kind=dims[i].kind,
+ description="%s_bc_dim1" % (dims[i].description or "unnamed"),
+ dimension=1,
+ auto_generated=True,
+ )
  )
  for i, p in enumerate(out_permutation)
  ]
@@ -1300,6 +1302,7 @@ class _TensorMixin(_TensorMixinBase):
  """
  Simpler variant of :func:`copy_compatible_to` which just takes a list of dims,
  and uses simple Dim equality.
+ This adds broadcast dims for any missing dims.

  :param dims:
  :return: raw tensor from self with dims permuted and broadcast dims added
@@ -3342,9 +3345,9 @@ class _TensorMixin(_TensorMixinBase):
  max_match_priority = max(dim.match_priority for dim in self.dims)
  return max(
  matching,
- key=lambda ax: (max_match_priority + 1)
- if (self.dims[ax] is other_axis_dim_tag)
- else self.dims[ax].match_priority,
+ key=lambda ax: (
+ (max_match_priority + 1) if (self.dims[ax] is other_axis_dim_tag) else self.dims[ax].match_priority
+ ),
  )

  other_to_self_mapping = {}
1258
1258
  assert all(isinstance(dim, Dim) for dim in axis)
1259
1259
  raw_dims = [source.get_axis_from_description(dim) for dim in axis]
1260
1260
  res_dims = [dim for i, dim in enumerate(source.dims) if i not in raw_dims]
1261
- correction_factor: Optional[torch.Tensor] = None
1261
+ correction_factor: Union[None, float, Tensor] = None
1262
1262
  if use_mask and any(dim.need_masking() for dim in axis):
1263
1263
  source = source.copy()
1264
1264
  dtype = source.raw_tensor.dtype
@@ -1270,21 +1270,7 @@ class TorchBackend(Backend[torch.Tensor]):
1270
1270
  mask_value = 0
1271
1271
  elif mode == "mean":
1272
1272
  mask_value = 0
1273
- for dim in axis:
1274
- if dim.need_masking():
1275
- total_num_el = dim.get_dim_value_tensor()
1276
- actual_num_el = dim.get_size_tensor()
1277
- num_el_reduce_dims = [dim_ for dim_ in axis if dim_ in actual_num_el.dims]
1278
- if num_el_reduce_dims:
1279
- actual_num_el = rf.reduce_sum(actual_num_el, axis=num_el_reduce_dims)
1280
- for dim_ in num_el_reduce_dims:
1281
- total_num_el *= dim_.get_dim_value_tensor()
1282
- correction_factor_ = rf.cast(total_num_el, source.dtype) / rf.cast(actual_num_el, source.dtype)
1283
- correction_factor__ = correction_factor_.copy_compatible_to_dims_raw(res_dims)
1284
- if correction_factor is None:
1285
- correction_factor = correction_factor__
1286
- else:
1287
- correction_factor *= correction_factor__
1273
+ correction_factor = rf.masked_fraction_of_shape(axis, inverse=True)
1288
1274
  else:
1289
1275
  raise NotImplementedError(f"reduce_{mode} not implemented with masking on tensor {source!r}.")
1290
1276
  for dim in axis:
@@ -1302,7 +1288,11 @@ class TorchBackend(Backend[torch.Tensor]):
1302
1288
  else:
1303
1289
  raw_result = func(source.raw_tensor, dim=raw_dims)
1304
1290
  if correction_factor is not None:
1305
- raw_result *= correction_factor.to(raw_result.device)
1291
+ raw_result *= (
1292
+ correction_factor.copy_compatible_to_dims_raw(res_dims).to(raw_result.device)
1293
+ if isinstance(correction_factor, Tensor)
1294
+ else correction_factor
1295
+ )
1306
1296
  res = Tensor(
1307
1297
  name=f"reduce_{mode}",
1308
1298
  raw_tensor=raw_result,
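The hand-rolled correction loop that this hunk removes is replaced by ``rf.masked_fraction_of_shape(axis, inverse=True)``, which keeps the same arithmetic for the masked mean (padded positions are filled with ``mask_value = 0``, the reduction runs over the padded size, and the result is rescaled). A plain-number sketch with assumed values:

    # One sequence of length 3, padded to length 5 with zeros: [1, 2, 3, 0, 0].
    padded_mean = (1 + 2 + 3 + 0 + 0) / 5            # raw reduction over the padded axis -> 1.2
    correction_factor = 5 / 3                         # masked_fraction_of_shape(axis, inverse=True) = padded / masked
    masked_mean = padded_mean * correction_factor     # 2.0, the mean over the real (unpadded) elements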
returnn.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: returnn
- Version: 1.20240610.120719
+ Version: 1.20240613.164354
  Summary: The RWTH extensible training framework for universal recurrent neural networks
  Home-page: https://github.com/rwth-i6/returnn/
  Author: Albert Zeyer
returnn.egg-info/SOURCES.txt
@@ -99,7 +99,7 @@ returnn/datasets/basic.py
  returnn/datasets/bundle_file.py
  returnn/datasets/cached.py
  returnn/datasets/cached2.py
- returnn/datasets/concat_files.py
+ returnn/datasets/distrib_files.py
  returnn/datasets/generating.py
  returnn/datasets/hdf.py
  returnn/datasets/lm.py
tests/test_Dataset.py
@@ -623,13 +623,13 @@ def test_MapDatasetWrapper():
  assert res.features["data"].shape == (5, 3)


- def test_ConcatFilesDataset_get_files_per_sub_epochs():
- from returnn.datasets.concat_files import ConcatFilesDataset
+ def test_DistributeFilesDataset_get_files_per_sub_epochs():
+ from returnn.datasets.distrib_files import DistributeFilesDataset

  def _test(sizes: List[int], partition_epoch: int, expected: List[List[int]]):
  files = [f"file-{i}" for i in range(len(sizes))]
  file_sizes = {f: s for f, s in zip(files, sizes)}
- res = ConcatFilesDataset._get_files_per_sub_epochs(
+ res = DistributeFilesDataset._get_files_per_sub_epochs(
  partition_epoch=partition_epoch, file_sizes=file_sizes, files_order=files
  )
  assert all(res) and len(res) == partition_epoch
@@ -643,8 +643,8 @@ def test_ConcatFilesDataset_get_files_per_sub_epochs():
  _test([5, 5] + [10] * 7, 5, [[5, 5, 10], [10, 10], [10, 10], [10], [10]])


- def test_ConcatFilesDataset():
- from returnn.datasets.concat_files import ConcatFilesDataset
+ def test_DistributeFilesDataset():
+ from returnn.datasets.distrib_files import DistributeFilesDataset
  from test_HDFDataset import generate_hdf_from_other

  # Create a few HDF files such that we can easily verify the data later.
@@ -686,7 +686,7 @@ def test_ConcatFilesDataset():
  total_num_seqs += 1
  assert total_num_seqs == num_hdf_files * num_seqs

- # Test to load via ConcatFilesDataset.
+ # Test to load via DistributeFilesDataset.

  def _get_sub_epoch_dataset(files_subepoch: List[str]) -> Dict[str, Any]:
  return {"class": "HDFDataset", "files": files_subepoch, "seq_ordering": "default"}
@@ -695,13 +695,13 @@ def test_ConcatFilesDataset():
  assert num_hdf_files % partition_epoch == 0 # just for easier testing here
  concat_dataset = init_dataset(
  {
- "class": "ConcatFilesDataset",
+ "class": "DistributeFilesDataset",
  "files": hdf_files,
  "get_sub_epoch_dataset": _get_sub_epoch_dataset,
  "partition_epoch": partition_epoch,
  }
  )
- assert isinstance(concat_dataset, ConcatFilesDataset)
+ assert isinstance(concat_dataset, DistributeFilesDataset)
  assert concat_dataset.get_data_keys() == ["classes"]
  num_hdfs_per_part = num_hdf_files // partition_epoch
  global_seq_idx = 0
returnn-1.20240610.120719/_setup_info_generated.py
@@ -1,2 +0,0 @@
- version = '1.20240610.120719'
- long_version = '1.20240610.120719+git.cc96aee'