returnn 1.20230418.120646.tar.gz → 1.20230418.124036.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (383)
  1. {returnn-1.20230418.120646/returnn.egg-info → returnn-1.20230418.124036}/PKG-INFO +1 -1
  2. returnn-1.20230418.124036/_setup_info_generated.py +2 -0
  3. returnn-1.20230418.124036/returnn/frontend/encoder/__init__.py +3 -0
  4. returnn-1.20230418.124036/returnn/frontend/encoder/base.py +71 -0
  5. returnn-1.20230418.124036/returnn/frontend/encoder/conformer.py +368 -0
  6. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/math_.py +26 -1
  7. {returnn-1.20230418.120646 → returnn-1.20230418.124036/returnn.egg-info}/PKG-INFO +1 -1
  8. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn.egg-info/SOURCES.txt +3 -0
  9. returnn-1.20230418.120646/_setup_info_generated.py +0 -2
  10. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/.editorconfig +0 -0
  11. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/.gitignore +0 -0
  12. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/.gitmodules +0 -0
  13. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/.kateconfig +0 -0
  14. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/CHANGELOG.md +0 -0
  15. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/CODEOWNERS +0 -0
  16. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/CONTRIBUTING.md +0 -0
  17. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/LICENSE +0 -0
  18. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/MANIFEST.in +0 -0
  19. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/README.rst +0 -0
  20. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/__init__.py +0 -0
  21. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/12AX.cluster_map +0 -0
  22. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/_setup_returnn_env.py +0 -0
  23. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-fwd.config +0 -0
  24. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-horovod-mpi.py +0 -0
  25. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-horovod-mpi.py.sh +0 -0
  26. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-horovod-mpi.sh +0 -0
  27. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-hyper-param-tuning.config +0 -0
  28. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-iter-dataset.py +0 -0
  29. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-list-devices.py +0 -0
  30. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-lua-torch-layer.config +0 -0
  31. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-pretrain.config +0 -0
  32. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-record-and-push-to-webserver.py +0 -0
  33. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-returnn-as-framework.py +0 -0
  34. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-rf.config +0 -0
  35. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-rhn-enwik8.config +0 -0
  36. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-sprint-interface.py +0 -0
  37. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-att-copy.config +0 -0
  38. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-attention.config +0 -0
  39. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-chunking-blstm.12ax.config +0 -0
  40. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-contribrnn-lstm.12ax.config +0 -0
  41. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-enc-dec.config +0 -0
  42. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-hard-att-copy.config +0 -0
  43. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-lstm-benchmark.py +0 -0
  44. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-maxgradnorm-lstm.12ax.config +0 -0
  45. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-native-lstm-lowmem.12ax.config +0 -0
  46. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-native-lstm.12ax.config +0 -0
  47. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-native-lstm2.12ax.config +0 -0
  48. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-native-lstm2.12ax.tuned.config +0 -0
  49. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-neural-transducer.12ax.config +0 -0
  50. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-rec-explicit-lstm.config +0 -0
  51. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-rec-explicit-rnn.config +0 -0
  52. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-rec-self-att.config +0 -0
  53. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-search-compiled-graph.py +0 -0
  54. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-tf-vanilla-lstm.12ax.config +0 -0
  55. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-timit-lstm-ctc.config +0 -0
  56. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-torch.config +0 -0
  57. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo-upd-mult-model.lstm.12ax.config +0 -0
  58. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/demo.sh +0 -0
  59. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/IAM_lines/a01-000u-00.png +0 -0
  60. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/IAM_lines/a01-007-04.png +0 -0
  61. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/IAM_lines/a01-007-06.png +0 -0
  62. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/README.txt +0 -0
  63. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/chars.txt +0 -0
  64. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/config_demo +0 -0
  65. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/config_fwd +0 -0
  66. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/config_real +0 -0
  67. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/create_IAM_dataset.py +0 -0
  68. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/decode.py +0 -0
  69. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/features/raw/demo.h5 +0 -0
  70. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/go.sh +0 -0
  71. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/lines.txt +0 -0
  72. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/split/eval.txt +0 -0
  73. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/split/train.txt +0 -0
  74. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/IAM/split/valid.txt +0 -0
  75. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/README.md +0 -0
  76. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/artificial/create_test_h5.py +0 -0
  77. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/artificial/forwardconfig +0 -0
  78. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/artificial/go.sh +0 -0
  79. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/artificial/trainconfig +0 -0
  80. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/artificial_rgb/create_test_h5.py +0 -0
  81. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/artificial_rgb/forwardconfig +0 -0
  82. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/artificial_rgb/go.sh +0 -0
  83. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/demos/mdlstm/artificial_rgb/trainconfig +0 -0
  84. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/pyproject.toml +0 -0
  85. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/requirements.txt +0 -0
  86. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/__init__.py +0 -0
  87. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/__main__.py +0 -0
  88. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/__old_mod_loader__.py +0 -0
  89. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/__setup__.py +0 -0
  90. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/config.py +0 -0
  91. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/__init__.py +0 -0
  92. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/audio.py +0 -0
  93. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/basic.py +0 -0
  94. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/bundle_file.py +0 -0
  95. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/cached.py +0 -0
  96. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/cached2.py +0 -0
  97. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/generating.py +0 -0
  98. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/hdf.py +0 -0
  99. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/lm.py +0 -0
  100. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/map.py +0 -0
  101. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/meta.py +0 -0
  102. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/multi_proc.py +0 -0
  103. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/normalization_data.py +0 -0
  104. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/numpy_dump.py +0 -0
  105. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/raw_wav.py +0 -0
  106. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/sprint.py +0 -0
  107. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/stereo.py +0 -0
  108. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/util/__init__.py +0 -0
  109. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/util/feature_extraction.py +0 -0
  110. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/datasets/util/vocabulary.py +0 -0
  111. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/engine/__init__.py +0 -0
  112. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/engine/base.py +0 -0
  113. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/engine/batch.py +0 -0
  114. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/__init__.py +0 -0
  115. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/__main__.py +0 -0
  116. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/.git +0 -0
  117. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/.gitignore +0 -0
  118. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/LICENSE +0 -0
  119. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/README.md +0 -0
  120. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/aligner.gif +0 -0
  121. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/check.png +0 -0
  122. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/core.cu +0 -0
  123. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/core.h +0 -0
  124. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/core_cpu.cpp +0 -0
  125. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/LICENSE +0 -0
  126. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/MANIFEST.in +0 -0
  127. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/README.md +0 -0
  128. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/binding.cpp +0 -0
  129. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.cu +0 -0
  130. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.h +0 -0
  131. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/requirements.txt +0 -0
  132. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/setup.py +0 -0
  133. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/__init__.py +0 -0
  134. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/test.py +0 -0
  135. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/ref_rna.py +0 -0
  136. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/setup.py +0 -0
  137. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op.cc +0 -0
  138. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op_kernel_tmpl.h +0 -0
  139. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/warp_rna/__init__.py +0 -0
  140. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/WarpRna/warp-rna/test.cpp +0 -0
  141. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/__init__.py +0 -0
  142. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/graph_editor/README.md +0 -0
  143. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/graph_editor/__init__.py +0 -0
  144. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/graph_editor/edit.py +0 -0
  145. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/graph_editor/reroute.py +0 -0
  146. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/graph_editor/select.py +0 -0
  147. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/graph_editor/subgraph.py +0 -0
  148. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/graph_editor/transform.py +0 -0
  149. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/extern/graph_editor/util.py +0 -0
  150. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/__init__.py +0 -0
  151. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/_backend.py +0 -0
  152. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/_numpy_backend.py +0 -0
  153. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/_utils.py +0 -0
  154. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/array_.py +0 -0
  155. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/attention.py +0 -0
  156. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/cond.py +0 -0
  157. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/const.py +0 -0
  158. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/container.py +0 -0
  159. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/control_flow_ctx.py +0 -0
  160. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/conv.py +0 -0
  161. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/dims.py +0 -0
  162. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/dropout.py +0 -0
  163. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/dtype.py +0 -0
  164. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/gradient.py +0 -0
  165. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/init.py +0 -0
  166. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/linear.py +0 -0
  167. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/loop.py +0 -0
  168. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/loss.py +0 -0
  169. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/matmul.py +0 -0
  170. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/module.py +0 -0
  171. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/normalization.py +0 -0
  172. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/parameter.py +0 -0
  173. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/rand.py +0 -0
  174. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/reduce.py +0 -0
  175. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/run_ctx.py +0 -0
  176. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/state.py +0 -0
  177. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/types.py +0 -0
  178. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/import_/__init__.py +0 -0
  179. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/import_/common.py +0 -0
  180. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/import_/git.py +0 -0
  181. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/import_/import_.py +0 -0
  182. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/learning_rate_control.py +0 -0
  183. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/log.py +0 -0
  184. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/native_op.cpp +0 -0
  185. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/native_op.py +0 -0
  186. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/pretrain.py +0 -0
  187. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/sprint/__init__.py +0 -0
  188. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/sprint/cache.py +0 -0
  189. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/sprint/control.py +0 -0
  190. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/sprint/error_signals.py +0 -0
  191. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/sprint/extern_interface.py +0 -0
  192. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/sprint/interface.py +0 -0
  193. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/README.md +0 -0
  194. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/__init__.py +0 -0
  195. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/_dim_extra.py +0 -0
  196. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/_tensor_extra.py +0 -0
  197. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/_tensor_mixin_base.py +0 -0
  198. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/_tensor_op_overloads.py +0 -0
  199. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/control_flow_ctx.py +0 -0
  200. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/dim.py +0 -0
  201. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/marked_dim.py +0 -0
  202. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/tensor.py +0 -0
  203. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tensor/tensor_dict.py +0 -0
  204. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/__init__.py +0 -0
  205. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/compat.py +0 -0
  206. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/data_pipeline.py +0 -0
  207. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/distributed.py +0 -0
  208. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/engine.py +0 -0
  209. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_layers/__init__.py +0 -0
  210. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_layers/_backend.py +0 -0
  211. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_layers/_utils.py +0 -0
  212. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_layers/config_entry_points.py +0 -0
  213. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_layers/debug_eager_mode.py +0 -0
  214. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_layers/dims.py +0 -0
  215. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_layers/layer.py +0 -0
  216. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_layers/make_layer.py +0 -0
  217. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_layers/prev_tensor_ref.py +0 -0
  218. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_low_level/__init__.py +0 -0
  219. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/frontend_low_level/_backend.py +0 -0
  220. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/horovod.py +0 -0
  221. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/hyper_param_tuning.py +0 -0
  222. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/layers/__init__.py +0 -0
  223. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/layers/base.py +0 -0
  224. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/layers/basic.py +0 -0
  225. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/layers/rec.py +0 -0
  226. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/layers/segmental_model.py +0 -0
  227. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/layers/signal_processing.py +0 -0
  228. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/native_op.py +0 -0
  229. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/network.py +0 -0
  230. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/sprint.py +0 -0
  231. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/updater.py +0 -0
  232. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/util/__init__.py +0 -0
  233. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/util/basic.py +0 -0
  234. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/util/data.py +0 -0
  235. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/util/ken_lm.py +0 -0
  236. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/tf/util/open_fst.py +0 -0
  237. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/README.md +0 -0
  238. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/__init__.py +0 -0
  239. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/data/__init__.py +0 -0
  240. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/data/pipeline.py +0 -0
  241. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/data/returnn_dataset_wrapper.py +0 -0
  242. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/data/tensor_utils.py +0 -0
  243. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/engine.py +0 -0
  244. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/frontend/__init__.py +0 -0
  245. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/frontend/_backend.py +0 -0
  246. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/frontend/_rand.py +0 -0
  247. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/frontend/bridge.py +0 -0
  248. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/functional/README.md +0 -0
  249. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/functional/__init__.py +0 -0
  250. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/torch/updater.py +0 -0
  251. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/__init__.py +0 -0
  252. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/basic.py +0 -0
  253. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/better_exchook.py +0 -0
  254. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/bpe.py +0 -0
  255. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/debug.py +0 -0
  256. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/debug_helpers.py +0 -0
  257. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/fsa.py +0 -0
  258. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/literal_py_to_pickle.py +0 -0
  259. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/pprint.py +0 -0
  260. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/py-to-pickle.cpp +0 -0
  261. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/py_compat.py +0 -0
  262. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/sig_proc.py +0 -0
  263. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/util/task_system.py +0 -0
  264. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn.egg-info/dependency_links.txt +0 -0
  265. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn.egg-info/top_level.txt +0 -0
  266. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/rnn.py +0 -0
  267. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/setup.cfg +0 -0
  268. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/setup.py +0 -0
  269. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/DummySprintExec.py +0 -0
  270. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm-inspection-profile.xml +0 -0
  271. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/.gitignore +0 -0
  272. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/.name +0 -0
  273. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/codeStyleSettings.xml +0 -0
  274. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/codeStyles/Project.xml +0 -0
  275. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/codeStyles/codeStyleConfig.xml +0 -0
  276. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/inspectionProfiles/Project_Default.xml +0 -0
  277. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/inspectionProfiles/profiles_settings.xml +0 -0
  278. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/misc.xml +0 -0
  279. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/modules.xml +0 -0
  280. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/returnn.iml +0 -0
  281. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/PyCharm.idea/scopes/scope_settings.xml +0 -0
  282. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/_set_num_threads1.py +0 -0
  283. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/_setup_returnn_env.py +0 -0
  284. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/_setup_test_env.py +0 -0
  285. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/bpe-unicode-demo.codes +0 -0
  286. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/bpe-unicode-demo.vocab +0 -0
  287. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/lexicon_opt.fst +0 -0
  288. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/lexicon_opt.isyms +0 -0
  289. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/lexicon_opt.jpg +0 -0
  290. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/lexicon_opt.osyms +0 -0
  291. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/lint_common.py +0 -0
  292. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/pycharm-inspect.py +0 -0
  293. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/pylint.py +0 -0
  294. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/returnn-as-framework.py +0 -0
  295. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/rf_utils.py +0 -0
  296. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/spelling.dic +0 -0
  297. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_Config.py +0 -0
  298. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_Dataset.py +0 -0
  299. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_Fsa.py +0 -0
  300. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_GeneratingDataset.py +0 -0
  301. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_HDFDataset.py +0 -0
  302. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_LearningRateControl.py +0 -0
  303. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_Log.py +0 -0
  304. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_MultiProcDataset.py +0 -0
  305. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_PTDataset.py +0 -0
  306. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_Pretrain.py +0 -0
  307. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_ResNet.py +0 -0
  308. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_SprintDataset.py +0 -0
  309. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_SprintInterface.py +0 -0
  310. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TFEngine.py +0 -0
  311. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TFNativeOp.py +0 -0
  312. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TFNetworkLayer.py +0 -0
  313. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TFNetworkRecLayer.py +0 -0
  314. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TFNetworkSigProcLayer.py +0 -0
  315. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TFUpdater.py +0 -0
  316. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TFUtil.py +0 -0
  317. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TF_determinism.py +0 -0
  318. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TaskSystem.py +0 -0
  319. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TaskSystem_SharedMem.py +0 -0
  320. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_TranslationDataset.py +0 -0
  321. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_Util.py +0 -0
  322. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_demos.py +0 -0
  323. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_fork_exec.py +0 -0
  324. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_hdf_dump.py +0 -0
  325. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_rf_array.py +0 -0
  326. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_rf_attention.py +0 -0
  327. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_rf_base.py +0 -0
  328. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_rf_container.py +0 -0
  329. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_rf_conv.py +0 -0
  330. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_rf_math.py +0 -0
  331. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_rf_normalization.py +0 -0
  332. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_tensor.py +0 -0
  333. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_tools.py +0 -0
  334. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_torch_frontend.py +0 -0
  335. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tests/test_torch_internal_frontend.py +0 -0
  336. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/_setup_returnn_env.py +0 -0
  337. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/analyze-dataset-batches.py +0 -0
  338. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/bliss-collect-seq-lens.py +0 -0
  339. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/bliss-dump-text.py +0 -0
  340. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/bliss-get-segment-names.py +0 -0
  341. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/bliss-to-ogg-zip.py +0 -0
  342. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/bpe-create-lexicon.py +0 -0
  343. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/calculate-word-error-rate.py +0 -0
  344. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/cleanup-old-models.py +0 -0
  345. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/collect-orth-symbols.py +0 -0
  346. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/collect-words.py +0 -0
  347. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/compile_native_op.py +0 -0
  348. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/compile_tf_graph.py +0 -0
  349. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/debug-dump-search-scores.py +0 -0
  350. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/debug-plot-search-scores.py +0 -0
  351. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/dump-dataset-raw-strings.py +0 -0
  352. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/dump-dataset.py +0 -0
  353. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/dump-forward-stats.py +0 -0
  354. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/dump-forward.py +0 -0
  355. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/dump-network-json.py +0 -0
  356. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/dump-pickle.py +0 -0
  357. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/extract_state_tying_from_dataset.py +0 -0
  358. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/get-attention-weights.py +0 -0
  359. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/get-best-model-epoch.py +0 -0
  360. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/hdf_dump.py +0 -0
  361. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/hdf_dump_translation_dataset.py +0 -0
  362. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/import-blocks-mt-model.py +0 -0
  363. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/import-t2t-mt-model.py +0 -0
  364. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/.gitignore +0 -0
  365. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/Makefile +0 -0
  366. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/README.md +0 -0
  367. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/example/README.md +0 -0
  368. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/example/libs_list +0 -0
  369. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.config +0 -0
  370. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.keep_over_epoch.lstm2.config +0 -0
  371. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/example/rescore_lattice.sh +0 -0
  372. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/example/state_vars_list +0 -0
  373. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/example/tensor_names_list +0 -0
  374. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/file.h +0 -0
  375. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/htklatticerescorer.cc +0 -0
  376. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/htklatticerescorer.h +0 -0
  377. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/main.cc +0 -0
  378. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/rescorer.h +0 -0
  379. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/vocabulary.cc +0 -0
  380. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/lattice_rescorer/vocabulary.h +0 -0
  381. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/tf_avg_checkpoints.py +0 -0
  382. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/tf_inspect_checkpoint.py +0 -0
  383. {returnn-1.20230418.120646 → returnn-1.20230418.124036}/tools/tf_inspect_summary_log.py +0 -0

{returnn-1.20230418.120646/returnn.egg-info → returnn-1.20230418.124036}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: returnn
- Version: 1.20230418.120646
+ Version: 1.20230418.124036
  Summary: The RWTH extensible training framework for universal recurrent neural networks
  Home-page: https://github.com/rwth-i6/returnn/
  Author: Albert Zeyer

returnn-1.20230418.124036/_setup_info_generated.py (new)
@@ -0,0 +1,2 @@
+ version = '1.20230418.124036'
+ long_version = '1.20230418.124036+git.2ec964a'

returnn-1.20230418.124036/returnn/frontend/encoder/__init__.py (new)
@@ -0,0 +1,3 @@
+ """
+ Encoders
+ """

returnn-1.20230418.124036/returnn/frontend/encoder/base.py (new)
@@ -0,0 +1,71 @@
+ """
+ Base interface for any kind of encoder.
+
+ This is basically any generic function x -> y.
+
+ Note that in practice, when designing some model,
+ this interface is often not even needed,
+ because you only care about the final encoded vectors,
+ and not how you got there.
+ Automatic differentiation will automatically
+ also train the encoder.
+ So, for most purposes, e.g. for a decoder (see :mod:`..decoder.base`),
+ you only care about some encoded vector of type :class:`Tensor`.
+ """
+
+ from __future__ import annotations
+ from typing import Tuple, Union
+ from abc import ABC
+ from returnn.tensor import Tensor, Dim
+ import returnn.frontend as rf
+
+
+ class IEncoder(rf.Module, ABC):
+     """
+     Generic encoder interface
+
+     The encoder is a function x -> y.
+     The input can potentially be sparse or dense.
+     The output is dense with feature dim `out_dim`.
+     """
+
+     out_dim: Dim
+
+     def __call__(self, source: Tensor) -> Tensor:
+         """
+         Encode the input
+         """
+         raise NotImplementedError
+
+
+ class ISeqFramewiseEncoder(rf.Module, ABC):
+     """
+     This specializes IEncoder in that it operates on a sequence.
+     The output sequence length here is the same as the input.
+     """
+
+     out_dim: Dim
+
+     def __call__(self, source: Tensor, *, spatial_dim: Dim) -> Tensor:
+         raise NotImplementedError
+
+
+ class ISeqDownsamplingEncoder(rf.Module, ABC):
+     """
+     This is more specific than IEncoder in that it operates on a sequence.
+     The output sequence length here is shorter than the input.
+
+     This is a common scenario for speech recognition
+     where the input might be at 10ms/frame
+     and the output might cover 30ms/frame or 60ms/frame or so.
+     """
+
+     out_dim: Dim
+     # In most cases (pooling, conv), the output sequence length will be ceildiv(input_seq_len, factor)
+     # and factor is an integer.
+     # However, this is not a hard condition.
+     # The downsampling factor only describes the linear factor in the limit.
+     downsample_factor: Union[int, float]
+
+     def __call__(self, source: Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
+         raise NotImplementedError
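
To make the interface contract concrete, a minimal hypothetical implementation sketch (LinearEncoder is not part of the package; it only illustrates the `out_dim` attribute plus `__call__` contract of IEncoder, using rf.Linear from the frontend):

    from returnn.tensor import Tensor, Dim
    import returnn.frontend as rf
    from returnn.frontend.encoder.base import IEncoder


    class LinearEncoder(IEncoder):
        """Hypothetical minimal encoder: a single linear projection x -> y."""

        def __init__(self, in_dim: Dim, out_dim: Dim):
            super().__init__()
            self.out_dim = out_dim  # fulfills the IEncoder contract
            self.proj = rf.Linear(in_dim, out_dim)

        def __call__(self, source: Tensor) -> Tensor:
            # Any differentiable function works here; as the module docstring
            # notes, autodiff trains the encoder through whatever it computes.
            return self.proj(source)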

returnn-1.20230418.124036/returnn/frontend/encoder/conformer.py (new)
@@ -0,0 +1,368 @@
+ """
+ Conformer model, variant of Transformer with additional convolution, introduced for speech recognition.
+ Ref: https://arxiv.org/abs/2005.08100
+
+ About details of the specific implementation and other implementations, see:
+ https://github.com/rwth-i6/returnn_common/issues/233
+ """
+
+ from __future__ import annotations
+ from typing import Optional, Union, Any, Tuple, List, Dict, Callable
+ import copy as _copy
+ from returnn.tensor import Tensor, Dim
+ import returnn.frontend as rf
+ from returnn.util.basic import NotSpecified
+ from .base import ISeqDownsamplingEncoder
+
+
+ class ConformerPositionwiseFeedForward(rf.Module):
+     """
+     Conformer position-wise feedforward neural network layer
+     FF -> Activation -> Dropout -> FF
+     """
+
+     def __init__(self, out_dim: Dim, *, ff_dim: Dim, dropout: float, activation: Callable[[Tensor], Tensor]):
+         """
+         :param out_dim: output feature dimension
+         :param ff_dim: dimension of the feed-forward layers
+         :param dropout: dropout value
+         :param activation: activation function
+         """
+         super().__init__()
+
+         self.out_dim = out_dim
+         self.dropout = dropout
+         self.activation = activation
+
+         self.linear_ff = rf.Linear(out_dim, ff_dim)
+         self.linear_out = rf.Linear(ff_dim, out_dim)
+
+     def __call__(self, inp: Tensor) -> Tensor:
+         """forward"""
+         x_ff1 = self.linear_ff(inp)
+         x_act = self.activation(x_ff1)
+         x_drop = rf.dropout(x_act, axis=self.linear_ff.out_dim, drop_prob=self.dropout)
+         x_ff2 = self.linear_out(x_drop)
+         return x_ff2
+
+
+ class ConformerConvBlock(rf.Module):
+     """
+     Conformer convolution block
+     FF -> GLU -> depthwise conv -> BN -> Swish -> FF
+     """
+
+     def __init__(self, out_dim: Dim, *, kernel_size: int, norm: Union[rf.BatchNorm, Any]):
+         """
+         :param out_dim: output feature dimension
+         :param kernel_size: kernel size of depthwise convolution
+         :param norm: Batch norm originally
+         """
+         super().__init__()
+         self.out_dim = out_dim
+
+         self.positionwise_conv1 = rf.Linear(out_dim, 2 * out_dim)
+         self.depthwise_conv = rf.Conv1d(
+             out_dim, out_dim, filter_size=kernel_size, groups=out_dim.dimension, padding="same"
+         )
+         self.positionwise_conv2 = rf.Linear(out_dim, out_dim)
+         self.norm = norm
+
+     def __call__(self, inp: Tensor, *, spatial_dim: Dim) -> Tensor:
+         """forward"""
+         x_conv1 = self.positionwise_conv1(inp)
+         x_act, _ = rf.gating(x_conv1)
+         x_depthwise_conv, _ = self.depthwise_conv(x_act, in_spatial_dim=spatial_dim)
+         x_normed = self.norm(x_depthwise_conv)
+         x_swish = rf.swish(x_normed)
+         x_conv2 = self.positionwise_conv2(x_swish)
+         return x_conv2
+
+
+ class ConformerConvSubsample(ISeqDownsamplingEncoder):
+     """
+     Conv 2D block with optional max-pooling or striding.
+
+     References:
+
+         https://github.com/espnet/espnet/blob/4138010fb66ad27a43e8bee48a4932829a0847ae/espnet/nets/pytorch_backend/transformer/subsampling.py#L162
+         https://github.com/rwth-i6/returnn-experiments/blob/5852e21f44d5450909dee29d89020f1b8d36aa68/2022-swb-conformer-hybrid-sat/table_1_and_2/reduced_dim.config#L226
+         (actually the latter is different...)
+
+     To get the ESPnet case, for example Conv2dSubsampling6, use these options
+     (out_dim is the model dim of the encoder):
+
+         out_dims=[out_dim, out_dim],  # ESPnet standard, but this might be too large
+         filter_sizes=[3, 5],
+         strides=[2, 3],
+         padding="valid",
+     """
+
+     def __init__(
+         self,
+         in_dim: Dim,
+         *,
+         out_dims: List[Dim],
+         filter_sizes: List[Union[int, Tuple[int, int]]],
+         strides: Optional[List[Union[int, Tuple[int, int]]]] = None,
+         pool_sizes: Optional[List[Tuple[int, int]]] = None,
+         activation: Callable[[Tensor], Tensor] = rf.relu,
+         padding: str = "same",
+     ):
+         """
+         :param out_dims: the number of output channels. last element is the output feature dimension
+         :param filter_sizes: a list of filter sizes for the conv layer
+         :param pool_sizes: a list of pooling factors applied after conv layer
+         :param activation: the activation function
+         :param padding: 'same' or 'valid'
+         """
+         super().__init__()
+
+         self.pool_sizes = pool_sizes
+         self.activation = activation
+
+         self.conv_layers: rf.ModuleList[rf.Conv2d] = rf.ModuleList()
+         if strides is None:
+             strides = [1] * len(out_dims)
+         assert len(out_dims) == len(filter_sizes) == len(strides) > 0
+         self._dummy_in_dim = Dim(1, name="dummy-input-feature-dim")
+         self.in_dim = in_dim
+         prev_out_dim = self._dummy_in_dim
+         second_spatial_dim = in_dim
+         for i, (filter_size, stride, out_dim) in enumerate(zip(filter_sizes, strides, out_dims)):
+             conv = rf.Conv2d(prev_out_dim, out_dim, filter_size=filter_size, strides=stride, padding=padding)
+             self.conv_layers.append(conv)
+             (second_spatial_dim,) = rf.make_conv_out_spatial_dims(
+                 [second_spatial_dim], filter_size=conv.filter_size[1], strides=conv.strides[1], padding=padding
+             )
+             if self.pool_sizes and i < len(self.pool_sizes):
+                 (second_spatial_dim,) = rf.make_conv_out_spatial_dims(
+                     [second_spatial_dim],
+                     filter_size=self.pool_sizes[i][1],
+                     strides=self.pool_sizes[i][1],
+                     padding="same",
+                 )
+             prev_out_dim = out_dim
+         self._final_second_spatial_dim = second_spatial_dim
+         self.out_dim = second_spatial_dim * prev_out_dim
+
+     def __call__(self, source: Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
+         """forward"""
+         assert self.in_dim in source.dims_set
+         in_spatial_dims = [in_spatial_dim, self.in_dim]
+         in_dim = self._dummy_in_dim
+         x = rf.expand_dim(source, dim=in_dim)
+         for i, conv_layer in enumerate(self.conv_layers):
+             x, in_spatial_dims = conv_layer(x, in_spatial_dims=in_spatial_dims)
+             in_dim = conv_layer.out_dim
+             x = self.activation(x)
+             if self.pool_sizes and i < len(self.pool_sizes):
+                 x, in_spatial_dims = rf.pool2d(
+                     x, in_spatial_dims=in_spatial_dims, pool_size=self.pool_sizes[i], padding="same", mode="max"
+                 )
+         x, in_spatial_dims[-1] = rf.replace_dim(x, out_dim=self._final_second_spatial_dim, in_dim=in_spatial_dims[-1])
+         out, _ = rf.merge_dims(x, dims=[self._final_second_spatial_dim, in_dim])
+         return out, in_spatial_dims[0]
+
+
+ class ConformerEncoderLayer(rf.Module):
+     """
+     Represents a conformer block
+     """
+
+     def __init__(
+         self,
+         out_dim: Dim = Dim(512, name="conformer-enc-default-out-dim"),
+         *,
+         ff_dim: Dim = NotSpecified,
+         ff_activation: Callable[[Tensor], Tensor] = rf.swish,
+         dropout: float = 0.1,
+         conv_kernel_size: int = 32,
+         conv_norm: Union[rf.BatchNorm, type, Any] = NotSpecified,
+         conv_norm_opts: Optional[Dict[str, Any]] = None,
+         num_heads: int = 4,
+         self_att: Optional[Union[rf.RelPosSelfAttention, rf.Module, type, Any]] = None,
+         self_att_opts: Optional[Dict[str, Any]] = None,
+         att_dropout: float = 0.1,
+     ):
+         """
+         :param out_dim: the output feature dimension
+         :param ff_dim: the dimension of feed-forward layers. 2048 originally, or 4 times out_dim
+         :param ff_activation: activation function for feed-forward network
+         :param dropout: the dropout value for the FF block
+         :param conv_kernel_size: the kernel size of depthwise convolution in the conv block
+         :param conv_norm: used for the conv block. Batch norm originally
+         :param conv_norm_opts: for nn.BatchNorm or other conv_norm type.
+             In case of nn.BatchNorm, uses use_mask=False by default.
+             use_mask means whether to properly mask the spatial dim in batch norm.
+             Most existing implementations don't do this, except RETURNN.
+             It's faster when you don't do this.
+         :param num_heads: the number of attention heads
+         :param self_att: the self-attention layer. RelPosSelfAttention originally and default
+         :param self_att_opts: options for the self-attention layer, for :class:`nn.RelPosSelfAttention`
+         :param att_dropout: attention dropout value
+         """
+         super().__init__()
+
+         self.dropout = dropout
+         self.out_dim = out_dim
+
+         if ff_dim is None or ff_dim is NotSpecified:
+             ff_dim = 4 * out_dim
+         self.ffn1 = ConformerPositionwiseFeedForward(
+             out_dim=out_dim, ff_dim=ff_dim, dropout=dropout, activation=ff_activation
+         )
+         self.ffn1_layer_norm = rf.LayerNorm(out_dim)
+
+         self.ffn2 = ConformerPositionwiseFeedForward(
+             out_dim=out_dim, ff_dim=ff_dim, dropout=dropout, activation=ff_activation
+         )
+         self.ffn2_layer_norm = rf.LayerNorm(out_dim)
+
+         if conv_norm is NotSpecified or conv_norm is rf.BatchNorm:
+             conv_norm_opts = conv_norm_opts.copy() if conv_norm_opts else {}
+             conv_norm_opts.setdefault("use_mask", False)
+             conv_norm = rf.BatchNorm(out_dim, **conv_norm_opts)
+         elif isinstance(conv_norm, type):
+             conv_norm = conv_norm(out_dim, **(conv_norm_opts or {}))
+         self.conv_block = ConformerConvBlock(out_dim=out_dim, kernel_size=conv_kernel_size, norm=conv_norm)
+         self.conv_layer_norm = rf.LayerNorm(out_dim)
+
+         if self_att is None or isinstance(self_att, type):
+             self_att_opts_ = dict(
+                 in_dim=out_dim,
+                 proj_dim=out_dim,
+                 key_dim_total=out_dim,
+                 value_dim_total=out_dim,
+                 num_heads=num_heads,
+                 att_dropout=att_dropout,
+             )
+             if self_att_opts:
+                 self_att_opts_.update(self_att_opts)
+             if self_att is None:
+                 self.self_att = rf.RelPosSelfAttention(**self_att_opts_)
+             else:
+                 self.self_att = self_att(**self_att_opts_)
+         else:
+             self.self_att = self_att
+         self.self_att_layer_norm = rf.LayerNorm(out_dim)
+
+         self.final_layer_norm = rf.LayerNorm(out_dim)
+
+     def __call__(self, inp: Tensor, *, spatial_dim: Dim) -> Tensor:
+         """forward"""
+         # FFN
+         x_ffn1_ln = self.ffn1_layer_norm(inp)
+         x_ffn1 = self.ffn1(x_ffn1_ln)
+         x_ffn1_out = 0.5 * rf.dropout(x_ffn1, axis=self.out_dim, drop_prob=self.dropout) + inp
+
+         # MHSA
+         x_mhsa_ln = self.self_att_layer_norm(x_ffn1_out)
+         x_mhsa = self.self_att(x_mhsa_ln, axis=spatial_dim)
+         x_mhsa = rf.dropout(x_mhsa, axis=self.out_dim, drop_prob=self.dropout)
+         x_mhsa_out = x_mhsa + x_ffn1_out
+
+         # Conv
+         x_conv_ln = self.conv_layer_norm(x_mhsa_out)
+         x_conv = self.conv_block(x_conv_ln, spatial_dim=spatial_dim)
+         x_conv_out = rf.dropout(x_conv, axis=self.out_dim, drop_prob=self.dropout) + x_mhsa_out
+
+         # FFN
+         x_ffn2_ln = self.ffn2_layer_norm(x_conv_out)
+         x_ffn2 = self.ffn2(x_ffn2_ln)
+         x_ffn2_out = 0.5 * rf.dropout(x_ffn2, axis=self.out_dim, drop_prob=self.dropout) + x_conv_out
+
+         # last LN layer
+         return self.final_layer_norm(x_ffn2_out)
+
+
+ class ConformerEncoder(ISeqDownsamplingEncoder):
+     """
+     Represents Conformer encoder architecture
+     """
+
+     def __init__(
+         self,
+         in_dim: Dim,
+         out_dim: Dim = Dim(512, name="conformer-enc-default-out-dim"),
+         *,
+         num_layers: int,
+         input_layer: Union[ConformerConvSubsample, ISeqDownsamplingEncoder, rf.Module, Any],
+         input_dropout: float = 0.1,
+         ff_dim: Dim = NotSpecified,
+         ff_activation: Callable[[Tensor], Tensor] = rf.swish,
+         dropout: float = 0.1,
+         conv_kernel_size: int = 32,
+         conv_norm: Union[rf.BatchNorm, type, Any] = NotSpecified,
+         num_heads: int = 4,
+         att_dropout: float = 0.1,
+         encoder_layer: Optional[Union[ConformerEncoderLayer, rf.Module, type, Any]] = None,
+         encoder_layer_opts: Optional[Dict[str, Any]] = None,
+     ):
+         """
+         :param out_dim: the output feature dimension
+         :param num_layers: the number of encoder layers
+         :param input_layer: input/frontend/prenet with potential subsampling.
+             (x, in_spatial_dim) -> (y, out_spatial_dim)
+         :param input_dropout: applied after input_projection(input_layer(x))
+         :param ff_dim: the dimension of feed-forward layers. 2048 originally, or 4 times out_dim
+         :param ff_activation: activation function for feed-forward network
+         :param dropout: the dropout value for the FF block
+         :param conv_kernel_size: the kernel size of depthwise convolution in the conv block
+         :param conv_norm: used for the conv block. Batch norm originally
+         :param num_heads: the number of attention heads
+         :param att_dropout: attention dropout value
+         :param encoder_layer: an instance of :class:`ConformerEncoderLayer` or similar
+         :param encoder_layer_opts: options for the encoder layer
+         """
+         super().__init__()
+
+         self.in_dim = in_dim
+         self.out_dim = out_dim
+         self.dropout = dropout
+
+         # TODO once we figured out good defaults, we would create ConformerConvSubsample here when not given
+         self.input_layer = input_layer
+         self.input_projection = rf.Linear(
+             self.input_layer.out_dim if self.input_layer else self.in_dim, self.out_dim, with_bias=False
+         )
+         self.input_dropout = input_dropout
+
+         if not encoder_layer or isinstance(encoder_layer, type):
+             encoder_layer_opts_ = dict(
+                 out_dim=out_dim,
+                 ff_dim=ff_dim,
+                 ff_activation=ff_activation,
+                 dropout=dropout,
+                 conv_kernel_size=conv_kernel_size,
+                 conv_norm=conv_norm,
+                 num_heads=num_heads,
+                 att_dropout=att_dropout,
+             )
+             if encoder_layer_opts:
+                 encoder_layer_opts_.update(encoder_layer_opts)
+             if not encoder_layer:
+                 encoder_layer = ConformerEncoderLayer(**encoder_layer_opts_)
+             elif isinstance(encoder_layer, type):
+                 encoder_layer = encoder_layer(**encoder_layer_opts_)
+             else:
+                 raise TypeError(f"unexpected encoder_layer {encoder_layer!r}")
+
+         self.layers = rf.Sequential(_copy.deepcopy(encoder_layer) for _ in range(num_layers))
+
+     def __call__(
+         self,
+         source: Tensor,
+         *,
+         in_spatial_dim: Dim,
+         collected_outputs: Optional[Dict[str, Tensor]] = None,
+     ) -> Tuple[Tensor, Dim]:
+         """forward"""
+         if self.input_layer:
+             x_subsample, out_spatial_dim = self.input_layer(source, in_spatial_dim=in_spatial_dim)
+         else:
+             x_subsample, out_spatial_dim = source, in_spatial_dim
+         x_linear = self.input_projection(x_subsample)
+         x = rf.dropout(x_linear, axis=self.input_projection.out_dim, drop_prob=self.input_dropout)
+         x = self.layers(x, spatial_dim=out_spatial_dim, collected_outputs=collected_outputs)
+         return x, out_spatial_dim
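
A construction sketch for the classes above (the dim names and sizes below are illustrative assumptions, not package defaults, apart from the 512 model dim; the constructor signatures are taken from the diff):

    from returnn.tensor import Dim
    from returnn.frontend.encoder.conformer import ConformerEncoder, ConformerConvSubsample

    feat_dim = Dim(80, name="logmel")  # assumed input feature dim, e.g. log-mel filterbanks
    enc_dim = Dim(512, name="enc")  # matches the Conformer default model dim

    encoder = ConformerEncoder(
        feat_dim,
        enc_dim,
        num_layers=12,
        input_layer=ConformerConvSubsample(
            feat_dim,
            out_dims=[Dim(32, name="conv1"), Dim(64, name="conv2")],
            filter_sizes=[3, 3],
            pool_sizes=[(2, 2), (2, 2)],  # max-pool twice -> time downsampling factor 4
        ),
    )
    # encoder(source, in_spatial_dim=time_dim) returns the encoded sequence together
    # with the downsampled spatial dim, per the ISeqDownsamplingEncoder interface.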

{returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn/frontend/math_.py
@@ -4,9 +4,10 @@ Math ops

  from __future__ import annotations
  import typing
- from typing import Optional, Sequence, Union
+ from typing import Optional, Sequence, Union, Tuple
  import numpy
  from returnn.tensor import Tensor, Dim
+ import returnn.frontend as rf
  from .types import RawTensorTypes as _RawTensorTypes

  __all__ = [
@@ -53,8 +54,10 @@ __all__ = [
      "elu",
      "selu",
      "silu",
+     "swish",
      "softmax",
      "log_softmax",
+     "gating",
  ]


@@ -444,3 +447,25 @@ def log_softmax(a: Tensor, *, axis: Dim, use_mask: bool = True) -> Tensor:
      """log_softmax"""
      # noinspection PyProtectedMember
      return a._raw_backend.log_softmax(a, axis=axis, use_mask=use_mask)
+
+
+ def gating(
+     x: Tensor, *, axis: Optional[Dim] = None, gate_func=sigmoid, act_func=identity, out_dim: Optional[Dim] = None
+ ) -> Tuple[Tensor, Dim]:
+     """
+     Like in gated linear unit (GLU): https://arxiv.org/abs/1612.08083
+     GLU also refers to the linear transformation before the gating -- this is why this function is not called GLU.
+     GLU uses gate_func=sigmoid and act_func=identity (the defaults here).
+
+     There are other potential gating variants you might be interested in.
+     See for example: https://arxiv.org/abs/2002.05202, e.g. gate_func=gelu.
+     """
+     if axis is None:
+         assert x.feature_dim is not None, f"gating {x}: need tensor with feature dim set, or explicit `axis`"
+         axis = x.feature_dim
+     assert axis.is_static() and axis.dimension % 2 == 0, f"gating {x}: need static dim, and even, got {axis}"
+     if not out_dim:
+         out_dim = axis.div_left(2)
+
+     a, b = rf.split(x, axis=axis, out_dims=[out_dim, out_dim])
+     return act_func(a) * gate_func(b), out_dim
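
Usage sketch for the new gating function (illustrative only; rf.Linear, rf.gating, and Dim are from the package, while the dim sizes are assumptions):

    import returnn.frontend as rf
    from returnn.tensor import Dim

    in_dim = Dim(256, name="in")
    model_dim = Dim(512, name="model")

    # GLU as used by ConformerConvBlock above: project to a doubled (even)
    # feature dim, then split and gate, which halves the feature dim again.
    proj = rf.Linear(in_dim, 2 * model_dim)
    # x = proj(source)         # feature dim: 2 * model_dim
    # y, y_dim = rf.gating(x)  # identity(a) * sigmoid(b); y_dim == model_dim
    # A GELU-gated variant per the docstring's reference (assuming gelu is exported):
    # y, y_dim = rf.gating(x, gate_func=rf.gelu)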

{returnn-1.20230418.120646 → returnn-1.20230418.124036/returnn.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: returnn
- Version: 1.20230418.120646
+ Version: 1.20230418.124036
  Summary: The RWTH extensible training framework for universal recurrent neural networks
  Home-page: https://github.com/rwth-i6/returnn/
  Author: Albert Zeyer

{returnn-1.20230418.120646 → returnn-1.20230418.124036}/returnn.egg-info/SOURCES.txt
@@ -179,6 +179,9 @@ returnn/frontend/reduce.py
  returnn/frontend/run_ctx.py
  returnn/frontend/state.py
  returnn/frontend/types.py
+ returnn/frontend/encoder/__init__.py
+ returnn/frontend/encoder/base.py
+ returnn/frontend/encoder/conformer.py
  returnn/import_/__init__.py
  returnn/import_/common.py
  returnn/import_/git.py

returnn-1.20230418.120646/_setup_info_generated.py (deleted)
@@ -1,2 +0,0 @@
- version = '1.20230418.120646'
- long_version = '1.20230418.120646+git.2ae0c92'