returnn 1.20240205.110004.tar.gz → 1.20240206.450.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this release of returnn has been flagged as potentially problematic.
- {returnn-1.20240205.110004/returnn.egg-info → returnn-1.20240206.450}/PKG-INFO +1 -1
- returnn-1.20240206.450/_setup_info_generated.py +2 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/__main__.py +46 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/config.py +1 -1
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/generating.py +82 -1
- returnn-1.20240206.450/returnn/tensor/utils.py +237 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/frontend/_backend.py +11 -3
- {returnn-1.20240205.110004 → returnn-1.20240206.450/returnn.egg-info}/PKG-INFO +1 -1
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/rf_utils.py +1 -1
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_signal.py +9 -7
- returnn-1.20240205.110004/_setup_info_generated.py +0 -2
- returnn-1.20240205.110004/returnn/tensor/utils.py +0 -118
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/.editorconfig +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/.gitignore +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/.gitmodules +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/.kateconfig +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/CHANGELOG.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/CODEOWNERS +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/CONTRIBUTING.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/LICENSE +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/MANIFEST.in +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/README.rst +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/12AX.cluster_map +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/_setup_returnn_env.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-fwd.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-horovod-mpi.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-horovod-mpi.py.sh +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-horovod-mpi.sh +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-hyper-param-tuning.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-iter-dataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-list-devices.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-lua-torch-layer.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-pretrain.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-record-and-push-to-webserver.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-returnn-as-framework.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-rf-pt-benchmark.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-rf.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-rhn-enwik8.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-sprint-interface.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-att-copy.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-attention.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-chunking-blstm.12ax.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-contribrnn-lstm.12ax.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-enc-dec.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-hard-att-copy.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-lstm-benchmark.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-maxgradnorm-lstm.12ax.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-native-lstm-lowmem.12ax.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-native-lstm.12ax.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-native-lstm2.12ax.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-native-lstm2.12ax.tuned.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-neural-transducer.12ax.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-rec-explicit-lstm.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-rec-explicit-rnn.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-rec-self-att.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-search-compiled-graph.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-tf-vanilla-lstm.12ax.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-timit-lstm-ctc.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-torch.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo-upd-mult-model.lstm.12ax.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/demo.sh +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/IAM_lines/a01-000u-00.png +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/IAM_lines/a01-007-04.png +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/IAM_lines/a01-007-06.png +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/README.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/chars.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/config_demo +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/config_fwd +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/config_real +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/create_IAM_dataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/decode.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/features/raw/demo.h5 +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/go.sh +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/lines.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/split/eval.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/split/train.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/IAM/split/valid.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/artificial/create_test_h5.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/artificial/forwardconfig +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/artificial/go.sh +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/artificial/trainconfig +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/artificial_rgb/create_test_h5.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/artificial_rgb/forwardconfig +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/artificial_rgb/go.sh +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/demos/mdlstm/artificial_rgb/trainconfig +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/pyproject.toml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/requirements.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/__old_mod_loader__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/__setup__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/audio.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/basic.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/bundle_file.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/cached.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/cached2.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/hdf.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/lm.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/map.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/meta.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/multi_proc.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/normalization_data.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/numpy_dump.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/raw_wav.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/sprint.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/stereo.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/util/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/util/feature_extraction.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/util/strings.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/datasets/util/vocabulary.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/engine/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/engine/base.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/engine/batch.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/__main__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/.git +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/.gitignore +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/LICENSE +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/aligner.gif +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/check.png +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/core.cu +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/core.h +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/core_cpu.cpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/LICENSE +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/MANIFEST.in +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/binding.cpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.cu +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.h +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/requirements.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/setup.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/test.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/ref_rna.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/setup.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op.cc +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op_kernel_tmpl.h +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/warp_rna/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/WarpRna/warp-rna/test.cpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/graph_editor/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/graph_editor/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/graph_editor/edit.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/graph_editor/reroute.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/graph_editor/select.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/graph_editor/subgraph.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/graph_editor/transform.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/extern/graph_editor/util.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/forward_iface.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_backend.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_native/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_native/backend.cpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_native/backend.hpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_native/module.cpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_native/module.hpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_native/py_utils.hpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_native/tensor_ops.cpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_native/tensor_ops.hpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_numpy_backend.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_random_journal.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/_utils.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/array_.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/attention.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/audio/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/audio/mel.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/audio/specaugment.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/backend.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/cond.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/const.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/container.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/control_flow_ctx.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/conv.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/decoder/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/decoder/transformer.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/device.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/dims.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/dropout.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/dtype.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/encoder/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/encoder/base.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/encoder/conformer.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/gradient.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/graph.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/hooks.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/init.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/label_smoothing.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/linear.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/loop.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/loss.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/math_.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/matmul.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/module.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/normalization.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/parameter.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/rand.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/rec.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/reduce.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/run_ctx.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/signal.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/state.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/tensor_array.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/frontend/types.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/import_/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/import_/common.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/import_/git.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/import_/import_.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/learning_rate_control.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/log.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/native_op.cpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/native_op.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/pretrain.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/sprint/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/sprint/cache.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/sprint/control.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/sprint/error_signals.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/sprint/extern_interface.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/sprint/interface.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/_dim_extra.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/_tensor_extra.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/_tensor_mixin_base.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/_tensor_op_overloads.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/control_flow_ctx.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/dim.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/marked_dim.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/tensor.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tensor/tensor_dict.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/compat.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/data_pipeline.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/distributed.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/engine.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/_backend.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/_utils.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/cond.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/config_entry_points.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/debug_eager_mode.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/dims.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/layer.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/loop.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/make_layer.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/masked_computation.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/parameter_assign.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_layers/prev_tensor_ref.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_low_level/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/frontend_low_level/_backend.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/horovod.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/hyper_param_tuning.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/layers/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/layers/base.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/layers/basic.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/layers/rec.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/layers/segmental_model.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/layers/signal_processing.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/layers/variable.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/native_op.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/network.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/sprint.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/updater.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/util/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/util/basic.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/util/data.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/util/gradient_checkpoint.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/util/ken_lm.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/tf/util/open_fst.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/data/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/data/extern_data.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/data/pipeline.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/data/queued_data_iter.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/data/returnn_dataset_wrapper.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/data/tensor_utils.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/distributed.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/engine.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/frontend/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/frontend/_rand.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/frontend/bridge.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/frontend/raw_ops.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/updater.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/util/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/util/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/util/diagnose_gpu.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/torch/util/scaled_gradient.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/__init__.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/basic.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/better_exchook.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/bpe.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/debug.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/debug_helpers.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/fsa.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/literal_py_to_pickle.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/math.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/multi_proc_non_daemonic_spawn.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/native_code_compiler.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/pprint.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/py-to-pickle.cpp +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/py_compat.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/py_ext_mod_compiler.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/result_with_reason.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/sig_proc.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/task_system.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/train_proc_manager.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn/util/watch_memory.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn.egg-info/SOURCES.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn.egg-info/dependency_links.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/returnn.egg-info/top_level.txt +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/rnn.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/setup.cfg +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/setup.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/DummySprintExec.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm-inspection-profile.xml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/.gitignore +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/.name +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/codeStyleSettings.xml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/codeStyles/Project.xml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/codeStyles/codeStyleConfig.xml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/inspectionProfiles/Project_Default.xml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/inspectionProfiles/profiles_settings.xml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/misc.xml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/modules.xml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/returnn.iml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/PyCharm.idea/scopes/scope_settings.xml +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/_set_num_threads1.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/_setup_returnn_env.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/_setup_test_env.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/bpe-unicode-demo.codes +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/bpe-unicode-demo.vocab +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/lexicon_opt.fst +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/lexicon_opt.isyms +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/lexicon_opt.jpg +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/lexicon_opt.osyms +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/lint_common.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/pycharm-inspect.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/pylint.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/returnn-as-framework.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/spelling.dic +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_Config.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_Dataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_Fsa.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_GeneratingDataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_HDFDataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_LearningRateControl.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_Log.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_MultiProcDataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_Pretrain.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_ResNet.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_SprintDataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_SprintInterface.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TFEngine.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TFNativeOp.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TFNetworkLayer.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TFNetworkRecLayer.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TFNetworkSigProcLayer.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TFUpdater.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TFUtil.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TF_determinism.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TaskSystem.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TaskSystem_SharedMem.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_TranslationDataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_Util.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_demos.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_fork_exec.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_hdf_dump.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_array.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_attention.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_base.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_cond.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_const.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_container.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_conv.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_encoder_conformer.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_gradient.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_label_smoothing.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_loop.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_math.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_normalization.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_rec.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_rf_reduce.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_tensor.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_tools.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_torch_dataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_torch_engine.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_torch_frontend.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tests/test_torch_internal_frontend.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/_setup_returnn_env.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/analyze-dataset-batches.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/bliss-collect-seq-lens.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/bliss-dump-text.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/bliss-get-segment-names.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/bliss-to-ogg-zip.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/bpe-create-lexicon.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/calculate-word-error-rate.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/cleanup-old-models.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/collect-orth-symbols.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/collect-words.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/compile_native_op.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/compile_tf_graph.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/debug-dump-search-scores.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/debug-plot-search-scores.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/dump-dataset-raw-strings.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/dump-dataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/dump-forward-stats.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/dump-forward.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/dump-network-json.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/dump-pickle.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/extract_state_tying_from_dataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/get-attention-weights.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/get-best-model-epoch.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/hdf_dump.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/hdf_dump_translation_dataset.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/import-blocks-mt-model.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/import-t2t-mt-model.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/.gitignore +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/Makefile +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/example/README.md +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/example/libs_list +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.keep_over_epoch.lstm2.config +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/example/rescore_lattice.sh +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/example/state_vars_list +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/example/tensor_names_list +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/file.h +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/htklatticerescorer.cc +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/htklatticerescorer.h +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/main.cc +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/rescorer.h +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/vocabulary.cc +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/lattice_rescorer/vocabulary.h +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/tf_avg_checkpoints.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/tf_inspect_checkpoint.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/tf_inspect_summary_log.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/torch_avg_checkpoints.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/torch_export_to_onnx.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/torch_inspect_checkpoint.py +0 -0
- {returnn-1.20240205.110004 → returnn-1.20240206.450}/tools/torch_inspect_checkpoint_and_opt.py +0 -0
--- returnn-1.20240205.110004/returnn/__main__.py
+++ returnn-1.20240206.450/returnn/__main__.py
@@ -219,6 +219,50 @@ def init_data():
         train_data, extra_train = load_data(config, train_cache_bytes, "train")
 
 
+def setup_dummy_datasets():
+    """setup config to use :class:`DummyGenericDataset` instead of the normal datasets"""
+    from binascii import crc32
+
+    extern_data = config.typed_value("extern_data")
+    assert extern_data, "must define extern_data to setup dummy datasets"
+    num_seqs = config.int("dummy_dataset_num_seqs", None)
+    if config.bool_or_other("train"):
+        train_num_seqs = config.int("train_dummy_dataset_num_seqs", num_seqs)
+        if not train_num_seqs:
+            train_num_seqs = 1000
+        if not num_seqs:
+            num_seqs = max(train_num_seqs // 20, 1)
+        config.set("train", {"class": "DummyGenericDataset", "data_template": extern_data, "num_seqs": train_num_seqs})
+    if not num_seqs:
+        num_seqs = 100
+    for key in ["dev", "eval", "forward_data", "search_data"]:
+        if config.bool_or_other(key):
+            config.set(
+                key,
+                {
+                    "class": "DummyGenericDataset",
+                    "data_template": extern_data,
+                    "num_seqs": num_seqs,
+                    "fixed_random_seed": crc32(key.encode("utf8")),
+                },
+            )
+    if config.bool_or_other("eval_datasets"):
+        eval_datasets = config.typed_value("eval_datasets")
+        assert isinstance(eval_datasets, dict)
+        config.set(
+            "eval_datasets",
+            {
+                key: {
+                    "class": "DummyGenericDataset",
+                    "data_template": extern_data,
+                    "num_seqs": num_seqs,
+                    "fixed_random_seed": crc32(key.encode("utf8")),
+                }
+                for key in eval_datasets.keys()
+            },
+        )
+
+
 def print_task_properties():
     """
     print information about used data
@@ -437,6 +481,8 @@ def init(config_filename=None, command_line_options=(), config_updates=None, ext
         startup_callback = config.typed_value("startup_callback")
         startup_callback(config=config)
     if need_data():
+        if config.bool("use_dummy_datasets", False):
+            setup_dummy_datasets()
         init_data()
     print_task_properties()
     init_engine()
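
The wiring above means dummy data can be enabled purely from the config. A minimal config sketch (illustrative, not part of the diff; only use_dummy_datasets, dummy_dataset_num_seqs and train_dummy_dataset_num_seqs are the new keys, and the extern_data entries here are hypothetical):

    # Illustrative RETURNN config sketch. With use_dummy_datasets set, init()
    # replaces the train/dev/eval datasets with DummyGenericDataset instances
    # derived from extern_data, e.g. to smoke-test a setup without real data.
    use_dummy_datasets = True
    train_dummy_dataset_num_seqs = 1000  # optional; 1000 is also the fallback
    dummy_dataset_num_seqs = 50  # optional; defaults to max(train_num_seqs // 20, 1)

    # extern_data must be defined; the exact format depends on your setup.
    extern_data = {
        "data": {"dim": 40},  # dense float features
        "classes": {"dim": 10, "sparse": True},  # sparse int targets
    }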
--- returnn-1.20240205.110004/returnn/datasets/generating.py
+++ returnn-1.20240206.450/returnn/datasets/generating.py
@@ -4,13 +4,14 @@ Some datasets for artificially generated data.
 
 from __future__ import annotations
 
-from typing import Optional, Sequence
+from typing import Optional, Union, Any, Sequence, List, Tuple, Dict
 import numpy
 import sys
 import typing
 
 from returnn.util.basic import class_idx_seq_to_1_of_k, CollectionReadCheckCovered
 from returnn.log import log
+from returnn.tensor import Tensor, Dim, TensorDict
 
 from .util.feature_extraction import ExtractAudioFeatures
 from .util.vocabulary import *
@@ -954,6 +955,86 @@ class DummyDatasetMultipleDataKeys(DummyDataset):
         return DatasetSeq(seq_idx=seq_idx, features=features, targets=None)
 
 
+class DummyGenericDataset(GeneratingDataset):
+    """
+    Generate some random dummy data based on a tensor dict (like ``extern_data``).
+    """
+
+    _getnewargs_exclude_attrs = Dataset._getnewargs_exclude_attrs.union(("input_dim", "output_dim"))
+
+    def __init__(
+        self,
+        data_template: Union[TensorDict, Dict[str, Union[Tensor, Dict[str, Any]]]],
+        num_seqs: int,
+        *,
+        seq_lens: Union[None, int, Tuple[int, int], Dict[Union[str, Dim, None], Union[int, Tuple[int, int]]]] = None,
+        **kwargs,
+    ):
+        """
+        :param data_template: describes each tensor
+        :param num_seqs:
+        :param seq_lens: either fixed seq len, or take randint. per data key, or per dim, or same for all
+        """
+        from returnn.tensor.utils import tensor_dict_dims_random_seq_len_min_max
+
+        data_template_ = TensorDict()
+        data_template_.update(data_template, auto_convert=True)
+        data_template = data_template_
+        # Map Tensor to old-style dims, currently required by the Dataset API.
+        old_style_dims = {k: (v.dim, v.ndim) for k, v in data_template.data.items()}
+        super().__init__(input_dim=None, output_dim=old_style_dims, num_seqs=num_seqs, **kwargs)
+        self.data_template = data_template
+        self.seq_lens = seq_lens
+        self._dyn_dims, self._dyn_lens_min_max = tensor_dict_dims_random_seq_len_min_max(data_template, seq_lens)
+
+    def get_data_keys(self) -> List[str]:
+        """data keys"""
+        return list(self.data_template.data.keys())
+
+    def get_target_list(self) -> List[str]:
+        """target keys"""
+        res = []
+        if "classes" in self.data_template.data:
+            res.append("classes")
+        res.extend([k for k, v in self.data_template.data.items() if not v.available_for_inference and k != "classes"])
+        return res
+
+    def get_data_dtype(self, key: str) -> str:
+        """dtype"""
+        return self.data_template.data[key].dtype
+
+    def is_data_sparse(self, key: str) -> bool:
+        """sparse"""
+        return self.data_template.data[key].sparse
+
+    def get_data_shape(self, key: str) -> List[int]:
+        """
+        :returns get_data(*, key).shape[1:], i.e. num-frames excluded
+        """
+        return list(self.data_template.data[key].shape[1:])
+
+    def generate_seq(self, seq_idx: int) -> DatasetSeq:
+        """generate seq (assuming self.random is in a correct state)"""
+        return DatasetSeq(seq_idx=seq_idx, features={k: self._generate_data(k) for k in self.data_template.data.keys()})
+
+    def _generate_data(self, key: str) -> numpy.ndarray:
+        """generate for specific data key. assumes that self.random is in a correct state"""
+        from returnn.tensor.utils import get_random_seq_lens_for_dyn_dims
+
+        seq_lens = get_random_seq_lens_for_dyn_dims(self._dyn_dims, self._dyn_lens_min_max, rnd=self.random)
+        templ: Tensor = self.data_template.data[key]
+        shape = [
+            seq_lens[dim][0] if dim.is_dynamic() else dim.dimension for dim in templ.dims if not dim.is_batch_dim()
+        ]
+        if templ.sparse_dim:
+            return self.random.randint(0, templ.sparse_dim.dimension, shape, dtype=templ.dtype)
+        if templ.dtype.startswith("float"):
+            return self.random.standard_normal(shape).astype(templ.dtype)
+        if templ.dtype.startswith("int") or templ.dtype.startswith("uint"):
+            return self.random.randint(0, 10, shape, dtype=templ.dtype)
+        raise NotImplementedError(f"{self} generate: data key {key!r} template {templ}")
+
+
 class StaticDataset(CachedDataset2):
     """
     Provide all the data as a list of dict of numpy arrays.
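
For reference, a minimal standalone sketch of driving the new DummyGenericDataset through the regular Dataset API (the dims, sizes and key names here are hypothetical, based on the class shown above):

    from returnn.tensor import Tensor, Dim, TensorDict, batch_dim
    from returnn.datasets.generating import DummyGenericDataset

    time_dim = Dim(None, name="time")  # dynamic (per-sequence) dim
    feat_dim = Dim(40, name="feat")  # static feature dim
    template = TensorDict()
    template.update(
        {"data": Tensor("data", dims=[batch_dim, time_dim, feat_dim], dtype="float32")},
        auto_convert=True,
    )
    ds = DummyGenericDataset(data_template=template, num_seqs=10, seq_lens=(5, 15))
    ds.init_seq_order(epoch=1)
    ds.load_seqs(0, 1)
    print(ds.get_data(0, "data").shape)  # (T, 40) with 5 <= T <= 15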
@@ -0,0 +1,237 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Some helper utils.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
from typing import Optional, Union, Sequence, Dict, List, Tuple
|
|
7
|
+
import numpy
|
|
8
|
+
from returnn.tensor import Tensor, Dim, TensorDict, batch_dim
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def tensor_dict_fill_random_numpy_(
|
|
12
|
+
tensor_dict: TensorDict,
|
|
13
|
+
*,
|
|
14
|
+
rnd: Union[int, numpy.random.RandomState] = 42,
|
|
15
|
+
dyn_dim_max_sizes: Optional[Dict[Dim, int]] = None,
|
|
16
|
+
dyn_dim_min_sizes: Optional[Dict[Dim, int]] = None,
|
|
17
|
+
):
|
|
18
|
+
"""
|
|
19
|
+
Random fill with NumPy arrays.
|
|
20
|
+
|
|
21
|
+
:param tensor_dict:
|
|
22
|
+
:param rnd:
|
|
23
|
+
:param dyn_dim_max_sizes: you can specify max sizes for dim tags with dynamic sizes.
|
|
24
|
+
The fill random code makes sure that there is at least one entry where we reach the max size,
|
|
25
|
+
so that the dim value will be the max size.
|
|
26
|
+
:param dyn_dim_min_sizes:
|
|
27
|
+
"""
|
|
28
|
+
if not isinstance(rnd, numpy.random.RandomState):
|
|
29
|
+
rnd = numpy.random.RandomState(rnd)
|
|
30
|
+
for v in tensor_dict.data.values():
|
|
31
|
+
tensor_fill_random_numpy_(v, rnd=rnd, dyn_dim_max_sizes=dyn_dim_max_sizes, dyn_dim_min_sizes=dyn_dim_min_sizes)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def tensor_fill_random_numpy_(
|
|
35
|
+
x: Tensor,
|
|
36
|
+
*,
|
|
37
|
+
min_val: int = 0,
|
|
38
|
+
max_val: Optional[int] = None,
|
|
39
|
+
rnd: numpy.random.RandomState,
|
|
40
|
+
dyn_dim_max_sizes: Optional[Dict[Dim, int]] = None,
|
|
41
|
+
dyn_dim_min_sizes: Optional[Dict[Dim, int]] = None,
|
|
42
|
+
) -> bool:
|
|
43
|
+
"""fill. return whether sth was filled"""
|
|
44
|
+
if dyn_dim_max_sizes is None:
|
|
45
|
+
dyn_dim_max_sizes = {}
|
|
46
|
+
if dyn_dim_min_sizes is None:
|
|
47
|
+
dyn_dim_min_sizes = {}
|
|
48
|
+
filled = False
|
|
49
|
+
while True:
|
|
50
|
+
have_unfilled = False
|
|
51
|
+
filled_this_round = False
|
|
52
|
+
|
|
53
|
+
for dim in x.dims:
|
|
54
|
+
if dim.is_batch_dim() and not dim.dyn_size_ext:
|
|
55
|
+
dim.dyn_size_ext = Tensor("batch", [], dtype="int32")
|
|
56
|
+
if dim.is_dynamic() and not dim.dyn_size_ext:
|
|
57
|
+
dim.dyn_size_ext = Tensor(dim.name or "time", dims=[batch_dim], dtype="int32")
|
|
58
|
+
if not dim.dyn_size_ext:
|
|
59
|
+
continue
|
|
60
|
+
if tensor_fill_random_numpy_(
|
|
61
|
+
dim.dyn_size_ext,
|
|
62
|
+
min_val=dyn_dim_min_sizes.get(dim, 2),
|
|
63
|
+
max_val=dyn_dim_max_sizes.get(dim, None),
|
|
64
|
+
rnd=rnd,
|
|
65
|
+
dyn_dim_max_sizes=dyn_dim_max_sizes,
|
|
66
|
+
):
|
|
67
|
+
if dim in dyn_dim_max_sizes:
|
|
68
|
+
# Make sure at least one of the dyn sizes matches the max size.
|
|
69
|
+
i = rnd.randint(0, dim.dyn_size_ext.raw_tensor.size)
|
|
70
|
+
dim.dyn_size_ext.raw_tensor.flat[i] = dyn_dim_max_sizes[dim]
|
|
71
|
+
if dim in dyn_dim_min_sizes:
|
|
72
|
+
j = rnd.randint(0, dim.dyn_size_ext.raw_tensor.size - 1)
|
|
73
|
+
if j >= i:
|
|
74
|
+
j += 1
|
|
75
|
+
dim.dyn_size_ext.raw_tensor.flat[j] = dyn_dim_min_sizes[dim]
|
|
76
|
+
elif dim in dyn_dim_min_sizes:
|
|
77
|
+
raise Exception(f"also define {dim} in dyn_dim_max_sizes, not just dyn_dim_min_sizes")
|
|
78
|
+
filled = True
|
|
79
|
+
filled_this_round = True
|
|
80
|
+
if dim.dyn_size_ext.raw_tensor is None:
|
|
81
|
+
have_unfilled = True
|
|
82
|
+
elif not isinstance(dim.dyn_size_ext.raw_tensor, numpy.ndarray):
|
|
83
|
+
have_unfilled = True
|
|
84
|
+
|
|
85
|
+
if have_unfilled:
|
|
86
|
+
assert filled_this_round, f"should have filled something, {x}"
|
|
87
|
+
|
|
88
|
+
if not have_unfilled:
|
|
89
|
+
break
|
|
90
|
+
|
|
91
|
+
if x.raw_tensor is not None:
|
|
92
|
+
if not isinstance(x.raw_tensor, numpy.ndarray):
|
|
93
|
+
x.raw_tensor = None
|
|
94
|
+
|
|
95
|
+
if x.raw_tensor is None:
|
|
96
|
+
shape = [d.get_dim_value() for d in x.dims]
|
|
97
|
+
if x.dtype.startswith("int"):
|
|
98
|
+
if max_val is None:
|
|
99
|
+
max_val = rnd.randint(5, 20)
|
|
100
|
+
if x.sparse_dim and x.sparse_dim.dimension is not None:
|
|
101
|
+
max_val = x.sparse_dim.dimension
|
|
102
|
+
x.raw_tensor = rnd.randint(min_val, max_val, size=shape, dtype=x.dtype)
|
|
103
|
+
elif x.dtype == "bool":
|
|
104
|
+
x.raw_tensor = rnd.randint(0, 2, size=shape, dtype=x.dtype)
|
|
105
|
+
elif x.dtype.startswith("float"):
|
|
106
|
+
x.raw_tensor = rnd.normal(0.0, 1.0, size=shape).astype(x.dtype)
|
|
107
|
+
elif x.dtype.startswith("complex"):
|
|
108
|
+
real = rnd.normal(0.0, 1.0, size=shape)
|
|
109
|
+
imag = rnd.normal(0.0, 1.0, size=shape)
|
|
110
|
+
x.raw_tensor = (real + 1j * imag).astype(x.dtype)
|
|
111
|
+
else:
|
|
112
|
+
raise NotImplementedError(f"not implemented for {x} dtype {x.dtype}")
|
|
113
|
+
filled = True
|
|
114
|
+
|
|
115
|
+
assert isinstance(x.raw_tensor, numpy.ndarray)
|
|
116
|
+
|
|
117
|
+
return filled
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def tensor_dict_dims_random_seq_len_min_max(
|
|
121
|
+
tensor_dict: TensorDict,
|
|
122
|
+
seq_lens: Union[None, int, Tuple[int, int], Dict[Union[str, Dim, None], Union[int, Tuple[int, int]]]] = None,
|
|
123
|
+
) -> Tuple[List[Dim], Dict[Dim, Tuple[int, int]]]:
|
|
124
|
+
"""
|
|
125
|
+
This is specifically intended to prepare the list of all dynamic dims from the tensor dict
|
|
126
|
+
and the seq_len_min_max for :func:`get_random_seq_lens_for_dyn_dims`.
|
|
127
|
+
|
|
128
|
+
:param tensor_dict:
|
|
129
|
+
:param seq_lens: either fixed seq len, or take randint. per data key, or per dim, or same for all
|
|
130
|
+
:return: dims, seq_len_min_max
|
|
131
|
+
"""
|
|
132
|
+
if seq_lens is None:
|
|
133
|
+
seq_lens = {}
|
|
134
|
+
if not isinstance(seq_lens, dict):
|
|
135
|
+
seq_lens = {None: seq_lens}
|
|
136
|
+
seq_lens: Dict[Union[str, Dim, None], Union[int, Tuple[int, int]]]
|
|
137
|
+
|
|
138
|
+
# Collect all dyn dim tags, including derived_from_op ones.
|
|
139
|
+
# The order will be sorted such that derived_from_op roots come first.
|
|
140
|
+
visited_dims = set()
|
|
141
|
+
dims = []
|
|
142
|
+
seq_len_min_max = {} # Also collect seq_len_min_max.
|
|
143
|
+
for k, v in tensor_dict.data.items():
|
|
144
|
+
for dim in v.dims:
|
|
145
|
+
if dim.is_dynamic() and dim not in visited_dims and not dim.is_batch_dim():
|
|
146
|
+
queue = [dim]
|
|
147
|
+
offset = len(dims)
|
|
148
|
+
while queue:
|
|
149
|
+
dim = queue.pop(0)
|
|
150
|
+
if not dim.is_dynamic():
|
|
151
|
+
continue
|
|
152
|
+
if dim in visited_dims:
|
|
153
|
+
continue
|
|
154
|
+
visited_dims.add(dim)
|
|
155
|
+
dims.insert(offset, dim)
|
|
156
|
+
dim.reset_batch_and_raw()
|
|
157
|
+
if dim.derived_from_op:
|
|
158
|
+
queue.extend(dim.derived_from_op.inputs)
|
|
159
|
+
else:
|
|
160
|
+
# Need to specify seq_len_min_max.
|
|
161
|
+
if dim in seq_lens or k in seq_lens or None in seq_lens:
|
|
162
|
+
if dim in seq_lens:
|
|
163
|
+
size = seq_lens[dim]
|
|
164
|
+
elif k in seq_lens:
|
|
165
|
+
size = seq_lens[k]
|
|
166
|
+
else:
|
|
167
|
+
size = seq_lens[None]
|
|
168
|
+
if isinstance(size, int):
|
|
169
|
+
size = (size, size)
|
|
170
|
+
else:
|
|
171
|
+
assert (
|
|
172
|
+
isinstance(size, tuple)
|
|
173
|
+
and len(size) == 2
|
|
174
|
+
and all(isinstance(s, int) for s in size)
|
|
175
|
+
and 0 <= size[0] <= size[1]
|
|
176
|
+
), f"invalid size {size!r} in seq lens {seq_lens}"
|
|
177
|
+
else:
|
|
178
|
+
if v.shape in {(None,), (None, 1)} and v.dtype.startswith("float"):
|
|
179
|
+
# Assume raw audio data samples, take longer seq lens by default, assume 16khz.
|
|
180
|
+
size = (1_000, 8_000)
|
|
181
|
+
else:
|
|
182
|
+
size = (5, 15)
|
|
183
|
+
seq_len_min_max[dim] = size
|
|
184
|
+
|
|
185
|
+
return dims, seq_len_min_max
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
+def get_random_seq_lens_for_dyn_dims(
+    dims: Sequence[Dim],
+    seq_len_min_max: Dict[Dim, Tuple[int, int]],
+    *,
+    batch_size: int = 1,
+    rnd: Union[int, numpy.random.RandomState] = 1337,
+) -> Dict[Dim, numpy.ndarray]:
+    """
+    Make random seq lens for dims.
+
+    Note that dim tags are not actually modified here,
+    as this needs to work in a safe way,
+    potentially running in parallel to the main thread.
+
+    :param dims: Note that the order matters, as we use complete_dyn_size() (or equivalent).
+    :param seq_len_min_max:
+    :param batch_size:
+    :param rnd:
+    """
+    if not isinstance(rnd, numpy.random.RandomState):
+        rnd = numpy.random.RandomState(rnd)
+
+    gen_dims = {}
+    for dim in dims:
+        if dim not in gen_dims:
+            if dim.derived_from_op:
+                # If we get a KeyError for the following, the order of dims is invalid.
+                values = [gen_dims[dim_] for dim_ in dim.derived_from_op.inputs]
+                kind = dim.derived_from_op.kind
+                a = values[0]
+                for b in values[1:]:
+                    if kind == "add":
+                        a = numpy.maximum(a + b, 0)
+                    elif kind == "sub":
+                        a = numpy.maximum(a - b, 0)
+                    elif kind == "mul":
+                        a = a * b
+                    elif kind in ("floordiv", "truediv"):  # truediv assumes there is no remainder
+                        a = a // b
+                    elif kind == "ceildiv":
+                        a = -(-a // b)
+                    else:
+                        raise ValueError("unknown op kind %r" % kind)
+                gen_dims[dim] = a
+                continue
+
+            min_, max_ = seq_len_min_max[dim]
+            gen_dims[dim] = rnd.randint(min_, max_ + 1, size=[batch_size], dtype=numpy.int32)
+
+    return gen_dims
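Taken together with the helper above, the intended flow appears to be: derive dims and ranges from the TensorDict once, then draw per-batch lengths; derived dims (derived_from_op) are computed from their inputs rather than drawn. Note that -(-a // b) is the usual integer ceil-division idiom (e.g. -(-7 // 3) == 3). A sketch, continuing the hypothetical extern_data from the previous example:

    import numpy
    from returnn.tensor.utils import get_random_seq_lens_for_dyn_dims

    dims, seq_len_min_max = tensor_dict_dims_random_seq_len_min_max(extern_data)
    seq_lens = get_random_seq_lens_for_dyn_dims(dims, seq_len_min_max, batch_size=3, rnd=1337)
    # one int32 array of shape [batch_size] per dim
    for dim, lens in seq_lens.items():
        assert lens.shape == (3,) and lens.dtype == numpy.int32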
returnn/torch/frontend/_backend.py

@@ -236,9 +236,11 @@ class TorchBackend(Backend[torch.Tensor]):
             out.raw_tensor,
             scale=scale.raw_tensor if isinstance(scale, Tensor) else scale,
             shift=shift.raw_tensor if isinstance(shift, Tensor) else shift,
-            scale_shift_by_sum_over_axis=x.get_axis_from_description(scale_shift_by_sum_over_axis, allow_int=False)
-            if scale_shift_by_sum_over_axis is not None
-            else None,
+            scale_shift_by_sum_over_axis=(
+                x.get_axis_from_description(scale_shift_by_sum_over_axis, allow_int=False)
+                if scale_shift_by_sum_over_axis is not None
+                else None
+            ),
         )
         return out

@@ -1841,6 +1843,12 @@ class TorchBackend(Backend[torch.Tensor]):
         # we get the same output seq length in both cases.
         x_raw = torch.nn.functional.pad(x_raw, (0, (fft_length - frame_length)))

+        if frame_length > x_raw.shape[1]:
+            # Torch does not really support the empty case.
+            y = Tensor("stft", dims=batch_dims + [out_dim, out_spatial_dim], feature_dim=out_dim, dtype="complex64")
+            y.raw_tensor = torch.zeros([d.get_dim_value() for d in y.dims], dtype=torch.complex64)
+            return y
+
         if window_enforce_even:
             frame_length -= frame_length % 2

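The new early return works around a torch.stft limitation: without centering (which this code path presumably uses, given its explicit manual padding), PyTorch rejects inputs shorter than n_fft instead of returning zero frames. A minimal repro of the underlying behavior in plain PyTorch (the exact error text may vary by version):

    import torch

    x = torch.randn(1, 16)  # (batch, time): shorter than the frame size below
    try:
        torch.stft(x, n_fft=32, hop_length=3, center=False, return_complex=True)
    except RuntimeError as exc:
        print("torch.stft rejects the too-short input:", exc)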
tests/rf_utils.py

@@ -90,7 +90,7 @@ def run_model(
         if v_pt.dtype.startswith("int"):
             assert v_tf.dtype.startswith("int")  # allow maybe different bit depth
         else:
-            assert v_pt.dtype == v_tf.dtype
+            assert v_pt.dtype == v_tf.dtype, f"PT dtype {v_pt.dtype} vs TF dtype {v_tf.dtype}"
         assert bool(v_pt.sparse_dim) == bool(v_tf.sparse_dim)
         if v_pt.sparse_dim:
             _check_dim(v_pt.sparse_dim, v_tf.sparse_dim)

tests/test_rf_signal.py

@@ -18,16 +18,18 @@ def test_stft():
         }
     )

-    class _Net(rf.Module):
-        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim, Dim]:
-            return rf.stft(x, in_spatial_dim=in_spatial_dim, frame_step=3, frame_length=5, fft_length=6)
-
     # noinspection PyShadowingNames
-    def _forward_step(*, model: _Net, extern_data: TensorDict):
-        out, out_spatial_dim, out_dim = model(extern_data["data"], in_spatial_dim=time_dim)
+    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
+        model  # unused  # noqa
+        x = extern_data["data"]
+        out, out_spatial_dim, out_dim = rf.stft(x, in_spatial_dim=time_dim, frame_step=3, frame_length=5, fft_length=6)
         out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, out_dim))
+        out, out_spatial_dim, out_dim = rf.stft(x, in_spatial_dim=time_dim, frame_step=3, frame_length=32)
+        out.mark_as_output("zero_len", shape=(batch_dim, out_spatial_dim, out_dim))
+        out, out_spatial_dim, out_dim = rf.stft(x, in_spatial_dim=time_dim, frame_step=3, frame_length=5)
+        out.mark_as_output("normal", shape=(batch_dim, out_spatial_dim, out_dim))

-    run_model(extern_data, lambda *, epoch, step: _Net(), _forward_step)
+    run_model(extern_data, lambda *, epoch, step: rf.Module(), _forward_step)


 def test_mel_filterbank():

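The new "zero_len" output exercises the empty-frame branch added in _backend.py above: the random sequence lengths run_model draws here are presumably short enough that no 32-sample frame fits. Without centering, the frame count follows the standard formula num_frames = max(0, (T - frame_length) // frame_step + 1) (rf.stft's exact convention may differ, e.g. with fft_length padding); a quick check of both cases with an illustrative length T = 15 and a hypothetical helper:

    def num_stft_frames(t: int, frame_length: int, frame_step: int) -> int:
        # frame count of a length-t signal, no centering/padding
        return max(0, (t - frame_length) // frame_step + 1)

    assert num_stft_frames(15, frame_length=32, frame_step=3) == 0  # "zero_len": no full frame fits
    assert num_stft_frames(15, frame_length=5, frame_step=3) == 4   # "normal": several frames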
returnn-1.20240205.110004/returnn/tensor/utils.py (removed; superseded by the new, extended returnn/tensor/utils.py above)

@@ -1,118 +0,0 @@
-"""
-Some helper utils.
-"""
-
-
-from __future__ import annotations
-from typing import Optional, Union, Dict
-import numpy
-from returnn.tensor import Tensor, Dim, TensorDict, batch_dim
-
-
-def tensor_dict_fill_random_numpy_(
-    tensor_dict: TensorDict,
-    *,
-    rnd: Union[int, numpy.random.RandomState] = 42,
-    dyn_dim_max_sizes: Optional[Dict[Dim, int]] = None,
-    dyn_dim_min_sizes: Optional[Dict[Dim, int]] = None,
-):
-    """
-    Random fill with NumPy arrays.
-
-    :param tensor_dict:
-    :param rnd:
-    :param dyn_dim_max_sizes: you can specify max sizes for dim tags with dynamic sizes.
-        The fill random code makes sure that there is at least one entry where we reach the max size,
-        so that the dim value will be the max size.
-    :param dyn_dim_min_sizes:
-    """
-    if not isinstance(rnd, numpy.random.RandomState):
-        rnd = numpy.random.RandomState(rnd)
-    for v in tensor_dict.data.values():
-        tensor_fill_random_numpy_(v, rnd=rnd, dyn_dim_max_sizes=dyn_dim_max_sizes, dyn_dim_min_sizes=dyn_dim_min_sizes)
-
-
-def tensor_fill_random_numpy_(
-    x: Tensor,
-    *,
-    min_val: int = 0,
-    max_val: Optional[int] = None,
-    rnd: numpy.random.RandomState,
-    dyn_dim_max_sizes: Optional[Dict[Dim, int]] = None,
-    dyn_dim_min_sizes: Optional[Dict[Dim, int]] = None,
-) -> bool:
-    """fill. return whether sth was filled"""
-    if dyn_dim_max_sizes is None:
-        dyn_dim_max_sizes = {}
-    if dyn_dim_min_sizes is None:
-        dyn_dim_min_sizes = {}
-    filled = False
-    while True:
-        have_unfilled = False
-        filled_this_round = False
-
-        for dim in x.dims:
-            if dim.is_batch_dim() and not dim.dyn_size_ext:
-                dim.dyn_size_ext = Tensor("batch", [], dtype="int32")
-            if dim.is_dynamic() and not dim.dyn_size_ext:
-                dim.dyn_size_ext = Tensor(dim.name or "time", dims=[batch_dim], dtype="int32")
-            if not dim.dyn_size_ext:
-                continue
-            if tensor_fill_random_numpy_(
-                dim.dyn_size_ext,
-                min_val=dyn_dim_min_sizes.get(dim, 2),
-                max_val=dyn_dim_max_sizes.get(dim, None),
-                rnd=rnd,
-                dyn_dim_max_sizes=dyn_dim_max_sizes,
-            ):
-                if dim in dyn_dim_max_sizes:
-                    # Make sure at least one of the dyn sizes matches the max size.
-                    i = rnd.randint(0, dim.dyn_size_ext.raw_tensor.size)
-                    dim.dyn_size_ext.raw_tensor.flat[i] = dyn_dim_max_sizes[dim]
-                    if dim in dyn_dim_min_sizes:
-                        j = rnd.randint(0, dim.dyn_size_ext.raw_tensor.size - 1)
-                        if j >= i:
-                            j += 1
-                        dim.dyn_size_ext.raw_tensor.flat[j] = dyn_dim_min_sizes[dim]
-                elif dim in dyn_dim_min_sizes:
-                    raise Exception(f"also define {dim} in dyn_dim_max_sizes, not just dyn_dim_min_sizes")
-                filled = True
-                filled_this_round = True
-            if dim.dyn_size_ext.raw_tensor is None:
-                have_unfilled = True
-            elif not isinstance(dim.dyn_size_ext.raw_tensor, numpy.ndarray):
-                have_unfilled = True
-
-        if have_unfilled:
-            assert filled_this_round, f"should have filled something, {x}"
-
-        if not have_unfilled:
-            break
-
-    if x.raw_tensor is not None:
-        if not isinstance(x.raw_tensor, numpy.ndarray):
-            x.raw_tensor = None
-
-    if x.raw_tensor is None:
-        shape = [d.get_dim_value() for d in x.dims]
-        if x.dtype.startswith("int"):
-            if max_val is None:
-                max_val = rnd.randint(5, 20)
-            if x.sparse_dim and x.sparse_dim.dimension is not None:
-                max_val = x.sparse_dim.dimension
-            x.raw_tensor = rnd.randint(min_val, max_val, size=shape, dtype=x.dtype)
-        elif x.dtype == "bool":
-            x.raw_tensor = rnd.randint(0, 2, size=shape, dtype=x.dtype)
-        elif x.dtype.startswith("float"):
-            x.raw_tensor = rnd.normal(0.0, 1.0, size=shape).astype(x.dtype)
-        elif x.dtype.startswith("complex"):
-            real = rnd.normal(0.0, 1.0, size=shape)
-            imag = rnd.normal(0.0, 1.0, size=shape)
-            x.raw_tensor = (real + 1j * imag).astype(x.dtype)
-        else:
-            raise NotImplementedError(f"not implemented for {x} dtype {x.dtype}")
-        filled = True
-
-    assert isinstance(x.raw_tensor, numpy.ndarray)
-
-    return filled
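The max/min-size guarantee described in the docstring above (at least one entry reaches the max) can be exercised directly. A sketch under the assumption that the new utils.py keeps the same tensor_fill_random_numpy_ signature; the dims here are constructed ad hoc for illustration:

    import numpy
    from returnn.tensor import Tensor, Dim, batch_dim
    from returnn.tensor.utils import tensor_fill_random_numpy_

    time_dim = Dim(None, name="time")
    x = Tensor("x", dims=[batch_dim, time_dim], dtype="float32")
    tensor_fill_random_numpy_(
        x,
        rnd=numpy.random.RandomState(42),
        dyn_dim_max_sizes={time_dim: 10},
        dyn_dim_min_sizes={time_dim: 3},
    )
    sizes = time_dim.dyn_size_ext.raw_tensor
    assert sizes.max() == 10 and sizes.min() >= 3  # one entry forced to the max, one to the min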