returnn 1.20250226.132109.tar.gz → 1.20250227.110407.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of returnn might be problematic.
- {returnn-1.20250226.132109/returnn.egg-info → returnn-1.20250227.110407}/PKG-INFO +1 -1
- returnn-1.20250227.110407/_setup_info_generated.py +2 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/basic.py +48 -13
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/cached2.py +9 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/distrib_files.py +2 -1
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/meta.py +6 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/multi_proc.py +10 -1
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/numpy_dump.py +1 -1
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/postprocessing.py +62 -11
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/raw_wav.py +1 -1
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/sprint.py +2 -2
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/stereo.py +2 -2
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_numpy_backend.py +29 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/_dim_extra.py +0 -4
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/_tensor_extra.py +1 -1
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/tensor_dict.py +15 -1
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/data/pipeline.py +131 -3
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/data/returnn_dataset_wrapper.py +17 -3
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/engine.py +11 -2
- {returnn-1.20250226.132109 → returnn-1.20250227.110407/returnn.egg-info}/PKG-INFO +1 -1
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/rf_utils.py +101 -9
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_Dataset.py +10 -2
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_torch_engine.py +25 -1
- returnn-1.20250226.132109/_setup_info_generated.py +0 -2
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/.editorconfig +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/.gitignore +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/.gitmodules +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/.kateconfig +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/CHANGELOG.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/CODEOWNERS +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/CONTRIBUTING.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/LICENSE +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/MANIFEST.in +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/README.rst +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/12AX.cluster_map +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/_setup_returnn_env.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-fwd.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-horovod-mpi.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-horovod-mpi.py.sh +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-horovod-mpi.sh +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-hyper-param-tuning.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-iter-dataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-list-devices.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-lua-torch-layer.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-pretrain.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-record-and-push-to-webserver.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-returnn-as-framework.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-rf-pt-benchmark.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-rf.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-rhn-enwik8.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-sprint-interface.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-att-copy.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-attention.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-chunking-blstm.12ax.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-contribrnn-lstm.12ax.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-enc-dec.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-hard-att-copy.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-lstm-benchmark.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-maxgradnorm-lstm.12ax.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-native-lstm-lowmem.12ax.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-native-lstm.12ax.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-native-lstm2.12ax.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-native-lstm2.12ax.tuned.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-neural-transducer.12ax.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-rec-explicit-lstm.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-rec-explicit-rnn.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-rec-self-att.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-search-compiled-graph.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-tf-vanilla-lstm.12ax.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-timit-lstm-ctc.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-torch.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo-upd-mult-model.lstm.12ax.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/demo.sh +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/IAM_lines/a01-000u-00.png +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/IAM_lines/a01-007-04.png +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/IAM_lines/a01-007-06.png +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/README.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/chars.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/config_demo +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/config_fwd +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/config_real +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/create_IAM_dataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/decode.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/features/raw/demo.h5 +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/go.sh +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/lines.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/split/eval.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/split/train.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/IAM/split/valid.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/artificial/create_test_h5.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/artificial/forwardconfig +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/artificial/go.sh +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/artificial/trainconfig +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/artificial_rgb/create_test_h5.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/artificial_rgb/forwardconfig +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/artificial_rgb/go.sh +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/demos/mdlstm/artificial_rgb/trainconfig +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/pyproject.toml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/requirements.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/__main__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/__old_mod_loader__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/__setup__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/config.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/audio.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/bundle_file.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/cached.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/generating.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/hdf.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/lm.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/map.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/normalization_data.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/text_dict.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/util/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/util/feature_extraction.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/util/strings.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/datasets/util/vocabulary.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/engine/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/engine/base.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/engine/batch.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/__main__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/.git +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/.gitignore +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/LICENSE +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/aligner.gif +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/check.png +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/core.cu +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/core.h +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/core_cpu.cpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/LICENSE +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/MANIFEST.in +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/binding.cpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.cu +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/core.h +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/requirements.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/setup.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/pytorch_binding/warp_rna/test.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/ref_rna.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/setup.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op.cc +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/src/warp_rna_op_kernel_tmpl.h +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/tensorflow_binding/warp_rna/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/WarpRna/warp-rna/test.cpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/graph_editor/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/graph_editor/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/graph_editor/edit.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/graph_editor/reroute.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/graph_editor/select.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/graph_editor/subgraph.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/graph_editor/transform.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/extern/graph_editor/util.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/forward_iface.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_backend.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_cache.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_native/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_native/backend.cpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_native/backend.hpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_native/module.cpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_native/module.hpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_native/py_utils.hpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_native/tensor_ops.cpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_native/tensor_ops.hpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_random_journal.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/_utils.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/array_.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/attention.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/audio/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/audio/mel.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/audio/specaugment.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/backend.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/build_from_dict.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/cond.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/const.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/container.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/control_flow_ctx.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/conv.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/conversions/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/conversions/espnet_e_branchformer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/conversions/hf_llama.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/conversions/torch_nn.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/decoder/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/decoder/transformer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/device.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/dims.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/dropout.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/dtype.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/encoder/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/encoder/base.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/encoder/conformer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/encoder/conformer_v2.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/encoder/e_branchformer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/encoder/transformer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/gradient.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/graph.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/hooks.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/init.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/label_smoothing.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/linear.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/loop.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/loss.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/math_.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/matmul.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/module.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/nested.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/normalization.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/parameter.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/parametrizations.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/parametrize.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/piecewise_linear.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/rand.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/rec.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/reduce.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/run_ctx.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/signal.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/state.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/stepwise_scheduler.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/tensor_array.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/frontend/types.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/import_/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/import_/common.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/import_/git.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/import_/import_.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/learning_rate_control.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/log.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/native_op.cpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/native_op.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/pretrain.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/sprint/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/sprint/cache.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/sprint/control.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/sprint/error_signals.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/sprint/extern_interface.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/sprint/interface.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/_tensor_mixin_base.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/_tensor_op_overloads.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/control_flow_ctx.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/dim.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/marked_dim.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/tensor.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tensor/utils.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/compat.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/data_pipeline.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/distributed.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/engine.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/_backend.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/_utils.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/cond.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/config_entry_points.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/debug_eager_mode.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/dims.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/layer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/loop.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/make_layer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/masked_computation.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/parameter_assign.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_layers/prev_tensor_ref.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_low_level/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/frontend_low_level/_backend.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/horovod.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/hyper_param_tuning.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/layers/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/layers/base.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/layers/basic.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/layers/rec.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/layers/segmental_model.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/layers/signal_processing.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/layers/variable.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/native_op.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/network.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/sprint.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/updater.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/util/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/util/basic.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/util/data.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/util/gradient_checkpoint.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/util/ken_lm.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/tf/util/open_fst.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/data/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/data/extern_data.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/data/queued_data_iter.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/data/tensor_utils.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/distributed.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/frontend/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/frontend/_backend.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/frontend/_rand.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/frontend/bridge.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/frontend/raw_ops.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/optim/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/optim/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/optim/lion.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/updater.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/util/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/util/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/util/array_.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/util/debug_inf_nan.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/util/diagnose_gpu.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/util/exception_helper.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/util/gradient_checkpoint.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/util/module.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/torch/util/scaled_gradient.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/__init__.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/basic.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/better_exchook.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/bpe.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/debug.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/debug_helpers.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/file_cache.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/fsa.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/literal_py_to_pickle.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/lru_cache.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/math.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/multi_proc_non_daemonic_spawn.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/native_code_compiler.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/pprint.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/py-to-pickle.cpp +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/py_ext_mod_compiler.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/result_with_reason.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/sig_proc.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/task_system.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/train_proc_manager.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn/util/watch_memory.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn.egg-info/SOURCES.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn.egg-info/dependency_links.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn.egg-info/requires.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/returnn.egg-info/top_level.txt +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/rnn.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/setup.cfg +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/setup.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/DummySprintExec.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm-inspection-profile.xml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/.gitignore +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/.name +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/codeStyleSettings.xml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/codeStyles/Project.xml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/codeStyles/codeStyleConfig.xml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/inspectionProfiles/Project_Default.xml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/inspectionProfiles/profiles_settings.xml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/misc.xml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/modules.xml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/returnn.iml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/PyCharm.idea/scopes/scope_settings.xml +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/_set_num_threads1.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/_setup_returnn_env.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/_setup_test_env.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/bpe-unicode-demo.codes +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/bpe-unicode-demo.vocab +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/lexicon_opt.fst +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/lexicon_opt.isyms +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/lexicon_opt.jpg +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/lexicon_opt.osyms +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/lint_common.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/pycharm-inspect.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/pylint.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/returnn-as-framework.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/spelling.dic +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_Config.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_Fsa.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_GeneratingDataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_HDFDataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_LearningRateControl.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_Log.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_MultiProcDataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_Pretrain.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_ResNet.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_SprintDataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_SprintInterface.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TFEngine.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TFNativeOp.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TFNetworkLayer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TFNetworkRecLayer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TFNetworkSigProcLayer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TFUpdater.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TFUtil.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TF_determinism.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TaskSystem.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TaskSystem_SharedMem.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_TranslationDataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_Util.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_demos.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_fork_exec.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_hdf_dump.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_array.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_attention.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_base.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_cond.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_const.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_container.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_conv.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_decoder_transformer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_encoder_conformer.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_gradient.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_label_smoothing.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_loop.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_math.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_normalization.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_piecewise_linear.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_rec.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_reduce.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_rf_signal.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_tensor.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_threading.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_tools.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_torch_dataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_torch_frontend.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_torch_internal_frontend.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/test_torch_util.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tests/torch_utils.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/_setup_returnn_env.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/analyze-dataset-batches.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/bliss-collect-seq-lens.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/bliss-dump-text.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/bliss-get-segment-names.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/bliss-to-ogg-zip.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/bpe-create-lexicon.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/calculate-word-error-rate.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/cleanup-old-models.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/collect-orth-symbols.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/collect-words.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/compile_native_op.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/compile_tf_graph.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/debug-dump-search-scores.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/debug-plot-search-scores.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/dump-dataset-raw-strings.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/dump-dataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/dump-forward-stats.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/dump-forward.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/dump-network-json.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/dump-pickle.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/extract_state_tying_from_dataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/get-attention-weights.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/get-best-model-epoch.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/hdf_dump.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/hdf_dump_translation_dataset.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/import-blocks-mt-model.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/import-t2t-mt-model.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/.gitignore +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/Makefile +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/example/README.md +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/example/libs_list +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/example/network.040/i600_m600_m600.sgd_b16_lr0_cl2.newbobabs.keep_over_epoch.lstm2.config +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/example/rescore_lattice.sh +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/example/state_vars_list +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/example/tensor_names_list +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/file.h +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/htklatticerescorer.cc +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/htklatticerescorer.h +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/main.cc +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/rescorer.h +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/vocabulary.cc +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/lattice_rescorer/vocabulary.h +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/tf_avg_checkpoints.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/tf_inspect_checkpoint.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/tf_inspect_summary_log.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/torch_avg_checkpoints.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/torch_export_to_onnx.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/torch_inspect_checkpoint.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/torch_inspect_checkpoint_and_opt.py +0 -0
- {returnn-1.20250226.132109 → returnn-1.20250227.110407}/tools/torch_scale_tuning.py +0 -0
returnn/datasets/basic.py
@@ -16,6 +16,7 @@ from threading import RLock
 from random import Random, random
 import sys
 import os
+import math
 import numpy
 import functools
 import typing
returnn/datasets/basic.py
@@ -937,28 +938,51 @@ class Dataset:
         else:
             # We don't know. So:
             # Some monotonic increasing function in [0,1] which never reaches 1.
-
+            return max(1.0e-10, (1 - 1 / ((seq_idx**0.5) / 100 + 1)) * 0.99)
 
-
-
-    def get_complete_frac(self, seq_idx):
+    def get_complete_frac(self, sorted_seq_idx: int, *, allow_only_lr_suitable: bool = False) -> Optional[float]:
         """
-
-
-
-
-
+        Tries to calculate exactly how much of the current epoch is completed when
+        having processed seq ``sorted_seq_idx``.
+
+        ``sorted_seq_idx`` cannot be less than the seq index of the previously loaded seqs.
+
+        :param sorted_seq_idx: sorted seq idx
+        :param allow_only_lr_suitable: only return a value when that value is suitable/accurate enough
+            to base LR scheduling on it. If false, this function will return an approximative value
+            when the exact value cannot be calculated (due to unknown ``num_seqs``).
+            Approximative values can be appropriate for e.g. progress bars.
+        :return: continuous value in (0, 1] which represents how much of the current epoch
+            is completed after ``sorted_seq_idx``.
+            If ``allow_only_lr_suitable=True``, returns ``None`` if the value cannot be calculated such
+            that it is accurate enough for LR scheduling, and otherwise bases ``epoch_continuous`` on it
+            for any dynamic learning rate scheduling.
+            As ``sorted_seq_idx`` is monotonic, the return value is also guaranteed to be monotonic.
         """
         # noinspection PyBroadException
         try:
             num_seqs = self.num_seqs
         except Exception:  # num_seqs not always available
+            if allow_only_lr_suitable:
+                return None
+
             # noinspection PyBroadException
             try:
                 num_seqs = self.estimated_num_seqs
             except Exception:  # also not always available
                 num_seqs = None  # ignore
-
+
+        if math.isinf(num_seqs):
+            if allow_only_lr_suitable:
+                # cannot compute meaningful complete_frac for infinite num_seqs
+                return None
+            else:
+                num_seqs = None
+
+        assert (
+            num_seqs is None or 0 <= sorted_seq_idx < num_seqs
+        ), f"{self}: invalid seq indices: 0 <= seq_idx ({sorted_seq_idx}) < num_seqs ({num_seqs}) violated"
+        return self.generic_complete_frac(sorted_seq_idx, num_seqs)
 
     @property
     def num_seqs(self) -> int:
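The rewritten `get_complete_frac` separates its two consumers: progress reporting, where the approximative fallback curve above is fine (it stays below 0.99 by construction; e.g. `seq_idx=10000` gives `sqrt(10000)/100 = 1`, hence `(1 - 1/2) * 0.99 = 0.495`), and dynamic LR scheduling, where only an accurate value is usable and `allow_only_lr_suitable=True` returns `None` otherwise. A minimal caller sketch based on the docstring above (the `on_seq_processed` helper and the `epoch_continuous` combination are illustrative assumptions, not code from this diff):

```python
from typing import Optional


def on_seq_processed(dataset, seq_idx: int) -> None:
    # Progress display: an approximative value is acceptable here.
    frac: float = dataset.get_complete_frac(seq_idx)
    print(f"epoch progress: {frac:.1%}")

    # LR scheduling: require a value accurate enough to schedule on, else skip.
    frac_lr: Optional[float] = dataset.get_complete_frac(seq_idx, allow_only_lr_suitable=True)
    if frac_lr is not None:
        # e.g. epoch_continuous = (epoch - 1) + frac_lr, per the docstring
        pass
```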
returnn/datasets/basic.py
@@ -1375,16 +1399,27 @@ class DatasetSeq:
     Encapsulates all data for one sequence.
     """
 
-    def __init__(
+    def __init__(
+        self,
+        seq_idx: int,
+        features,
+        *,
+        targets=None,
+        seq_tag: Optional[str] = None,
+        complete_frac: Optional[float] = None,
+    ):
         """
-        :param
+        :param seq_idx: sorted seq idx in the Dataset
         :param numpy.ndarray|dict[str,numpy.ndarray] features: format 2d (time,feature) (float)
         :param dict[str,numpy.ndarray]|numpy.ndarray|None targets: name -> format 1d (time) (idx of output-feature)
-        :param
+        :param seq_tag: sequence name / tag
+        :param complete_frac: continuous value in (0, 1] which represents how much of the current epoch
+            has been consumed when this seq is processed
         """
         assert isinstance(seq_idx, (int, numpy.integer))
         self.seq_idx = int(seq_idx)
         self.seq_tag = seq_tag or ("seq-%i" % seq_idx)
+        self.complete_frac = complete_frac
         if not isinstance(features, dict):
             assert isinstance(features, numpy.ndarray)
             features = {"data": features}
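With `targets`, `seq_tag` and `complete_frac` now keyword-only, positional calls like `DatasetSeq(seq_idx, features, targets)` raise a `TypeError`; the `numpy_dump.py` hunk further down is exactly that call-site fix. A small construction sketch with made-up shapes:

```python
import numpy
from returnn.datasets.basic import DatasetSeq

features = numpy.zeros((100, 40), dtype="float32")  # (time, feature)
targets = {"classes": numpy.zeros((100,), dtype="int32")}  # name -> (time,)

seq = DatasetSeq(seq_idx=0, features=features, targets=targets, seq_tag="train-0", complete_frac=0.01)
assert seq.complete_frac == 0.01
assert seq.seq_tag == "train-0"
```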
returnn/datasets/cached2.py
@@ -228,6 +228,15 @@ class CachedDataset2(Dataset):
         keys.remove("data")
         return keys
 
+    def get_complete_frac(self, sorted_seq_idx, **kwargs):
+        """
+        :return: fractional completion value for the given sorted_seq_idx
+        """
+        seq = self._get_seq(sorted_seq_idx)
+        if seq is not None and seq.complete_frac is not None:
+            return seq.complete_frac
+        return super().get_complete_frac(sorted_seq_idx, **kwargs)
+
     def is_data_sparse(self, key):
         """
         :param str key: e.g. "data" or "classes"
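This override gives cached sequences a say in the completion estimate: if the producer attached a `complete_frac` to the `DatasetSeq` (as the worker loops below now do), it wins; otherwise the generic `Dataset` estimate applies. A sketch of a `CachedDataset2` subclass exploiting this, assuming the usual `_collect_single_seq` extension point (the shapes and the fixed total of 100 seqs are made up):

```python
import numpy
from returnn.datasets.basic import DatasetSeq
from returnn.datasets.cached2 import CachedDataset2


class MyCachedDataset(CachedDataset2):
    def _collect_single_seq(self, seq_idx: int) -> DatasetSeq:
        features = numpy.random.rand(10, 3).astype("float32")
        # Attaching complete_frac here means CachedDataset2.get_complete_frac
        # returns it directly instead of falling back to the generic estimate.
        return DatasetSeq(seq_idx=seq_idx, features=features, complete_frac=(seq_idx + 1) / 100)
```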
returnn/datasets/distrib_files.py
@@ -601,7 +601,8 @@ def _worker_proc_loop(
             dataset.load_seqs(next_seq_idx, next_seq_idx + 1)
             seq_tag = dataset.get_tag(next_seq_idx)
             features = {data_key: dataset.get_data(next_seq_idx, data_key) for data_key in dataset.get_data_keys()}
-
+            complete_frac = dataset.get_complete_frac(next_seq_idx, allow_only_lr_suitable=True)
+            res = DatasetSeq(seq_idx=next_seq_idx, seq_tag=seq_tag, features=features, complete_frac=complete_frac)
             cache.append(res)
             next_seq_idx += 1
         return True
returnn/datasets/meta.py
@@ -554,6 +554,12 @@ class MetaDataset(CachedDataset2):
         """
         return self.seq_list_ordered[self.default_dataset_key][sorted_seq_idx]
 
+    def get_complete_frac(self, sorted_seq_idx: int, **kwargs) -> Optional[float]:
+        """
+        :param sorted_seq_idx:
+        """
+        return self.datasets[self.default_dataset_key].get_complete_frac(sorted_seq_idx, **kwargs)
+
     def get_data_keys(self) -> List[str]:
         """data keys"""
         return sorted(self.data_keys)
returnn/datasets/multi_proc.py
@@ -75,6 +75,7 @@ class MultiProcDataset(CachedDataset2):
         self._seq_order_proc_parent_conn = None  # type: Optional[mpConnection]
         self._seq_order_proc = None  # type: Optional[mp.Process]
         self._worker_procs = None  # type: Optional[List[mp.Process]]
+        self._cur_max_complete_frac: Optional[float] = None
 
         if _meta_info_cache:
             # This allows to skip the lazy init in self.initialize().
@@ -246,7 +247,8 @@ class MultiProcDataset(CachedDataset2):
             dataset.load_seqs(next_seq_idx, next_seq_idx + 1)
             seq_tag = dataset.get_tag(next_seq_idx)
             features = {data_key: dataset.get_data(next_seq_idx, data_key) for data_key in dataset.get_data_keys()}
-
+            complete_frac = dataset.get_complete_frac(next_seq_idx, allow_only_lr_suitable=True)
+            res = DatasetSeq(seq_idx=next_seq_idx, seq_tag=seq_tag, features=features, complete_frac=complete_frac)
             cache.append(res)
             next_seq_idx += 1
         return True
@@ -403,6 +405,7 @@ class MultiProcDataset(CachedDataset2):
             return True
 
         self._lazy_init()
+        self._cur_max_complete_frac = 0.0
 
         if self._sharding_method == "dedicated":
             for worker_conn in self._worker_parent_conns:
@@ -441,6 +444,12 @@ class MultiProcDataset(CachedDataset2):
         if data is None:
             return None
         assert isinstance(data, DatasetSeq)
+        # The complete_frac values from the subprocesses are not necessarily monotonic
+        # due to rounding errors in the sharding and such.
+        # We therefore fix them up here. This is valid due to monotonicity of `seq_idx`.
+        max_comp_frac = max(data.complete_frac, self._cur_max_complete_frac)
+        data.complete_frac = max_comp_frac
+        self._cur_max_complete_frac = max_comp_frac
         data.seq_idx = seq_idx
         return data
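The fix-up above is a running maximum: because `seq_idx` is handed out in order, clamping each incoming `complete_frac` to the largest value seen so far keeps the reported fractions non-decreasing even when shard rounding makes individual workers report slightly out-of-order values. A stand-alone illustration of that clamp (not RETURNN code):

```python
from typing import Iterable, Iterator


def clamp_monotonic(fracs: Iterable[float]) -> Iterator[float]:
    cur_max = 0.0  # mirrors self._cur_max_complete_frac, reset per epoch
    for frac in fracs:
        cur_max = max(frac, cur_max)
        yield cur_max


# 0.2 arrives after 0.3 (e.g. from another shard) and gets lifted to 0.3:
assert list(clamp_monotonic([0.1, 0.3, 0.2, 0.4])) == [0.1, 0.3, 0.3, 0.4]
```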
returnn/datasets/numpy_dump.py
@@ -154,4 +154,4 @@ class NumpyDumpDataset(Dataset):
     def _add_cache_seq(self, seq_idx, features, targets):
         last_seq_idx = self._get_cache_last_seq_idx()
         assert seq_idx == last_seq_idx + 1
-        self.cached_seqs += [DatasetSeq(seq_idx, features, targets)]
+        self.cached_seqs += [DatasetSeq(seq_idx, features, targets=targets)]
returnn/datasets/postprocessing.py
@@ -5,6 +5,7 @@ Provides :class:`PostprocessingDataset`.
 from __future__ import annotations
 
 from itertools import islice
+import numpy
 from numpy.random import RandomState
 from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar
 
returnn/datasets/postprocessing.py
@@ -57,6 +58,14 @@ class PostprocessingDataset(CachedDataset2):
             },
         }
 
+    The postprocessor functions operate on ``TensorDict``s, which have entries for
+    all data keys in the underlying dataset.
+
+    There may also be additional "meta" entries in the tensor dicts, like ``complete_frac``
+    and ``seq_tag``.
+    These should be copied over in a manner that is reasonable for the use case at hand and
+    ensures forwards compatibility as well as is reasonably possible.
+
     The dataset itself does not support its own seq ordering and relies on the wrapped
     dataset for seq ordering instead. Specifying a ``seq_ordering`` other than ``default``
     results in an error.
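In practice this means a postprocessor should transform the data entries and pass the meta entries through untouched. A minimal sketch of a `map_seq`-style function doing that (the scaling transform is made up, and the exact hook signature is assumed from the dataset's documented options, so extra keyword arguments are absorbed defensively):

```python
from returnn.tensor.tensor_dict import TensorDict


def my_map_seq(tensor_dict: TensorDict, **_kwargs) -> TensorDict:
    data = tensor_dict.data["data"]
    # Transform only the actual data entries...
    data.raw_tensor = (data.raw_tensor * 0.9).astype(data.raw_tensor.dtype)
    # ...while "complete_frac" and "seq_tag" stay in tensor_dict.data untouched,
    # so progress reporting and LR scheduling downstream keep working.
    return tensor_dict
```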
returnn/datasets/postprocessing.py
@@ -155,7 +164,10 @@ class PostprocessingDataset(CachedDataset2):
         self._out_tensor_dict_template = self._in_tensor_dict_template.copy_template()
         self.labels = self._dataset.labels.copy()
         # update only after _out_tensor_dict_template has been created from _in_tensor_dict_template
-        self._in_tensor_dict_template.update(
+        self._in_tensor_dict_template.update(
+            {"complete_frac": {"dims": (), "dtype": "float32"}, "seq_tag": {"dims": (), "dtype": "string"}},
+            auto_convert=True,
+        )
         self.num_outputs = {
             k: (t.sparse_dim.size if t.sparse_dim else t.shape[-1] if len(t.shape) > 0 else 1, t.ndim)
             for k, t in self._out_tensor_dict_template.data.items()
returnn/datasets/postprocessing.py
@@ -222,6 +234,15 @@ class PostprocessingDataset(CachedDataset2):
         """:return: dtype of data entry `key`"""
         return self._out_tensor_dict_template.data[key].dtype
 
+    def get_total_num_seqs(self, *, fast=False):
+        """:return: total num seqs excluding partition_epoch"""
+        if self._map_seq_stream is not None:
+            raise util.OptionalNotImplementedError(
+                f"{self}: get_total_num_seqs not allowed when map_seq_stream is set."
+            )
+        assert self._dataset is not None
+        return self._dataset.get_total_num_seqs(fast=fast)
+
     def supports_sharding(self) -> bool:
         """:return: whether this dataset supports sharding"""
         assert self._dataset is not None
@@ -249,11 +270,12 @@ class PostprocessingDataset(CachedDataset2):
             assert loaded_seq_idx <= seq_idx, "_collect_single_seq must be done monotonically"
             if loaded_seq_idx != seq_idx:
                 continue
-
-
-            seq_idx=seq_idx,
-            seq_tag=str(tensor_dict["seq_tag"].raw_tensor),
+        complete_frac = (
+            float(tensor_dict.data["complete_frac"].raw_tensor) if "complete_frac" in tensor_dict.data else None
         )
+        seq_tag = str(tensor_dict.data["seq_tag"].raw_tensor) if "seq_tag" in tensor_dict.data else f"seq-{seq_idx}"
+        features = {k: t.raw_tensor for k, t in tensor_dict.data.items() if k not in ["complete_frac", "seq_tag"]}
+        seq = DatasetSeq(complete_frac=complete_frac, features=features, seq_idx=seq_idx, seq_tag=seq_tag)
         return seq
 
     def _build_mapping_iter(self) -> Iterator[TensorDict]:
@@ -262,8 +284,20 @@ class PostprocessingDataset(CachedDataset2):
         """
 
         def _validate_tensor_dict_iter(inner: Iterator[TensorDict]) -> Iterator[TensorDict]:
+            last_complete_frac = 0.0
             for t_dict in inner:
-                assert
+                assert isinstance(t_dict, TensorDict), (
+                    f"postprocessing mapper function must produce a {TensorDict.__name__}, "
+                    f"but got a {type(t_dict).__name__}"
+                )
+                if "complete_frac" in t_dict.data:  # sanity check complete_frac
+                    complete_frac = float(t_dict.data["complete_frac"].raw_tensor)
+                    assert 0.0 <= complete_frac <= 1.0, f"complete_frac must be in [0, 1], but got {complete_frac}"
+                    assert complete_frac >= last_complete_frac, (
+                        "complete_frac must be monotonically increasing, "
+                        f"but got {complete_frac} after {last_complete_frac}"
+                    )
+                    last_complete_frac = complete_frac
                 for data_key, out_t in self._out_tensor_dict_template.data.items():
                     in_t = t_dict.data[data_key]
                     assert (
@@ -294,8 +328,14 @@ class PostprocessingDataset(CachedDataset2):
         tensor_dict = self._in_tensor_dict_template.copy_template()
         for data_key in data_keys:
             tensor_dict.data[data_key].raw_tensor = self._dataset.get_data(seq_index, data_key)
-
-
+
+        complete_frac = self._dataset.get_complete_frac(seq_index, allow_only_lr_suitable=True)
+        comp_frac_raw_tensor = None
+        if complete_frac is not None:
+            comp_frac_raw_tensor = numpy.array(complete_frac, dtype=numpy.float32)
+            tensor_dict.data["complete_frac"].raw_tensor = comp_frac_raw_tensor
+        seq_tag_raw_tensor = str_to_numpy_array(self._dataset.get_tag(seq_index))
+        tensor_dict.data["seq_tag"].raw_tensor = seq_tag_raw_tensor
 
         if self._map_seq is not None:
             tensor_dict = self._map_seq(
@@ -305,10 +345,16 @@ class PostprocessingDataset(CachedDataset2):
                 tensor_dict, TensorDict
             ), f"map_seq must produce a {TensorDict.__name__}, but produced {type(tensor_dict).__name__}"
 
-            # Re-adding the
-            # add/drop any segments w/ the non-iterator postprocessing function.
+            # Re-adding the seq_tag/complete_frac here causes no harm in case they are dropped
+            # since we don't add/drop any segments w/ the non-iterator postprocessing function.
+            if "complete_frac" not in tensor_dict.data and comp_frac_raw_tensor is not None:
+                tensor_dict.data["complete_frac"] = Tensor(
+                    "complete_frac", dims=(), dtype="float32", raw_tensor=comp_frac_raw_tensor
+                )
             if "seq_tag" not in tensor_dict.data:
-                tensor_dict.data["seq_tag"]
+                tensor_dict.data["seq_tag"] = Tensor(
+                    "seq_tag", dims=(), dtype="string", raw_tensor=seq_tag_raw_tensor
+                )
 
         if self._seq_list_for_validation is not None:
             seq_tag = self._seq_list_for_validation[seq_index]
@@ -366,7 +412,12 @@ class LaplaceOrdering(Callable[[Iterator[TensorDict]], Iterator[TensorDict]]):
         seq_buffer = list(islice(iterator, self.num_seqs_per_bin))
         has_ended = False
         while True:
+            # Make sure to not reorder the monotonically increasing values for complete_frac
+            # so that the trainer can calculate the appropriate learning rates.
+            complete_frac_values = [tdict.data["complete_frac"].raw_tensor for tdict in seq_buffer]
             seq_buffer.sort(key=self._get_seq_len, reverse=is_down_phase)
+            for sorted_item, comp_frac in zip(seq_buffer, complete_frac_values):
+                sorted_item.data["complete_frac"].raw_tensor = comp_frac
 
             next_seq_buffer = []
 
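The pattern above (snapshot the monotonic key, sort the payload, re-pin the key by position) works on any list of records; in plain Python:

    items = [{"len": 3, "frac": 0.1}, {"len": 1, "frac": 0.2}, {"len": 2, "frac": 0.3}]
    fracs = [it["frac"] for it in items]  # snapshot in positional order
    items.sort(key=lambda it: it["len"])  # reorder payload by length
    for it, frac in zip(items, fracs):  # re-pin the monotonic key by position
        it["frac"] = frac
    assert [it["frac"] for it in items] == [0.1, 0.2, 0.3]  # still monotonic
    assert [it["len"] for it in items] == [1, 2, 3]  # payload sorted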
--- a/returnn/datasets/raw_wav.py
+++ b/returnn/datasets/raw_wav.py
@@ -83,7 +83,7 @@ class RawWavDataset(CachedDataset2):
         inputFeatures = inputFeatures.astype(np.float32)
         if outputFeatures is not None:
             outputFeatures = outputFeatures.astype(np.float32)
-        return DatasetSeq(seq_idx, inputFeatures, outputFeatures)
+        return DatasetSeq(seq_idx, inputFeatures, targets=outputFeatures)
 
     def _get_num_outputs(self, num_outputs):
         """
--- a/returnn/datasets/sprint.py
+++ b/returnn/datasets/sprint.py
@@ -504,7 +504,7 @@ class SprintDatasetBase(Dataset):
             assert seq_idx + 1 == self.next_seq_to_be_added
             self.cond.wait()
 
-        self.added_data += [DatasetSeq(seq_idx, features, targets, seq_tag=segment_name)]
+        self.added_data += [DatasetSeq(seq_idx, features, targets=targets, seq_tag=segment_name)]
         self.cond.notify_all()
         return seq_idx
 
@@ -588,7 +588,7 @@ class SprintDatasetBase(Dataset):
         """
         self._complete_frac = frac
 
-    def get_complete_frac(self, seq_idx):
+    def get_complete_frac(self, seq_idx, **kwargs):
         """
         :param int seq_idx:
         :rtype: float
--- a/returnn/datasets/stereo.py
+++ b/returnn/datasets/stereo.py
@@ -349,7 +349,7 @@ class StereoHdfDataset(StereoDataset):
         elif targets.shape[1] == 1:
             targets = np.reshape(targets.astype(np.int32), (targets.shape[0],))
 
-        return DatasetSeq(seq_idx, inputFeatures, targets)
+        return DatasetSeq(seq_idx, inputFeatures, targets=targets)
 
     @staticmethod
     def _normalizeVector(v, mean, variance):
@@ -438,4 +438,4 @@ class DatasetWithTimeContext(StereoHdfDataset):
         targets = None
         if "classes" in originalSeq.get_data_keys():
             targets = originalSeq.get_data("classes")
-        return DatasetSeq(seq_idx, inputFeatures, targets)
+        return DatasetSeq(seq_idx, inputFeatures, targets=targets)
--- a/returnn/frontend/_numpy_backend.py
+++ b/returnn/frontend/_numpy_backend.py
@@ -159,6 +159,35 @@ class NumpyBackend(Backend[numpy.ndarray]):
         res = numpy.array(res)
         return res
 
+    @staticmethod
+    def where(
+        cond: Tensor,
+        true_: Union[Tensor, rf.RawTensorTypes],
+        false_: Union[Tensor, rf.RawTensorTypes],
+        *,
+        allow_broadcast_all_sources: bool = False,
+    ) -> Tensor:
+        """where"""
+        if isinstance(true_, Tensor):
+            dtype = true_.dtype
+        elif isinstance(false_, Tensor):
+            dtype = false_.dtype
+        else:
+            dtype = None
+        true_ = rf.convert_to_tensor(true_, _backend=NumpyBackend, dtype=dtype)
+        false_ = rf.convert_to_tensor(false_, _backend=NumpyBackend, dtype=dtype)
+        out = Tensor.get_common_data(
+            [true_, false_, cond], allow_broadcast_all_sources=allow_broadcast_all_sources, name="where"
+        )
+        out.dtype = true_.dtype
+        out.sparse_dim = true_.sparse_dim or false_.sparse_dim
+        out.feature_dim = true_.feature_dim or false_.feature_dim
+        cond_bc_raw = cond.copy_compatible_to_dims_raw(out.dims)
+        true_bc_raw = true_.copy_compatible_to_dims_raw(out.dims)
+        false_bc_raw = false_.copy_compatible_to_dims_raw(out.dims)
+        out.raw_tensor = numpy.where(cond_bc_raw, true_bc_raw, false_bc_raw)
+        return out
+
     @staticmethod
     def range_over_dim(dim: Dim, *, dtype: Optional[str] = None, device: Optional[str] = None) -> Tensor[numpy.ndarray]:
         """
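With this backend method, ``rf.where`` becomes usable on numpy-backed tensors. A usage sketch (values and dims invented; the ``Tensor``/``Dim`` construction details are assumptions, not taken from this diff):

    import numpy
    import returnn.frontend as rf
    from returnn.tensor import Tensor, Dim

    time_dim = Dim(4, name="time")
    cond = Tensor("cond", dims=[time_dim], dtype="bool",
                  raw_tensor=numpy.array([True, False, True, False]))
    x = Tensor("x", dims=[time_dim], dtype="float32",
               raw_tensor=numpy.arange(4, dtype=numpy.float32))
    y = rf.where(cond, x, 0.0)  # dispatches to NumpyBackend.where
    print(y.raw_tensor)  # [0. 0. 2. 0.]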
--- a/returnn/tensor/_dim_extra.py
+++ b/returnn/tensor/_dim_extra.py
@@ -422,14 +422,10 @@ class _DimMixin:
         :param func: operates inplace
         """
         dyn_size_ext = self.dyn_size_ext.copy() if self.dyn_size_ext is not None else None
-        dyn_size_ext_max = self._dyn_size_max_value if self._dyn_size_max_value is not None else None
         self.reset_raw(only_self=True)
         if dyn_size_ext is not None:
             func(dyn_size_ext)
-        if dyn_size_ext_max is not None:
-            func(dyn_size_ext_max)
         self.dyn_size_ext = dyn_size_ext
-        self._dyn_size_max_value = dyn_size_ext_max
 
     def _can_use_in_ctx(self, ctx):
         """
--- a/returnn/tensor/_tensor_extra.py
+++ b/returnn/tensor/_tensor_extra.py
@@ -2966,7 +2966,7 @@ class _TensorMixin(_TensorMixinBase):
         mask = None
         for axis in axes:
             mask_ = self._dims[axis].get_mask(dim_order=self.dims, device=self.device)
-            mask = rf.
+            mask = rf.combine_bc(mask, "logical_and", mask_) if mask is not None else mask_
         assert isinstance(mask, _t.Tensor)
         res = rf.where(mask, self, mask_value)
         if use_padding_info:
--- a/returnn/tensor/tensor_dict.py
+++ b/returnn/tensor/tensor_dict.py
@@ -9,7 +9,7 @@ We also might have model_outputs in the user config.
 """
 
 from __future__ import annotations
-from typing import Optional, Union, Any, Type, Dict, Sequence
+from typing import Optional, Union, Any, Type, Dict, Sequence, List
 from .tensor import Tensor
 from .dim import Dim
 
@@ -160,6 +160,20 @@ class TensorDict:
             assert dim.size == raw_tensor_dict[key_]
             visited_dims.add(dim)
 
+    def all_dims(self) -> List[Dim]:
+        """
+        :return: list of dims
+        """
+        visited_dims = set()
+        out = []
+        for key, value in self.data.items():
+            for dim in value.dims:
+                if dim in visited_dims:
+                    continue
+                out.append(dim)
+                visited_dims.add(dim)
+        return out
+
 
 def _convert_to_tensor(opts: _TensorT, *, name: Optional[str] = None) -> Tensor:
     """
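A quick illustration of the new helper (a sketch; the ``TensorDict.update``/``auto_convert=True`` usage mirrors what appears elsewhere in this diff, but the dims and constructor calls are assumptions):

    from returnn.tensor import Dim, TensorDict

    batch_dim = Dim(None, name="batch")
    time_dim = Dim(None, name="time")
    td = TensorDict()
    td.update(
        {
            "data": {"dims": (batch_dim, time_dim), "dtype": "float32"},
            "classes": {"dims": (batch_dim, time_dim), "dtype": "int32"},
        },
        auto_convert=True,
    )
    assert td.all_dims() == [batch_dim, time_dim]  # deduplicated, in first-seen order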
--- a/returnn/torch/data/pipeline.py
+++ b/returnn/torch/data/pipeline.py
@@ -21,6 +21,7 @@ other PyTorch datasets more directly, including also HuggingFace datasets.
 
 from __future__ import annotations
 import bisect
+import itertools
 from typing import Optional, Any, Sequence, Tuple, Union, List, Dict, Callable
 import sys
 from copy import deepcopy
@@ -65,6 +66,9 @@ def collate_batch(batch: List[Dict[str, numpy.ndarray]]) -> Dict[str, Union[torc
         if key in ("num_seqs", "epoch"):
             res[key] = batch[0][key]  # it should always be the same
             continue
+        elif key == "complete_frac":
+            res[key] = max(sample[key] for sample in batch)
+            continue
         ls = [create_tensor(sample[key]) for sample in batch]
         if not ls:
             raise ValueError("batch is empty?")
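Taking the maximum is the natural collate rule for a progress fraction: a batch has progressed as far as its furthest sequence. For instance:

    batch = [{"complete_frac": 0.42}, {"complete_frac": 0.45}, {"complete_frac": 0.44}]
    assert max(sample["complete_frac"] for sample in batch) == 0.45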
@@ -122,7 +126,7 @@ class ChunkingIterDataPipe(torch.utils.data.IterDataPipe):
 
         if not chunking_data_keys:
             chunking_data_keys = list(data_dict.keys())  # use all if not configured separately
-            chunking_data_key_black_list = ["seq_tag", "seq_idx", "num_seqs", "epoch"]
+            chunking_data_key_black_list = ["seq_tag", "seq_idx", "num_seqs", "epoch", "complete_frac"]
             for key in chunking_data_key_black_list:
                 if key in chunking_data_keys:
                     chunking_data_keys.remove(key)
@@ -269,8 +273,15 @@ class BatchingIterDataPipe(torch.utils.data.IterDataPipe):
         epoch = int(data_dict["epoch"])
         seq_idx = int(data_dict["seq_idx"])
         num_seqs = int(data_dict["num_seqs"])  # >=1 if known, otherwise -1
-
-
+        complete_frac = float(data_dict["complete_frac"])  # >= 0 if known, otherwise -1
+        epoch_continuous = (
+            epoch - 1 + complete_frac
+            if complete_frac >= 0.0
+            else (epoch - 1 + (seq_idx + 1) / num_seqs)
+            if num_seqs > 0
+            else None
+        )
+        return {"epoch": epoch, "epoch_continuous": epoch_continuous, "seq_idx": seq_idx, **get_fwd_compat_kwargs()}
 
     def __iter__(self):
         """
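The fallback chain above as a standalone sketch with worked values: with ``epoch=2`` and ``complete_frac=0.25`` the continuous epoch is ``2 - 1 + 0.25 = 1.25``; with ``complete_frac`` unknown (``-1``) it falls back to the seq-index estimate, and to ``None`` if ``num_seqs`` is also unknown:

    def epoch_continuous(epoch, complete_frac, seq_idx, num_seqs):
        # Same fallback chain as in BatchingIterDataPipe above.
        if complete_frac >= 0.0:
            return epoch - 1 + complete_frac
        if num_seqs > 0:
            return epoch - 1 + (seq_idx + 1) / num_seqs
        return None

    assert epoch_continuous(2, 0.25, 0, -1) == 1.25
    assert epoch_continuous(2, -1.0, 49, 100) == 1.5
    assert epoch_continuous(2, -1.0, 49, -1) is None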
@@ -455,6 +466,123 @@ class LenFilterDataPipe(torch.utils.data.IterDataPipe):
         raise Exception(f"{self.__class__.__name__}.__getitem__ not supported")
 
 
+class ShufflingDataPipe(torch.utils.data.IterDataPipe):
+    """
+    Data pipe that is similar to ``torch.utils.data.datapipes.iter.Shuffler``,
+    but it will keep certain data keys of the batches in order while shuffling the rest.
+
+    Used for e.g. ``complete_frac`` and ``seq_idx``.
+    """
+
+    def __init__(
+        self,
+        dataset: torch.utils.data.IterableDataset,
+        *,
+        buffer_size: int,
+        monotonic_data_keys: Sequence[str],
+        seed: Optional[int] = None,
+    ):
+        """
+        :param dataset: batches dataset to shuffle
+        :param buffer_size: buffer size for shuffling
+        :param monotonic_data_keys: data keys that will be excluded from shuffling/keep their order
+        :param seed: random seed
+        """
+        super().__init__()
+
+        self._dataset = dataset
+        self._buffer: List[List[Dict[str, Any]]] = []
+        self._next_buffer: List[List[Dict[str, Any]]] = []
+        assert buffer_size > 0
+        self._buffer_size = buffer_size
+        self._monotonic_data_keys = monotonic_data_keys
+        self._rng = numpy.random.RandomState()
+        self._seed = seed
+
+    def __iter__(self):
+        # The implementation is very similar to the PostprocessingDataset's combinator LaplaceOrdering.
+
+        data_iter = iter(self._dataset)
+
+        self._buffer.extend(itertools.islice(data_iter, self._buffer_size))
+        has_ended = False
+
+        while True:
+            # Make sure to not reorder the monotonic values from self._monotonic_data_keys.
+            # These can contain things like complete_frac, which should be kept in order.
+            ordered_data = {
+                key: [data_dict[key] for batch in self._buffer for data_dict in batch]
+                for key in self._monotonic_data_keys
+            }
+            self._rng.shuffle(self._buffer)
+            for key in self._monotonic_data_keys:
+                data_dicts = [data_dict for batch in self._buffer for data_dict in batch]
+                assert len(data_dicts) == len(ordered_data[key])
+                for ordered_value, data_dict in zip(ordered_data[key], data_dicts):
+                    data_dict[key] = ordered_value
+
+            for item in self._buffer:
+                yield item
+
+                try:
+                    if not has_ended:
+                        self._next_buffer.append(next(data_iter))
+                except StopIteration:
+                    has_ended = True
+
+            if len(self._buffer) < self._buffer_size:
+                assert has_ended and not self._next_buffer
+                break
+
+            self._buffer.clear()
+            self._buffer, self._next_buffer = self._next_buffer, self._buffer
+
+    def set_seed(self, seed: int) -> ShufflingDataPipe:
+        """
+        Sets the seed for the next invocation of ``__iter__``, for compatibility with
+        ``torch.utils.data.graph_settings.apply_random_seed``.
+        """
+        self._seed = seed % (2**32)  # seed must be within [0, 2**32) for seeding RandomState
+        return self
+
+    def reset(self):
+        """resets the internal state of the data pipe"""
+        if self._seed is None:
+            self._seed = int(torch.empty((), dtype=torch.int32).random_().item())
+        self._rng.seed(self._seed)
+        self._seed = None
+
+    def __getstate__(self):
+        state = (
+            self._dataset,
+            self._buffer,
+            self._next_buffer,
+            self._buffer_size,
+            self._monotonic_data_keys,
+            self._rng.get_state(),
+            self._seed,
+        )
+        if torch.utils.data.IterDataPipe.getstate_hook is not None:
+            return torch.utils.data.IterDataPipe.getstate_hook(state)
+        return state
+
+    def __setstate__(self, state):
+        (
+            self._dataset,
+            self._buffer,
+            self._next_buffer,
+            self._buffer_size,
+            self._monotonic_data_keys,
+            rng_state,
+            self._seed,
+        ) = state
+        self._rng = numpy.random.RandomState()
+        self._rng.set_state(rng_state)
+
+    def __getitem__(self, index):
+        raise Exception(f"{self.__class__.__name__}.__getitem__ not supported")
+
+
 def create_data_loader_from_batches(
     batches_dataset: torch.utils.data.Dataset, loader_opts: Optional[Dict[str, Any]] = None
 ) -> torch.utils.data.DataLoader: