sleap-nn 0.1.0a2.tar.gz → 0.1.0a4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (316)
  1. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.github/workflows/docs.yml +1 -1
  2. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/PKG-INFO +13 -1
  3. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/cli.md +103 -0
  4. sleap_nn-0.1.0a4/docs/export.md +431 -0
  5. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/index.md +2 -0
  6. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/inference.md +39 -0
  7. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/mkdocs.yml +2 -1
  8. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/pyproject.toml +4 -0
  9. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/__init__.py +1 -1
  10. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/architectures/convnext.py +5 -0
  11. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/architectures/encoder_decoder.py +25 -6
  12. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/architectures/swint.py +8 -0
  13. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/cli.py +168 -39
  14. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/evaluation.py +8 -0
  15. sleap_nn-0.1.0a4/sleap_nn/export/__init__.py +21 -0
  16. sleap_nn-0.1.0a4/sleap_nn/export/cli.py +1778 -0
  17. sleap_nn-0.1.0a4/sleap_nn/export/exporters/__init__.py +51 -0
  18. sleap_nn-0.1.0a4/sleap_nn/export/exporters/onnx_exporter.py +80 -0
  19. sleap_nn-0.1.0a4/sleap_nn/export/exporters/tensorrt_exporter.py +291 -0
  20. sleap_nn-0.1.0a4/sleap_nn/export/metadata.py +225 -0
  21. sleap_nn-0.1.0a4/sleap_nn/export/predictors/__init__.py +63 -0
  22. sleap_nn-0.1.0a4/sleap_nn/export/predictors/base.py +22 -0
  23. sleap_nn-0.1.0a4/sleap_nn/export/predictors/onnx.py +154 -0
  24. sleap_nn-0.1.0a4/sleap_nn/export/predictors/tensorrt.py +312 -0
  25. sleap_nn-0.1.0a4/sleap_nn/export/utils.py +307 -0
  26. sleap_nn-0.1.0a4/sleap_nn/export/wrappers/__init__.py +25 -0
  27. sleap_nn-0.1.0a4/sleap_nn/export/wrappers/base.py +96 -0
  28. sleap_nn-0.1.0a4/sleap_nn/export/wrappers/bottomup.py +243 -0
  29. sleap_nn-0.1.0a4/sleap_nn/export/wrappers/bottomup_multiclass.py +195 -0
  30. sleap_nn-0.1.0a4/sleap_nn/export/wrappers/centered_instance.py +56 -0
  31. sleap_nn-0.1.0a4/sleap_nn/export/wrappers/centroid.py +58 -0
  32. sleap_nn-0.1.0a4/sleap_nn/export/wrappers/single_instance.py +83 -0
  33. sleap_nn-0.1.0a4/sleap_nn/export/wrappers/topdown.py +180 -0
  34. sleap_nn-0.1.0a4/sleap_nn/export/wrappers/topdown_multiclass.py +304 -0
  35. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/peak_finding.py +47 -17
  36. sleap_nn-0.1.0a4/sleap_nn/inference/postprocessing.py +284 -0
  37. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/predictors.py +213 -106
  38. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/predict.py +35 -7
  39. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/train.py +64 -0
  40. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/training/callbacks.py +69 -22
  41. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/training/lightning_modules.py +332 -30
  42. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/training/model_trainer.py +67 -67
  43. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn.egg-info/PKG-INFO +13 -1
  44. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn.egg-info/SOURCES.txt +34 -0
  45. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn.egg-info/requires.txt +15 -0
  46. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/conftest.py +1 -0
  47. sleap_nn-0.1.0a4/tests/export/__init__.py +1 -0
  48. sleap_nn-0.1.0a4/tests/export/conftest.py +420 -0
  49. sleap_nn-0.1.0a4/tests/export/test_cli.py +349 -0
  50. sleap_nn-0.1.0a4/tests/export/test_gpu.py +221 -0
  51. sleap_nn-0.1.0a4/tests/export/test_metadata.py +490 -0
  52. sleap_nn-0.1.0a4/tests/export/test_onnx_export.py +200 -0
  53. sleap_nn-0.1.0a4/tests/export/test_onnx_predictor.py +285 -0
  54. sleap_nn-0.1.0a4/tests/export/test_predictor_factory.py +148 -0
  55. sleap_nn-0.1.0a4/tests/export/test_utils.py +804 -0
  56. sleap_nn-0.1.0a4/tests/export/test_wrappers.py +775 -0
  57. sleap_nn-0.1.0a4/tests/fixtures/predictors.py +86 -0
  58. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/inference/test_peak_finding.py +79 -0
  59. sleap_nn-0.1.0a4/tests/inference/test_postprocessing.py +450 -0
  60. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/test_cli.py +7 -7
  61. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/test_predict.py +36 -90
  62. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/test_train.py +69 -57
  63. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/training/test_callbacks.py +56 -17
  64. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/training/test_lightning_modules.py +27 -0
  65. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/training/test_model_trainer.py +43 -17
  66. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/uv.lock +801 -84
  67. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.claude/commands/coverage.md +0 -0
  68. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.claude/commands/lint.md +0 -0
  69. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.claude/commands/pr-description.md +0 -0
  70. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.claude/skills/investigation/SKILL.md +0 -0
  71. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.dockerignore +0 -0
  72. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.github/workflows/build.yml +0 -0
  73. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.github/workflows/ci.yml +0 -0
  74. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.github/workflows/codespell.yml +0 -0
  75. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.gitignore +0 -0
  76. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/CLAUDE.md +0 -0
  77. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/CONTRIBUTING.md +0 -0
  78. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/LICENSE +0 -0
  79. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/README.md +0 -0
  80. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/codecov.yml +0 -0
  81. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/assets/favicon.ico +0 -0
  82. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/assets/sleap-logo.png +0 -0
  83. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/colab_notebooks/Training_with_sleap_nn_on_colab.ipynb +0 -0
  84. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/colab_notebooks/index.md +0 -0
  85. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/config.md +0 -0
  86. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/core_components.md +0 -0
  87. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/example_notebooks.md +0 -0
  88. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/installation.md +0 -0
  89. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/models.md +0 -0
  90. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_bottomup_convnext.yaml +0 -0
  91. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_bottomup_unet_large_rf.yaml +0 -0
  92. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_bottomup_unet_medium_rf.yaml +0 -0
  93. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_centroid_swint.yaml +0 -0
  94. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_centroid_unet.yaml +0 -0
  95. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_multi_class_bottomup_unet.yaml +0 -0
  96. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_single_instance_unet_large_rf.yaml +0 -0
  97. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_single_instance_unet_medium_rf.yaml +0 -0
  98. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_topdown_centered_instance_unet_large_rf.yaml +0 -0
  99. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_topdown_centered_instance_unet_medium_rf.yaml +0 -0
  100. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/sample_configs/config_topdown_multi_class_centered_instance_unet.yaml +0 -0
  101. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/step_by_step_tutorial.md +0 -0
  102. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/training.md +0 -0
  103. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/example_notebooks/README.md +0 -0
  104. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/example_notebooks/augmentation_guide.py +0 -0
  105. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/example_notebooks/receptive_field_guide.py +0 -0
  106. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/example_notebooks/training_demo.py +0 -0
  107. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/scripts/cov_summary.py +0 -0
  108. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/scripts/gen_changelog.py +0 -0
  109. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/scripts/gen_ref_pages.py +0 -0
  110. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/setup.cfg +0 -0
  111. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/.DS_Store +0 -0
  112. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/architectures/__init__.py +0 -0
  113. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/architectures/common.py +0 -0
  114. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/architectures/heads.py +0 -0
  115. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/architectures/model.py +0 -0
  116. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/architectures/unet.py +0 -0
  117. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/architectures/utils.py +0 -0
  118. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/config/__init__.py +0 -0
  119. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/config/data_config.py +0 -0
  120. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/config/get_config.py +0 -0
  121. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/config/model_config.py +0 -0
  122. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/config/trainer_config.py +0 -0
  123. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/config/training_job_config.py +0 -0
  124. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/config/utils.py +0 -0
  125. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/__init__.py +0 -0
  126. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/augmentation.py +0 -0
  127. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/confidence_maps.py +0 -0
  128. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/custom_datasets.py +0 -0
  129. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/edge_maps.py +0 -0
  130. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/identity.py +0 -0
  131. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/instance_centroids.py +0 -0
  132. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/instance_cropping.py +0 -0
  133. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/normalization.py +0 -0
  134. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/providers.py +0 -0
  135. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/resizing.py +0 -0
  136. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/data/utils.py +0 -0
  137. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/__init__.py +0 -0
  138. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/bottomup.py +0 -0
  139. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/identity.py +0 -0
  140. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/paf_grouping.py +0 -0
  141. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/provenance.py +0 -0
  142. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/single_instance.py +0 -0
  143. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/topdown.py +0 -0
  144. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/inference/utils.py +0 -0
  145. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/legacy_models.py +0 -0
  146. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/system_info.py +0 -0
  147. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/tracking/__init__.py +0 -0
  148. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/tracking/candidates/__init__.py +0 -0
  149. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/tracking/candidates/fixed_window.py +0 -0
  150. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/tracking/candidates/local_queues.py +0 -0
  151. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/tracking/track_instance.py +0 -0
  152. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/tracking/tracker.py +0 -0
  153. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/tracking/utils.py +0 -0
  154. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/training/__init__.py +0 -0
  155. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/training/losses.py +0 -0
  156. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/training/utils.py +0 -0
  157. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn.egg-info/dependency_links.txt +0 -0
  158. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn.egg-info/entry_points.txt +0 -0
  159. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn.egg-info/top_level.txt +0 -0
  160. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/__init__.py +0 -0
  161. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/architectures/test_architecture_utils.py +0 -0
  162. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/architectures/test_common.py +0 -0
  163. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/architectures/test_convnext.py +0 -0
  164. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/architectures/test_encoder_decoder.py +0 -0
  165. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/architectures/test_heads.py +0 -0
  166. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/architectures/test_model.py +0 -0
  167. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/architectures/test_swint.py +0 -0
  168. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/architectures/test_unet.py +0 -0
  169. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/datasets/centered_pair_small.mp4 +0 -0
  170. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/datasets/minimal_instance.pkg.slp +0 -0
  171. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/datasets/small_robot.mp4 +0 -0
  172. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/datasets/small_robot_minimal.slp +0 -0
  173. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/inference/minimal_bboxes.pt +0 -0
  174. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/inference/minimal_cms.pt +0 -0
  175. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/get_dummy_activations.py +0 -0
  176. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/min_tracks_2node.UNet.bottomup_multiclass/best_model.h5 +0 -0
  177. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/min_tracks_2node.UNet.bottomup_multiclass/dummy_activations.h5 +0 -0
  178. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/min_tracks_2node.UNet.bottomup_multiclass/initial_config.json +0 -0
  179. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/min_tracks_2node.UNet.bottomup_multiclass/training_config.json +0 -0
  180. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/min_tracks_2node.UNet.topdown_multiclass/best_model.h5 +0 -0
  181. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/min_tracks_2node.UNet.topdown_multiclass/dummy_activations.h5 +0 -0
  182. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/min_tracks_2node.UNet.topdown_multiclass/initial_config.json +0 -0
  183. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/min_tracks_2node.UNet.topdown_multiclass/training_config.json +0 -0
  184. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/best_model.h5 +0 -0
  185. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/dummy_activations.h5 +0 -0
  186. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/initial_config.json +0 -0
  187. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/labels_gt.train.slp +0 -0
  188. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/labels_gt.val.slp +0 -0
  189. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/labels_pr.train.slp +0 -0
  190. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/labels_pr.val.slp +0 -0
  191. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/metrics.train.npz +0 -0
  192. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/metrics.val.npz +0 -0
  193. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/training_config.json +0 -0
  194. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/training_log.csv +0 -0
  195. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/best_model.h5 +0 -0
  196. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/dummy_activations.h5 +0 -0
  197. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/initial_config.json +0 -0
  198. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/labels_gt.train.slp +0 -0
  199. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/labels_gt.val.slp +0 -0
  200. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/labels_pr.train.slp +0 -0
  201. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/labels_pr.val.slp +0 -0
  202. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/metrics.train.npz +0 -0
  203. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/metrics.val.npz +0 -0
  204. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/training_config.json +0 -0
  205. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/training_log.csv +0 -0
  206. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/best_model.h5 +0 -0
  207. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/dummy_activations.h5 +0 -0
  208. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/initial_config.json +0 -0
  209. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/labels_gt.train.slp +0 -0
  210. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/labels_gt.val.slp +0 -0
  211. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/labels_pr.train.slp +0 -0
  212. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/labels_pr.val.slp +0 -0
  213. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/metrics.train.npz +0 -0
  214. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/metrics.val.npz +0 -0
  215. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/training_config.json +0 -0
  216. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_instance.UNet.centroid/training_log.csv +0 -0
  217. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/best_model.h5 +0 -0
  218. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/dummy_activations.h5 +0 -0
  219. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/initial_config.json +0 -0
  220. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/labels_gt.train.slp +0 -0
  221. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/labels_gt.val.slp +0 -0
  222. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/training_config.json +0 -0
  223. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/training_log.csv +0 -0
  224. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_sleap_json_configs/bottomup_multiclass_training_config.json +0 -0
  225. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_sleap_json_configs/bottomup_training_config.json +0 -0
  226. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_sleap_json_configs/centered_instance_training_config.json +0 -0
  227. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_sleap_json_configs/centered_instance_with_scaling_training_config.json +0 -0
  228. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_sleap_json_configs/centroid_training_config.json +0 -0
  229. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_sleap_json_configs/single_instance_training_config.json +0 -0
  230. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/legacy_sleap_json_configs/topdown_training_config.json +0 -0
  231. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_bottomup/best.ckpt +0 -0
  232. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_bottomup/initial_config.yaml +0 -0
  233. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_bottomup/labels_train_gt_0.slp +0 -0
  234. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_bottomup/labels_val_gt_0.slp +0 -0
  235. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_bottomup/training_config.yaml +0 -0
  236. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_bottomup/training_log.csv +0 -0
  237. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centered_instance/best.ckpt +0 -0
  238. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centered_instance/initial_config.yaml +0 -0
  239. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centered_instance/labels_train_gt_0.slp +0 -0
  240. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centered_instance/labels_val_gt_0.slp +0 -0
  241. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centered_instance/training_config.yaml +0 -0
  242. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centered_instance/training_log.csv +0 -0
  243. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centroid/best.ckpt +0 -0
  244. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centroid/initial_config.yaml +0 -0
  245. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centroid/labels_train_gt_0.slp +0 -0
  246. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centroid/labels_val_gt_0.slp +0 -0
  247. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centroid/training_config.yaml +0 -0
  248. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_centroid/training_log.csv +0 -0
  249. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/best.ckpt +0 -0
  250. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/initial_config.yaml +0 -0
  251. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/labels_train_gt_0.slp +0 -0
  252. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/labels_val_gt_0.slp +0 -0
  253. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/training_config.yaml +0 -0
  254. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/training_log.csv +0 -0
  255. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/best.ckpt +0 -0
  256. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/initial_config.yaml +0 -0
  257. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/labels_train_gt_0.slp +0 -0
  258. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/labels_val_gt_0.slp +0 -0
  259. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/training_config.yaml +0 -0
  260. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/training_log.csv +0 -0
  261. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_single_instance/best.ckpt +0 -0
  262. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_single_instance/initial_config.yaml +0 -0
  263. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_single_instance/labels_train_gt_0.slp +0 -0
  264. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_single_instance/labels_val_gt_0.slp +0 -0
  265. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_single_instance/training_config.yaml +0 -0
  266. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/minimal_instance_single_instance/training_log.csv +0 -0
  267. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/best.ckpt +0 -0
  268. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/initial_config.yaml +0 -0
  269. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/labels_train_gt_0.slp +0 -0
  270. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/labels_val_gt_0.slp +0 -0
  271. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/pred_test.slp +0 -0
  272. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/pred_train_0.slp +0 -0
  273. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/pred_val_0.slp +0 -0
  274. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/test_pred_metrics.npz +0 -0
  275. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/train_0_pred_metrics.npz +0 -0
  276. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/training_config.yaml +0 -0
  277. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/training_log.csv +0 -0
  278. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/assets/model_ckpts/single_instance_with_metrics/val_0_pred_metrics.npz +0 -0
  279. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/config/test_config_utils.py +0 -0
  280. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/config/test_data_config.py +0 -0
  281. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/config/test_model_config.py +0 -0
  282. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/config/test_trainer_config.py +0 -0
  283. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/config/test_training_job_config.py +0 -0
  284. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_augmentation.py +0 -0
  285. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_confmaps.py +0 -0
  286. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_custom_datasets.py +0 -0
  287. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_edge_maps.py +0 -0
  288. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_identity.py +0 -0
  289. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_instance_centroids.py +0 -0
  290. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_instance_cropping.py +0 -0
  291. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_normalization.py +0 -0
  292. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_providers.py +0 -0
  293. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_resizing.py +0 -0
  294. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/data/test_utils.py +0 -0
  295. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/fixtures/__init__.py +0 -0
  296. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/fixtures/datasets.py +0 -0
  297. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/fixtures/inference.py +0 -0
  298. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/fixtures/legacy_models.py +0 -0
  299. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/fixtures/legacy_sleap_json_configs.py +0 -0
  300. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/fixtures/model_ckpts.py +0 -0
  301. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/inference/__init__.py +0 -0
  302. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/inference/test_bottomup.py +0 -0
  303. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/inference/test_paf_grouping.py +0 -0
  304. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/inference/test_predictors.py +0 -0
  305. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/inference/test_provenance.py +0 -0
  306. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/inference/test_single_instance.py +0 -0
  307. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/inference/test_topdown.py +0 -0
  308. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/inference/test_utils.py +0 -0
  309. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/test_evaluation.py +0 -0
  310. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/test_legacy_models.py +0 -0
  311. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/test_system_info.py +0 -0
  312. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/test_version.py +0 -0
  313. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/tracking/candidates/test_fixed_window.py +0 -0
  314. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/tracking/candidates/test_local_queues.py +0 -0
  315. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/tracking/test_tracker.py +0 -0
  316. {sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/tests/training/test_training_utils.py +0 -0

{sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/.github/workflows/docs.yml
@@ -57,7 +57,7 @@ jobs:
  env:
    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  run: |
- uv run mike deploy --allow-empty --push "${{ github.event.release.tag_name }}"
+ uv run mike deploy --update-aliases --allow-empty --push "${{ github.event.release.tag_name }}" prerelease

  - name: Build and upload docs (dev)
  if: ${{ github.event_name == 'push' }}

{sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sleap-nn
- Version: 0.1.0a2
+ Version: 0.1.0a4
  Summary: Neural network backend for training and inference for animal pose estimation.
  Author-email: Divya Seshadri Murali <dimurali@salk.edu>, Elizabeth Berrigan <eberrigan@salk.edu>, Vincent Tu <vitu@ucsd.edu>, Liezl Maree <lmaree@salk.edu>, David Samy <davidasamy@gmail.com>, Talmo Pereira <talmo@salk.edu>
  License: BSD-3-Clause
@@ -32,6 +32,7 @@ Requires-Dist: hydra-core
  Requires-Dist: jupyter
  Requires-Dist: jupyterlab
  Requires-Dist: pyzmq
+ Requires-Dist: rich-click>=1.9.5
  Provides-Extra: torch
  Requires-Dist: torch; extra == "torch"
  Requires-Dist: torchvision>=0.20.0; extra == "torch"
@@ -47,6 +48,17 @@ Requires-Dist: torchvision>=0.20.0; extra == "torch-cuda128"
  Provides-Extra: torch-cuda130
  Requires-Dist: torch; extra == "torch-cuda130"
  Requires-Dist: torchvision>=0.20.0; extra == "torch-cuda130"
+ Provides-Extra: export
+ Requires-Dist: onnx>=1.15.0; extra == "export"
+ Requires-Dist: onnxruntime>=1.16.0; extra == "export"
+ Requires-Dist: onnxscript>=0.1.0; extra == "export"
+ Provides-Extra: export-gpu
+ Requires-Dist: onnx>=1.15.0; extra == "export-gpu"
+ Requires-Dist: onnxruntime-gpu>=1.16.0; extra == "export-gpu"
+ Requires-Dist: onnxscript>=0.1.0; extra == "export-gpu"
+ Provides-Extra: tensorrt
+ Requires-Dist: tensorrt>=10.0.0; extra == "tensorrt"
+ Requires-Dist: torch-tensorrt>=2.0.0; extra == "tensorrt"
  Dynamic: license-file

  # sleap-nn

{sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/cli.md
@@ -9,6 +9,8 @@ This page provides a quick reference for all `sleap-nn` command-line interface (
  | [`sleap-nn train`](#sleap-nn-train) | Train pose estimation models | [Training Guide](training.md) |
  | [`sleap-nn track`](#sleap-nn-track) | Run inference and/or tracking | [Inference Guide](inference.md) |
  | [`sleap-nn eval`](#sleap-nn-eval) | Evaluate predictions against ground truth | [Inference Guide](inference.md#evaluation-metrics) |
+ | [`sleap-nn export`](#sleap-nn-export) | Export models to ONNX/TensorRT | [Export Guide](export.md) |
+ | [`sleap-nn predict`](#sleap-nn-predict) | Run inference on exported models | [Export Guide](export.md#sleap-nn-predict) |
  | [`sleap-nn system`](#sleap-nn-system) | Display system info and GPU diagnostics | - |

  ## Global Options
@@ -170,6 +172,14 @@ sleap-nn track -i labels.slp -t
  | `--integral_refinement` | Refinement method (`integral` or `None`) | `integral` |
  | `--integral_patch_size` | Patch size for integral refinement | `5` |

+ #### Filtering Overlapping Instances
+
+ | Option | Description | Default |
+ |--------|-------------|---------|
+ | `--filter_overlapping` | Enable filtering of overlapping instances | `False` |
+ | `--filter_overlapping_method` | Similarity method: `iou` (bbox) or `oks` (keypoints) | `iou` |
+ | `--filter_overlapping_threshold` | Similarity threshold (higher = less filtering) | `0.8` |
+
  #### Tracking Options

  | Option | Description | Default |
@@ -256,6 +266,98 @@ See [Evaluation Metrics](inference.md#evaluation-metrics) for more details.

  ---

+ ## `sleap-nn export`
+
+ !!! warning "Experimental"
+     This command is experimental. See the [Export Guide](export.md) for details.
+
+ Export trained models to ONNX and/or TensorRT format for optimized inference.
+
+ ```bash
+ sleap-nn export MODEL_PATH [MODEL_PATH_2] -o OUTPUT_DIR [options]
+ ```
+
+ ### Arguments
+
+ | Argument | Description |
+ |----------|-------------|
+ | `MODEL_PATH` | Path to trained model checkpoint directory |
+ | `MODEL_PATH_2` | Second model path for top-down (centroid + instance) |
+
+ ### Options
+
+ | Option | Short | Description | Default |
+ |--------|-------|-------------|---------|
+ | `--output-dir` | `-o` | Output directory for exported models | Required |
+ | `--format` | `-f` | Export format: `onnx`, `tensorrt`, or `both` | `onnx` |
+ | `--precision` | | TensorRT precision: `fp32` or `fp16` | `fp16` |
+ | `--max-instances` | `-n` | Maximum instances per frame | `20` |
+ | `--max-batch-size` | `-b` | Maximum batch size for dynamic shapes | `8` |
+ | `--input-scale` | | Input resolution scale factor | `1.0` |
+ | `--device` | | Device for export: `cuda` or `cpu` | `cuda` |
+
+ ### Examples
+
+ ```bash
+ # Export single model to ONNX
+ sleap-nn export models/single_instance -o exports/model --format onnx
+
+ # Export to both ONNX and TensorRT
+ sleap-nn export models/bottomup -o exports/model --format both
+
+ # Export top-down (combined centroid + instance)
+ sleap-nn export models/centroid models/centered_instance -o exports/topdown
+ ```
+
+ See [Export Guide](export.md) for detailed documentation.
+
+ ---
+
+ ## `sleap-nn predict`
+
+ !!! warning "Experimental"
+     This command is experimental. See the [Export Guide](export.md) for details.
+
+ Run inference on exported ONNX/TensorRT models.
+
+ ```bash
+ sleap-nn predict EXPORT_DIR INPUT_PATH [options]
+ ```
+
+ ### Arguments
+
+ | Argument | Description |
+ |----------|-------------|
+ | `EXPORT_DIR` | Path to exported model directory |
+ | `INPUT_PATH` | Path to video file |
+
+ ### Options
+
+ | Option | Short | Description | Default |
+ |--------|-------|-------------|---------|
+ | `--output` | `-o` | Output path for predictions (`.slp`) | `<input>.predictions.slp` |
+ | `--runtime` | `-r` | Runtime: `auto`, `onnx`, or `tensorrt` | `auto` |
+ | `--batch-size` | `-b` | Inference batch size | `4` |
+ | `--n-frames` | `-n` | Number of frames to process (0 = all) | `0` |
+ | `--device` | | Device: `auto`, `cuda`, or `cpu` | `auto` |
+
+ ### Examples
+
+ ```bash
+ # Basic inference with auto-detected runtime
+ sleap-nn predict exports/model video.mp4 -o predictions.slp
+
+ # Use TensorRT for maximum speed
+ sleap-nn predict exports/model video.mp4 -o predictions.slp --runtime tensorrt
+
+ # Process first 1000 frames with batch size 8
+ sleap-nn predict exports/model video.mp4 -o predictions.slp -n 1000 -b 8
+ ```
+
+ See [Export Guide](export.md#sleap-nn-predict) for detailed documentation.
+
+ ---
+
  ## `sleap-nn system`

  Display system information and GPU diagnostics. Useful for troubleshooting GPU issues and verifying your installation.
@@ -310,3 +412,4 @@ This command is particularly helpful when:
  - [Configuration Guide](config.md) - Full config reference
  - [Training Guide](training.md) - Detailed training documentation
  - [Inference Guide](inference.md) - Detailed inference documentation
+ - [Export Guide](export.md) - Model export and fast inference

sleap_nn-0.1.0a4/docs/export.md
@@ -0,0 +1,431 @@
+ # Model Export and Fast Inference
+
+ !!! warning "Experimental Feature"
+     The export module is **experimental** and under active development. APIs and behavior may change in future releases. Please report issues at [github.com/talmolab/sleap-nn/issues](https://github.com/talmolab/sleap-nn/issues).
+
+ This guide covers exporting trained SLEAP models to optimized formats (ONNX and TensorRT) for high-performance inference without the full training environment.
+
+ ## Overview
+
+ The export module provides:
+
+ - **ONNX export** - Portable models that run on any platform with ONNX Runtime
+ - **TensorRT export** - Maximum performance on NVIDIA GPUs with FP16 optimization
+ - **Unified prediction CLI** - Run inference on exported models with SLP output
+
+ ### When to Use Exported Models
+
+ | Use Case | Recommended Format |
+ |----------|-------------------|
+ | Cross-platform deployment | ONNX |
+ | Maximum GPU throughput | TensorRT FP16 |
+ | CPU-only inference | ONNX |
+ | Embedding in applications | ONNX or TensorRT |
+ | Production pipelines | TensorRT FP16 |
+
+ ### Performance Comparison
+
+ Benchmarks on **NVIDIA RTX A6000** (48 GB).
+
+ **Batch size 1** (latency-optimized):
+
+ | Model Type | Resolution | PyTorch | ONNX-GPU | TensorRT FP16 | Speedup |
+ |------------|------------|---------|----------|---------------|---------|
+ | Single Instance | 192×192 | 1.8 ms | 1.3 ms | 0.31 ms | 5.9x |
+ | Centroid | 1024×1024 | 2.5 ms | 2.7 ms | 0.77 ms | 3.2x |
+ | Top-Down | 1024×1024 | 11.4 ms | 9.7 ms | 2.31 ms | 4.9x |
+ | Bottom-Up | 1024×1280 | 12.3 ms | 9.6 ms | 2.52 ms | 4.9x |
+ | Multiclass Top-Down | 1024×1024 | 8.3 ms | 9.1 ms | 1.84 ms | 4.5x |
+ | Multiclass Bottom-Up | 1024×1024 | 9.4 ms | 9.4 ms | 2.64 ms | 3.6x |
+
+ **Batch size 8** (throughput-optimized):
+
+ | Model Type | Resolution | PyTorch | ONNX-GPU | TensorRT FP16 | Speedup |
+ |------------|------------|---------|----------|---------------|---------|
+ | Single Instance | 192×192 | 3,111 FPS | 3,165 FPS | 11,039 FPS | 3.5x |
+ | Centroid | 1024×1024 | 453 FPS | 474 FPS | 1,829 FPS | 4.0x |
+ | Top-Down | 1024×1024 | 94 FPS | 122 FPS | 525 FPS | 5.6x |
+ | Bottom-Up | 1024×1280 | 113 FPS | 121 FPS | 524 FPS | 4.6x |
+ | Multiclass Top-Down | 1024×1024 | 127 FPS | 145 FPS | 735 FPS | 5.8x |
+ | Multiclass Bottom-Up | 1024×1024 | 116 FPS | 120 FPS | 470 FPS | 4.1x |
+
+ *Speedup is relative to PyTorch baseline.*
+
+ ---
+
+ ## Installation
+
+ Export functionality requires additional dependencies:
+
+ ```bash
+ # For ONNX export (CPU inference)
+ pip install sleap-nn[export]
+
+ # For ONNX with GPU inference
+ pip install sleap-nn[export-gpu]
+
+ # For TensorRT export (NVIDIA GPUs only)
+ pip install sleap-nn[tensorrt]
+ ```
+
+ Or install all export dependencies:
+
+ ```bash
+ pip install sleap-nn[export-gpu,tensorrt]
+ ```
+
+ ---
+
+ ## Quick Start
+
+ ### Export a Model
+
+ ```bash
+ # Export to ONNX only
+ sleap-nn export /path/to/model -o exports/my_model --format onnx
+
+ # Export to TensorRT FP16 (includes ONNX)
+ sleap-nn export /path/to/model -o exports/my_model --format both
+
+ # Export top-down model (centroid + instance)
+ sleap-nn export /path/to/centroid_model /path/to/instance_model \
+     -o exports/topdown --format both
+ ```
+
+ ### Run Inference
+
+ ```bash
+ # Run inference on exported model
+ sleap-nn predict exports/my_model video.mp4 -o predictions.slp
+
+ # Use TensorRT for maximum speed
+ sleap-nn predict exports/my_model video.mp4 -o predictions.slp --runtime tensorrt
+
+ # Specify batch size
+ sleap-nn predict exports/my_model video.mp4 -o predictions.slp --batch-size 8
+ ```
+
+ ---
+
+ ## `sleap-nn export`
+
+ Export trained models to ONNX and/or TensorRT format.
+
+ ```bash
+ sleap-nn export MODEL_PATH [MODEL_PATH_2] [options]
+ ```
+
+ ### Arguments
+
+ | Argument | Description |
+ |----------|-------------|
+ | `MODEL_PATH` | Path to trained model checkpoint directory |
+ | `MODEL_PATH_2` | Second model path for top-down (centroid + instance) |
+
+ ### Options
+
+ | Option | Short | Description | Default |
+ |--------|-------|-------------|---------|
+ | `--output-dir` | `-o` | Output directory for exported models | Required |
+ | `--format` | `-f` | Export format: `onnx`, `tensorrt`, or `both` | `onnx` |
+ | `--precision` | | TensorRT precision: `fp32` or `fp16` | `fp16` |
+ | `--max-instances` | `-n` | Maximum instances per frame | `20` |
+ | `--max-batch-size` | `-b` | Maximum batch size for dynamic shapes | `8` |
+ | `--input-scale` | | Input resolution scale factor | `1.0` |
+ | `--device` | | Device for export: `cuda` or `cpu` | `cuda` |
+ | `--opset-version` | | ONNX opset version | `17` |
+
+ ### Examples
+
+ **Single Instance Model**
+
+ ```bash
+ sleap-nn export models/single_instance.n=1000 \
+     -o exports/fly_single \
+     --format both
+ ```
+
+ **Top-Down Model (Combined)**
+
+ For top-down inference, export both centroid and instance models together:
+
+ ```bash
+ sleap-nn export models/centroid.n=1000 models/centered_instance.n=1000 \
+     -o exports/fly_topdown \
+     --format both \
+     --max-instances 20
+ ```
+
+ **Bottom-Up Model**
+
+ ```bash
+ sleap-nn export models/bottomup.n=2000 \
+     -o exports/mouse_bottomup \
+     --format both \
+     --max-instances 10
+ ```
+
+ **Multiclass Models**
+
+ Export models with supervised identity (class) labels:
+
+ ```bash
+ # Bottom-up multiclass
+ sleap-nn export models/multi_class_bottomup.n=1000 \
+     -o exports/flies_multiclass \
+     --format onnx
+
+ # Top-down multiclass (centroid + multiclass instance)
+ sleap-nn export models/centroid.n=1000 models/multi_class_topdown.n=1000 \
+     -o exports/flies_multiclass_topdown \
+     --format both
+ ```
+
+ ### Output Files
+
+ After export, the output directory contains:
+
+ ```
+ exports/my_model/
+ ├── model.onnx                # ONNX model
+ ├── model.onnx.metadata.json  # Model metadata
+ ├── model.trt                 # TensorRT engine (if --format both/tensorrt)
+ └── model.trt.metadata.json   # TensorRT metadata
+ ```
+
+ ---
+
+ ## `sleap-nn predict`
+
+ Run inference on exported models.
+
+ ```bash
+ sleap-nn predict EXPORT_DIR INPUT_PATH [options]
+ ```
+
+ ### Arguments
+
+ | Argument | Description |
+ |----------|-------------|
+ | `EXPORT_DIR` | Path to exported model directory |
+ | `INPUT_PATH` | Path to video file (`.mp4`, `.avi`, etc.) |
+
+ ### Options
+
+ | Option | Short | Description | Default |
+ |--------|-------|-------------|---------|
+ | `--output` | `-o` | Output path for predictions (`.slp`) | `<input>.predictions.slp` |
+ | `--runtime` | `-r` | Runtime: `auto`, `onnx`, or `tensorrt` | `auto` |
+ | `--batch-size` | `-b` | Inference batch size | `4` |
+ | `--n-frames` | `-n` | Number of frames to process (0 = all) | `0` |
+ | `--device` | | Device: `auto`, `cuda`, or `cpu` | `auto` |
+
+ ### Examples
+
+ **Basic Inference**
+
+ ```bash
+ sleap-nn predict exports/my_model video.mp4 -o predictions.slp
+ ```
+
+ **High-Throughput TensorRT**
+
+ ```bash
+ sleap-nn predict exports/my_model video.mp4 \
+     -o predictions.slp \
+     --runtime tensorrt \
+     --batch-size 8
+ ```
+
+ **Process Specific Frames**
+
+ ```bash
+ sleap-nn predict exports/my_model video.mp4 \
+     -o predictions.slp \
+     --n-frames 1000
+ ```
+
+ **CPU Inference**
+
+ ```bash
+ sleap-nn predict exports/my_model video.mp4 \
+     -o predictions.slp \
+     --runtime onnx \
+     --device cpu
+ ```
+
+ ---
+
+ ## Supported Model Types
+
+ | Model Type | CLI Name | Description |
+ |------------|----------|-------------|
+ | Single Instance | `single_instance` | One animal per frame |
+ | Centroid | `centroid` | Centroid detection only |
+ | Centered Instance | `centered_instance` | Instance on cropped images |
+ | Top-Down | `topdown` | Centroid + instance (combined) |
+ | Bottom-Up | `bottomup` | Multi-instance with PAF grouping |
+ | Multiclass Bottom-Up | `multi_class_bottomup` | Bottom-up with identity classes |
+ | Multiclass Top-Down | `multi_class_topdown` | Top-down with identity classes |
+
+ ---
+
+ ## Python API
+
+ ### Export Models Programmatically
+
+ ```python
+ from sleap_nn.export import export_to_onnx, export_to_tensorrt
+ from sleap_nn.export.wrappers import SingleInstanceONNXWrapper
+ from sleap_nn.export.metadata import build_base_metadata
+
+ # Load your trained model
+ model = ...  # Your trained PyTorch model
+
+ # Create wrapper
+ wrapper = SingleInstanceONNXWrapper(
+     backbone=model.backbone,
+     head=model.head,
+     max_instances=1,
+ )
+
+ # Export to ONNX
+ export_to_onnx(
+     wrapper,
+     output_path="model.onnx",
+     input_shape=(1, 1, 192, 192),  # (B, C, H, W)
+     input_dtype="uint8",
+ )
+
+ # Export to TensorRT
+ export_to_tensorrt(
+     wrapper,
+     output_path="model.trt",
+     input_shape=(1, 1, 192, 192),
+     precision="fp16",
+ )
+ ```
+
+ ### Run Inference with Predictors
+
+ ```python
+ from sleap_nn.export.predictors import ONNXPredictor, TensorRTPredictor
+ import numpy as np
+
+ # Load ONNX model
+ predictor = ONNXPredictor("model.onnx")
+
+ # Prepare input (uint8 images)
+ frames = np.random.randint(0, 256, (4, 1, 192, 192), dtype=np.uint8)
+
+ # Run inference
+ outputs = predictor.predict(frames)
+ peaks = outputs["peaks"]  # (B, N_nodes, 2)
+ peak_vals = outputs["peak_vals"]  # (B, N_nodes)
+ ```
+
+ ### Load Exported Model Metadata
+
+ ```python
+ from sleap_nn.export.metadata import load_metadata
+
+ metadata = load_metadata("exports/my_model")
+ print(f"Model type: {metadata.model_type}")
+ print(f"Nodes: {metadata.node_names}")
+ print(f"Skeleton edges: {metadata.edge_inds}")
+ ```
+
+ ---
+
+ ## Technical Details
+
+ ### Input Format
+
+ Exported models expect **uint8** images in NCHW format:
+ - Shape: `(batch, channels, height, width)`
+ - Dtype: `uint8` (0-255)
+ - Channels: 1 (grayscale) or 3 (RGB)
+
+ The wrapper automatically normalizes to `[0, 1]` float32 internally.
+
+ ### Output Format
+
+ All models output fixed-size tensors for ONNX compatibility:
+
+ | Output | Shape | Description |
+ |--------|-------|-------------|
+ | `peaks` | `(B, N, 2)` | Keypoint coordinates (x, y) |
+ | `peak_vals` | `(B, N)` | Confidence scores |
+ | `peak_mask` | `(B, N)` | Valid peak mask (for variable counts) |
+
+ For bottom-up models, additional outputs enable instance grouping:
+ - `line_scores`: PAF scores for peak pairs
+ - `candidate_mask`: Valid candidate mask
+
+ ### TensorRT Precision
+
+ - **FP32**: Full precision, highest accuracy
+ - **FP16**: Half precision, ~2x faster, minimal accuracy loss
+
+ FP16 is recommended for production use on modern NVIDIA GPUs (Volta+).
+
+ ### Dynamic Shapes
+
+ Exported models support dynamic batch sizes up to `--max-batch-size`. Height and width are fixed at export time based on the training configuration.
+
+ ---
+
+ ## Troubleshooting
+
+ ### ONNX Runtime CUDA Not Available
+
+ If ONNX falls back to CPU:
+
+ ```bash
+ # Check available providers
+ python -c "import onnxruntime as ort; print(ort.get_available_providers())"
+ ```
+
+ Install GPU support:
+ ```bash
+ pip install onnxruntime-gpu
+ ```
+
+ ### TensorRT Export Fails
+
+ Ensure TensorRT is properly installed:
+
+ ```bash
+ python -c "import tensorrt as trt; print(trt.__version__)"
+ ```
+
+ Common issues:
+ - CUDA version mismatch with TensorRT
+ - Insufficient GPU memory for engine building
+ - Missing cuDNN libraries
+
+ ### Slow Bottom-Up Inference
+
+ Bottom-up models require CPU-side Hungarian matching for instance grouping. This is expected and cannot be accelerated with GPU export. For maximum throughput, use larger batch sizes.
+
+ ### Model Metadata Missing
+
+ If metadata files are missing, the model can still be loaded but node names and skeleton information won't be available. Re-export the model to generate metadata.
+
+ ---
+
+ ## Known Limitations
+
+ !!! info "Current Limitations"
+     - **Standalone centroid/centered_instance prediction**: The `sleap-nn predict` command only supports combined models (top-down, bottom-up, single-instance). Standalone centroid or centered-instance models must be used via the Python API.
+     - **Bottom-up instance grouping**: PAF-based grouping runs on CPU and may be slower than GPU inference for models with many keypoints.
+     - **TensorRT engine portability**: TensorRT engines are GPU-specific and must be regenerated when moving to different GPU hardware.
+     - **Dynamic image sizes**: Height and width are fixed at export time. To support different resolutions, re-export with the desired input shape.
+
+ ---
+
+ ## See Also
+
+ - [Training Guide](training.md) - Train models before export
+ - [Inference Guide](inference.md) - PyTorch inference with `sleap-nn track`
+ - [CLI Reference](cli.md) - Complete CLI documentation
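
The "Output Format" section of the new export guide above documents fixed-size outputs (`peaks`, `peak_vals`, `peak_mask`). As an editorial illustration of how those outputs fit together, here is a minimal sketch combining the documented `ONNXPredictor` usage with mask-based filtering; the assumptions that the predictor returns NumPy arrays and that a truthy `peak_mask` entry marks a valid detection are ours, not stated in the guide.

```python
import numpy as np

from sleap_nn.export.predictors import ONNXPredictor

# Load an exported ONNX model (path layout follows the "Output Files" section above).
predictor = ONNXPredictor("exports/my_model/model.onnx")

# A small batch of uint8 frames in NCHW layout, as the "Input Format" section describes.
frames = np.random.randint(0, 256, (4, 1, 192, 192), dtype=np.uint8)
outputs = predictor.predict(frames)

peaks = outputs["peaks"]          # (B, N, 2) keypoint coordinates, fixed-size
peak_vals = outputs["peak_vals"]  # (B, N) confidence scores
peak_mask = outputs["peak_mask"]  # (B, N) validity mask for variable peak counts

# Keep only the valid peaks in each frame (assumption: truthy mask entries mark real peaks).
for b in range(frames.shape[0]):
    valid = peak_mask[b].astype(bool)
    print(f"frame {b}: {int(valid.sum())} valid peaks")
    print(peaks[b][valid], peak_vals[b][valid])
```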

{sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/index.md
@@ -111,6 +111,7 @@ To run inference:
  - **[Configuration Guide](config.md)** - Detailed explanation of all configuration parameters and how to set up your config file for training
  - **[Training Guide](training.md)** - How to train models using the CLI or Python API and advanced training options
  - **[Inference Guide](inference.md)** - How to run inference and tracking with CLI/ APIs and evaluate the models
+ - **[Export Guide](export.md)** - Export models to ONNX/TensorRT for high-performance inference
  - **[CLI Reference](cli.md)** - Quick reference for all command-line options

  #### **Tutorials**
@@ -157,4 +158,5 @@ Have questions about usage, feature requests, or want to share your experience?
  - [Configuration Guide](config.md)
  - [Training Models](training.md)
  - [Running Inference / Tracking](inference.md)
+ - [Model Export](export.md)
  - [API Reference](api/index.md)

{sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/docs/inference.md
@@ -126,6 +126,45 @@ sleap-nn track \
  |-----------|-------------|---------|
  | `--queue_maxsize` | Maximum size of the frame buffer queue | `8` |

+ #### Filtering Overlapping Instances
+
+ SLEAP-NN can filter out duplicate/overlapping predictions after inference using greedy non-maximum suppression (NMS). This is useful for removing redundant detections without enabling full tracking.
+
+ | Parameter | Description | Default |
+ |-----------|-------------|---------|
+ | `--filter_overlapping` | Enable filtering of overlapping instances after inference | `False` |
+ | `--filter_overlapping_method` | Similarity metric: `iou` (bounding box) or `oks` (keypoint-based) | `iou` |
+ | `--filter_overlapping_threshold` | Similarity threshold above which instances are considered duplicates | `0.8` |
+
+ **Methods:**
+
+ - **`iou`**: Uses bounding box intersection-over-union. Fast and position-based.
+ - **`oks`**: Uses Object Keypoint Similarity. Pose-aware, considers keypoint distances.
+
+ **Example usage:**
+
+ ```bash
+ # Enable filtering with default IOU method
+ sleap-nn track -i video.mp4 -m model/ --filter_overlapping
+
+ # Use OKS method with custom threshold
+ sleap-nn track -i video.mp4 -m model/ \
+     --filter_overlapping \
+     --filter_overlapping_method oks \
+     --filter_overlapping_threshold 0.5
+ ```
+
+ **Threshold guidelines:**
+
+ | Value | Effect |
+ |-------|--------|
+ | 0.3 | Aggressive - removes instances with >30% similarity |
+ | 0.5 | Moderate - balanced filtering |
+ | 0.8 | Permissive (default) - only removes highly similar instances |
+
+ !!! note "Filtering vs Tracking"
+     This filtering is independent of tracking and runs before the tracking step. You can use both together—filtering removes duplicates first, then tracking assigns IDs to remaining instances.
+

  ## Run inference with API

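
The `--filter_overlapping` option documented above is greedy non-maximum suppression over predicted instances. A minimal sketch of the `iou` variant follows; the helper names are illustrative only and this is not the package's implementation (which lives in `sleap_nn/inference/postprocessing.py` per the file list above).

```python
import numpy as np


def bbox_iou(a: np.ndarray, b: np.ndarray) -> float:
    """IoU of two instance bounding boxes given as (x_min, y_min, x_max, y_max)."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union > 0 else 0.0


def filter_overlapping(boxes: np.ndarray, scores: np.ndarray, threshold: float = 0.8) -> list[int]:
    """Greedy NMS: keep the highest-scoring instance, drop later ones whose IoU exceeds the threshold."""
    order = np.argsort(scores)[::-1]
    keep: list[int] = []
    for i in order:
        if all(bbox_iou(boxes[i], boxes[j]) <= threshold for j in keep):
            keep.append(int(i))
    return keep


# Two near-duplicate instances and one distinct instance: with the default 0.8 threshold,
# only the near-identical boxes are merged, matching the "permissive" row in the table above.
boxes = np.array([[10, 10, 50, 50], [11, 11, 51, 51], [100, 100, 140, 140]], dtype=float)
scores = np.array([0.9, 0.7, 0.8])
print(filter_overlapping(boxes, scores))  # -> [0, 2]
```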

{sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/mkdocs.yml
@@ -109,6 +109,7 @@ nav:
  - Configuration Setup: config.md
  - Training: training.md
  - Inference & Tracking: inference.md
+ - Exporting: export.md
  - CLI Reference: cli.md
  - Tutorials:
  - Example Notebooks: example_notebooks.md
@@ -119,4 +120,4 @@ nav:
  - Model Architectures: models.md
  - Full API: api/
  - Changelog: changelog.md
- - Releases: https://github.com/talmolab/sleap-nn/releases
+ - Releases: https://github.com/talmolab/sleap-nn/releases

{sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/pyproject.toml
@@ -48,6 +48,7 @@ dependencies = [
      "jupyter",
      "jupyterlab",
      "pyzmq",
+     "rich-click>=1.9.5",
  ]
  dynamic = ["version", "readme"]

@@ -64,6 +65,9 @@ torch-cpu = ["torch", "torchvision>=0.20.0"]
  torch-cuda118 = ["torch", "torchvision>=0.20.0"]
  torch-cuda128 = ["torch", "torchvision>=0.20.0"]
  torch-cuda130 = ["torch", "torchvision>=0.20.0"]
+ export = ["onnx>=1.15.0", "onnxruntime>=1.16.0", "onnxscript>=0.1.0"]
+ export-gpu = ["onnx>=1.15.0", "onnxruntime-gpu>=1.16.0", "onnxscript>=0.1.0"]
+ tensorrt = ["tensorrt>=10.0.0", "torch-tensorrt>=2.0.0"]

  [dependency-groups]
  # PEP 735: Dev-only dependencies (`dev` is installed automatically by `uv sync`)

{sleap_nn-0.1.0a2 → sleap_nn-0.1.0a4}/sleap_nn/__init__.py
@@ -50,7 +50,7 @@ logger.add(
      colorize=False,
  )

- __version__ = "0.1.0a2"
+ __version__ = "0.1.0a4"

  # Public API
  from sleap_nn.evaluation import load_metrics