sleap-nn 0.0.5__tar.gz → 0.1.0a0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sleap_nn-0.1.0a0/.claude/skills/investigation/SKILL.md +67 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/.github/workflows/ci.yml +23 -26
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/.github/workflows/docs.yml +10 -3
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/.gitignore +7 -4
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/CLAUDE.md +8 -18
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/CONTRIBUTING.md +16 -10
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/PKG-INFO +22 -32
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/README.md +13 -7
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/codecov.yml +3 -1
- sleap_nn-0.1.0a0/docs/cli.md +312 -0
- sleap_nn-0.1.0a0/docs/colab_notebooks/Training_with_sleap_nn_on_colab.ipynb +476 -0
- sleap_nn-0.1.0a0/docs/colab_notebooks/index.md +22 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/config.md +33 -7
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/index.md +13 -11
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/inference.md +44 -1
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/installation.md +112 -23
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_bottomup_convnext.yaml +1 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_bottomup_unet_large_rf.yaml +1 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_bottomup_unet_medium_rf.yaml +1 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_centroid_swint.yaml +1 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_centroid_unet.yaml +1 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_multi_class_bottomup_unet.yaml +1 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_single_instance_unet_large_rf.yaml +1 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_single_instance_unet_medium_rf.yaml +1 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_topdown_centered_instance_unet_large_rf.yaml +2 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_topdown_centered_instance_unet_medium_rf.yaml +2 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/sample_configs/config_topdown_multi_class_centered_instance_unet.yaml +2 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/training.md +98 -3
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/mkdocs.yml +2 -0
- sleap_nn-0.1.0a0/pyproject.toml +176 -0
- sleap_nn-0.1.0a0/scripts/cov_summary.py +648 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/__init__.py +6 -1
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/cli.py +142 -3
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/config/data_config.py +44 -7
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/config/get_config.py +22 -20
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/config/trainer_config.py +12 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/augmentation.py +54 -2
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/custom_datasets.py +22 -22
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/instance_cropping.py +70 -5
- sleap_nn-0.1.0a0/sleap_nn/data/normalization.py +88 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/providers.py +26 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/evaluation.py +99 -23
- sleap_nn-0.1.0a0/sleap_nn/inference/__init__.py +7 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/inference/peak_finding.py +10 -2
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/inference/predictors.py +115 -20
- sleap_nn-0.1.0a0/sleap_nn/inference/provenance.py +292 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/inference/topdown.py +55 -47
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/predict.py +187 -10
- sleap_nn-0.1.0a0/sleap_nn/system_info.py +443 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/tracking/tracker.py +8 -1
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/train.py +64 -40
- sleap_nn-0.1.0a0/sleap_nn/training/callbacks.py +664 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/training/lightning_modules.py +325 -180
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/training/model_trainer.py +308 -22
- sleap_nn-0.1.0a0/sleap_nn/training/utils.py +603 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn.egg-info/PKG-INFO +22 -32
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn.egg-info/SOURCES.txt +13 -1
- sleap_nn-0.1.0a0/sleap_nn.egg-info/requires.txt +39 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/config/test_data_config.py +38 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/config/test_model_config.py +0 -1
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/data/test_custom_datasets.py +4 -4
- sleap_nn-0.1.0a0/tests/data/test_instance_cropping.py +152 -0
- sleap_nn-0.1.0a0/tests/data/test_normalization.py +43 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/data/test_providers.py +101 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/inference/test_bottomup.py +2 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/inference/test_predictors.py +304 -2
- sleap_nn-0.1.0a0/tests/inference/test_provenance.py +291 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/inference/test_single_instance.py +1 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/inference/test_topdown.py +32 -0
- sleap_nn-0.1.0a0/tests/test_cli.py +640 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/test_evaluation.py +65 -7
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/test_predict.py +31 -1
- sleap_nn-0.1.0a0/tests/test_system_info.py +520 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/test_train.py +47 -134
- sleap_nn-0.1.0a0/tests/training/test_callbacks.py +1024 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/training/test_model_trainer.py +16 -5
- sleap_nn-0.1.0a0/tests/training/test_training_utils.py +612 -0
- sleap_nn-0.1.0a0/uv.lock +5146 -0
- sleap_nn-0.0.5/pyproject.toml +0 -158
- sleap_nn-0.0.5/sleap_nn/data/normalization.py +0 -45
- sleap_nn-0.0.5/sleap_nn/inference/__init__.py +0 -1
- sleap_nn-0.0.5/sleap_nn/training/callbacks.py +0 -352
- sleap_nn-0.0.5/sleap_nn/training/utils.py +0 -238
- sleap_nn-0.0.5/sleap_nn.egg-info/requires.txt +0 -56
- sleap_nn-0.0.5/tests/data/test_instance_cropping.py +0 -64
- sleap_nn-0.0.5/tests/data/test_normalization.py +0 -22
- sleap_nn-0.0.5/tests/test_cli.py +0 -204
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/.claude/commands/coverage.md +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/.claude/commands/lint.md +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/.claude/commands/pr-description.md +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/.dockerignore +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/.github/workflows/build.yml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/.github/workflows/codespell.yml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/LICENSE +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/assets/favicon.ico +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/assets/sleap-logo.png +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/core_components.md +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/example_notebooks.md +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/models.md +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/docs/step_by_step_tutorial.md +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/example_notebooks/README.md +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/example_notebooks/augmentation_guide.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/example_notebooks/receptive_field_guide.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/example_notebooks/training_demo.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/scripts/gen_changelog.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/scripts/gen_ref_pages.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/setup.cfg +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/.DS_Store +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/architectures/__init__.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/architectures/common.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/architectures/convnext.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/architectures/encoder_decoder.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/architectures/heads.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/architectures/model.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/architectures/swint.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/architectures/unet.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/architectures/utils.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/config/__init__.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/config/model_config.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/config/training_job_config.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/config/utils.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/__init__.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/confidence_maps.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/edge_maps.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/identity.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/instance_centroids.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/resizing.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/data/utils.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/inference/bottomup.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/inference/identity.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/inference/paf_grouping.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/inference/single_instance.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/inference/utils.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/legacy_models.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/tracking/__init__.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/tracking/candidates/__init__.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/tracking/candidates/fixed_window.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/tracking/candidates/local_queues.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/tracking/track_instance.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/tracking/utils.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/training/__init__.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn/training/losses.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn.egg-info/dependency_links.txt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn.egg-info/entry_points.txt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/sleap_nn.egg-info/top_level.txt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/__init__.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/architectures/test_architecture_utils.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/architectures/test_common.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/architectures/test_convnext.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/architectures/test_encoder_decoder.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/architectures/test_heads.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/architectures/test_model.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/architectures/test_swint.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/architectures/test_unet.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/datasets/centered_pair_small.mp4 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/datasets/minimal_instance.pkg.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/datasets/small_robot.mp4 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/datasets/small_robot_minimal.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/inference/minimal_bboxes.pt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/inference/minimal_cms.pt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/get_dummy_activations.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/min_tracks_2node.UNet.bottomup_multiclass/best_model.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/min_tracks_2node.UNet.bottomup_multiclass/dummy_activations.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/min_tracks_2node.UNet.bottomup_multiclass/initial_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/min_tracks_2node.UNet.bottomup_multiclass/training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/min_tracks_2node.UNet.topdown_multiclass/best_model.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/min_tracks_2node.UNet.topdown_multiclass/dummy_activations.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/min_tracks_2node.UNet.topdown_multiclass/initial_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/min_tracks_2node.UNet.topdown_multiclass/training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/best_model.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/dummy_activations.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/initial_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/labels_gt.train.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/labels_gt.val.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/labels_pr.train.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/labels_pr.val.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/metrics.train.npz +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/metrics.val.npz +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.bottomup/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/best_model.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/dummy_activations.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/initial_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/labels_gt.train.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/labels_gt.val.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/labels_pr.train.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/labels_pr.val.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/metrics.train.npz +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/metrics.val.npz +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centered_instance/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/best_model.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/dummy_activations.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/initial_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/labels_gt.train.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/labels_gt.val.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/labels_pr.train.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/labels_pr.val.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/metrics.train.npz +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/metrics.val.npz +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_instance.UNet.centroid/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/best_model.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/dummy_activations.h5 +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/initial_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/labels_gt.train.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/labels_gt.val.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_models/minimal_robot.UNet.single_instance/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_sleap_json_configs/bottomup_multiclass_training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_sleap_json_configs/bottomup_training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_sleap_json_configs/centered_instance_training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_sleap_json_configs/centered_instance_with_scaling_training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_sleap_json_configs/centroid_training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_sleap_json_configs/single_instance_training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/legacy_sleap_json_configs/topdown_training_config.json +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_bottomup/best.ckpt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_bottomup/initial_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_bottomup/labels_train_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_bottomup/labels_val_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_bottomup/training_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_bottomup/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centered_instance/best.ckpt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centered_instance/initial_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centered_instance/labels_train_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centered_instance/labels_val_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centered_instance/training_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centered_instance/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centroid/best.ckpt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centroid/initial_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centroid/labels_train_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centroid/labels_val_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centroid/training_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_centroid/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/best.ckpt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/initial_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/labels_train_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/labels_val_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/training_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_bottomup/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/best.ckpt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/initial_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/labels_train_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/labels_val_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/training_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_multiclass_centered_instance/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_single_instance/best.ckpt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_single_instance/initial_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_single_instance/labels_train_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_single_instance/labels_val_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_single_instance/training_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/minimal_instance_single_instance/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/best.ckpt +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/initial_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/labels_train_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/labels_val_gt_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/pred_test.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/pred_train_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/pred_val_0.slp +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/test_pred_metrics.npz +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/train_0_pred_metrics.npz +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/training_config.yaml +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/training_log.csv +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/assets/model_ckpts/single_instance_with_metrics/val_0_pred_metrics.npz +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/config/test_config_utils.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/config/test_trainer_config.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/config/test_training_job_config.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/conftest.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/data/test_augmentation.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/data/test_confmaps.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/data/test_edge_maps.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/data/test_identity.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/data/test_instance_centroids.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/data/test_resizing.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/data/test_utils.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/fixtures/__init__.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/fixtures/datasets.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/fixtures/inference.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/fixtures/legacy_models.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/fixtures/legacy_sleap_json_configs.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/fixtures/model_ckpts.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/inference/__init__.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/inference/test_paf_grouping.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/inference/test_peak_finding.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/inference/test_utils.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/test_legacy_models.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/test_version.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/tracking/candidates/test_fixed_window.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/tracking/candidates/test_local_queues.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/tracking/test_tracker.py +0 -0
- {sleap_nn-0.0.5 → sleap_nn-0.1.0a0}/tests/training/test_lightning_modules.py +0 -0

.claude/skills/investigation/SKILL.md (new file)

````diff
@@ -0,0 +1,67 @@
+---
+name: investigation
+description: >
+  Scaffolds a structured investigation in scratch/ for empirical research and documentation.
+  Use when the user says "start an investigation" or wants to: trace code paths or data flow
+  ("trace from X to Y", "what touches X", "follow the wiring"), document system architecture
+  comprehensively ("document how the system works", "archeology"), investigate bugs
+  ("figure out why X happens"), explore technical feasibility ("can we do X?"), or explore
+  design options ("explore the API", "gather context", "design alternatives").
+  Creates dated folder with README. NOT for simple code questions or single-file searches.
+---
+
+# Set up an investigation
+
+## Instructions
+
+1. Create a folder in `{REPO_ROOT}/scratch/` with the format `{YYYY-MM-DD}-{descriptive-name}`.
+2. Create a `README.md` in this folder with: task description, background context, task checklist. Update with findings as you progress.
+3. Create scripts and data files as needed for empirical work.
+4. For complex investigations, split into sub-documents as patterns emerge.
+
+## Investigation Patterns
+
+These are common patterns, not rigid categories. Most investigations blend multiple patterns.
+
+**Tracing** - "trace from X to Y", "what touches X", "follow the wiring"
+- Follow call stack or data flow from a focal component to its connections
+- Can trace forward (X → where does it go?) or backward (what leads to X?)
+- Useful for: assessing impact of changes, understanding coupling
+
+**System Architecture Archeology** - "document how the system works", "archeology"
+- Comprehensive documentation of an entire system or flow for reusable reference
+- Start from entry points, trace through all layers, document relationships exhaustively
+- For complex systems, consider numbered sub-documents (01-cli.md, 02-data.md, etc.)
+
+**Bug Investigation** - "figure out why X happens", "this is broken"
+- Reproduce → trace root cause → propose fix
+- For cross-repo bugs, consider per-repo task breakdowns
+
+**Technical Exploration** - "can we do X?", "is this possible?", "figure out how to"
+- Feasibility testing with proof-of-concept scripts
+- Document what works AND what doesn't
+
+**Design Research** - "explore the API", "gather context", "design alternatives"
+- Understand systems and constraints before building
+- Compare alternatives, document trade-offs
+- Include visual artifacts (mockups, screenshots) when relevant
+- For iterative decisions, use numbered "Design Questions" (DQ1, DQ2...) to structure review
+
+## Best Practices
+
+- Use `uv` with inline dependencies for standalone scripts; for scripts importing local project code, use `python` directly (or `uv run python` if env not activated)
+- Use subagents for parallel exploration to save context
+- Write small scripts to explore APIs interactively
+- Generate figures/diagrams and reference inline in markdown
+- For web servers: `npx serve -p 8080 --cors --no-clipboard &`
+- For screenshots: use Playwright MCP for web, Qt's grab() for GUI
+- For external package API review: clone to `scratch/repos/` for direct source access
+
+## Important: Scratch is Gitignored
+
+The `scratch/` directory is in `.gitignore` and will NOT be committed.
+
+- NEVER delete anything from scratch - it doesn't need cleanup
+- When distilling findings into PRs, include all relevant info inline
+- Copy key findings, code, and data directly into PR descriptions
+- PRs must be self-contained; don't reference scratch files
````
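The skill above names a `{YYYY-MM-DD}-{descriptive-name}` folder convention with a README checklist; a minimal bash sketch of scaffolding one investigation (the topic name and README headings here are illustrative, not part of the skill):

```bash
# Scaffold an investigation folder following the naming convention above.
# "trace-data-flow" is a hypothetical topic; $(date +%F) expands to YYYY-MM-DD.
dir="scratch/$(date +%F)-trace-data-flow"
mkdir -p "$dir"
printf '# Trace data flow\n\n## Task\n\n## Background\n\n## Checklist\n' > "$dir/README.md"
```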
.github/workflows/ci.yml

````diff
@@ -13,6 +13,9 @@ jobs:
   lint:
     name: Lint
     runs-on: ubuntu-latest
+    env:
+      CUDA_VISIBLE_DEVICES: ""
+      USE_CUDA: "0"
     steps:
       - name: Checkout repo
         uses: actions/checkout@v4
@@ -26,20 +29,24 @@ jobs:
         run: uv python install 3.13
 
       - name: Install dev dependencies and torch
-        run: uv sync --extra
+        run: uv sync --extra torch-cpu
 
       - name: Run Black
-        run: uv run black --check sleap_nn tests
+        run: uv run --frozen --extra torch-cpu black --check sleap_nn tests
 
       - name: Run Ruff
-        run: uv run ruff check sleap_nn/
+        run: uv run --frozen --extra torch-cpu ruff check sleap_nn/
 
   tests:
     timeout-minutes: 30
+    env:
+      CUDA_VISIBLE_DEVICES: ""
+      USE_CUDA: "0"
+      UV_FROZEN: "1"
     strategy:
       fail-fast: false
       matrix:
-        os: ["ubuntu", "windows", "mac"
+        os: ["ubuntu", "windows", "mac"] # "self-hosted-gpu" temporarily disabled
         include:
           - os: ubuntu
             runs-on: ubuntu-latest
@@ -47,8 +54,8 @@ jobs:
             runs-on: windows-latest
           - os: mac
             runs-on: macos-14
-          - os: self-hosted-gpu
-            runs-on: [self-hosted, puma, gpu, 2xgpu]
+          # - os: self-hosted-gpu
+          #   runs-on: [self-hosted, puma, gpu, 2xgpu]
         python: [3.13]
 
     name: Tests (${{ matrix.os }}, Python ${{ matrix.python }})
@@ -69,21 +76,21 @@ jobs:
 
       - name: Install dev dependencies and torch (self-hosted GPU)
         if: matrix.os == 'self-hosted-gpu'
-        run: uv sync --python 3.13 --extra
+        run: uv sync --python 3.13 --extra torch-cuda128
 
       - name: Install dev dependencies and torch (non-self-hosted GPU)
         if: matrix.os != 'self-hosted-gpu'
-        run: uv sync --extra
+        run: uv sync --extra torch-cpu
 
       - name: Print environment info
         run: |
           echo "=== UV Environment ==="
-          uv run python --version
-          uv run python -c "import sys; print('Python executable:', sys.executable)"
+          uv run --frozen --extra torch-cpu python --version
+          uv run --frozen --extra torch-cpu python -c "import sys; print('Python executable:', sys.executable)"
           echo "=== UV Environment NumPy Check ==="
-          uv run python -c "import numpy; print('NumPy version:', numpy.__version__); print('NumPy location:', numpy.__file__)" || echo "NumPy import failed in uv environment"
+          uv run --frozen --extra torch-cpu python -c "import numpy; print('NumPy version:', numpy.__version__); print('NumPy location:', numpy.__file__)" || echo "NumPy import failed in uv environment"
           echo "=== CUDA Availability Check ==="
-          uv run python -c "
+          uv run --frozen --extra torch-cpu python -c "
           import torch
           print(f'PyTorch version: {torch.__version__}')
           print(f'CUDA available: {torch.cuda.is_available()}')
@@ -95,24 +102,14 @@ jobs:
           else:
              print('CUDA is not available')
           " || echo "CUDA check failed"
-          echo "=== PIP EXECUTABLE COMPARISON ==="
-          uv run python -c "import subprocess; print('pip from uv run python:', subprocess.check_output(['pip', '--version']).decode().strip())" || echo "pip not found from python"
-          uv run pip --version || echo "uv run pip failed"
-          echo "=== UV pip list vs python -m pip list ==="
-          echo "--- uv run pip list ---"
-          uv run pip list | head -20
-          echo "--- uv run python -m pip list ---"
-          uv run python -m pip list | head -20
-          echo "=== UV ENVIRONMENT CHECK ==="
-          uv run python -c "import os; print('VIRTUAL_ENV:', os.environ.get('VIRTUAL_ENV', 'Not set'))"
           echo "=== Import Test ==="
-          uv run python -c "import torch; import lightning; import kornia; print('All imports successful')" || echo "Import test failed"
+          uv run --frozen --extra torch-cpu python -c "import torch; import lightning; import kornia; print('All imports successful')" || echo "Import test failed"
 
       - name: Check MPS backend (macOS only)
         if: runner.os == 'macOS'
         run: |
           echo "=== macOS MPS Backend Check ==="
-          uv run python -c "
+          uv run --frozen --extra torch-cpu python -c "
           import torch
           print(f'PyTorch version: {torch.__version__}')
           print(f'MPS available: {torch.backends.mps.is_available()}')
@@ -129,9 +126,9 @@ jobs:
       - name: Run pytest
         run: |
           echo "=== Final environment check before tests ==="
-          uv run python -c "import numpy, torch, lightning, kornia; print(f'All packages available: numpy={numpy.__version__}, torch={torch.__version__}')"
+          uv run --frozen --extra torch-cpu python -c "import numpy, torch, lightning, kornia; print(f'All packages available: numpy={numpy.__version__}, torch={torch.__version__}')"
           echo "=== Running pytest ==="
-          uv run pytest --cov=sleap_nn --cov-report=xml --durations=-1 tests/
+          uv run --frozen --extra torch-cpu pytest --cov=sleap_nn --cov-report=xml --durations=-1 tests/
 
       - name: Upload coverage
         uses: codecov/codecov-action@v5
````
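The workflow edits above standardize on `uv run --frozen --extra torch-cpu`; the same lint and test steps can be reproduced locally on a CPU-only machine (commands copied from the workflow, assuming `uv` is installed and the repo is checked out):

```bash
# Mirror the updated CI steps outside of GitHub Actions.
uv sync --extra torch-cpu
uv run --frozen --extra torch-cpu black --check sleap_nn tests
uv run --frozen --extra torch-cpu ruff check sleap_nn/
uv run --frozen --extra torch-cpu pytest --cov=sleap_nn --cov-report=xml --durations=-1 tests/
```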
.github/workflows/docs.yml

````diff
@@ -32,7 +32,7 @@ jobs:
 
       - name: Install dependencies
         run: |
-          uv sync --extra
+          uv sync --extra torch-cpu --group docs
 
       - name: Print environment info
         run: |
@@ -45,13 +45,20 @@
           git config --global user.name "github-actions[bot]"
           git config --global user.email "github-actions[bot]@users.noreply.github.com"
 
-      - name: Build and upload docs (release)
-        if: ${{ github.event_name == 'release' && github.event.action == 'published' }}
+      - name: Build and upload docs (stable release)
+        if: ${{ github.event_name == 'release' && github.event.action == 'published' && !github.event.release.prerelease }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         run: |
           uv run mike deploy --update-aliases --allow-empty --push "${{ github.event.release.tag_name }}" latest
 
+      - name: Build and upload docs (pre-release)
+        if: ${{ github.event_name == 'release' && github.event.action == 'published' && github.event.release.prerelease }}
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          uv run mike deploy --allow-empty --push "${{ github.event.release.tag_name }}"
+
       - name: Build and upload docs (dev)
         if: ${{ github.event_name == 'push' }}
         env:
````
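The docs workflow now installs the `docs` dependency group alongside `torch-cpu`; a hedged sketch of building the site locally with the same dependencies (the `mkdocs serve` preview command is assumed from the project's `mkdocs.yml`, not shown in the workflow):

```bash
# Install docs dependencies and preview the documentation locally.
uv sync --extra torch-cpu --group docs
uv run mkdocs serve
```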
.gitignore

````diff
@@ -128,9 +128,6 @@ ENV/
 env.bak/
 venv.bak/
 
-# uv lock files
-uv.lock
-
 # Spyder project settings
 .spyderproject
 .spyproject
@@ -176,4 +173,10 @@ wandb/
 .DS_Store
 
 # Development scratch folder
-scratch/
+scratch/
+
+# Trained models
+models/
+
+# Test config files
+test_*.yaml
````
CLAUDE.md

````diff
@@ -2,6 +2,13 @@
 
 This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
 
+sleap-nn is a PyTorch-based neural network backend for animal pose estimation.
+
+## Notes
+- We use `uv` for environment management and packaging.
+- We use `sleap-io` (https://github.com/talmolab/sleap-io) as the I/O backend for `.slp` files, which are HDF5-based containers for labels and sometimes embedded image data (typically ending with `.pkg.slp`). Refer to https://raw.githubusercontent.com/talmolab/sleap-io/refs/heads/main/docs/examples.md for usage examples when you need to do I/O with SLP files.
+- This package (`sleap-nn`) is used as the neural network training backend to SLEAP, which is primarily used through its frontend package `sleap` (https://github.com/talmolab/sleap).
+
 ## Common Development Commands
 
 ### Testing
@@ -15,15 +22,9 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
 - Check formatting: `black --check sleap_nn tests`
 - Run linter: `ruff check sleap_nn/`
 
-### Environment Setup
-- GPU (Windows/Linux): `mamba env create -f environment.yml`
-- CPU (Windows/Linux/Intel Mac): `mamba env create -f environment_cpu.yml`
-- Apple Silicon (M1/M2 Mac): `mamba env create -f environment_osx-arm64.yml`
-- Activate environment: `mamba activate sleap-nn`
-
 ## Architecture Overview
 
-
+The codebase follows a modular architecture:
 
 ### Core Components
 
@@ -68,14 +69,3 @@ Configurations are managed via Hydra and can be specified in YAML files (see `do
 - Inference: `sleap_nn/predict.py` - Run inference on trained models
 - CLI: `sleap_nn/cli.py` - Command-line interface (currently minimal)
 - Evaluation: `sleap_nn/evaluation.py` - Model evaluation utilities
-
-### Model Types
-
-The system supports multiple model architectures for pose estimation:
-- Single Instance: One animal per frame
-- Centered Instance: Crop-based single instance
-- Centroid: Animal center detection
-- Top-Down: Centroid → Instance detection
-- Bottom-Up: Multi-instance with PAFs
-
-Each model type has corresponding head modules, data processing, and inference pipelines.
````
CONTRIBUTING.md

````diff
@@ -24,27 +24,33 @@ Thank you for your interest in contributing to sleap-nn! This guide will help yo
 2. **Install sleap-nn dependencies based on your platform**\
 
     - Sync all dependencies based on your correct wheel using `uv sync`. `uv sync` creates a `.venv` (virtual environment) inside your current working directory. This environment is only active within that directory and can't be directly accessed from outside. To use all installed packages, you must run commands with `uv run` (e.g., `uv run sleap-nn train ...` or `uv run pytest ...`).
-    - **Windows/Linux with NVIDIA GPU (CUDA
+    - **Windows/Linux with NVIDIA GPU (CUDA 13.0):**
+
+      ```bash
+      uv sync --extra torch-cuda130
+      ```
+
+    - **Windows/Linux with NVIDIA GPU (CUDA 12.8):**
 
       ```bash
-      uv sync --extra
+      uv sync --extra torch-cuda128
       ```
 
-
+    - **Windows/Linux with NVIDIA GPU (CUDA 11.8):**
 
       ```bash
-      uv sync --extra
+      uv sync --extra torch-cuda118
       ```
-
-    - **macOS with Apple Silicon (M1, M2, M3, M4) or CPU-only (no GPU or unsupported GPU):**
+
+    - **macOS with Apple Silicon (M1, M2, M3, M4) or CPU-only (no GPU or unsupported GPU):**
       Note: Even if torch-cpu is used on macOS, the MPS backend will be available.
-
-      uv sync --extra
+      ```bash
+      uv sync --extra torch-cpu
       ```
     > **Upgrading All Dependencies**
     > To ensure you have the latest versions of all dependencies, use the `--upgrade` flag with `uv sync`:
     > ```bash
-    > uv sync --
+    > uv sync --upgrade
     > ```
     > This will upgrade all installed packages in your environment to the latest available versions compatible with your `pyproject.toml`.
 
@@ -124,7 +130,7 @@ cd sleap-nn
 
 2. Install `sleap-nn` with docs dependencies:
    ```bash
-   uv sync --
+   uv sync --group docs --extra torch-cpu
    ```
 
 3. Build and tag a new documentation version:
````
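After running one of the `uv sync --extra torch-*` variants above, a quick sanity check (not part of the contributing guide) confirms which PyTorch build resolved and whether an accelerator is visible:

```bash
# Report the installed torch version and CUDA/MPS availability.
uv run python -c "import torch; print(torch.__version__, '| cuda:', torch.cuda.is_available(), '| mps:', torch.backends.mps.is_available())"
```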
PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sleap-nn
-Version: 0.0.5
+Version: 0.1.0a0
 Summary: Neural network backend for training and inference for animal pose estimation.
 Author-email: Divya Seshadri Murali <dimurali@salk.edu>, Elizabeth Berrigan <eberrigan@salk.edu>, Vincent Tu <vitu@ucsd.edu>, Liezl Maree <lmaree@salk.edu>, David Samy <davidasamy@gmail.com>, Talmo Pereira <talmo@salk.edu>
 License: BSD-3-Clause
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.13
 Requires-Python: <3.14,>=3.11
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: sleap-io
+Requires-Dist: sleap-io<0.7.0,>=0.6.0
 Requires-Dist: numpy
 Requires-Dist: lightning
 Requires-Dist: kornia
@@ -34,35 +34,19 @@ Requires-Dist: jupyterlab
 Requires-Dist: pyzmq
 Provides-Extra: torch
 Requires-Dist: torch; extra == "torch"
-Requires-Dist: torchvision
+Requires-Dist: torchvision>=0.20.0; extra == "torch"
 Provides-Extra: torch-cpu
 Requires-Dist: torch; extra == "torch-cpu"
-Requires-Dist: torchvision
+Requires-Dist: torchvision>=0.20.0; extra == "torch-cpu"
 Provides-Extra: torch-cuda118
 Requires-Dist: torch; extra == "torch-cuda118"
-Requires-Dist: torchvision
+Requires-Dist: torchvision>=0.20.0; extra == "torch-cuda118"
 Provides-Extra: torch-cuda128
 Requires-Dist: torch; extra == "torch-cuda128"
-Requires-Dist: torchvision
-Provides-Extra:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist: black; extra == "dev"
-Requires-Dist: pydocstyle; extra == "dev"
-Requires-Dist: toml; extra == "dev"
-Requires-Dist: twine; extra == "dev"
-Requires-Dist: build; extra == "dev"
-Requires-Dist: ipython; extra == "dev"
-Requires-Dist: ruff; extra == "dev"
-Provides-Extra: docs
-Requires-Dist: mkdocs; extra == "docs"
-Requires-Dist: mkdocs-material; extra == "docs"
-Requires-Dist: mkdocs-jupyter; extra == "docs"
-Requires-Dist: mike; extra == "docs"
-Requires-Dist: mkdocstrings[python]; extra == "docs"
-Requires-Dist: mkdocs-gen-files; extra == "docs"
-Requires-Dist: mkdocs-literate-nav; extra == "docs"
-Requires-Dist: mkdocs-section-index; extra == "docs"
+Requires-Dist: torchvision>=0.20.0; extra == "torch-cuda128"
+Provides-Extra: torch-cuda130
+Requires-Dist: torch; extra == "torch-cuda130"
+Requires-Dist: torchvision>=0.20.0; extra == "torch-cuda130"
 Dynamic: license-file
 
 # sleap-nn
@@ -120,22 +104,28 @@ powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
 > Replace `...` with the rest of your install command as needed.
 
 - Sync all dependencies based on your correct wheel using `uv sync`. `uv sync` creates a `.venv` (virtual environment) inside your current working directory. This environment is only active within that directory and can't be directly accessed from outside. To use all installed packages, you must run commands with `uv run` (e.g., `uv run sleap-nn train ...` or `uv run pytest ...`).
-- **Windows/Linux with NVIDIA GPU (CUDA
+- **Windows/Linux with NVIDIA GPU (CUDA 13.0):**
 
   ```bash
-  uv sync --extra
+  uv sync --extra torch-cuda130
   ```
 
 - **Windows/Linux with NVIDIA GPU (CUDA 12.8):**
 
   ```bash
-  uv sync --extra
+  uv sync --extra torch-cuda128
   ```
-
-- **
+
+- **Windows/Linux with NVIDIA GPU (CUDA 11.8):**
+
+  ```bash
+  uv sync --extra torch-cuda118
+  ```
+
+- **macOS with Apple Silicon (M1, M2, M3, M4) or CPU-only (no GPU or unsupported GPU):**
   Note: Even if torch-cpu is used on macOS, the MPS backend will be available.
   ```bash
-  uv sync --extra
+  uv sync --extra torch-cpu
   ```
 
 4. **Run tests**
@@ -152,6 +142,6 @@ powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
 > **Upgrading All Dependencies**
 > To ensure you have the latest versions of all dependencies, use the `--upgrade` flag with `uv sync`:
 > ```bash
-> uv sync --
+> uv sync --upgrade
 > ```
 > This will upgrade all installed packages in your environment to the latest available versions compatible with your `pyproject.toml`.
````
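With the metadata now pinning `sleap-io<0.7.0,>=0.6.0` and `torchvision>=0.20.0`, the versions that actually resolved in an environment can be checked with a standard-library one-liner (illustrative, not from the package):

```bash
# Print the installed versions of the newly pinned dependencies.
uv run python -c "from importlib.metadata import version; print('sleap-io', version('sleap-io'), '| torchvision', version('torchvision'))"
```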
README.md

````diff
@@ -53,22 +53,28 @@ powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
 > Replace `...` with the rest of your install command as needed.
 
 - Sync all dependencies based on your correct wheel using `uv sync`. `uv sync` creates a `.venv` (virtual environment) inside your current working directory. This environment is only active within that directory and can't be directly accessed from outside. To use all installed packages, you must run commands with `uv run` (e.g., `uv run sleap-nn train ...` or `uv run pytest ...`).
-- **Windows/Linux with NVIDIA GPU (CUDA
+- **Windows/Linux with NVIDIA GPU (CUDA 13.0):**
 
   ```bash
-  uv sync --extra
+  uv sync --extra torch-cuda130
   ```
 
 - **Windows/Linux with NVIDIA GPU (CUDA 12.8):**
 
   ```bash
-  uv sync --extra
+  uv sync --extra torch-cuda128
   ```
-
-- **
+
+- **Windows/Linux with NVIDIA GPU (CUDA 11.8):**
+
+  ```bash
+  uv sync --extra torch-cuda118
+  ```
+
+- **macOS with Apple Silicon (M1, M2, M3, M4) or CPU-only (no GPU or unsupported GPU):**
   Note: Even if torch-cpu is used on macOS, the MPS backend will be available.
   ```bash
-  uv sync --extra
+  uv sync --extra torch-cpu
   ```
 
 4. **Run tests**
@@ -85,6 +91,6 @@ powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
 > **Upgrading All Dependencies**
 > To ensure you have the latest versions of all dependencies, use the `--upgrade` flag with `uv sync`:
 > ```bash
-> uv sync --
+> uv sync --upgrade
 > ```
 > This will upgrade all installed packages in your environment to the latest available versions compatible with your `pyproject.toml`.
````
codecov.yml

````diff
@@ -10,9 +10,11 @@ coverage:
         threshold: 2% # Less leeway with backend code.
         paths:
           - "sleap_nn/"
+        informational: true # Don't fail PRs on coverage
     patch: # Only measures lines adjusted in the pull request.
       default: false
       package:
         target: 95% # All backend code should be tested...
         paths:
-          - "sleap_nn/"
+          - "sleap_nn/"
+        informational: true # Don't fail PRs on coverage
````