napari-tmidas 0.2.1__tar.gz → 0.2.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/.github/workflows/test_and_deploy.yml +65 -10
  2. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/.gitignore +6 -0
  3. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/PKG-INFO +92 -39
  4. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/README.md +63 -25
  5. napari_tmidas-0.2.4/docs/advanced_processing.md +398 -0
  6. napari_tmidas-0.2.4/docs/basic_processing.md +278 -0
  7. napari_tmidas-0.2.4/docs/cellpose_segmentation.md +231 -0
  8. napari_tmidas-0.2.4/docs/grid_view_overlay.md +229 -0
  9. napari_tmidas-0.2.4/docs/intensity_label_filter.md +129 -0
  10. napari_tmidas-0.2.4/docs/regionprops_analysis.md +96 -0
  11. napari_tmidas-0.2.4/docs/regionprops_summary.md +136 -0
  12. napari_tmidas-0.2.4/docs/trackastra_tracking.md +268 -0
  13. napari_tmidas-0.2.4/examples/grid_overlay_example.py +66 -0
  14. napari_tmidas-0.2.4/examples/intensity_filter_example.py +179 -0
  15. napari_tmidas-0.2.4/examples/regionprops_example.py +143 -0
  16. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/pyproject.toml +47 -16
  17. napari_tmidas-0.2.4/src/napari_tmidas/__init__.py +52 -0
  18. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_crop_anything.py +1458 -499
  19. napari_tmidas-0.2.4/src/napari_tmidas/_env_manager.py +76 -0
  20. napari_tmidas-0.2.4/src/napari_tmidas/_file_conversion.py +2475 -0
  21. napari_tmidas-0.2.4/src/napari_tmidas/_file_selector.py +2410 -0
  22. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_label_inspection.py +83 -8
  23. napari_tmidas-0.2.4/src/napari_tmidas/_processing_worker.py +309 -0
  24. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_reader.py +6 -10
  25. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_registry.py +15 -14
  26. napari_tmidas-0.2.4/src/napari_tmidas/_roi_colocalization.py +2312 -0
  27. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_crop_anything.py +123 -0
  28. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_env_manager.py +89 -0
  29. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_file_selector.py +90 -0
  30. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_grid_view_overlay.py +193 -0
  31. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_init.py +98 -0
  32. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_intensity_label_filter.py +222 -0
  33. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_label_inspection.py +86 -0
  34. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_processing_basic.py +500 -0
  35. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_processing_worker.py +142 -0
  36. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_regionprops_analysis.py +547 -0
  37. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_registry.py +135 -0
  38. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_scipy_filters.py +168 -0
  39. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_skimage_filters.py +259 -0
  40. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_split_channels.py +217 -0
  41. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_spotiflow.py +87 -0
  42. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_tyx_display_fix.py +142 -0
  43. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_ui_utils.py +68 -0
  44. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_tests/test_widget.py +30 -0
  45. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_windows_basic.py +66 -0
  46. napari_tmidas-0.2.4/src/napari_tmidas/_ui_utils.py +57 -0
  47. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_version.py +16 -3
  48. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_widget.py +41 -4
  49. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/basic.py +557 -20
  50. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/careamics_env_manager.py +72 -99
  51. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/cellpose_env_manager.py +510 -0
  52. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/cellpose_segmentation.py +132 -191
  53. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/colocalization.py +697 -0
  54. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/grid_view_overlay.py +703 -0
  55. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/intensity_label_filter.py +422 -0
  56. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/regionprops_analysis.py +1280 -0
  57. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/sam2_env_manager.py +95 -0
  58. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/sam2_mp4.py +362 -0
  59. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/scipy_filters.py +452 -0
  60. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/skimage_filters.py +669 -0
  61. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/spotiflow_detection.py +949 -0
  62. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/spotiflow_env_manager.py +591 -0
  63. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/timepoint_merger.py +738 -0
  64. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/trackastra_tracking.py +24 -5
  65. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/PKG-INFO +92 -39
  66. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/SOURCES.txt +38 -1
  67. napari_tmidas-0.2.4/src/napari_tmidas.egg-info/requires.txt +40 -0
  68. napari_tmidas-0.2.4/test_grid_overlay.py +139 -0
  69. napari_tmidas-0.2.4/tox.ini +44 -0
  70. napari_tmidas-0.2.1/src/napari_tmidas/__init__.py +0 -22
  71. napari_tmidas-0.2.1/src/napari_tmidas/_file_conversion.py +0 -1960
  72. napari_tmidas-0.2.1/src/napari_tmidas/_file_selector.py +0 -1169
  73. napari_tmidas-0.2.1/src/napari_tmidas/_roi_colocalization.py +0 -1175
  74. napari_tmidas-0.2.1/src/napari_tmidas/_tests/__init__.py +0 -0
  75. napari_tmidas-0.2.1/src/napari_tmidas/processing_functions/cellpose_env_manager.py +0 -207
  76. napari_tmidas-0.2.1/src/napari_tmidas/processing_functions/colocalization.py +0 -240
  77. napari_tmidas-0.2.1/src/napari_tmidas/processing_functions/sam2_env_manager.py +0 -111
  78. napari_tmidas-0.2.1/src/napari_tmidas/processing_functions/sam2_mp4.py +0 -283
  79. napari_tmidas-0.2.1/src/napari_tmidas/processing_functions/scipy_filters.py +0 -57
  80. napari_tmidas-0.2.1/src/napari_tmidas/processing_functions/skimage_filters.py +0 -457
  81. napari_tmidas-0.2.1/src/napari_tmidas/processing_functions/timepoint_merger.py +0 -490
  82. napari_tmidas-0.2.1/src/napari_tmidas.egg-info/requires.txt +0 -21
  83. napari_tmidas-0.2.1/tox.ini +0 -40
  84. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/.github/dependabot.yml +0 -0
  85. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/.napari-hub/DESCRIPTION.md +0 -0
  86. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/.napari-hub/config.yml +0 -0
  87. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/.pre-commit-config.yaml +0 -0
  88. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/LICENSE +0 -0
  89. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/MANIFEST.in +0 -0
  90. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/setup.cfg +0 -0
  91. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_sample_data.py +0 -0
  92. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_tests/test_reader.py +0 -0
  93. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_tests/test_sample_data.py +0 -0
  94. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_tests/test_writer.py +0 -0
  95. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/_writer.py +0 -0
  96. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/napari.yaml +0 -0
  97. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/__init__.py +0 -0
  98. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/careamics_denoising.py +0 -0
  99. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/file_compression.py +0 -0
  100. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/dependency_links.txt +0 -0
  101. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/entry_points.txt +0 -0
  102. {napari_tmidas-0.2.1 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/top_level.txt +0 -0
--- napari_tmidas-0.2.1/.github/workflows/test_and_deploy.yml
+++ napari_tmidas-0.2.4/.github/workflows/test_and_deploy.yml
@@ -23,8 +23,8 @@ jobs:
     timeout-minutes: 30
     strategy:
       matrix:
-        platform: [ubuntu-latest, windows-latest, macos-latest]
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        platform: [ubuntu-latest, macos-latest]
+        python-version: ["3.10", "3.11"]

     steps:
       - uses: actions/checkout@v4
@@ -37,13 +37,6 @@ jobs:
       # these libraries enable testing on Qt on linux
       - uses: tlambert03/setup-qt-libs@v1

-      # strategy borrowed from vispy for installing opengl libs on windows
-      - name: Install Windows OpenGL
-        if: runner.os == 'Windows'
-        run: |
-          git clone --depth 1 https://github.com/pyvista/gl-ci-helpers.git
-          powershell gl-ci-helpers/appveyor/install_opengl.ps1
-
       # note: if you need dependencies from conda, considering using
       # setup-miniconda: https://github.com/conda-incubator/setup-miniconda
       # and
@@ -53,13 +46,75 @@ jobs:
           python -m pip install --upgrade pip
           python -m pip install setuptools tox tox-gh-actions

+      - name: Pip cache
+        uses: actions/cache@v4
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('pyproject.toml', 'tox.ini') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+
+      - name: Remove .tox cache
+        run: rm -rf .tox
+
+      - name: Free up disk space (Linux)
+        if: runner.os == 'Linux'
+        run: |
+          # Remove unnecessary tools and packages to free up disk space
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /opt/ghc
+          sudo rm -rf /opt/hostedtoolcache/CodeQL
+          sudo docker image prune --all --force
+          df -h
+
+      - name: Upgrade critical dependencies
+        run: |
+          python -m pip install --upgrade numpy psygnal pytest
+          python -m pip install --upgrade pip setuptools wheel
+
+      - name: Install system dependencies (Linux)
+        if: runner.os == 'Linux'
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y \
+            libgl1-mesa-dev \
+            libglib2.0-0 \
+            libsm6 \
+            libxext6 \
+            libxrender1 \
+            libgomp1
+
       # this runs the platform-specific tests declared in tox.ini
       - name: Test with tox
         uses: aganders3/headless-gui@v2
         with:
-          run: python -m tox
+          run: python -m tox -r -- -m "not slow"
         env:
           PLATFORM: ${{ matrix.platform }}
+          QT_QPA_PLATFORM: offscreen
+          MPLBACKEND: Agg
+          PYTHONUNBUFFERED: 1
+          PYTHONDONTWRITEBYTECODE: 1
+
+      - name: Show pytest summary (if available)
+        if: always()
+        shell: bash
+        run: |
+          set -euo pipefail
+          echo "Checking for coverage.xml..."
+          FOUND=0
+          if [ -f coverage.xml ]; then
+            echo "Found coverage.xml at repo root"; FOUND=1
+          fi
+          # Glob inside .tox envs (quiet if none)
+          if ls .tox/*/coverage.xml >/dev/null 2>&1; then
+            echo "Found coverage.xml inside .tox environment"; FOUND=1
+          fi
+          if [ "$FOUND" -eq 0 ]; then
+            echo "No coverage.xml found (this may be fine if tests were skipped).";
+            ls -al . | sed -n '1,120p'
+          fi

       - name: Coverage
         uses: codecov/codecov-action@v3
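The tox invocation above now forwards `-m "not slow"` to pytest, which implies the test suite tags long-running tests with a `slow` marker. A minimal sketch of that convention (hypothetical test names and marker registration, not taken from this package):

    import pytest

    # The project would normally register the marker, e.g. in pyproject.toml:
    # [tool.pytest.ini_options]
    # markers = ["slow: long-running tests, deselected in CI with -m 'not slow'"]

    @pytest.mark.slow
    def test_full_pipeline_on_large_stack():
        # Hypothetical long-running test; skipped by `python -m tox -r -- -m "not slow"`.
        ...

    def test_registry_roundtrip():
        # Fast test; always collected.
        ...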
--- napari_tmidas-0.2.1/.gitignore
+++ napari_tmidas-0.2.4/.gitignore
@@ -82,3 +82,9 @@ venv/

 # written by setuptools_scm
 **/_version.py
+
+# Test run artifacts (not source)
+slow_tests_output.txt
+tox_run_output.txt
+*_run_output.txt
+*tests_output.txt
--- napari_tmidas-0.2.1/PKG-INFO
+++ napari_tmidas-0.2.4/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: napari-tmidas
-Version: 0.2.1
+Version: 0.2.4
 Summary: A plugin for batch processing of confocal and whole-slide microscopy images of biological tissues
 Author: Marco Meer
 Author-email: marco.meer@pm.me
@@ -41,38 +41,53 @@ Classifier: Development Status :: 2 - Pre-Alpha
 Classifier: Framework :: napari
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: POSIX :: Linux
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Scientific/Engineering :: Image Processing
-Requires-Python: >=3.9
+Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: numpy
+Requires-Dist: numpy<2.0,>=1.23.0
 Requires-Dist: magicgui
+Requires-Dist: tqdm
 Requires-Dist: qtpy
-Requires-Dist: scikit-image
+Requires-Dist: scikit-image>=0.19.0
+Requires-Dist: scikit-learn-extra>=0.3.0
 Requires-Dist: pyqt5
-Requires-Dist: tqdm
-Requires-Dist: scikit-image
+Requires-Dist: zarr
 Requires-Dist: ome-zarr
 Requires-Dist: napari-ome-zarr
-Requires-Dist: torch
-Requires-Dist: torchvision
-Requires-Dist: timm
-Requires-Dist: opencv-python
+Requires-Dist: nd2
+Requires-Dist: pylibCZIrw
+Requires-Dist: readlif
+Requires-Dist: tiffslide
+Requires-Dist: acquifer-napari
 Provides-Extra: testing
 Requires-Dist: tox; extra == "testing"
-Requires-Dist: pytest; extra == "testing"
+Requires-Dist: pytest>=7.0.0; extra == "testing"
 Requires-Dist: pytest-cov; extra == "testing"
 Requires-Dist: pytest-qt; extra == "testing"
+Requires-Dist: pytest-timeout; extra == "testing"
 Requires-Dist: napari; extra == "testing"
 Requires-Dist: pyqt5; extra == "testing"
+Requires-Dist: psygnal>=0.8.0; extra == "testing"
+Provides-Extra: clustering
+Requires-Dist: scikit-learn-extra>=0.3.0; extra == "clustering"
+Provides-Extra: deep-learning
+Requires-Dist: torch>=1.12.0; extra == "deep-learning"
+Requires-Dist: torchvision>=0.13.0; extra == "deep-learning"
+Requires-Dist: timm; extra == "deep-learning"
+Requires-Dist: opencv-python; extra == "deep-learning"
+Requires-Dist: cmake; extra == "deep-learning"
+Requires-Dist: hydra-core; extra == "deep-learning"
+Requires-Dist: eva-decord; extra == "deep-learning"
+Provides-Extra: all
+Requires-Dist: napari-tmidas[clustering,deep-learning,testing]; extra == "all"
 Dynamic: license-file

 # napari-tmidas
@@ -80,19 +95,21 @@ Dynamic: license-file
 [![License BSD-3](https://img.shields.io/pypi/l/napari-tmidas.svg?color=green)](https://github.com/macromeer/napari-tmidas/raw/main/LICENSE)
 [![PyPI](https://img.shields.io/pypi/v/napari-tmidas.svg?color=green)](https://pypi.org/project/napari-tmidas)
 [![Python Version](https://img.shields.io/pypi/pyversions/napari-tmidas.svg?color=green)](https://python.org)
+[![Downloads](https://static.pepy.tech/badge/napari-tmidas)](https://pepy.tech/project/napari-tmidas)
 [![tests](https://github.com/macromeer/napari-tmidas/workflows/tests/badge.svg)](https://github.com/macromeer/napari-tmidas/actions)
-[![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-tmidas)](https://napari-hub.org/plugins/napari-tmidas)
-<!-- [![codecov](https://codecov.io/gh/macromeer/napari-tmidas/branch/main/graph/badge.svg)](https://codecov.io/gh/macromeer/napari-tmidas) -->
-The `napari-tmidas` plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the CLI version of [T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
+
+This napari plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the [T-MIDAS terminal](https://github.com/MercaderLabAnatomy/T-MIDAS).

 ## Features
-Currently, napari-tmidas provides pipelines as widgets for batch image conversion / cropping / processing, ROI colocalization and label inspection (cf. [Usage](#usage) below).
+Currently, **napari-tmidas** provides pipelines as widgets for batch image conversion and processing, object cropping, label image inspection and ROI colocalization (cf. [usage](#usage) below). You can request new batch image processing features in [issues](https://github.com/MercaderLabAnatomy/napari-tmidas/issues).

 ## Installation

+(Video installation guides: https://www.youtube.com/@macromeer/videos)
+
 First, install Napari in a virtual environment:

-mamba create -y -n napari-tmidas -c conda-forge python=3.11 tqdm
+mamba create -y -n napari-tmidas -c conda-forge python=3.11
 mamba activate napari-tmidas
 python -m pip install "napari[all]"

@@ -100,34 +117,36 @@ Now you can install `napari-tmidas` via [pip]:

 pip install napari-tmidas

-It is recommended to install the latest development version. Please also regularly execute this command in the activated environment:
+**For deep learning features** (Batch Crop Anything with SAM2, Spotiflow, Careamics, Trackastra), also install:

-pip install git+https://github.com/macromeer/napari-tmidas.git
+pip install 'napari-tmidas[deep-learning]'

-### Dependencies
+Or install everything at once:

-To use the Batch Microscopy Image Conversion pipeline, we need some libraries to read microscopy formats:
+pip install 'napari-tmidas[all]'

-# mamba activate napari-tmidas
-pip install nd2 readlif tiffslide pylibCZIrw acquifer-napari
+It is recommended though to install the **latest development version**. Please also execute this command from time to time in the activated environment to benefit from newly added features:

-If you want to batch compress images using [Zstandard](https://github.com/facebook/zstd), use the package manager of your operating system to install it:
+pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git

-sudo apt-get install zstd # for Linux
-brew install zstd # for macOS
-choco install zstandard # for Windows
+### Additional Setup for Batch Crop Anything

-To use the Batch Crop Anything pipeline, we need to install SAM2 in the napari-tmidas environment:
+To use the Batch Crop Anything pipeline with SAM2, you need to install SAM2 separately:

-# mamba activate napari-tmidas
-cd /opt
+cd /opt # if the folder does not exist: mkdir /opt && cd /opt
 git clone https://github.com/facebookresearch/sam2.git && cd sam2
 pip install -e .
-wget https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt -P checkpoints/
-pip install decord
+curl -L https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt -o checkpoints/sam2.1_hiera_large.pt
+mamba install -c conda-forge ffmpeg # we also need ffmpeg
+
+If you want to batch compress image data using [Zstandard](https://github.com/facebook/zstd), use the package manager of your operating system to install it:

+~~sudo apt-get install zstd~~ # Pre-installed on Linux :man_shrugging:

+brew install zstd # for macOS (requires Homebrew)
+pip install zstandard # Windows with Python >= 3.7

+And you are done!

 ## Usage

@@ -139,19 +158,22 @@ You can then find the installed plugin in the Plugins tab.

 ### Microscopy Image Conversion

-You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conversion`. Currently, this pipeline supports the conversion of `.nd2, .lif, .ndpi, .czi` and acquifer data. After scanning a folder of your choice for microscopy image data, select a file in the first column of the table and preview and export any image data it contains.
+Converts `.lif, .nd2, .czi, .ndpi` and Acquifer data to TIF or OME-Zarr formats. Scan a folder, select files, and export with preserved spatial metadata.

+**Supported Formats:**
+- **TIF** - Standard format for compatibility
+- **OME-Zarr** - Recommended for large datasets, [spec v0.5](https://ngff.openmicroscopy.org/latest/) compliant with automatic physical metadata extraction (voxel sizes, spacing)

 <img src="https://github.com/user-attachments/assets/e377ca71-2f30-447d-825e-d2feebf7061b" alt="Microscopy Image Conversion Widget" style="width:75%; height:auto;">


 ### Image Processing

-1. After opening `Plugins > T-MIDAS > Batch Image Processing`, enter the path to the folder containing the images to be processed (currently supports TIF, later also ZARR). You can also filter for filename suffix.
+1. You start with entering the path to the folder containing the images to be processed (currently supports TIF, later also ZARR) and optionally a filter for filename suffix

 ![image](https://github.com/user-attachments/assets/41ecb689-9abe-4371-83b5-9c5eb37069f9)

-2. As a result, a table appears with the found images. You can click on them to inspect them in the viewer.
+2. After indexing the files, a table appears with the found images. You can click on them to inspect them in the viewer.

 ![image](https://github.com/user-attachments/assets/8360942a-be8f-49ec-bc25-385ee43bd601)

@@ -166,19 +188,50 @@ You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conv

 Note that whenever you click on an `Original File` or `Processed File` in the table, it will replace the one that is currently shown in the viewer. So naturally, you'd first select the original image, and then the processed image to correctly see the image pair that you want to inspect.

+
+#### Processing Function Credits
+
+The image processing capabilities are powered by several excellent open-source tools:
+- [Cellpose 4](https://github.com/MouseLand/cellpose): Advanced cell segmentation
+- [Trackastra](https://github.com/weigertlab/trackastra): Cell tracking and analysis
+- [VisCy](https://github.com/mehta-lab/VisCy): Virtual staining using deep learning
+- [CAREamics](https://github.com/CAREamics/careamics): Content-aware image restoration and enhancement
+- [Spotiflow](https://github.com/weigertlab/spotiflow): Accurate and efficient spot detection for fluorescence microscopy
+
+#### Processing Function Documentation
+
+Detailed documentation for specific processing functions:
+
+**Core Processing**
+- [Basic Processing Functions](docs/basic_processing.md) - Label and intensity operations, channel splitting/merging, time series
+- [Cellpose Segmentation](docs/cellpose_segmentation.md) - Deep learning cell/nucleus segmentation
+- [TrackAstra Tracking](docs/trackastra_tracking.md) - Cell tracking across time-lapse data
+- [VisCy Virtual Staining](docs/viscy_virtual_staining.md) - Virtual staining of phase/DIC images using deep learning
+
+**Analysis and Quality Control**
+- [Grid View: Intensity + Labels Overlay](docs/grid_view_overlay.md) - Visual QC for segmentation results
+- [Intensity-Based Label Filtering](docs/intensity_label_filter.md) - Filter labels by signal intensity
+- [Regionprops Analysis](docs/regionprops_analysis.md) - Extract quantitative properties from labels
+
+**Advanced Processing**
+- [Advanced Processing Functions](docs/advanced_processing.md) - Denoising (CAREamics), spot detection (Spotiflow), SciPy/scikit-image filters, compression, colocalization
+
 ### Batch Label Inspection
 If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Batch Label Inspection`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).

 <img src="https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e" alt="Batch Label Inspection Widget" style="width:75%; height:auto;">

-
 ### Crop Anything
-This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Click the image below to see a video demo.
+
+This pipeline combines the Segment Anything Model (SAM2; supports YX, ZYX and TYX data) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: Enter 2D view and go to the first z slice where the object to be cropped is appearing. Activate/select the points layer and click on the object. Terminal shows progress. You can then proceed to select another object (always do this in 2D mode)

 <img src="https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443" alt="Crop Anything Widget" style="width:75%; height:auto;">


+
+
 ### ROI Colocalization
+
 This pipeline quantifies colocalization between labeled regions of interest (ROIs) across multiple image channels. It determines the extent of overlap between ROIs in a reference channel and those in one or two other channels. The output is a table of colocalization counts. Optionally, the size of reference channel ROIs, as well as the total or median size of colocalizing ROIs in the other channels, can be included. Colocalization is determined using Boolean masking. The number of colocalizing instances is determined by counting unique label IDs within the overlapping regions. Typically, the reference channel contains larger structures, while other channels contain smaller, potentially nested, structures. For example, the reference channel might contain cell bodies, with the second and third channels containing nuclei and sub-nuclear objects, respectively.

 <img src="https://github.com/user-attachments/assets/2f9022a0-7b88-4588-a448-250f07a634d7" alt="ROI Colocalization Widget" style="width:75%; height:auto;">
--- napari_tmidas-0.2.1/README.md
+++ napari_tmidas-0.2.4/README.md
@@ -3,19 +3,21 @@
 [![License BSD-3](https://img.shields.io/pypi/l/napari-tmidas.svg?color=green)](https://github.com/macromeer/napari-tmidas/raw/main/LICENSE)
 [![PyPI](https://img.shields.io/pypi/v/napari-tmidas.svg?color=green)](https://pypi.org/project/napari-tmidas)
 [![Python Version](https://img.shields.io/pypi/pyversions/napari-tmidas.svg?color=green)](https://python.org)
+[![Downloads](https://static.pepy.tech/badge/napari-tmidas)](https://pepy.tech/project/napari-tmidas)
 [![tests](https://github.com/macromeer/napari-tmidas/workflows/tests/badge.svg)](https://github.com/macromeer/napari-tmidas/actions)
-[![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-tmidas)](https://napari-hub.org/plugins/napari-tmidas)
-<!-- [![codecov](https://codecov.io/gh/macromeer/napari-tmidas/branch/main/graph/badge.svg)](https://codecov.io/gh/macromeer/napari-tmidas) -->
-The `napari-tmidas` plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the CLI version of [T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
+
+This napari plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the [T-MIDAS terminal](https://github.com/MercaderLabAnatomy/T-MIDAS).

 ## Features
-Currently, napari-tmidas provides pipelines as widgets for batch image conversion / cropping / processing, ROI colocalization and label inspection (cf. [Usage](#usage) below).
+Currently, **napari-tmidas** provides pipelines as widgets for batch image conversion and processing, object cropping, label image inspection and ROI colocalization (cf. [usage](#usage) below). You can request new batch image processing features in [issues](https://github.com/MercaderLabAnatomy/napari-tmidas/issues).

 ## Installation

+(Video installation guides: https://www.youtube.com/@macromeer/videos)
+
 First, install Napari in a virtual environment:

-mamba create -y -n napari-tmidas -c conda-forge python=3.11 tqdm
+mamba create -y -n napari-tmidas -c conda-forge python=3.11
 mamba activate napari-tmidas
 python -m pip install "napari[all]"

@@ -23,34 +25,36 @@ Now you can install `napari-tmidas` via [pip]:

 pip install napari-tmidas

-It is recommended to install the latest development version. Please also regularly execute this command in the activated environment:
+**For deep learning features** (Batch Crop Anything with SAM2, Spotiflow, Careamics, Trackastra), also install:

-pip install git+https://github.com/macromeer/napari-tmidas.git
+pip install 'napari-tmidas[deep-learning]'

-### Dependencies
+Or install everything at once:

-To use the Batch Microscopy Image Conversion pipeline, we need some libraries to read microscopy formats:
+pip install 'napari-tmidas[all]'

-# mamba activate napari-tmidas
-pip install nd2 readlif tiffslide pylibCZIrw acquifer-napari
+It is recommended though to install the **latest development version**. Please also execute this command from time to time in the activated environment to benefit from newly added features:

-If you want to batch compress images using [Zstandard](https://github.com/facebook/zstd), use the package manager of your operating system to install it:
+pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git

-sudo apt-get install zstd # for Linux
-brew install zstd # for macOS
-choco install zstandard # for Windows
+### Additional Setup for Batch Crop Anything

-To use the Batch Crop Anything pipeline, we need to install SAM2 in the napari-tmidas environment:
+To use the Batch Crop Anything pipeline with SAM2, you need to install SAM2 separately:

-# mamba activate napari-tmidas
-cd /opt
+cd /opt # if the folder does not exist: mkdir /opt && cd /opt
 git clone https://github.com/facebookresearch/sam2.git && cd sam2
 pip install -e .
-wget https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt -P checkpoints/
-pip install decord
+curl -L https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt -o checkpoints/sam2.1_hiera_large.pt
+mamba install -c conda-forge ffmpeg # we also need ffmpeg
+
+If you want to batch compress image data using [Zstandard](https://github.com/facebook/zstd), use the package manager of your operating system to install it:

+~~sudo apt-get install zstd~~ # Pre-installed on Linux :man_shrugging:

+brew install zstd # for macOS (requires Homebrew)
+pip install zstandard # Windows with Python >= 3.7

+And you are done!

 ## Usage

@@ -62,19 +66,22 @@ You can then find the installed plugin in the Plugins tab.

 ### Microscopy Image Conversion

-You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conversion`. Currently, this pipeline supports the conversion of `.nd2, .lif, .ndpi, .czi` and acquifer data. After scanning a folder of your choice for microscopy image data, select a file in the first column of the table and preview and export any image data it contains.
+Converts `.lif, .nd2, .czi, .ndpi` and Acquifer data to TIF or OME-Zarr formats. Scan a folder, select files, and export with preserved spatial metadata.

+**Supported Formats:**
+- **TIF** - Standard format for compatibility
+- **OME-Zarr** - Recommended for large datasets, [spec v0.5](https://ngff.openmicroscopy.org/latest/) compliant with automatic physical metadata extraction (voxel sizes, spacing)

 <img src="https://github.com/user-attachments/assets/e377ca71-2f30-447d-825e-d2feebf7061b" alt="Microscopy Image Conversion Widget" style="width:75%; height:auto;">


 ### Image Processing

-1. After opening `Plugins > T-MIDAS > Batch Image Processing`, enter the path to the folder containing the images to be processed (currently supports TIF, later also ZARR). You can also filter for filename suffix.
+1. You start with entering the path to the folder containing the images to be processed (currently supports TIF, later also ZARR) and optionally a filter for filename suffix

 ![image](https://github.com/user-attachments/assets/41ecb689-9abe-4371-83b5-9c5eb37069f9)

-2. As a result, a table appears with the found images. You can click on them to inspect them in the viewer.
+2. After indexing the files, a table appears with the found images. You can click on them to inspect them in the viewer.

 ![image](https://github.com/user-attachments/assets/8360942a-be8f-49ec-bc25-385ee43bd601)

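The OME-Zarr export described in the hunk above records physical voxel sizes in the NGFF multiscales metadata. As a rough illustration (not the plugin's code; the file path is hypothetical and the attribute layout differs between NGFF v0.4 and v0.5), the exported scale can be read back with zarr:

    import zarr

    # Hypothetical path to an image exported by the conversion widget.
    root = zarr.open("converted_image.ome.zarr", mode="r")

    # NGFF multiscales metadata: nested under "ome" in spec v0.5,
    # at the top level of the group attributes in v0.4.
    attrs = root.attrs.asdict()
    multiscales = attrs.get("ome", attrs).get("multiscales", [])
    if multiscales:
        transform = multiscales[0]["datasets"][0]["coordinateTransformations"][0]
        print("voxel size per axis:", transform.get("scale"))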
@@ -89,19 +96,50 @@ You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conv

 Note that whenever you click on an `Original File` or `Processed File` in the table, it will replace the one that is currently shown in the viewer. So naturally, you'd first select the original image, and then the processed image to correctly see the image pair that you want to inspect.

+
+#### Processing Function Credits
+
+The image processing capabilities are powered by several excellent open-source tools:
+- [Cellpose 4](https://github.com/MouseLand/cellpose): Advanced cell segmentation
+- [Trackastra](https://github.com/weigertlab/trackastra): Cell tracking and analysis
+- [VisCy](https://github.com/mehta-lab/VisCy): Virtual staining using deep learning
+- [CAREamics](https://github.com/CAREamics/careamics): Content-aware image restoration and enhancement
+- [Spotiflow](https://github.com/weigertlab/spotiflow): Accurate and efficient spot detection for fluorescence microscopy
+
+#### Processing Function Documentation
+
+Detailed documentation for specific processing functions:
+
+**Core Processing**
+- [Basic Processing Functions](docs/basic_processing.md) - Label and intensity operations, channel splitting/merging, time series
+- [Cellpose Segmentation](docs/cellpose_segmentation.md) - Deep learning cell/nucleus segmentation
+- [TrackAstra Tracking](docs/trackastra_tracking.md) - Cell tracking across time-lapse data
+- [VisCy Virtual Staining](docs/viscy_virtual_staining.md) - Virtual staining of phase/DIC images using deep learning
+
+**Analysis and Quality Control**
+- [Grid View: Intensity + Labels Overlay](docs/grid_view_overlay.md) - Visual QC for segmentation results
+- [Intensity-Based Label Filtering](docs/intensity_label_filter.md) - Filter labels by signal intensity
+- [Regionprops Analysis](docs/regionprops_analysis.md) - Extract quantitative properties from labels
+
+**Advanced Processing**
+- [Advanced Processing Functions](docs/advanced_processing.md) - Denoising (CAREamics), spot detection (Spotiflow), SciPy/scikit-image filters, compression, colocalization
+
 ### Batch Label Inspection
 If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Batch Label Inspection`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).

 <img src="https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e" alt="Batch Label Inspection Widget" style="width:75%; height:auto;">

-
 ### Crop Anything
-This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Click the image below to see a video demo.
+
+This pipeline combines the Segment Anything Model (SAM2; supports YX, ZYX and TYX data) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: Enter 2D view and go to the first z slice where the object to be cropped is appearing. Activate/select the points layer and click on the object. Terminal shows progress. You can then proceed to select another object (always do this in 2D mode)

 <img src="https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443" alt="Crop Anything Widget" style="width:75%; height:auto;">


+
+
 ### ROI Colocalization
+
 This pipeline quantifies colocalization between labeled regions of interest (ROIs) across multiple image channels. It determines the extent of overlap between ROIs in a reference channel and those in one or two other channels. The output is a table of colocalization counts. Optionally, the size of reference channel ROIs, as well as the total or median size of colocalizing ROIs in the other channels, can be included. Colocalization is determined using Boolean masking. The number of colocalizing instances is determined by counting unique label IDs within the overlapping regions. Typically, the reference channel contains larger structures, while other channels contain smaller, potentially nested, structures. For example, the reference channel might contain cell bodies, with the second and third channels containing nuclei and sub-nuclear objects, respectively.

 <img src="https://github.com/user-attachments/assets/2f9022a0-7b88-4588-a448-250f07a634d7" alt="ROI Colocalization Widget" style="width:75%; height:auto;">
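The ROI colocalization logic described above (Boolean masking of each reference ROI, then counting unique label IDs inside the overlap) can be summarised in a few lines of NumPy. This is a minimal sketch of the stated approach, not the plugin's implementation; the function and array names are illustrative and assume same-shaped integer label images:

    import numpy as np

    def count_colocalizing_labels(reference: np.ndarray, other: np.ndarray) -> dict:
        # For each reference ROI, count the distinct labels of `other` that overlap it.
        counts = {}
        for ref_id in np.unique(reference):
            if ref_id == 0:
                continue  # skip background
            overlap = other[reference == ref_id]   # values of `other` inside this reference ROI
            ids = np.unique(overlap[overlap > 0])  # label IDs present in the overlap
            counts[int(ref_id)] = int(ids.size)    # number of colocalizing ROIs
        return counts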