napari-tmidas 0.2.2__tar.gz → 0.2.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103) hide show
  1. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/.github/workflows/test_and_deploy.yml +65 -10
  2. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/.gitignore +6 -0
  3. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/PKG-INFO +71 -30
  4. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/README.md +48 -14
  5. napari_tmidas-0.2.5/docs/advanced_processing.md +398 -0
  6. napari_tmidas-0.2.5/docs/basic_processing.md +278 -0
  7. napari_tmidas-0.2.5/docs/cellpose_segmentation.md +231 -0
  8. napari_tmidas-0.2.5/docs/grid_view_overlay.md +229 -0
  9. napari_tmidas-0.2.5/docs/intensity_label_filter.md +129 -0
  10. napari_tmidas-0.2.5/docs/regionprops_analysis.md +96 -0
  11. napari_tmidas-0.2.5/docs/regionprops_summary.md +136 -0
  12. napari_tmidas-0.2.5/docs/trackastra_tracking.md +268 -0
  13. napari_tmidas-0.2.5/examples/grid_overlay_example.py +66 -0
  14. napari_tmidas-0.2.5/examples/intensity_filter_example.py +179 -0
  15. napari_tmidas-0.2.5/examples/regionprops_example.py +143 -0
  16. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/pyproject.toml +41 -18
  17. napari_tmidas-0.2.5/src/napari_tmidas/__init__.py +52 -0
  18. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_crop_anything.py +1520 -609
  19. napari_tmidas-0.2.5/src/napari_tmidas/_env_manager.py +76 -0
  20. napari_tmidas-0.2.5/src/napari_tmidas/_file_conversion.py +2475 -0
  21. napari_tmidas-0.2.5/src/napari_tmidas/_file_selector.py +2410 -0
  22. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_label_inspection.py +83 -8
  23. napari_tmidas-0.2.5/src/napari_tmidas/_processing_worker.py +309 -0
  24. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_reader.py +6 -10
  25. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_registry.py +2 -2
  26. napari_tmidas-0.2.5/src/napari_tmidas/_roi_colocalization.py +2312 -0
  27. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_crop_anything.py +123 -0
  28. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_env_manager.py +89 -0
  29. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_grid_view_overlay.py +193 -0
  30. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_init.py +98 -0
  31. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_intensity_label_filter.py +222 -0
  32. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_label_inspection.py +86 -0
  33. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_processing_basic.py +500 -0
  34. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_processing_worker.py +142 -0
  35. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_regionprops_analysis.py +547 -0
  36. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_registry.py +135 -0
  37. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_scipy_filters.py +168 -0
  38. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_skimage_filters.py +259 -0
  39. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_split_channels.py +217 -0
  40. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_spotiflow.py +87 -0
  41. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_tyx_display_fix.py +142 -0
  42. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_ui_utils.py +68 -0
  43. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_tests/test_widget.py +30 -0
  44. napari_tmidas-0.2.5/src/napari_tmidas/_tests/test_windows_basic.py +66 -0
  45. napari_tmidas-0.2.5/src/napari_tmidas/_ui_utils.py +57 -0
  46. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_version.py +16 -3
  47. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_widget.py +41 -4
  48. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/processing_functions/basic.py +557 -20
  49. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/processing_functions/careamics_env_manager.py +72 -99
  50. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/cellpose_env_manager.py +510 -0
  51. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/processing_functions/cellpose_segmentation.py +132 -191
  52. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/colocalization.py +697 -0
  53. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/grid_view_overlay.py +703 -0
  54. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/intensity_label_filter.py +422 -0
  55. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/regionprops_analysis.py +1280 -0
  56. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/sam2_env_manager.py +95 -0
  57. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/sam2_mp4.py +362 -0
  58. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/scipy_filters.py +452 -0
  59. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/skimage_filters.py +669 -0
  60. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/spotiflow_detection.py +949 -0
  61. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/spotiflow_env_manager.py +591 -0
  62. napari_tmidas-0.2.5/src/napari_tmidas/processing_functions/timepoint_merger.py +738 -0
  63. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas.egg-info/PKG-INFO +71 -30
  64. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas.egg-info/SOURCES.txt +36 -1
  65. napari_tmidas-0.2.5/src/napari_tmidas.egg-info/requires.txt +40 -0
  66. napari_tmidas-0.2.5/test_grid_overlay.py +139 -0
  67. napari_tmidas-0.2.5/tox.ini +44 -0
  68. napari_tmidas-0.2.2/src/napari_tmidas/__init__.py +0 -22
  69. napari_tmidas-0.2.2/src/napari_tmidas/_file_conversion.py +0 -1960
  70. napari_tmidas-0.2.2/src/napari_tmidas/_file_selector.py +0 -1171
  71. napari_tmidas-0.2.2/src/napari_tmidas/_roi_colocalization.py +0 -1175
  72. napari_tmidas-0.2.2/src/napari_tmidas/_tests/__init__.py +0 -0
  73. napari_tmidas-0.2.2/src/napari_tmidas/_tests/test_registry.py +0 -67
  74. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/cellpose_env_manager.py +0 -207
  75. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/colocalization.py +0 -240
  76. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/sam2_env_manager.py +0 -111
  77. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/sam2_mp4.py +0 -283
  78. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/scipy_filters.py +0 -57
  79. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/skimage_filters.py +0 -457
  80. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/timepoint_merger.py +0 -490
  81. napari_tmidas-0.2.2/src/napari_tmidas.egg-info/requires.txt +0 -30
  82. napari_tmidas-0.2.2/tox.ini +0 -40
  83. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/.github/dependabot.yml +0 -0
  84. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/.napari-hub/DESCRIPTION.md +0 -0
  85. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/.napari-hub/config.yml +0 -0
  86. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/.pre-commit-config.yaml +0 -0
  87. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/LICENSE +0 -0
  88. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/MANIFEST.in +0 -0
  89. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/setup.cfg +0 -0
  90. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_sample_data.py +0 -0
  91. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_tests/test_file_selector.py +0 -0
  92. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_tests/test_reader.py +0 -0
  93. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_tests/test_sample_data.py +0 -0
  94. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_tests/test_writer.py +0 -0
  95. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/_writer.py +0 -0
  96. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/napari.yaml +0 -0
  97. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/processing_functions/__init__.py +0 -0
  98. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/processing_functions/careamics_denoising.py +0 -0
  99. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/processing_functions/file_compression.py +0 -0
  100. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas/processing_functions/trackastra_tracking.py +0 -0
  101. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas.egg-info/dependency_links.txt +0 -0
  102. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas.egg-info/entry_points.txt +0 -0
  103. {napari_tmidas-0.2.2 → napari_tmidas-0.2.5}/src/napari_tmidas.egg-info/top_level.txt +0 -0
@@ -23,8 +23,8 @@ jobs:
23
23
  timeout-minutes: 30
24
24
  strategy:
25
25
  matrix:
26
- platform: [ubuntu-latest, windows-latest, macos-latest]
27
- python-version: ["3.9", "3.10", "3.11"]
26
+ platform: [ubuntu-latest, macos-latest]
27
+ python-version: ["3.10", "3.11"]
28
28
 
29
29
  steps:
30
30
  - uses: actions/checkout@v4
@@ -37,13 +37,6 @@ jobs:
37
37
  # these libraries enable testing on Qt on linux
38
38
  - uses: tlambert03/setup-qt-libs@v1
39
39
 
40
- # strategy borrowed from vispy for installing opengl libs on windows
41
- - name: Install Windows OpenGL
42
- if: runner.os == 'Windows'
43
- run: |
44
- git clone --depth 1 https://github.com/pyvista/gl-ci-helpers.git
45
- powershell gl-ci-helpers/appveyor/install_opengl.ps1
46
-
47
40
  # note: if you need dependencies from conda, considering using
48
41
  # setup-miniconda: https://github.com/conda-incubator/setup-miniconda
49
42
  # and
@@ -53,13 +46,75 @@ jobs:
53
46
  python -m pip install --upgrade pip
54
47
  python -m pip install setuptools tox tox-gh-actions
55
48
 
49
+ - name: Pip cache
50
+ uses: actions/cache@v4
51
+ with:
52
+ path: ~/.cache/pip
53
+ key: ${{ runner.os }}-pip-${{ hashFiles('pyproject.toml', 'tox.ini') }}
54
+ restore-keys: |
55
+ ${{ runner.os }}-pip-
56
+
57
+ - name: Remove .tox cache
58
+ run: rm -rf .tox
59
+
60
+ - name: Free up disk space (Linux)
61
+ if: runner.os == 'Linux'
62
+ run: |
63
+ # Remove unnecessary tools and packages to free up disk space
64
+ sudo rm -rf /usr/share/dotnet
65
+ sudo rm -rf /usr/local/lib/android
66
+ sudo rm -rf /opt/ghc
67
+ sudo rm -rf /opt/hostedtoolcache/CodeQL
68
+ sudo docker image prune --all --force
69
+ df -h
70
+
71
+ - name: Upgrade critical dependencies
72
+ run: |
73
+ python -m pip install --upgrade numpy psygnal pytest
74
+ python -m pip install --upgrade pip setuptools wheel
75
+
76
+ - name: Install system dependencies (Linux)
77
+ if: runner.os == 'Linux'
78
+ run: |
79
+ sudo apt-get update
80
+ sudo apt-get install -y \
81
+ libgl1-mesa-dev \
82
+ libglib2.0-0 \
83
+ libsm6 \
84
+ libxext6 \
85
+ libxrender1 \
86
+ libgomp1
87
+
56
88
  # this runs the platform-specific tests declared in tox.ini
57
89
  - name: Test with tox
58
90
  uses: aganders3/headless-gui@v2
59
91
  with:
60
- run: python -m tox
92
+ run: python -m tox -r -- -m "not slow"
61
93
  env:
62
94
  PLATFORM: ${{ matrix.platform }}
95
+ QT_QPA_PLATFORM: offscreen
96
+ MPLBACKEND: Agg
97
+ PYTHONUNBUFFERED: 1
98
+ PYTHONDONTWRITEBYTECODE: 1
99
+
100
+ - name: Show pytest summary (if available)
101
+ if: always()
102
+ shell: bash
103
+ run: |
104
+ set -euo pipefail
105
+ echo "Checking for coverage.xml..."
106
+ FOUND=0
107
+ if [ -f coverage.xml ]; then
108
+ echo "Found coverage.xml at repo root"; FOUND=1
109
+ fi
110
+ # Glob inside .tox envs (quiet if none)
111
+ if ls .tox/*/coverage.xml >/dev/null 2>&1; then
112
+ echo "Found coverage.xml inside .tox environment"; FOUND=1
113
+ fi
114
+ if [ "$FOUND" -eq 0 ]; then
115
+ echo "No coverage.xml found (this may be fine if tests were skipped).";
116
+ ls -al . | sed -n '1,120p'
117
+ fi
63
118
 
64
119
  - name: Coverage
65
120
  uses: codecov/codecov-action@v3
@@ -82,3 +82,9 @@ venv/
82
82
 
83
83
  # written by setuptools_scm
84
84
  **/_version.py
85
+
86
+ # Test run artifacts (not source)
87
+ slow_tests_output.txt
88
+ tox_run_output.txt
89
+ *_run_output.txt
90
+ *tests_output.txt
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: napari-tmidas
3
- Version: 0.2.2
3
+ Version: 0.2.5
4
4
  Summary: A plugin for batch processing of confocal and whole-slide microscopy images of biological tissues
5
5
  Author: Marco Meer
6
6
  Author-email: marco.meer@pm.me
@@ -41,46 +41,53 @@ Classifier: Development Status :: 2 - Pre-Alpha
41
41
  Classifier: Framework :: napari
42
42
  Classifier: Intended Audience :: Developers
43
43
  Classifier: License :: OSI Approved :: BSD License
44
- Classifier: Operating System :: OS Independent
44
+ Classifier: Operating System :: MacOS
45
+ Classifier: Operating System :: POSIX :: Linux
45
46
  Classifier: Programming Language :: Python
46
47
  Classifier: Programming Language :: Python :: 3
47
48
  Classifier: Programming Language :: Python :: 3 :: Only
48
- Classifier: Programming Language :: Python :: 3.9
49
49
  Classifier: Programming Language :: Python :: 3.10
50
50
  Classifier: Programming Language :: Python :: 3.11
51
51
  Classifier: Topic :: Scientific/Engineering :: Image Processing
52
- Requires-Python: >=3.9
52
+ Requires-Python: >=3.10
53
53
  Description-Content-Type: text/markdown
54
54
  License-File: LICENSE
55
- Requires-Dist: numpy
55
+ Requires-Dist: numpy<2.0,>=1.23.0
56
56
  Requires-Dist: magicgui
57
57
  Requires-Dist: tqdm
58
58
  Requires-Dist: qtpy
59
- Requires-Dist: scikit-image
59
+ Requires-Dist: scikit-image>=0.19.0
60
+ Requires-Dist: scikit-learn-extra>=0.3.0
60
61
  Requires-Dist: pyqt5
61
- Requires-Dist: tqdm
62
- Requires-Dist: scikit-image
62
+ Requires-Dist: zarr
63
63
  Requires-Dist: ome-zarr
64
64
  Requires-Dist: napari-ome-zarr
65
- Requires-Dist: torch
66
- Requires-Dist: torchvision
67
- Requires-Dist: timm
68
- Requires-Dist: opencv-python
69
- Requires-Dist: cmake
70
65
  Requires-Dist: nd2
71
66
  Requires-Dist: pylibCZIrw
72
67
  Requires-Dist: readlif
73
68
  Requires-Dist: tiffslide
74
- Requires-Dist: hydra-core
75
- Requires-Dist: eva-decord
76
69
  Requires-Dist: acquifer-napari
77
70
  Provides-Extra: testing
78
71
  Requires-Dist: tox; extra == "testing"
79
- Requires-Dist: pytest; extra == "testing"
72
+ Requires-Dist: pytest>=7.0.0; extra == "testing"
80
73
  Requires-Dist: pytest-cov; extra == "testing"
81
74
  Requires-Dist: pytest-qt; extra == "testing"
75
+ Requires-Dist: pytest-timeout; extra == "testing"
82
76
  Requires-Dist: napari; extra == "testing"
83
77
  Requires-Dist: pyqt5; extra == "testing"
78
+ Requires-Dist: psygnal>=0.8.0; extra == "testing"
79
+ Provides-Extra: clustering
80
+ Requires-Dist: scikit-learn-extra>=0.3.0; extra == "clustering"
81
+ Provides-Extra: deep-learning
82
+ Requires-Dist: torch>=1.12.0; extra == "deep-learning"
83
+ Requires-Dist: torchvision>=0.13.0; extra == "deep-learning"
84
+ Requires-Dist: timm; extra == "deep-learning"
85
+ Requires-Dist: opencv-python; extra == "deep-learning"
86
+ Requires-Dist: cmake; extra == "deep-learning"
87
+ Requires-Dist: hydra-core; extra == "deep-learning"
88
+ Requires-Dist: eva-decord; extra == "deep-learning"
89
+ Provides-Extra: all
90
+ Requires-Dist: napari-tmidas[clustering,deep-learning,testing]; extra == "all"
84
91
  Dynamic: license-file
85
92
 
86
93
  # napari-tmidas
@@ -88,13 +95,14 @@ Dynamic: license-file
88
95
  [![License BSD-3](https://img.shields.io/pypi/l/napari-tmidas.svg?color=green)](https://github.com/macromeer/napari-tmidas/raw/main/LICENSE)
89
96
  [![PyPI](https://img.shields.io/pypi/v/napari-tmidas.svg?color=green)](https://pypi.org/project/napari-tmidas)
90
97
  [![Python Version](https://img.shields.io/pypi/pyversions/napari-tmidas.svg?color=green)](https://python.org)
98
+ [![Downloads](https://static.pepy.tech/badge/napari-tmidas)](https://pepy.tech/project/napari-tmidas)
99
+ [![DOI](https://zenodo.org/badge/698257324.svg)](https://zenodo.org/badge/latestdoi/698257324)
91
100
  [![tests](https://github.com/macromeer/napari-tmidas/workflows/tests/badge.svg)](https://github.com/macromeer/napari-tmidas/actions)
92
- [![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-tmidas)](https://napari-hub.org/plugins/napari-tmidas)
93
- <!-- [![codecov](https://codecov.io/gh/macromeer/napari-tmidas/branch/main/graph/badge.svg)](https://codecov.io/gh/macromeer/napari-tmidas) -->
94
- The `napari-tmidas` plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the CLI version of [T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
101
+
102
+ This napari plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the [T-MIDAS terminal](https://github.com/MercaderLabAnatomy/T-MIDAS).
95
103
 
96
104
  ## Features
97
- Currently, napari-tmidas provides pipelines as widgets for batch image conversion / cropping / processing, ROI colocalization and label inspection (cf. [Usage](#usage) below).
105
+ Currently, **napari-tmidas** provides pipelines as widgets for batch image conversion and processing, object cropping, label image inspection and ROI colocalization (cf. [usage](#usage) below). You can request new batch image processing features in [issues](https://github.com/MercaderLabAnatomy/napari-tmidas/issues).
98
106
 
99
107
  ## Installation
100
108
 
@@ -110,11 +118,21 @@ Now you can install `napari-tmidas` via [pip]:
110
118
 
111
119
  pip install napari-tmidas
112
120
 
121
+ **For deep learning features** (Batch Crop Anything with SAM2, Spotiflow, Careamics, Trackastra), also install:
122
+
123
+ pip install 'napari-tmidas[deep-learning]'
124
+
125
+ Or install everything at once:
126
+
127
+ pip install 'napari-tmidas[all]'
128
+
113
129
  It is recommended though to install the **latest development version**. Please also execute this command from time to time in the activated environment to benefit from newly added features:
114
130
 
115
- pip install git+https://github.com/macromeer/napari-tmidas.git
131
+ pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git
116
132
 
117
- To use the Batch Crop Anything pipeline, we need to install **Segment Anything 2** (2D/3D):
133
+ ### Additional Setup for Batch Crop Anything
134
+
135
+ To use the Batch Crop Anything pipeline with SAM2, you need to install SAM2 separately:
118
136
 
119
137
  cd /opt # if the folder does not exist: mkdir /opt && cd /opt
120
138
  git clone https://github.com/facebookresearch/sam2.git && cd sam2
@@ -126,12 +144,10 @@ If you want to batch compress image data using [Zstandard](https://github.com/fa
126
144
 
127
145
  ~~sudo apt-get install zstd~~ # Pre-installed on Linux :man_shrugging:
128
146
 
129
- brew install zstd # for macOS (requires [Homebrew](https://brew.sh/)
147
+ brew install zstd # for macOS (requires Homebrew)
130
148
  pip install zstandard # Windows with Python >= 3.7
131
149
 
132
-
133
-
134
- And you are done!
150
+ And you are done!
135
151
 
136
152
  ## Usage
137
153
 
@@ -143,19 +159,22 @@ You can then find the installed plugin in the Plugins tab.
143
159
 
144
160
  ### Microscopy Image Conversion
145
161
 
146
- You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conversion`. Currently, this pipeline supports the conversion of `.nd2, .lif, .ndpi, .czi` and acquifer data. After scanning a folder of your choice for microscopy image data, select a file in the first column of the table and preview and export any image data it contains.
162
+ Converts `.lif, .nd2, .czi, .ndpi` and Acquifer data to TIF or OME-Zarr formats. Scan a folder, select files, and export with preserved spatial metadata.
147
163
 
164
+ **Supported Formats:**
165
+ - **TIF** - Standard format for compatibility
166
+ - **OME-Zarr** - Recommended for large datasets, [spec v0.5](https://ngff.openmicroscopy.org/latest/) compliant with automatic physical metadata extraction (voxel sizes, spacing)
148
167
 
149
168
  <img src="https://github.com/user-attachments/assets/e377ca71-2f30-447d-825e-d2feebf7061b" alt="Microscopy Image Conversion Widget" style="width:75%; height:auto;">
150
169
 
151
170
 
152
171
  ### Image Processing
153
172
 
154
- 1. After opening `Plugins > T-MIDAS > Batch Image Processing`, enter the path to the folder containing the images to be processed (currently supports TIF, later also ZARR). You can also filter for filename suffix.
173
+ 1. You start with entering the path to the folder containing the images to be processed (currently supports TIF, later also ZARR) and optionally a filter for filename suffix
155
174
 
156
175
  ![image](https://github.com/user-attachments/assets/41ecb689-9abe-4371-83b5-9c5eb37069f9)
157
176
 
158
- 2. As a result, a table appears with the found images. You can click on them to inspect them in the viewer.
177
+ 2. After indexing the files, a table appears with the found images. You can click on them to inspect them in the viewer.
159
178
 
160
179
  ![image](https://github.com/user-attachments/assets/8360942a-be8f-49ec-bc25-385ee43bd601)
161
180
 
@@ -176,7 +195,27 @@ Note that whenever you click on an `Original File` or `Processed File` in the ta
176
195
  The image processing capabilities are powered by several excellent open-source tools:
177
196
  - [Cellpose 4](https://github.com/MouseLand/cellpose): Advanced cell segmentation
178
197
  - [Trackastra](https://github.com/weigertlab/trackastra): Cell tracking and analysis
198
+ - [VisCy](https://github.com/mehta-lab/VisCy): Virtual staining using deep learning
179
199
  - [CAREamics](https://github.com/CAREamics/careamics): Content-aware image restoration and enhancement
200
+ - [Spotiflow](https://github.com/weigertlab/spotiflow): Accurate and efficient spot detection for fluorescence microscopy
201
+
202
+ #### Processing Function Documentation
203
+
204
+ Detailed documentation for specific processing functions:
205
+
206
+ **Core Processing**
207
+ - [Basic Processing Functions](docs/basic_processing.md) - Label and intensity operations, channel splitting/merging, time series
208
+ - [Cellpose Segmentation](docs/cellpose_segmentation.md) - Deep learning cell/nucleus segmentation
209
+ - [TrackAstra Tracking](docs/trackastra_tracking.md) - Cell tracking across time-lapse data
210
+ - [VisCy Virtual Staining](docs/viscy_virtual_staining.md) - Virtual staining of phase/DIC images using deep learning
211
+
212
+ **Analysis and Quality Control**
213
+ - [Grid View: Intensity + Labels Overlay](docs/grid_view_overlay.md) - Visual QC for segmentation results
214
+ - [Intensity-Based Label Filtering](docs/intensity_label_filter.md) - Filter labels by signal intensity
215
+ - [Regionprops Analysis](docs/regionprops_analysis.md) - Extract quantitative properties from labels
216
+
217
+ **Advanced Processing**
218
+ - [Advanced Processing Functions](docs/advanced_processing.md) - Denoising (CAREamics), spot detection (Spotiflow), SciPy/scikit-image filters, compression, colocalization
180
219
 
181
220
  ### Batch Label Inspection
182
221
  If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Batch Label Inspection`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).
@@ -184,7 +223,8 @@ If you have already segmented a folder full of images and now you want to maybe
184
223
  <img src="https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e" alt="Batch Label Inspection Widget" style="width:75%; height:auto;">
185
224
 
186
225
  ### Crop Anything
187
- This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: Enter 2D view and go to the first z slice where the object to be cropped is appearing. Activate/select the points layer and click on the object. Terminal shows progress. You can then proceed to select another object (always do this in 2D mode)
226
+
227
+ This pipeline combines the Segment Anything Model (SAM2; supports YX, ZYX and TYX data) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: Enter 2D view and go to the first z slice where the object to be cropped is appearing. Activate/select the points layer and click on the object. Terminal shows progress. You can then proceed to select another object (always do this in 2D mode)
188
228
 
189
229
  <img src="https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443" alt="Crop Anything Widget" style="width:75%; height:auto;">
190
230
 
@@ -192,6 +232,7 @@ This pipeline combines the Segment Anything Model (SAM) for automatic object det
192
232
 
193
233
 
194
234
  ### ROI Colocalization
235
+
195
236
  This pipeline quantifies colocalization between labeled regions of interest (ROIs) across multiple image channels. It determines the extent of overlap between ROIs in a reference channel and those in one or two other channels. The output is a table of colocalization counts. Optionally, the size of reference channel ROIs, as well as the total or median size of colocalizing ROIs in the other channels, can be included. Colocalization is determined using Boolean masking. The number of colocalizing instances is determined by counting unique label IDs within the overlapping regions. Typically, the reference channel contains larger structures, while other channels contain smaller, potentially nested, structures. For example, the reference channel might contain cell bodies, with the second and third channels containing nuclei and sub-nuclear objects, respectively.
196
237
 
197
238
  <img src="https://github.com/user-attachments/assets/2f9022a0-7b88-4588-a448-250f07a634d7" alt="ROI Colocalization Widget" style="width:75%; height:auto;">
@@ -3,13 +3,14 @@
3
3
  [![License BSD-3](https://img.shields.io/pypi/l/napari-tmidas.svg?color=green)](https://github.com/macromeer/napari-tmidas/raw/main/LICENSE)
4
4
  [![PyPI](https://img.shields.io/pypi/v/napari-tmidas.svg?color=green)](https://pypi.org/project/napari-tmidas)
5
5
  [![Python Version](https://img.shields.io/pypi/pyversions/napari-tmidas.svg?color=green)](https://python.org)
6
+ [![Downloads](https://static.pepy.tech/badge/napari-tmidas)](https://pepy.tech/project/napari-tmidas)
7
+ [![DOI](https://zenodo.org/badge/698257324.svg)](https://zenodo.org/badge/latestdoi/698257324)
6
8
  [![tests](https://github.com/macromeer/napari-tmidas/workflows/tests/badge.svg)](https://github.com/macromeer/napari-tmidas/actions)
7
- [![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-tmidas)](https://napari-hub.org/plugins/napari-tmidas)
8
- <!-- [![codecov](https://codecov.io/gh/macromeer/napari-tmidas/branch/main/graph/badge.svg)](https://codecov.io/gh/macromeer/napari-tmidas) -->
9
- The `napari-tmidas` plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the CLI version of [T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
9
+
10
+ This napari plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the [T-MIDAS terminal](https://github.com/MercaderLabAnatomy/T-MIDAS).
10
11
 
11
12
  ## Features
12
- Currently, napari-tmidas provides pipelines as widgets for batch image conversion / cropping / processing, ROI colocalization and label inspection (cf. [Usage](#usage) below).
13
+ Currently, **napari-tmidas** provides pipelines as widgets for batch image conversion and processing, object cropping, label image inspection and ROI colocalization (cf. [usage](#usage) below). You can request new batch image processing features in [issues](https://github.com/MercaderLabAnatomy/napari-tmidas/issues).
13
14
 
14
15
  ## Installation
15
16
 
@@ -25,11 +26,21 @@ Now you can install `napari-tmidas` via [pip]:
25
26
 
26
27
  pip install napari-tmidas
27
28
 
29
+ **For deep learning features** (Batch Crop Anything with SAM2, Spotiflow, Careamics, Trackastra), also install:
30
+
31
+ pip install 'napari-tmidas[deep-learning]'
32
+
33
+ Or install everything at once:
34
+
35
+ pip install 'napari-tmidas[all]'
36
+
28
37
  It is recommended though to install the **latest development version**. Please also execute this command from time to time in the activated environment to benefit from newly added features:
29
38
 
30
- pip install git+https://github.com/macromeer/napari-tmidas.git
39
+ pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git
40
+
41
+ ### Additional Setup for Batch Crop Anything
31
42
 
32
- To use the Batch Crop Anything pipeline, we need to install **Segment Anything 2** (2D/3D):
43
+ To use the Batch Crop Anything pipeline with SAM2, you need to install SAM2 separately:
33
44
 
34
45
  cd /opt # if the folder does not exist: mkdir /opt && cd /opt
35
46
  git clone https://github.com/facebookresearch/sam2.git && cd sam2
@@ -41,12 +52,10 @@ If you want to batch compress image data using [Zstandard](https://github.com/fa
41
52
 
42
53
  ~~sudo apt-get install zstd~~ # Pre-installed on Linux :man_shrugging:
43
54
 
44
- brew install zstd # for macOS (requires [Homebrew](https://brew.sh/)
55
+ brew install zstd # for macOS (requires Homebrew)
45
56
  pip install zstandard # Windows with Python >= 3.7
46
57
 
47
-
48
-
49
- And you are done!
58
+ And you are done!
50
59
 
51
60
  ## Usage
52
61
 
@@ -58,19 +67,22 @@ You can then find the installed plugin in the Plugins tab.
58
67
 
59
68
  ### Microscopy Image Conversion
60
69
 
61
- You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conversion`. Currently, this pipeline supports the conversion of `.nd2, .lif, .ndpi, .czi` and acquifer data. After scanning a folder of your choice for microscopy image data, select a file in the first column of the table and preview and export any image data it contains.
70
+ Converts `.lif, .nd2, .czi, .ndpi` and Acquifer data to TIF or OME-Zarr formats. Scan a folder, select files, and export with preserved spatial metadata.
62
71
 
72
+ **Supported Formats:**
73
+ - **TIF** - Standard format for compatibility
74
+ - **OME-Zarr** - Recommended for large datasets, [spec v0.5](https://ngff.openmicroscopy.org/latest/) compliant with automatic physical metadata extraction (voxel sizes, spacing)
63
75
 
64
76
  <img src="https://github.com/user-attachments/assets/e377ca71-2f30-447d-825e-d2feebf7061b" alt="Microscopy Image Conversion Widget" style="width:75%; height:auto;">
65
77
 
66
78
 
67
79
  ### Image Processing
68
80
 
69
- 1. After opening `Plugins > T-MIDAS > Batch Image Processing`, enter the path to the folder containing the images to be processed (currently supports TIF, later also ZARR). You can also filter for filename suffix.
81
+ 1. You start with entering the path to the folder containing the images to be processed (currently supports TIF, later also ZARR) and optionally a filter for filename suffix
70
82
 
71
83
  ![image](https://github.com/user-attachments/assets/41ecb689-9abe-4371-83b5-9c5eb37069f9)
72
84
 
73
- 2. As a result, a table appears with the found images. You can click on them to inspect them in the viewer.
85
+ 2. After indexing the files, a table appears with the found images. You can click on them to inspect them in the viewer.
74
86
 
75
87
  ![image](https://github.com/user-attachments/assets/8360942a-be8f-49ec-bc25-385ee43bd601)
76
88
 
@@ -91,7 +103,27 @@ Note that whenever you click on an `Original File` or `Processed File` in the ta
91
103
  The image processing capabilities are powered by several excellent open-source tools:
92
104
  - [Cellpose 4](https://github.com/MouseLand/cellpose): Advanced cell segmentation
93
105
  - [Trackastra](https://github.com/weigertlab/trackastra): Cell tracking and analysis
106
+ - [VisCy](https://github.com/mehta-lab/VisCy): Virtual staining using deep learning
94
107
  - [CAREamics](https://github.com/CAREamics/careamics): Content-aware image restoration and enhancement
108
+ - [Spotiflow](https://github.com/weigertlab/spotiflow): Accurate and efficient spot detection for fluorescence microscopy
109
+
110
+ #### Processing Function Documentation
111
+
112
+ Detailed documentation for specific processing functions:
113
+
114
+ **Core Processing**
115
+ - [Basic Processing Functions](docs/basic_processing.md) - Label and intensity operations, channel splitting/merging, time series
116
+ - [Cellpose Segmentation](docs/cellpose_segmentation.md) - Deep learning cell/nucleus segmentation
117
+ - [TrackAstra Tracking](docs/trackastra_tracking.md) - Cell tracking across time-lapse data
118
+ - [VisCy Virtual Staining](docs/viscy_virtual_staining.md) - Virtual staining of phase/DIC images using deep learning
119
+
120
+ **Analysis and Quality Control**
121
+ - [Grid View: Intensity + Labels Overlay](docs/grid_view_overlay.md) - Visual QC for segmentation results
122
+ - [Intensity-Based Label Filtering](docs/intensity_label_filter.md) - Filter labels by signal intensity
123
+ - [Regionprops Analysis](docs/regionprops_analysis.md) - Extract quantitative properties from labels
124
+
125
+ **Advanced Processing**
126
+ - [Advanced Processing Functions](docs/advanced_processing.md) - Denoising (CAREamics), spot detection (Spotiflow), SciPy/scikit-image filters, compression, colocalization
95
127
 
96
128
  ### Batch Label Inspection
97
129
  If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Batch Label Inspection`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).
@@ -99,7 +131,8 @@ If you have already segmented a folder full of images and now you want to maybe
99
131
  <img src="https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e" alt="Batch Label Inspection Widget" style="width:75%; height:auto;">
100
132
 
101
133
  ### Crop Anything
102
- This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: Enter 2D view and go to the first z slice where the object to be cropped is appearing. Activate/select the points layer and click on the object. Terminal shows progress. You can then proceed to select another object (always do this in 2D mode)
134
+
135
+ This pipeline combines the Segment Anything Model (SAM2; supports YX, ZYX and TYX data) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: enter 2D view and go to the first z slice where the object to be cropped appears. Activate/select the points layer and click on the object; the terminal shows progress. You can then proceed to select another object (always do this in 2D mode).
103
136
 
104
137
  <img src="https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443" alt="Crop Anything Widget" style="width:75%; height:auto;">
105
138
 
@@ -107,6 +140,7 @@ This pipeline combines the Segment Anything Model (SAM) for automatic object det
107
140
 
108
141
 
109
142
  ### ROI Colocalization
143
+
110
144
  This pipeline quantifies colocalization between labeled regions of interest (ROIs) across multiple image channels. It determines the extent of overlap between ROIs in a reference channel and those in one or two other channels. The output is a table of colocalization counts. Optionally, the size of reference channel ROIs, as well as the total or median size of colocalizing ROIs in the other channels, can be included. Colocalization is determined using Boolean masking. The number of colocalizing instances is determined by counting unique label IDs within the overlapping regions. Typically, the reference channel contains larger structures, while other channels contain smaller, potentially nested, structures. For example, the reference channel might contain cell bodies, with the second and third channels containing nuclei and sub-nuclear objects, respectively.
111
145
 
112
146
  <img src="https://github.com/user-attachments/assets/2f9022a0-7b88-4588-a448-250f07a634d7" alt="ROI Colocalization Widget" style="width:75%; height:auto;">