napari-tmidas 0.2.2.tar.gz → 0.2.4.tar.gz

Files changed (103)
  1. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/.github/workflows/test_and_deploy.yml +65 -10
  2. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/.gitignore +6 -0
  3. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/PKG-INFO +70 -30
  4. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/README.md +47 -14
  5. napari_tmidas-0.2.4/docs/advanced_processing.md +398 -0
  6. napari_tmidas-0.2.4/docs/basic_processing.md +278 -0
  7. napari_tmidas-0.2.4/docs/cellpose_segmentation.md +231 -0
  8. napari_tmidas-0.2.4/docs/grid_view_overlay.md +229 -0
  9. napari_tmidas-0.2.4/docs/intensity_label_filter.md +129 -0
  10. napari_tmidas-0.2.4/docs/regionprops_analysis.md +96 -0
  11. napari_tmidas-0.2.4/docs/regionprops_summary.md +136 -0
  12. napari_tmidas-0.2.4/docs/trackastra_tracking.md +268 -0
  13. napari_tmidas-0.2.4/examples/grid_overlay_example.py +66 -0
  14. napari_tmidas-0.2.4/examples/intensity_filter_example.py +179 -0
  15. napari_tmidas-0.2.4/examples/regionprops_example.py +143 -0
  16. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/pyproject.toml +41 -18
  17. napari_tmidas-0.2.4/src/napari_tmidas/__init__.py +52 -0
  18. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_crop_anything.py +1520 -609
  19. napari_tmidas-0.2.4/src/napari_tmidas/_env_manager.py +76 -0
  20. napari_tmidas-0.2.4/src/napari_tmidas/_file_conversion.py +2475 -0
  21. napari_tmidas-0.2.4/src/napari_tmidas/_file_selector.py +2410 -0
  22. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_label_inspection.py +83 -8
  23. napari_tmidas-0.2.4/src/napari_tmidas/_processing_worker.py +309 -0
  24. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_reader.py +6 -10
  25. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_registry.py +2 -2
  26. napari_tmidas-0.2.4/src/napari_tmidas/_roi_colocalization.py +2312 -0
  27. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_crop_anything.py +123 -0
  28. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_env_manager.py +89 -0
  29. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_grid_view_overlay.py +193 -0
  30. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_init.py +98 -0
  31. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_intensity_label_filter.py +222 -0
  32. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_label_inspection.py +86 -0
  33. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_processing_basic.py +500 -0
  34. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_processing_worker.py +142 -0
  35. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_regionprops_analysis.py +547 -0
  36. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_registry.py +135 -0
  37. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_scipy_filters.py +168 -0
  38. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_skimage_filters.py +259 -0
  39. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_split_channels.py +217 -0
  40. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_spotiflow.py +87 -0
  41. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_tyx_display_fix.py +142 -0
  42. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_ui_utils.py +68 -0
  43. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_tests/test_widget.py +30 -0
  44. napari_tmidas-0.2.4/src/napari_tmidas/_tests/test_windows_basic.py +66 -0
  45. napari_tmidas-0.2.4/src/napari_tmidas/_ui_utils.py +57 -0
  46. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_version.py +16 -3
  47. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_widget.py +41 -4
  48. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/basic.py +557 -20
  49. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/careamics_env_manager.py +72 -99
  50. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/cellpose_env_manager.py +510 -0
  51. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/cellpose_segmentation.py +132 -191
  52. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/colocalization.py +697 -0
  53. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/grid_view_overlay.py +703 -0
  54. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/intensity_label_filter.py +422 -0
  55. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/regionprops_analysis.py +1280 -0
  56. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/sam2_env_manager.py +95 -0
  57. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/sam2_mp4.py +362 -0
  58. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/scipy_filters.py +452 -0
  59. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/skimage_filters.py +669 -0
  60. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/spotiflow_detection.py +949 -0
  61. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/spotiflow_env_manager.py +591 -0
  62. napari_tmidas-0.2.4/src/napari_tmidas/processing_functions/timepoint_merger.py +738 -0
  63. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/PKG-INFO +70 -30
  64. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/SOURCES.txt +36 -1
  65. napari_tmidas-0.2.4/src/napari_tmidas.egg-info/requires.txt +40 -0
  66. napari_tmidas-0.2.4/test_grid_overlay.py +139 -0
  67. napari_tmidas-0.2.4/tox.ini +44 -0
  68. napari_tmidas-0.2.2/src/napari_tmidas/__init__.py +0 -22
  69. napari_tmidas-0.2.2/src/napari_tmidas/_file_conversion.py +0 -1960
  70. napari_tmidas-0.2.2/src/napari_tmidas/_file_selector.py +0 -1171
  71. napari_tmidas-0.2.2/src/napari_tmidas/_roi_colocalization.py +0 -1175
  72. napari_tmidas-0.2.2/src/napari_tmidas/_tests/__init__.py +0 -0
  73. napari_tmidas-0.2.2/src/napari_tmidas/_tests/test_registry.py +0 -67
  74. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/cellpose_env_manager.py +0 -207
  75. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/colocalization.py +0 -240
  76. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/sam2_env_manager.py +0 -111
  77. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/sam2_mp4.py +0 -283
  78. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/scipy_filters.py +0 -57
  79. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/skimage_filters.py +0 -457
  80. napari_tmidas-0.2.2/src/napari_tmidas/processing_functions/timepoint_merger.py +0 -490
  81. napari_tmidas-0.2.2/src/napari_tmidas.egg-info/requires.txt +0 -30
  82. napari_tmidas-0.2.2/tox.ini +0 -40
  83. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/.github/dependabot.yml +0 -0
  84. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/.napari-hub/DESCRIPTION.md +0 -0
  85. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/.napari-hub/config.yml +0 -0
  86. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/.pre-commit-config.yaml +0 -0
  87. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/LICENSE +0 -0
  88. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/MANIFEST.in +0 -0
  89. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/setup.cfg +0 -0
  90. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_sample_data.py +0 -0
  91. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_tests/test_file_selector.py +0 -0
  92. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_tests/test_reader.py +0 -0
  93. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_tests/test_sample_data.py +0 -0
  94. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_tests/test_writer.py +0 -0
  95. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/_writer.py +0 -0
  96. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/napari.yaml +0 -0
  97. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/__init__.py +0 -0
  98. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/careamics_denoising.py +0 -0
  99. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/file_compression.py +0 -0
  100. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas/processing_functions/trackastra_tracking.py +0 -0
  101. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/dependency_links.txt +0 -0
  102. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/entry_points.txt +0 -0
  103. {napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/src/napari_tmidas.egg-info/top_level.txt +0 -0
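
The per-file line counts above come straight from comparing the two published sdists. For readers who want to reproduce the comparison locally, here is a minimal standard-library sketch that pulls both archives from the public PyPI JSON API and diffs the extracted trees; the helper name and the use of `filecmp` are this document's illustration, not part of the package:

    import json
    import tarfile
    import tempfile
    import urllib.request
    from pathlib import Path
    import filecmp

    def fetch_sdist(name: str, version: str, dest: Path) -> Path:
        """Download the sdist for name==version from PyPI and extract it into dest."""
        meta = json.load(urllib.request.urlopen(f"https://pypi.org/pypi/{name}/{version}/json"))
        sdist_url = next(u["url"] for u in meta["urls"] if u["packagetype"] == "sdist")
        archive = dest / Path(sdist_url).name
        urllib.request.urlretrieve(sdist_url, archive)
        with tarfile.open(archive) as tar:
            tar.extractall(dest)
        # sdists extract to e.g. napari_tmidas-0.2.4/
        return dest / f"{name.replace('-', '_')}-{version}"

    with tempfile.TemporaryDirectory() as tmp:
        old = fetch_sdist("napari-tmidas", "0.2.2", Path(tmp))
        new = fetch_sdist("napari-tmidas", "0.2.4", Path(tmp))
        # Prints files only in one tree plus files that differ, recursively
        filecmp.dircmp(old, new).report_full_closure()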
{napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/.github/workflows/test_and_deploy.yml

@@ -23,8 +23,8 @@ jobs:
     timeout-minutes: 30
     strategy:
       matrix:
-        platform: [ubuntu-latest, windows-latest, macos-latest]
-        python-version: ["3.9", "3.10", "3.11"]
+        platform: [ubuntu-latest, macos-latest]
+        python-version: ["3.10", "3.11"]

     steps:
       - uses: actions/checkout@v4
@@ -37,13 +37,6 @@ jobs:
       # these libraries enable testing on Qt on linux
       - uses: tlambert03/setup-qt-libs@v1

-      # strategy borrowed from vispy for installing opengl libs on windows
-      - name: Install Windows OpenGL
-        if: runner.os == 'Windows'
-        run: |
-          git clone --depth 1 https://github.com/pyvista/gl-ci-helpers.git
-          powershell gl-ci-helpers/appveyor/install_opengl.ps1
-
       # note: if you need dependencies from conda, considering using
       # setup-miniconda: https://github.com/conda-incubator/setup-miniconda
       # and
@@ -53,13 +46,75 @@ jobs:
           python -m pip install --upgrade pip
           python -m pip install setuptools tox tox-gh-actions

+      - name: Pip cache
+        uses: actions/cache@v4
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('pyproject.toml', 'tox.ini') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+
+      - name: Remove .tox cache
+        run: rm -rf .tox
+
+      - name: Free up disk space (Linux)
+        if: runner.os == 'Linux'
+        run: |
+          # Remove unnecessary tools and packages to free up disk space
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /opt/ghc
+          sudo rm -rf /opt/hostedtoolcache/CodeQL
+          sudo docker image prune --all --force
+          df -h
+
+      - name: Upgrade critical dependencies
+        run: |
+          python -m pip install --upgrade numpy psygnal pytest
+          python -m pip install --upgrade pip setuptools wheel
+
+      - name: Install system dependencies (Linux)
+        if: runner.os == 'Linux'
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y \
+            libgl1-mesa-dev \
+            libglib2.0-0 \
+            libsm6 \
+            libxext6 \
+            libxrender1 \
+            libgomp1
+
       # this runs the platform-specific tests declared in tox.ini
       - name: Test with tox
         uses: aganders3/headless-gui@v2
         with:
-          run: python -m tox
+          run: python -m tox -r -- -m "not slow"
         env:
           PLATFORM: ${{ matrix.platform }}
+          QT_QPA_PLATFORM: offscreen
+          MPLBACKEND: Agg
+          PYTHONUNBUFFERED: 1
+          PYTHONDONTWRITEBYTECODE: 1
+
+      - name: Show pytest summary (if available)
+        if: always()
+        shell: bash
+        run: |
+          set -euo pipefail
+          echo "Checking for coverage.xml..."
+          FOUND=0
+          if [ -f coverage.xml ]; then
+            echo "Found coverage.xml at repo root"; FOUND=1
+          fi
+          # Glob inside .tox envs (quiet if none)
+          if ls .tox/*/coverage.xml >/dev/null 2>&1; then
+            echo "Found coverage.xml inside .tox environment"; FOUND=1
+          fi
+          if [ "$FOUND" -eq 0 ]; then
+            echo "No coverage.xml found (this may be fine if tests were skipped).";
+            ls -al . | sed -n '1,120p'
+          fi

       - name: Coverage
         uses: codecov/codecov-action@v3
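
The new tox invocation forwards `-m "not slow"` to pytest, so the hosted runners skip anything tagged as slow. A minimal sketch of what that convention usually looks like on the test side; the marker registration shown in the comment and the test names are illustrative, and the plugin's real test modules (listed in the file table above) are not reproduced here:

    # test_example_markers.py -- illustrative only, not a file from napari-tmidas
    import time

    import pytest

    # The "slow" marker is typically registered once, e.g. in pyproject.toml:
    #   [tool.pytest.ini_options]
    #   markers = ["slow: long-running tests excluded from the default CI run"]


    @pytest.mark.slow
    def test_full_batch_pipeline():
        # stands in for a long-running batch-processing test
        time.sleep(5)
        assert True


    def test_cheap_unit_check():
        assert 2 + 2 == 4


    # `python -m tox -r -- -m "not slow"` passes the marker expression through to
    # pytest, which deselects test_full_batch_pipeline and runs only the fast test.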
{napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/.gitignore

@@ -82,3 +82,9 @@ venv/

 # written by setuptools_scm
 **/_version.py
+
+# Test run artifacts (not source)
+slow_tests_output.txt
+tox_run_output.txt
+*_run_output.txt
+*tests_output.txt
{napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: napari-tmidas
-Version: 0.2.2
+Version: 0.2.4
 Summary: A plugin for batch processing of confocal and whole-slide microscopy images of biological tissues
 Author: Marco Meer
 Author-email: marco.meer@pm.me
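
Since the only change in this hunk is the version bump, a quick way to confirm which release is actually installed is a standard-library metadata lookup (nothing package-specific assumed):

    from importlib.metadata import version

    print(version("napari-tmidas"))  # prints "0.2.4" once the new release is installed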
@@ -41,46 +41,53 @@ Classifier: Development Status :: 2 - Pre-Alpha
 Classifier: Framework :: napari
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: POSIX :: Linux
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Topic :: Scientific/Engineering :: Image Processing
-Requires-Python: >=3.9
+Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: numpy
+Requires-Dist: numpy<2.0,>=1.23.0
 Requires-Dist: magicgui
 Requires-Dist: tqdm
 Requires-Dist: qtpy
-Requires-Dist: scikit-image
+Requires-Dist: scikit-image>=0.19.0
+Requires-Dist: scikit-learn-extra>=0.3.0
 Requires-Dist: pyqt5
-Requires-Dist: tqdm
-Requires-Dist: scikit-image
+Requires-Dist: zarr
 Requires-Dist: ome-zarr
 Requires-Dist: napari-ome-zarr
-Requires-Dist: torch
-Requires-Dist: torchvision
-Requires-Dist: timm
-Requires-Dist: opencv-python
-Requires-Dist: cmake
 Requires-Dist: nd2
 Requires-Dist: pylibCZIrw
 Requires-Dist: readlif
 Requires-Dist: tiffslide
-Requires-Dist: hydra-core
-Requires-Dist: eva-decord
 Requires-Dist: acquifer-napari
 Provides-Extra: testing
 Requires-Dist: tox; extra == "testing"
-Requires-Dist: pytest; extra == "testing"
+Requires-Dist: pytest>=7.0.0; extra == "testing"
 Requires-Dist: pytest-cov; extra == "testing"
 Requires-Dist: pytest-qt; extra == "testing"
+Requires-Dist: pytest-timeout; extra == "testing"
 Requires-Dist: napari; extra == "testing"
 Requires-Dist: pyqt5; extra == "testing"
+Requires-Dist: psygnal>=0.8.0; extra == "testing"
+Provides-Extra: clustering
+Requires-Dist: scikit-learn-extra>=0.3.0; extra == "clustering"
+Provides-Extra: deep-learning
+Requires-Dist: torch>=1.12.0; extra == "deep-learning"
+Requires-Dist: torchvision>=0.13.0; extra == "deep-learning"
+Requires-Dist: timm; extra == "deep-learning"
+Requires-Dist: opencv-python; extra == "deep-learning"
+Requires-Dist: cmake; extra == "deep-learning"
+Requires-Dist: hydra-core; extra == "deep-learning"
+Requires-Dist: eva-decord; extra == "deep-learning"
+Provides-Extra: all
+Requires-Dist: napari-tmidas[clustering,deep-learning,testing]; extra == "all"
 Dynamic: license-file

 # napari-tmidas
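
The dependency reshuffle above moves torch, torchvision, timm, opencv-python, cmake, hydra-core and eva-decord behind the new `deep-learning` extra, so a plain `pip install napari-tmidas` no longer drags them in. A common pattern for code that has to cope with such optional extras is a guarded import check; the sketch below is a generic illustration of that pattern, with a hypothetical helper name that is not claimed to match the plugin's own `_env_manager.py`:

    from importlib.util import find_spec


    def deep_learning_available() -> bool:
        """Heuristic check: are the modules pulled in by the 'deep-learning' extra importable?"""
        return all(find_spec(mod) is not None for mod in ("torch", "torchvision", "timm"))


    if deep_learning_available():
        import torch  # only imported when the optional stack is actually present

        DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    else:
        # GPU-backed features stay disabled; users can opt in with
        #   pip install 'napari-tmidas[deep-learning]'
        DEVICE = None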
@@ -88,13 +95,13 @@ Dynamic: license-file
 [![License BSD-3](https://img.shields.io/pypi/l/napari-tmidas.svg?color=green)](https://github.com/macromeer/napari-tmidas/raw/main/LICENSE)
 [![PyPI](https://img.shields.io/pypi/v/napari-tmidas.svg?color=green)](https://pypi.org/project/napari-tmidas)
 [![Python Version](https://img.shields.io/pypi/pyversions/napari-tmidas.svg?color=green)](https://python.org)
+[![Downloads](https://static.pepy.tech/badge/napari-tmidas)](https://pepy.tech/project/napari-tmidas)
 [![tests](https://github.com/macromeer/napari-tmidas/workflows/tests/badge.svg)](https://github.com/macromeer/napari-tmidas/actions)
-[![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-tmidas)](https://napari-hub.org/plugins/napari-tmidas)
-<!-- [![codecov](https://codecov.io/gh/macromeer/napari-tmidas/branch/main/graph/badge.svg)](https://codecov.io/gh/macromeer/napari-tmidas) -->
-The `napari-tmidas` plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the CLI version of [T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
+
+This napari plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the [T-MIDAS terminal](https://github.com/MercaderLabAnatomy/T-MIDAS).

 ## Features
-Currently, napari-tmidas provides pipelines as widgets for batch image conversion / cropping / processing, ROI colocalization and label inspection (cf. [Usage](#usage) below).
+Currently, **napari-tmidas** provides pipelines as widgets for batch image conversion and processing, object cropping, label image inspection and ROI colocalization (cf. [usage](#usage) below). You can request new batch image processing features in [issues](https://github.com/MercaderLabAnatomy/napari-tmidas/issues).

 ## Installation

@@ -110,11 +117,21 @@ Now you can install `napari-tmidas` via [pip]:

     pip install napari-tmidas

+**For deep learning features** (Batch Crop Anything with SAM2, Spotiflow, Careamics, Trackastra), also install:
+
+    pip install 'napari-tmidas[deep-learning]'
+
+Or install everything at once:
+
+    pip install 'napari-tmidas[all]'
+
 It is recommended though to install the **latest development version**. Please also execute this command from time to time in the activated environment to benefit from newly added features:

-    pip install git+https://github.com/macromeer/napari-tmidas.git
+    pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git

-To use the Batch Crop Anything pipeline, we need to install **Segment Anything 2** (2D/3D):
+### Additional Setup for Batch Crop Anything
+
+To use the Batch Crop Anything pipeline with SAM2, you need to install SAM2 separately:

     cd /opt # if the folder does not exist: mkdir /opt && cd /opt
     git clone https://github.com/facebookresearch/sam2.git && cd sam2
@@ -126,12 +143,10 @@ If you want to batch compress image data using [Zstandard](https://github.com/fa

     ~~sudo apt-get install zstd~~ # Pre-installed on Linux :man_shrugging:

-    brew install zstd # for macOS (requires [Homebrew](https://brew.sh/)
+    brew install zstd # for macOS (requires Homebrew)
     pip install zstandard # Windows with Python >= 3.7

-
-
-And you are done!
+And you are done!

 ## Usage

@@ -143,19 +158,22 @@ You can then find the installed plugin in the Plugins tab.

 ### Microscopy Image Conversion

-You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conversion`. Currently, this pipeline supports the conversion of `.nd2, .lif, .ndpi, .czi` and acquifer data. After scanning a folder of your choice for microscopy image data, select a file in the first column of the table and preview and export any image data it contains.
+Converts `.lif, .nd2, .czi, .ndpi` and Acquifer data to TIF or OME-Zarr formats. Scan a folder, select files, and export with preserved spatial metadata.

+**Supported Formats:**
+- **TIF** - Standard format for compatibility
+- **OME-Zarr** - Recommended for large datasets, [spec v0.5](https://ngff.openmicroscopy.org/latest/) compliant with automatic physical metadata extraction (voxel sizes, spacing)

 <img src="https://github.com/user-attachments/assets/e377ca71-2f30-447d-825e-d2feebf7061b" alt="Microscopy Image Conversion Widget" style="width:75%; height:auto;">


 ### Image Processing

-1. After opening `Plugins > T-MIDAS > Batch Image Processing`, enter the path to the folder containing the images to be processed (currently supports TIF, later also ZARR). You can also filter for filename suffix.
+1. You start with entering the path to the folder containing the images to be processed (currently supports TIF, later also ZARR) and optionally a filter for filename suffix

 ![image](https://github.com/user-attachments/assets/41ecb689-9abe-4371-83b5-9c5eb37069f9)

-2. As a result, a table appears with the found images. You can click on them to inspect them in the viewer.
+2. After indexing the files, a table appears with the found images. You can click on them to inspect them in the viewer.

 ![image](https://github.com/user-attachments/assets/8360942a-be8f-49ec-bc25-385ee43bd601)

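
The conversion text above recommends OME-Zarr output with physical metadata. As a point of reference, writing a plain OME-Zarr image with the `ome-zarr` package (already a required dependency in the metadata above) follows the pattern below; this is the library's documented usage, not the plugin's converter, and the array and chunk sizes are made up for the example:

    import numpy as np
    import zarr
    from ome_zarr.io import parse_url
    from ome_zarr.writer import write_image

    # Synthetic ZYX stack standing in for a converted microscopy volume
    data = np.random.default_rng(0).poisson(lam=10, size=(32, 256, 256)).astype(np.uint8)

    store = parse_url("converted_example.zarr", mode="w").store
    root = zarr.group(store=store)

    # write_image builds the multiscale pyramid and the NGFF metadata;
    # physical voxel sizes go into the multiscales coordinate transformations.
    write_image(image=data, group=root, axes="zyx",
                storage_options=dict(chunks=(1, 256, 256)))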
@@ -176,7 +194,27 @@ Note that whenever you click on an `Original File` or `Processed File` in the ta
 The image processing capabilities are powered by several excellent open-source tools:
 - [Cellpose 4](https://github.com/MouseLand/cellpose): Advanced cell segmentation
 - [Trackastra](https://github.com/weigertlab/trackastra): Cell tracking and analysis
+- [VisCy](https://github.com/mehta-lab/VisCy): Virtual staining using deep learning
 - [CAREamics](https://github.com/CAREamics/careamics): Content-aware image restoration and enhancement
+- [Spotiflow](https://github.com/weigertlab/spotiflow): Accurate and efficient spot detection for fluorescence microscopy
+
+#### Processing Function Documentation
+
+Detailed documentation for specific processing functions:
+
+**Core Processing**
+- [Basic Processing Functions](docs/basic_processing.md) - Label and intensity operations, channel splitting/merging, time series
+- [Cellpose Segmentation](docs/cellpose_segmentation.md) - Deep learning cell/nucleus segmentation
+- [TrackAstra Tracking](docs/trackastra_tracking.md) - Cell tracking across time-lapse data
+- [VisCy Virtual Staining](docs/viscy_virtual_staining.md) - Virtual staining of phase/DIC images using deep learning
+
+**Analysis and Quality Control**
+- [Grid View: Intensity + Labels Overlay](docs/grid_view_overlay.md) - Visual QC for segmentation results
+- [Intensity-Based Label Filtering](docs/intensity_label_filter.md) - Filter labels by signal intensity
+- [Regionprops Analysis](docs/regionprops_analysis.md) - Extract quantitative properties from labels
+
+**Advanced Processing**
+- [Advanced Processing Functions](docs/advanced_processing.md) - Denoising (CAREamics), spot detection (Spotiflow), SciPy/scikit-image filters, compression, colocalization

 ### Batch Label Inspection
 If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Batch Label Inspection`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).
@@ -184,7 +222,8 @@ If you have already segmented a folder full of images and now you want to maybe
 <img src="https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e" alt="Batch Label Inspection Widget" style="width:75%; height:auto;">

 ### Crop Anything
-This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: Enter 2D view and go to the first z slice where the object to be cropped is appearing. Activate/select the points layer and click on the object. Terminal shows progress. You can then proceed to select another object (always do this in 2D mode)
+
+This pipeline combines the Segment Anything Model (SAM2; supports YX, ZYX and TYX data) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: Enter 2D view and go to the first z slice where the object to be cropped is appearing. Activate/select the points layer and click on the object. Terminal shows progress. You can then proceed to select another object (always do this in 2D mode)

 <img src="https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443" alt="Crop Anything Widget" style="width:75%; height:auto;">

@@ -192,6 +231,7 @@ This pipeline combines the Segment Anything Model (SAM) for automatic object det


 ### ROI Colocalization
+
 This pipeline quantifies colocalization between labeled regions of interest (ROIs) across multiple image channels. It determines the extent of overlap between ROIs in a reference channel and those in one or two other channels. The output is a table of colocalization counts. Optionally, the size of reference channel ROIs, as well as the total or median size of colocalizing ROIs in the other channels, can be included. Colocalization is determined using Boolean masking. The number of colocalizing instances is determined by counting unique label IDs within the overlapping regions. Typically, the reference channel contains larger structures, while other channels contain smaller, potentially nested, structures. For example, the reference channel might contain cell bodies, with the second and third channels containing nuclei and sub-nuclear objects, respectively.

 <img src="https://github.com/user-attachments/assets/2f9022a0-7b88-4588-a448-250f07a634d7" alt="ROI Colocalization Widget" style="width:75%; height:auto;">
{napari_tmidas-0.2.2 → napari_tmidas-0.2.4}/README.md

@@ -3,13 +3,13 @@
 [![License BSD-3](https://img.shields.io/pypi/l/napari-tmidas.svg?color=green)](https://github.com/macromeer/napari-tmidas/raw/main/LICENSE)
 [![PyPI](https://img.shields.io/pypi/v/napari-tmidas.svg?color=green)](https://pypi.org/project/napari-tmidas)
 [![Python Version](https://img.shields.io/pypi/pyversions/napari-tmidas.svg?color=green)](https://python.org)
+[![Downloads](https://static.pepy.tech/badge/napari-tmidas)](https://pepy.tech/project/napari-tmidas)
 [![tests](https://github.com/macromeer/napari-tmidas/workflows/tests/badge.svg)](https://github.com/macromeer/napari-tmidas/actions)
-[![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-tmidas)](https://napari-hub.org/plugins/napari-tmidas)
-<!-- [![codecov](https://codecov.io/gh/macromeer/napari-tmidas/branch/main/graph/badge.svg)](https://codecov.io/gh/macromeer/napari-tmidas) -->
-The `napari-tmidas` plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the CLI version of [T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
+
+This napari plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the [T-MIDAS terminal](https://github.com/MercaderLabAnatomy/T-MIDAS).

 ## Features
-Currently, napari-tmidas provides pipelines as widgets for batch image conversion / cropping / processing, ROI colocalization and label inspection (cf. [Usage](#usage) below).
+Currently, **napari-tmidas** provides pipelines as widgets for batch image conversion and processing, object cropping, label image inspection and ROI colocalization (cf. [usage](#usage) below). You can request new batch image processing features in [issues](https://github.com/MercaderLabAnatomy/napari-tmidas/issues).

 ## Installation

@@ -25,11 +25,21 @@ Now you can install `napari-tmidas` via [pip]:

     pip install napari-tmidas

+**For deep learning features** (Batch Crop Anything with SAM2, Spotiflow, Careamics, Trackastra), also install:
+
+    pip install 'napari-tmidas[deep-learning]'
+
+Or install everything at once:
+
+    pip install 'napari-tmidas[all]'
+
 It is recommended though to install the **latest development version**. Please also execute this command from time to time in the activated environment to benefit from newly added features:

-    pip install git+https://github.com/macromeer/napari-tmidas.git
+    pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git

-To use the Batch Crop Anything pipeline, we need to install **Segment Anything 2** (2D/3D):
+### Additional Setup for Batch Crop Anything
+
+To use the Batch Crop Anything pipeline with SAM2, you need to install SAM2 separately:

     cd /opt # if the folder does not exist: mkdir /opt && cd /opt
     git clone https://github.com/facebookresearch/sam2.git && cd sam2
@@ -41,12 +51,10 @@ If you want to batch compress image data using [Zstandard](https://github.com/fa

     ~~sudo apt-get install zstd~~ # Pre-installed on Linux :man_shrugging:

-    brew install zstd # for macOS (requires [Homebrew](https://brew.sh/)
+    brew install zstd # for macOS (requires Homebrew)
     pip install zstandard # Windows with Python >= 3.7

-
-
-And you are done!
+And you are done!

 ## Usage

@@ -58,19 +66,22 @@ You can then find the installed plugin in the Plugins tab.

 ### Microscopy Image Conversion

-You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conversion`. Currently, this pipeline supports the conversion of `.nd2, .lif, .ndpi, .czi` and acquifer data. After scanning a folder of your choice for microscopy image data, select a file in the first column of the table and preview and export any image data it contains.
+Converts `.lif, .nd2, .czi, .ndpi` and Acquifer data to TIF or OME-Zarr formats. Scan a folder, select files, and export with preserved spatial metadata.

+**Supported Formats:**
+- **TIF** - Standard format for compatibility
+- **OME-Zarr** - Recommended for large datasets, [spec v0.5](https://ngff.openmicroscopy.org/latest/) compliant with automatic physical metadata extraction (voxel sizes, spacing)

 <img src="https://github.com/user-attachments/assets/e377ca71-2f30-447d-825e-d2feebf7061b" alt="Microscopy Image Conversion Widget" style="width:75%; height:auto;">


 ### Image Processing

-1. After opening `Plugins > T-MIDAS > Batch Image Processing`, enter the path to the folder containing the images to be processed (currently supports TIF, later also ZARR). You can also filter for filename suffix.
+1. You start with entering the path to the folder containing the images to be processed (currently supports TIF, later also ZARR) and optionally a filter for filename suffix

 ![image](https://github.com/user-attachments/assets/41ecb689-9abe-4371-83b5-9c5eb37069f9)

-2. As a result, a table appears with the found images. You can click on them to inspect them in the viewer.
+2. After indexing the files, a table appears with the found images. You can click on them to inspect them in the viewer.

 ![image](https://github.com/user-attachments/assets/8360942a-be8f-49ec-bc25-385ee43bd601)

@@ -91,7 +102,27 @@ Note that whenever you click on an `Original File` or `Processed File` in the ta
 The image processing capabilities are powered by several excellent open-source tools:
 - [Cellpose 4](https://github.com/MouseLand/cellpose): Advanced cell segmentation
 - [Trackastra](https://github.com/weigertlab/trackastra): Cell tracking and analysis
+- [VisCy](https://github.com/mehta-lab/VisCy): Virtual staining using deep learning
 - [CAREamics](https://github.com/CAREamics/careamics): Content-aware image restoration and enhancement
+- [Spotiflow](https://github.com/weigertlab/spotiflow): Accurate and efficient spot detection for fluorescence microscopy
+
+#### Processing Function Documentation
+
+Detailed documentation for specific processing functions:
+
+**Core Processing**
+- [Basic Processing Functions](docs/basic_processing.md) - Label and intensity operations, channel splitting/merging, time series
+- [Cellpose Segmentation](docs/cellpose_segmentation.md) - Deep learning cell/nucleus segmentation
+- [TrackAstra Tracking](docs/trackastra_tracking.md) - Cell tracking across time-lapse data
+- [VisCy Virtual Staining](docs/viscy_virtual_staining.md) - Virtual staining of phase/DIC images using deep learning
+
+**Analysis and Quality Control**
+- [Grid View: Intensity + Labels Overlay](docs/grid_view_overlay.md) - Visual QC for segmentation results
+- [Intensity-Based Label Filtering](docs/intensity_label_filter.md) - Filter labels by signal intensity
+- [Regionprops Analysis](docs/regionprops_analysis.md) - Extract quantitative properties from labels
+
+**Advanced Processing**
+- [Advanced Processing Functions](docs/advanced_processing.md) - Denoising (CAREamics), spot detection (Spotiflow), SciPy/scikit-image filters, compression, colocalization

 ### Batch Label Inspection
 If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Batch Label Inspection`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).
@@ -99,7 +130,8 @@ If you have already segmented a folder full of images and now you want to maybe
 <img src="https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e" alt="Batch Label Inspection Widget" style="width:75%; height:auto;">

 ### Crop Anything
-This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: Enter 2D view and go to the first z slice where the object to be cropped is appearing. Activate/select the points layer and click on the object. Terminal shows progress. You can then proceed to select another object (always do this in 2D mode)
+
+This pipeline combines the Segment Anything Model (SAM2; supports YX, ZYX and TYX data) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: Enter 2D view and go to the first z slice where the object to be cropped is appearing. Activate/select the points layer and click on the object. Terminal shows progress. You can then proceed to select another object (always do this in 2D mode)

 <img src="https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443" alt="Crop Anything Widget" style="width:75%; height:auto;">

@@ -107,6 +139,7 @@ This pipeline combines the Segment Anything Model (SAM) for automatic object det


 ### ROI Colocalization
+
 This pipeline quantifies colocalization between labeled regions of interest (ROIs) across multiple image channels. It determines the extent of overlap between ROIs in a reference channel and those in one or two other channels. The output is a table of colocalization counts. Optionally, the size of reference channel ROIs, as well as the total or median size of colocalizing ROIs in the other channels, can be included. Colocalization is determined using Boolean masking. The number of colocalizing instances is determined by counting unique label IDs within the overlapping regions. Typically, the reference channel contains larger structures, while other channels contain smaller, potentially nested, structures. For example, the reference channel might contain cell bodies, with the second and third channels containing nuclei and sub-nuclear objects, respectively.

 <img src="https://github.com/user-attachments/assets/2f9022a0-7b88-4588-a448-250f07a634d7" alt="ROI Colocalization Widget" style="width:75%; height:auto;">