napari-tmidas 0.2.6__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,393 @@
+ # processing_functions/viscy_virtual_staining.py
+ """
+ Processing functions for virtual staining using VisCy (Virtual Staining of Cells using deep learning).
+
+ This module provides functionality to perform virtual staining of phase contrast or
+ brightfield microscopy images using the VSCyto3D deep learning model. The model predicts
+ nuclei and membrane channels from transmitted light (phase/DIC) images.
+
+ The VSCyto3D model is specifically designed for 3D imaging with:
+ - Input: Phase contrast or DIC 3D images
+ - Output: Two channels (nuclei and membrane)
+ - Architecture: fcmae-based U-Net
+ - Required Z-stack depth: 15 slices (the model processes Z in batches of 15)
+
+ Reference:
+ Guo et al. (2020) "Revealing architectural order with quantitative label-free imaging and deep learning"
+ DOI: 10.7554/eLife.55502
+
+ Note: This requires the viscy library to be installed in a dedicated environment.
+ """
+ from typing import Union
+
+ import numpy as np
+
+ # Import the environment manager
+ from napari_tmidas.processing_functions.viscy_env_manager import (
+     create_viscy_env,
+     is_env_created,
+     run_viscy_in_env,
+ )
+
+ # Check if viscy is directly available in this environment
+ try:
+     import viscy  # noqa: F401
+
+     VISCY_AVAILABLE = True
+     print("VisCy found in current environment. Using native import.")
+ except ImportError:
+     VISCY_AVAILABLE = False
+     print(
+         "VisCy not found in current environment. Will use dedicated environment."
+     )
+
+ from napari_tmidas._registry import BatchProcessingRegistry
+
+
+ def transpose_dimensions(img, dim_order):
+     """
+     Transpose image dimensions to match expected VisCy input (ZYX).
+
+     Parameters:
+     -----------
+     img : numpy.ndarray
+         Input image
+     dim_order : str
+         Dimension order of the input image (e.g., 'ZYX', 'TZYX', 'YXZ')
+
+     Returns:
+     --------
+     numpy.ndarray
+         Transposed image
+     str
+         New dimension order
+     bool
+         Whether the image has a time dimension
+     """
+     # Handle time dimension if present
+     has_time = "T" in dim_order
+
+     # Standardize dimension order to ZYX or TZYX
+     target_dims = "TZYX" if has_time else "ZYX"
+     transpose_order = [
+         dim_order.index(d) for d in target_dims if d in dim_order
+     ]
+     new_dim_order = "".join(dim_order[i] for i in transpose_order)
+
+     # Perform the transpose
+     img_transposed = np.transpose(img, transpose_order)
+
+     return img_transposed, new_dim_order, has_time
+
+
+ @BatchProcessingRegistry.register(
+     name="VisCy Virtual Staining",
+     suffix="_virtual_stain",
+     description="Virtual staining of phase/DIC images using the VSCyto3D deep learning model. Predicts nuclei and membrane channels.",
+     parameters={
+         "dim_order": {
+             "type": str,
+             "default": "ZYX",
+             "description": "Dimension order of the input (e.g., 'ZYX', 'TZYX', 'YXZ')",
+         },
+         "z_batch_size": {
+             "type": int,
+             "default": 15,
+             "min": 15,
+             "max": 15,
+             "description": "Z slices per batch (must be 15 for the VSCyto3D model)",
+         },
+         "output_channel": {
+             "type": str,
+             "default": "both",
+             "options": ["both", "nuclei", "membrane"],
+             "description": "Which channel(s) to output: 'both' (2 channels), 'nuclei' only, or 'membrane' only",
+         },
+     },
+ )
+ def viscy_virtual_staining(
+     image: np.ndarray,
+     dim_order: str = "ZYX",
+     z_batch_size: int = 15,
+     output_channel: str = "both",
+     _source_filepath: Union[str, None] = None,  # Hidden parameter
+ ) -> np.ndarray:
+     """
+     Perform virtual staining on phase/DIC images using VisCy.
+
+     This function takes a 3D phase contrast or DIC microscopy image and performs
+     virtual staining using the VSCyto3D deep learning model. The model predicts
+     two channels: nuclei and membrane.
+
+     If VisCy is not available in the current environment, a dedicated virtual
+     environment will be created to run VisCy.
+
+     Parameters:
+     -----------
+     image : numpy.ndarray
+         Input image (phase contrast or DIC microscopy)
+     dim_order : str
+         Dimension order of the input (e.g., 'ZYX', 'TZYX', 'YXZ') (default: "ZYX")
+     z_batch_size : int
+         Number of Z slices to process at once (default: 15, required by VSCyto3D)
+     output_channel : str
+         Which channel(s) to output: 'both', 'nuclei', or 'membrane' (default: "both")
+     _source_filepath : str
+         Hidden parameter for potential optimization (not currently used)
+
+     Returns:
+     --------
+     numpy.ndarray
+         Virtual stained image
+         - If output_channel='both': shape (Z, 2, Y, X) or (T, Z, 2, Y, X),
+           where channel 0 is nuclei and channel 1 is membrane
+         - If output_channel='nuclei' or 'membrane': shape (Z, Y, X) or (T, Z, Y, X)
+
+     Raises:
+     -------
+     ValueError
+         If the input has no Z dimension or fewer than 15 Z slices
+     RuntimeError
+         If VisCy environment setup or processing fails
+
+     Examples:
+     ---------
+     >>> # Process a 3D phase contrast image
+     >>> phase_image = np.random.rand(15, 512, 512)  # (Z, Y, X)
+     >>> virtual_stain = viscy_virtual_staining(phase_image, dim_order='ZYX')
+     >>> virtual_stain.shape
+     (15, 2, 512, 512)  # (Z, C, Y, X)
+
+     >>> # Get only the nuclei channel
+     >>> nuclei = viscy_virtual_staining(phase_image, dim_order='ZYX', output_channel='nuclei')
+     >>> nuclei.shape
+     (15, 512, 512)  # (Z, Y, X)
+     """
+     # Validate z_batch_size
+     if z_batch_size != 15:
+         print(
+             f"Warning: VSCyto3D requires z_batch_size=15, but {z_batch_size} was provided. Using 15."
+         )
+         z_batch_size = 15
+
+     # Check dimension order
+     if "Z" not in dim_order:
+         raise ValueError(
+             "VisCy virtual staining requires 3D images with a Z dimension. "
+             f"Current dimension order: {dim_order}"
+         )
+
+     # Transpose dimensions if needed
+     img_transposed, new_dim_order, has_time = transpose_dimensions(
+         image, dim_order
+     )
+
+     # Check Z dimension size
+     z_axis = new_dim_order.index("Z")
+     z_size = img_transposed.shape[z_axis]
+
+     if z_size < 15:
+         raise ValueError(
+             "VisCy virtual staining requires at least 15 Z slices. "
+             f"Current image has {z_size} slices. "
+             "Consider using a different processing method or acquiring more Z slices."
+         )
+
+     print(f"Processing image with shape {img_transposed.shape}")
+     print(f"Dimension order: {new_dim_order}")
+
+     # Process based on whether we have a time dimension
+     if has_time:
+         # Process each timepoint separately
+         n_timepoints = img_transposed.shape[0]
+         print(f"Processing {n_timepoints} timepoints...")
+
+         results = []
+         for t in range(n_timepoints):
+             print(f"  Processing timepoint {t+1}/{n_timepoints}...")
+             timepoint_img = img_transposed[t]  # (Z, Y, X)
+
+             # Process this timepoint
+             result = _process_single_volume(
+                 timepoint_img, z_batch_size, output_channel
+             )
+             results.append(result)
+
+         # Stack results along the time axis
+         final_result = np.stack(results, axis=0)
+     else:
+         # Process a single volume
+         final_result = _process_single_volume(
+             img_transposed, z_batch_size, output_channel
+         )
+
+     print(f"✓ Processing complete. Output shape: {final_result.shape}")
+
+     return final_result
+
+
+ def _process_single_volume(
+     image: np.ndarray, z_batch_size: int, output_channel: str
+ ) -> np.ndarray:
+     """
+     Process a single 3D volume (ZYX) through VisCy.
+
+     Parameters:
+     -----------
+     image : np.ndarray
+         Input image with shape (Z, Y, X)
+     z_batch_size : int
+         Number of Z slices to process at once
+     output_channel : str
+         Which channel(s) to output: 'both', 'nuclei', or 'membrane'
+
+     Returns:
+     --------
+     np.ndarray
+         Virtual stained image
+         - If output_channel='both': shape (Z, 2, Y, X)
+         - If output_channel='nuclei' or 'membrane': shape (Z, Y, X)
+     """
+     # Check if VisCy is available directly
+     if VISCY_AVAILABLE:
+         result = _run_viscy_native(image, z_batch_size)
+     else:
+         # Create the dedicated environment on first use
+         if not is_env_created():
+             print("VisCy environment not found. Creating environment...")
+             print("This is a one-time setup and may take several minutes...")
+             try:
+                 create_viscy_env()
+                 print("✓ VisCy environment created successfully")
+             except Exception as e:
+                 raise RuntimeError(
+                     f"Failed to create VisCy environment: {e}"
+                 ) from e
+
+         # Run in the dedicated environment
+         print("Running VisCy in dedicated environment...")
+         result = run_viscy_in_env(image, z_batch_size)
+
+     # result shape: (Z, 2, Y, X); select the requested channel(s)
+     if output_channel == "nuclei":
+         return result[:, 0, :, :]  # (Z, Y, X)
+     elif output_channel == "membrane":
+         return result[:, 1, :, :]  # (Z, Y, X)
+     else:  # "both"
+         return result  # (Z, 2, Y, X)
+
+
+ def _run_viscy_native(image: np.ndarray, z_batch_size: int) -> np.ndarray:
+     """
+     Run VisCy natively in the current environment.
+
+     Parameters:
+     -----------
+     image : np.ndarray
+         Input image with shape (Z, Y, X)
+     z_batch_size : int
+         Number of Z slices to process at once
+
+     Returns:
+     --------
+     np.ndarray
+         Virtual stained image with shape (Z, 2, Y, X)
+     """
+     import torch
+     from viscy.translation.engine import VSUNet
+
+     # Get model path from environment manager
+     from napari_tmidas.processing_functions.viscy_env_manager import (
+         get_model_path,
+     )
+
+     model_path = get_model_path()
+
+     # Load the model
+     model = VSUNet.load_from_checkpoint(
+         model_path,
+         architecture="fcmae",
+         model_config={
+             "in_channels": 1,
+             "out_channels": 2,
+             "encoder_blocks": [3, 3, 9, 3],
+             "dims": [96, 192, 384, 768],
+             "decoder_conv_blocks": 2,
+             "stem_kernel_size": [5, 4, 4],
+             "in_stack_depth": 15,
+             "pretraining": False,
+         },
+     )
+     model.eval()
+
+     if torch.cuda.is_available():
+         model = model.cuda()
+         print("Using GPU for inference")
+     else:
+         print("Using CPU for inference")
+
+     # Process in batches of z_batch_size slices
+     n_z = image.shape[0]
+     n_batches = (n_z + z_batch_size - 1) // z_batch_size
+     all_predictions = []
+
+     for batch_idx in range(n_batches):
+         start_z = batch_idx * z_batch_size
+         end_z = min((batch_idx + 1) * z_batch_size, n_z)
+
+         # Get batch
+         batch_data = image[start_z:end_z]
+         actual_size = batch_data.shape[0]
+
+         # Edge-pad the last batch up to the required depth if necessary
+         if actual_size < z_batch_size:
+             pad_size = z_batch_size - actual_size
+             batch_data = np.pad(
+                 batch_data, ((0, pad_size), (0, 0), (0, 0)), mode="edge"
+             )
+
+         # Normalize to [0, 1] using the 1st/99th percentiles
+         p_low, p_high = np.percentile(batch_data, [1, 99])
+         batch_data = np.clip(
+             (batch_data - p_low) / (p_high - p_low + 1e-8), 0, 1
+         )
+
+         # Convert to tensor: (Z, Y, X) -> (1, 1, Z, Y, X)
+         batch_tensor = torch.from_numpy(batch_data.astype(np.float32))[
+             None, None, :, :, :
+         ]
+         if torch.cuda.is_available():
+             batch_tensor = batch_tensor.cuda()
+
+         # Run prediction
+         with torch.no_grad():
+             pred = model(batch_tensor)  # (1, 2, Z, Y, X)
+
+         # Reorder output (2, Z, Y, X) -> (Z, 2, Y, X) and drop padded slices
+         pred_np = pred[0].cpu().numpy().transpose(1, 0, 2, 3)[:actual_size]
+         all_predictions.append(pred_np)
+
+         # Free memory
+         del batch_data, batch_tensor, pred
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+
+     # Concatenate all predictions: (Z, 2, Y, X)
+     full_prediction = np.concatenate(all_predictions, axis=0)
+
+     return full_prediction
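To make the Z-handling concrete, here is a minimal numpy-only sketch of the batching and edge-padding arithmetic that `_run_viscy_native` applies (dummy shapes and a hypothetical 40-slice depth; no model call):

```python
import numpy as np

# A 40-slice stack is split into ceil(40 / 15) = 3 batches; the last batch
# holds only 10 slices and is edge-padded up to the required depth of 15.
n_z, z_batch_size = 40, 15
image = np.zeros((n_z, 64, 64), dtype=np.float32)

n_batches = (n_z + z_batch_size - 1) // z_batch_size  # ceil division -> 3
for batch_idx in range(n_batches):
    start_z = batch_idx * z_batch_size
    end_z = min((batch_idx + 1) * z_batch_size, n_z)
    batch = image[start_z:end_z]
    actual_size = batch.shape[0]  # 15, 15, 10
    if actual_size < z_batch_size:
        batch = np.pad(
            batch, ((0, z_batch_size - actual_size), (0, 0), (0, 0)), mode="edge"
        )
    # The model runs on `batch` here; predictions are cropped back with
    # [:actual_size] before concatenation, so the output keeps all n_z slices.
    print(batch.shape)  # always (15, 64, 64)
```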
@@ -0,0 +1,246 @@
+ Metadata-Version: 2.4
+ Name: napari-tmidas
+ Version: 0.3.1
+ Summary: A plugin for batch processing of confocal and whole-slide microscopy images of biological tissues
+ Author: Marco Meer
+ Author-email: marco.meer@pm.me
+ License:
+ Copyright (c) 2025, Marco Meer
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name of copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ Project-URL: Bug Tracker, https://github.com/macromeer/napari-tmidas/issues
+ Project-URL: Documentation, https://github.com/macromeer/napari-tmidas#README.md
+ Project-URL: Source Code, https://github.com/macromeer/napari-tmidas
+ Project-URL: User Support, https://github.com/macromeer/napari-tmidas/issues
+ Classifier: Development Status :: 2 - Pre-Alpha
+ Classifier: Framework :: napari
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: BSD License
+ Classifier: Operating System :: MacOS
+ Classifier: Operating System :: POSIX :: Linux
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Topic :: Scientific/Engineering :: Image Processing
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: numpy<3.0,>=1.23.0
+ Requires-Dist: magicgui
+ Requires-Dist: tqdm
+ Requires-Dist: qtpy
+ Requires-Dist: scikit-image>=0.19.0
+ Requires-Dist: scikit-learn-extra>=0.3.0
+ Requires-Dist: pyqt5
+ Requires-Dist: zarr
+ Requires-Dist: ome-zarr
+ Requires-Dist: napari-ome-zarr
+ Requires-Dist: nd2
+ Requires-Dist: pylibCZIrw
+ Requires-Dist: readlif
+ Requires-Dist: tiffslide
+ Requires-Dist: acquifer-napari
+ Provides-Extra: testing
+ Requires-Dist: tox; extra == "testing"
+ Requires-Dist: pytest>=7.0.0; extra == "testing"
+ Requires-Dist: pytest-cov; extra == "testing"
+ Requires-Dist: pytest-qt; extra == "testing"
+ Requires-Dist: pytest-timeout; extra == "testing"
+ Requires-Dist: napari; extra == "testing"
+ Requires-Dist: pyqt5; extra == "testing"
+ Requires-Dist: psygnal>=0.8.0; extra == "testing"
+ Requires-Dist: scikit-learn-extra>=0.3.0; extra == "testing"
+ Provides-Extra: clustering
+ Requires-Dist: scikit-learn-extra>=0.3.0; extra == "clustering"
+ Provides-Extra: deep-learning
+ Requires-Dist: torch>=1.12.0; extra == "deep-learning"
+ Requires-Dist: torchvision>=0.13.0; extra == "deep-learning"
+ Requires-Dist: timm; extra == "deep-learning"
+ Requires-Dist: opencv-python; extra == "deep-learning"
+ Requires-Dist: cmake; extra == "deep-learning"
+ Requires-Dist: hydra-core; extra == "deep-learning"
+ Requires-Dist: eva-decord; extra == "deep-learning"
+ Provides-Extra: all
+ Requires-Dist: napari-tmidas[clustering,deep-learning,testing]; extra == "all"
+ Dynamic: license-file
+
+ # napari-tmidas
+
+ [![License BSD-3](https://img.shields.io/pypi/l/napari-tmidas.svg?color=green)](https://github.com/macromeer/napari-tmidas/raw/main/LICENSE)
+ [![PyPI](https://img.shields.io/pypi/v/napari-tmidas.svg?color=green)](https://pypi.org/project/napari-tmidas)
+ [![Python Version](https://img.shields.io/pypi/pyversions/napari-tmidas.svg?color=green)](https://python.org)
+ [![Downloads](https://static.pepy.tech/badge/napari-tmidas)](https://pepy.tech/project/napari-tmidas)
+ [![DOI](https://zenodo.org/badge/943353883.svg)](https://doi.org/10.5281/zenodo.17988815)
+ [![tests](https://github.com/macromeer/napari-tmidas/workflows/tests/badge.svg)](https://github.com/macromeer/napari-tmidas/actions)
+
+ **AI-powered batch processing for microscopy images**
+
+ Transform, analyze, and quantify microscopy data at scale with deep learning, from file conversion to segmentation, tracking, and analysis.
+
+ ## ✨ Key Features
+
+ 🤖 **5 AI Methods Built-In**
+ - Virtual staining (VisCy) • Denoising (CAREamics) • Spot detection (Spotiflow) • Segmentation (Cellpose) • Tracking (Trackastra)
+ - Auto-install in isolated environments • No dependency conflicts • GPU acceleration
+
+ 🔄 **Universal File Conversion**
+ - Convert LIF, ND2, CZI, NDPI, Acquifer → TIFF or OME-Zarr
+ - Preserve spatial metadata automatically
+
+ ⚡ **Batch Processing**
+ - Process entire folders with one click • 40+ processing functions • Progress tracking & quality control
+
+ 📊 **Complete Analysis Pipeline**
+ - Segmentation → Tracking → Quantification → Colocalization
+
+ ## 🚀 Quick Start
+
+ ```bash
+ # Install napari and the plugin
+ mamba create -y -n napari-tmidas -c conda-forge python=3.11
+ mamba activate napari-tmidas
+ pip install "napari[all]"
+ pip install napari-tmidas
+
+ # Launch napari
+ napari
+ ```
+
+ Then find napari-tmidas in the **Plugins** menu. [Watch video tutorials →](https://www.youtube.com/@macromeer/videos)
+
+ > **💡 Tip**: AI methods auto-install their dependencies on first use; no manual setup required!
+
+ ## 📖 Documentation
+
+ ### AI-Powered Methods
+
+ | Method | Description | Documentation |
+ |--------|-------------|---------------|
+ | 🎨 **VisCy** | Virtual staining from phase/DIC | [Guide](docs/viscy_virtual_staining.md) |
+ | 🔧 **CAREamics** | Noise2Void/CARE denoising | [Guide](docs/careamics_denoising.md) |
+ | 🎯 **Spotiflow** | Spot/puncta detection | [Guide](docs/spotiflow_detection.md) |
+ | 🔬 **Cellpose** | Cell/nucleus segmentation | [Guide](docs/cellpose_segmentation.md) |
+ | 📈 **Trackastra** | Cell tracking over time | [Guide](docs/trackastra_tracking.md) |
+
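The registered processing functions can also be called from scripts on plain numpy arrays. A minimal sketch for this release's VisCy entry point (module and function names taken from the 0.3.1 source above; the random array is a stand-in for a real phase-contrast stack):

```python
import numpy as np

from napari_tmidas.processing_functions.viscy_virtual_staining import (
    viscy_virtual_staining,
)

# Stand-in (Z, Y, X) phase-contrast stack at the 15-slice minimum depth
phase = np.random.rand(15, 512, 512).astype(np.float32)

# Returns (Z, 2, Y, X): channel 0 = nuclei, channel 1 = membrane
stained = viscy_virtual_staining(phase, dim_order="ZYX", output_channel="both")
nuclei, membrane = stained[:, 0], stained[:, 1]
```

On first use this triggers the one-time dedicated-environment setup described in the module docstring.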
+ ### Core Workflows
+
+ - **[File Conversion](docs/file_conversion.md)** - Multi-format microscopy file conversion (LIF, ND2, CZI, NDPI, Acquifer)
+ - **[Batch Processing](docs/basic_processing.md)** - Label operations, filters, channel splitting
+ - **[Quality Control](docs/grid_view_overlay.md)** - Visual QC with grid overlay
+ - **[Quantification](docs/regionprops_analysis.md)** - Extract measurements from labels
+ - **[Colocalization](docs/advanced_processing.md#colocalization-analysis)** - Multi-channel ROI analysis
+
+ ### Advanced Features
+
+ - [SAM2 Crop Anything](docs/advanced_processing.md#sam2) - Interactive object cropping
+ - [Advanced Filters](docs/advanced_processing.md) - SciPy/scikit-image filters
+ - [Batch Label Inspection](docs/basic_processing.md#label-inspection) - Manual correction workflow
+
+ ## 💻 Installation
+
+ ### Step 1: Install napari
+
+ ```bash
+ mamba create -y -n napari-tmidas -c conda-forge python=3.11
+ mamba activate napari-tmidas
+ python -m pip install "napari[all]"
+ ```
+
+ ### Step 2: Install napari-tmidas
+
+ | Your Needs | Command |
+ |----------|---------|
+ | **Just process & convert images** | `pip install napari-tmidas` |
+ | **Need AI features** (SAM2, Cellpose, Spotiflow, etc.) | `pip install 'napari-tmidas[deep-learning]'` |
+ | **Want the latest dev features** | `pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git` |
+
+ **Recommended for most users:** `pip install 'napari-tmidas[deep-learning]'`
+
+ ## 🖼️ Screenshots
+
+ <details>
+ <summary><b>File Conversion Widget</b></summary>
+
+ <img src="https://github.com/user-attachments/assets/e377ca71-2f30-447d-825e-d2feebf7061b" alt="File Conversion" width="600">
+
+ Convert proprietary formats to open standards with metadata preservation.
+ </details>
+
+ <details>
+ <summary><b>Batch Processing Interface</b></summary>
+
+ <img src="https://github.com/user-attachments/assets/cfe84828-c1cc-4196-9a53-5dfb82d5bfce" alt="Batch Processing" width="600">
+
+ Select files → Choose processing function → Run on entire dataset.
+ </details>
+
+ <details>
+ <summary><b>Label Inspection</b></summary>
+
+ <img src="https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e" alt="Label Inspection" width="600">
+
+ Inspect and manually correct segmentation results.
+ </details>
+
+ <details>
+ <summary><b>SAM2 Crop Anything</b></summary>
+
+ <img src="https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443" alt="Crop Anything" width="600">
+
+ Interactive object selection and cropping with SAM2.
+ </details>
+
+ ## 🤝 Contributing
+
+ Contributions are welcome! Please ensure tests pass before submitting PRs:
+
+ ```bash
+ pip install tox
+ tox
+ ```
+
+ ## 📄 License
+
+ BSD-3 License - see [LICENSE](LICENSE) for details.
+
+ ## 🐛 Issues
+
+ Found a bug or have a feature request? [Open an issue](https://github.com/MercaderLabAnatomy/napari-tmidas/issues)
+
+ ## 🙏 Acknowledgments
+
+ Built with [napari](https://github.com/napari/napari) and powered by:
+ - [Cellpose](https://github.com/MouseLand/cellpose) • [VisCy](https://github.com/mehta-lab/VisCy) • [CAREamics](https://github.com/CAREamics/careamics) • [Spotiflow](https://github.com/weigertlab/spotiflow) • [Trackastra](https://github.com/weigertlab/trackastra) • [SAM2](https://github.com/facebookresearch/segment-anything-2)
+
+ ---
+
+ [PyPI]: https://pypi.org/project/napari-tmidas
+ [pip]: https://pypi.org/project/pip/
+ [tox]: https://tox.readthedocs.io/en/latest/