napari-tmidas 0.3.0__tar.gz → 0.3.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/PKG-INFO +16 -19
  2. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/README.md +15 -18
  3. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/advanced_processing.md +6 -14
  4. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/cellpose_segmentation.md +5 -6
  5. napari_tmidas-0.3.2/docs/crop_anything.md +329 -0
  6. napari_tmidas-0.3.2/docs/file_conversion.md +386 -0
  7. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/intensity_label_filter.md +1 -9
  8. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/trackastra_tracking.md +3 -8
  9. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_version.py +3 -3
  10. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/__init__.py +10 -2
  11. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/cellpose_env_manager.py +22 -0
  12. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/intensity_label_filter.py +12 -4
  13. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas.egg-info/PKG-INFO +16 -19
  14. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas.egg-info/SOURCES.txt +2 -0
  15. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/.github/dependabot.yml +0 -0
  16. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/.github/workflows/test_and_deploy.yml +0 -0
  17. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/.gitignore +0 -0
  18. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/.napari-hub/DESCRIPTION.md +0 -0
  19. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/.napari-hub/config.yml +0 -0
  20. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/.pre-commit-config.yaml +0 -0
  21. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/LICENSE +0 -0
  22. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/MANIFEST.in +0 -0
  23. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/basic_processing.md +0 -0
  24. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/careamics_denoising.md +0 -0
  25. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/grid_view_overlay.md +0 -0
  26. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/regionprops_analysis.md +0 -0
  27. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/regionprops_summary.md +0 -0
  28. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/spotiflow_detection.md +0 -0
  29. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/docs/viscy_virtual_staining.md +0 -0
  30. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/examples/grid_overlay_example.py +0 -0
  31. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/examples/intensity_filter_example.py +0 -0
  32. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/examples/regionprops_example.py +0 -0
  33. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/pyproject.toml +0 -0
  34. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/setup.cfg +0 -0
  35. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/__init__.py +0 -0
  36. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_crop_anything.py +0 -0
  37. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_env_manager.py +0 -0
  38. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_file_conversion.py +0 -0
  39. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_file_selector.py +0 -0
  40. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_label_inspection.py +0 -0
  41. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_processing_worker.py +0 -0
  42. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_reader.py +0 -0
  43. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_registry.py +0 -0
  44. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_roi_colocalization.py +0 -0
  45. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_sample_data.py +0 -0
  46. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_crop_anything.py +0 -0
  47. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_env_manager.py +0 -0
  48. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_file_selector.py +0 -0
  49. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_grid_view_overlay.py +0 -0
  50. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_init.py +0 -0
  51. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_intensity_label_filter.py +0 -0
  52. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_label_inspection.py +0 -0
  53. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_processing_basic.py +0 -0
  54. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_processing_worker.py +0 -0
  55. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_reader.py +0 -0
  56. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_regionprops_analysis.py +0 -0
  57. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_registry.py +0 -0
  58. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_sample_data.py +0 -0
  59. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_scipy_filters.py +0 -0
  60. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_skimage_filters.py +0 -0
  61. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_split_channels.py +0 -0
  62. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_spotiflow.py +0 -0
  63. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_tyx_display_fix.py +0 -0
  64. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_ui_utils.py +0 -0
  65. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_viscy_virtual_staining.py +0 -0
  66. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_widget.py +0 -0
  67. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_windows_basic.py +0 -0
  68. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_tests/test_writer.py +0 -0
  69. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_ui_utils.py +0 -0
  70. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_widget.py +0 -0
  71. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/_writer.py +0 -0
  72. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/napari.yaml +0 -0
  73. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/basic.py +0 -0
  74. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/careamics_denoising.py +0 -0
  75. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/careamics_env_manager.py +0 -0
  76. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/cellpose_segmentation.py +0 -0
  77. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/colocalization.py +0 -0
  78. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/file_compression.py +0 -0
  79. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/grid_view_overlay.py +0 -0
  80. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/regionprops_analysis.py +0 -0
  81. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/sam2_env_manager.py +0 -0
  82. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/sam2_mp4.py +0 -0
  83. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/scipy_filters.py +0 -0
  84. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/skimage_filters.py +0 -0
  85. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/spotiflow_detection.py +0 -0
  86. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/spotiflow_env_manager.py +0 -0
  87. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/timepoint_merger.py +0 -0
  88. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/trackastra_tracking.py +0 -0
  89. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/viscy_env_manager.py +0 -0
  90. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas/processing_functions/viscy_virtual_staining.py +0 -0
  91. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas.egg-info/dependency_links.txt +0 -0
  92. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas.egg-info/entry_points.txt +0 -0
  93. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas.egg-info/requires.txt +0 -0
  94. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/src/napari_tmidas.egg-info/top_level.txt +0 -0
  95. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/test_grid_overlay.py +0 -0
  96. {napari_tmidas-0.3.0 → napari_tmidas-0.3.2}/tox.ini +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: napari-tmidas
- Version: 0.3.0
+ Version: 0.3.2
  Summary: A plugin for batch processing of confocal and whole-slide microscopy images of biological tissues
  Author: Marco Meer
  Author-email: marco.meer@pm.me
@@ -151,11 +151,11 @@ Then find napari-tmidas in the **Plugins** menu. [Watch video tutorials →](htt

  ### Core Workflows

- - **[Image Conversion](docs/basic_processing.md#file-conversion)** - Multi-format microscopy file conversion
+ - **[File Conversion](docs/file_conversion.md)** - Multi-format microscopy file conversion (LIF, ND2, CZI, NDPI, Acquifer)
  - **[Batch Processing](docs/basic_processing.md)** - Label operations, filters, channel splitting
  - **[Quality Control](docs/grid_view_overlay.md)** - Visual QC with grid overlay
  - **[Quantification](docs/regionprops_analysis.md)** - Extract measurements from labels
- - **[Colocalization](docs/advanced_processing.md#colocalization)** - Multi-channel ROI analysis
+ - **[Colocalization](docs/advanced_processing.md#colocalization-analysis)** - Multi-channel ROI analysis

  ### Advanced Features

@@ -163,28 +163,25 @@ Then find napari-tmidas in the **Plugins** menu. [Watch video tutorials →](htt
  - [Advanced Filters](docs/advanced_processing.md) - SciPy/scikit-image filters
  - [Batch Label Inspection](docs/basic_processing.md#label-inspection) - Manual correction workflow

- ## 💻 Installation Options
+ ## 💻 Installation

- **Recommended (latest features):**
- ```bash
- pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git
- ```
+ ### Step 1: Install napari

- **Stable release:**
  ```bash
- pip install napari-tmidas
+ mamba create -y -n napari-tmidas -c conda-forge python=3.11
+ mamba activate napari-tmidas
+ python -m pip install "napari[all]"
  ```

- **With deep learning (optional):**
- ```bash
- pip install 'napari-tmidas[deep-learning]' # Includes SAM2
- pip install 'napari-tmidas[all]' # Everything
- ```
+ ### Step 2: Install napari-tmidas

- **Additional setup for SAM2:**
- ```bash
- mamba install -c conda-forge ffmpeg # Required for video processing
- ```
+ | Your Needs | Command |
+ |----------|---------|
+ | **Just process & convert images** | `pip install napari-tmidas` |
+ | **Need AI features** (SAM2, Cellpose, Spotiflow, etc.) | `pip install 'napari-tmidas[deep-learning]'` |
+ | **Want the latest dev features** | `pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git` |
+
+ **Recommended for most users:** `pip install 'napari-tmidas[deep-learning]'`

  ## 🖼️ Screenshots

@@ -58,11 +58,11 @@ Then find napari-tmidas in the **Plugins** menu. [Watch video tutorials →](htt

  ### Core Workflows

- - **[Image Conversion](docs/basic_processing.md#file-conversion)** - Multi-format microscopy file conversion
+ - **[File Conversion](docs/file_conversion.md)** - Multi-format microscopy file conversion (LIF, ND2, CZI, NDPI, Acquifer)
  - **[Batch Processing](docs/basic_processing.md)** - Label operations, filters, channel splitting
  - **[Quality Control](docs/grid_view_overlay.md)** - Visual QC with grid overlay
  - **[Quantification](docs/regionprops_analysis.md)** - Extract measurements from labels
- - **[Colocalization](docs/advanced_processing.md#colocalization)** - Multi-channel ROI analysis
+ - **[Colocalization](docs/advanced_processing.md#colocalization-analysis)** - Multi-channel ROI analysis

  ### Advanced Features

@@ -70,28 +70,25 @@ Then find napari-tmidas in the **Plugins** menu. [Watch video tutorials →](htt
  - [Advanced Filters](docs/advanced_processing.md) - SciPy/scikit-image filters
  - [Batch Label Inspection](docs/basic_processing.md#label-inspection) - Manual correction workflow

- ## 💻 Installation Options
+ ## 💻 Installation

- **Recommended (latest features):**
- ```bash
- pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git
- ```
+ ### Step 1: Install napari

- **Stable release:**
  ```bash
- pip install napari-tmidas
+ mamba create -y -n napari-tmidas -c conda-forge python=3.11
+ mamba activate napari-tmidas
+ python -m pip install "napari[all]"
  ```

- **With deep learning (optional):**
- ```bash
- pip install 'napari-tmidas[deep-learning]' # Includes SAM2
- pip install 'napari-tmidas[all]' # Everything
- ```
+ ### Step 2: Install napari-tmidas

- **Additional setup for SAM2:**
- ```bash
- mamba install -c conda-forge ffmpeg # Required for video processing
- ```
+ | Your Needs | Command |
+ |----------|---------|
+ | **Just process & convert images** | `pip install napari-tmidas` |
+ | **Need AI features** (SAM2, Cellpose, Spotiflow, etc.) | `pip install 'napari-tmidas[deep-learning]'` |
+ | **Want the latest dev features** | `pip install git+https://github.com/MercaderLabAnatomy/napari-tmidas.git` |
+
+ **Recommended for most users:** `pip install 'napari-tmidas[deep-learning]'`

  ## 🖼️ Screenshots

@@ -14,18 +14,13 @@ Image denoising using **CAREamics** (Content-Aware Image Restoration). This proc

  ## Installation

- CAREamics can be installed in your environment or will use a dedicated environment automatically.
-
- ### Manual Installation (Recommended)
+ CAREamics is automatically available when you install napari-tmidas with deep learning support:

  ```bash
- mamba activate napari-tmidas
- pip install careamics
+ pip install 'napari-tmidas[deep-learning]'
  ```

- ### Automatic Installation
-
- If not detected, the plugin creates a dedicated `careamics-env` conda environment automatically.
+ If you installed the basic version without deep learning, the plugin will automatically create a dedicated `careamics-env` environment when first used.

  ## Parameters

@@ -116,16 +111,13 @@ Accurate spot detection for fluorescence microscopy using **Spotiflow**, a deep

  ## Installation

- ### Manual Installation (Recommended)
+ Spotiflow is automatically available when you install napari-tmidas with deep learning support:

  ```bash
- mamba activate napari-tmidas
- pip install spotiflow
+ pip install 'napari-tmidas[deep-learning]'
  ```

- ### Automatic Installation
-
- The plugin automatically creates a `spotiflow-env` conda environment if needed.
+ If you installed the basic version without deep learning, the plugin will automatically create a `spotiflow-env` environment when first used.

  ## Pre-trained Models

@@ -15,16 +15,15 @@ Automatic instance segmentation using **Cellpose 4 (Cellpose-SAM)** with improve

  ## Installation

- Cellpose can be installed in your napari-tmidas environment, or the plugin will automatically create a dedicated environment when first used.
-
- ### Manual Installation (Recommended)
+ Cellpose is automatically available when you install napari-tmidas with deep learning support:

  ```bash
- mamba activate napari-tmidas
- pip install cellpose[gui]
+ pip install 'napari-tmidas[deep-learning]'
  ```

- ### Automatic Installation
+ If you installed the basic version without deep learning, the plugin will automatically create a dedicated `cellpose-env` environment when first used.
+
+ ### What Happens on First Use

  If Cellpose is not detected, the plugin will:
  1. Create a dedicated `cellpose-env` conda environment
@@ -0,0 +1,329 @@
+ # Batch Crop Anything
+
+ Batch Crop Anything is an interactive napari plugin for intelligent image cropping and object extraction. It combines SAM2 (Segment Anything Model 2) for automatic object detection with an intuitive interface for selecting and cropping specific objects from microscopy images.
+
+ ## Overview
+
+ This plugin enables:
+ - **Interactive object segmentation** using the AI-powered SAM2 model
+ - **2D and 3D processing** for single images and image stacks
+ - **Multi-frame propagation** for temporal datasets
+ - **Batch cropping** of selected objects across multiple images
+ - **GPU acceleration** (CUDA, Apple Silicon, CPU fallback)
+
+ ## Installation
+
+ Batch Crop Anything requires the deep learning components. Install with:
+
+ ```bash
+ pip install 'napari-tmidas[deep-learning]'
+ ```
+
+ ### SAM2 Setup
+
+ SAM2 is automatically downloaded on first use. However, you must manually install ffmpeg:
+
+ ```bash
+ # Linux (usually pre-installed)
+ sudo apt-get install ffmpeg
+
+ # macOS
+ brew install ffmpeg
+
+ # Or via conda
+ mamba install -c conda-forge ffmpeg
+ ```
+
+ Optional: set the `SAM2_PATH` environment variable to specify a custom installation location:
+
+ ```bash
+ export SAM2_PATH=/path/to/sam2
+ ```
+
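As a convenience, the two prerequisites above (the ffmpeg binary and the optional `SAM2_PATH` variable) can be verified with a small Python check before launching the plugin. This is an illustrative sketch, not part of the plugin itself:

```python
# Minimal pre-flight check for the SAM2 prerequisites described above (illustrative only).
import os
import shutil

ffmpeg = shutil.which("ffmpeg")
print("ffmpeg:", ffmpeg if ffmpeg else "NOT FOUND - install it as shown above")

sam2_path = os.environ.get("SAM2_PATH")
print("SAM2_PATH:", sam2_path if sam2_path else "not set (SAM2 will be downloaded automatically)")
```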
+ ## Quick Start
+
+ 1. Open napari and navigate to **Plugins → napari-tmidas → Batch Crop Anything**
+ 2. Select your image folder containing `.tif` or `.zarr` files
+ 3. Choose between **2D Mode** (single images) or **3D Mode** (stacks/time-series)
+ 4. Click on objects in the image to segment them with SAM2
+ 5. Use the interactive table to select which objects to crop
+ 6. Save cropped regions to disk
+
+ ## Modes
+
+ ### 2D Mode
+
+ Perfect for single 2D images or when you want to segment individual layers.
+
+ **Interactive Workflow:**
+ - Click on image → Creates positive point prompt
+ - Shift+click → Creates negative point prompt (refine boundaries)
+ - SAM2 segments the object at that point
+ - Click existing objects → Select for cropping
+
+ **Controls:**
+ - Sensitivity slider: Adjust detection confidence (0-100)
+   - Higher values → More aggressive segmentation
+   - Lower values → Conservative segmentation
+ - Next/Previous buttons: Navigate through image collection
+
+ ### 3D Mode
+
+ For volumetric data (Z-stacks) or time-series datasets (time-lapse videos).
+
+ **Data Format Recognition:**
+ The plugin automatically detects your data format:
+ - **TZYX**: Time-series with Z-stacks (e.g., time-lapse confocal)
+ - **TYX**: Time-series without Z dimension (e.g., 2D time-lapse)
+ - **ZYX**: Single Z-stack without time dimension
+
+ **Interactive Workflow:**
+ 1. Navigate to the **first slice** where your object appears (using the dimension slider)
+ 2. Click on the object in **2D view** (not 3D view)
+ 3. SAM2 segments the object at that frame
+ 4. **Automatic propagation**: Segmentation is propagated through all frames using video tracking
+
+ **Important:** Always click on the first frame containing your object. SAM2's video propagation then extends the segmentation forward through time.
+
+ **Controls:**
+ - Use dimension sliders to navigate frames/slices
+ - Sensitivity slider: Control propagation aggressiveness
+ - Objects persist across frames automatically
+
+ ## Interactive Controls
+
+ ### Prompt Modes
+
+ SAM2 supports two ways to specify objects:
+
+ #### Point Mode (Default)
+ Click on the image to add point prompts. Best for complex boundaries or small objects.
+
+ | Action | Effect |
+ |--------|--------|
+ | **Click** | Add positive point (include this region) |
+ | **Shift+Click** | Add negative point (exclude this region) |
+
+ #### Box Mode
+ Draw rectangles around objects. Best for quick segmentation of simple objects.
+
+ 1. Select **Box Mode** from the UI
+ 2. Draw a rectangle around the object
+ 3. SAM2 segments the region inside the box
+ 4. Add more rectangles for multiple objects
+ 5. Shift+draw to refine/subtract from an existing box
+
+ **When to use each:**
+ - **Points**: Fine details, intricate boundaries, removing noise
+ - **Box**: Quick segmentation, well-defined rectangular regions, speed
+
+ ### Navigation
+
+ | Action | Effect |
+ |--------|--------|
+ | **Left/Right Click** | Navigate to adjacent frames (3D mode) |
+ | **Dimension Slider** | Jump to specific frame/slice (3D mode) |
+
+ ### Table Selection
+
+ The label table displays all detected objects:
+ - **Checkbox**: Select objects to crop
+ - **Object ID**: Unique identifier in the segmentation
+ - **Area**: Size in pixels
+ - **Statistics**: Min/max intensity values
+
+ ### Sensitivity Control
+
+ Adjust SAM2's detection confidence:
+ - **Range**: 0-100
+ - **Default**: 50
+ - **Effect on 2D**: Higher values segment larger regions
+ - **Effect on 3D**: Higher values allow more aggressive frame-to-frame propagation
+
+ ## Output Files
+
+ When you save cropped objects, the plugin creates:
+
+ ```
+ output_folder/
+ ├── image1_object_1.tif
+ ├── image1_object_2.tif
+ ├── image2_object_1.tif
+ └── ...
+ ```
+
+ Each cropped region is:
+ - Extracted as a minimal bounding box
+ - Saved as a separate TIFF file
+ - Named with original image + object ID
+
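The bounding-box extraction and naming scheme shown above can be approximated with scikit-image and tifffile. The sketch below is a generic illustration rather than the plugin's own implementation; the file names (`image1.tif`, `image1_labels.tif`) and the output folder are placeholders:

```python
# Illustrative sketch: crop each labeled object to its minimal bounding box and
# save it as "<image name>_object_<ID>.tif", mirroring the layout shown above.
from pathlib import Path

import numpy as np
import tifffile
from skimage.measure import regionprops

image = tifffile.imread("image1.tif")           # intensity image (2D placeholder)
labels = tifffile.imread("image1_labels.tif")   # segmentation saved as a label image (placeholder)
output_folder = Path("output_folder")
output_folder.mkdir(exist_ok=True)

for region in regionprops(labels.astype(np.int32)):
    minr, minc, maxr, maxc = region.bbox        # minimal bounding box of this object
    crop = image[minr:maxr, minc:maxc]
    tifffile.imwrite(output_folder / f"image1_object_{region.label}.tif", crop)
```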
+ ## Advanced Features
+
+ ### Video Conversion (3D Mode)
+
+ For 3D/4D processing, the plugin converts image stacks to MP4 format:
+ - **Automatic**: Conversion happens on first load
+ - **Cached**: MP4 files are reused if they exist
+ - **4D Handling**: TZYX data is projected to TYX using maximum intensity projection
+
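As a concrete illustration of the 4D handling described above, a maximum-intensity projection along the Z axis reduces a TZYX array to TYX. This sketch only assumes a NumPy array, not any plugin internals:

```python
# Illustrative only: project a TZYX stack to TYX via maximum-intensity projection along Z.
import numpy as np

tzyx = np.random.rand(5, 10, 64, 64)   # placeholder stack with shape (T, Z, Y, X)
tyx = tzyx.max(axis=1)                 # collapse the Z axis
print(tyx.shape)                       # (5, 64, 64)
```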
+ ### GPU Acceleration
+
+ Device selection is automatic:
+ - **NVIDIA GPU**: CUDA (if available)
+ - **Apple Silicon**: MPS (Metal Performance Shaders)
+ - **CPU**: Fallback for all systems
+
+ Check console output to see which device is active.
+
+ ### Error Handling
+
+ If SAM2 initialization fails:
+ - Images still load without automatic segmentation
+ - You can still use manual annotation tools
+ - Check console for detailed error messages
+ - Verify the `SAM2_PATH` environment variable if needed
+
+ ## Troubleshooting
+
+ ### "SAM2 not found" warning
+
+ **Solution**: Ensure SAM2 is installed or set `SAM2_PATH`:
+
+ ```bash
+ # Install SAM2
+ pip install 'napari-tmidas[deep-learning]'
+
+ # Or set path manually
+ export SAM2_PATH=/path/to/sam2
+ ```
+
+ ### Segmentation not appearing
+
+ **Possible causes:**
+ 1. SAM2 model not initialized (check console)
+ 2. Image format incompatible (must be `.tif`, `.tiff`, or `.zarr`)
+ 3. GPU out of memory (switch to CPU)
+
+ **Solutions:**
+ - Check console output for error messages
+ - Try reducing image size or resolution
+ - Enable CPU mode: `torch.device('cpu')`
+
+ ### Memory errors on GPU
+
+ **Solutions:**
+ - Reduce image dimensions
+ - Switch to CPU mode
+ - Close other GPU-intensive applications
+ - Clear GPU cache: `torch.cuda.empty_cache()`
+
+ ### Slow 3D processing
+
+ **Causes:**
+ - Large 4D volumes
+ - Limited GPU memory
+ - Network latency (SAM2 checkpoint download)
+
+ **Solutions:**
+ - Use 2D mode for individual slices
+ - Reduce image dimensions
+ - Pre-process images to smaller regions
+
+ ## File Format Support
+
+ | Format | Dimensions | Status |
+ |--------|-----------|--------|
+ | `.tif` / `.tiff` | 2D, 3D, 4D | ✓ Fully supported |
+ | `.zarr` | 2D, 3D, 4D | ✓ Fully supported |
+ | `.png` | 2D | ✗ Not supported |
+ | `.jpg` | 2D | ✗ Not supported |
+
+ ## Performance Tips
+
+ 1. **Pre-process large images**: Downscale to < 2 megapixels for interactive use
+ 2. **Use 2D mode**: For single large images, segment individual slices
+ 3. **GPU selection**: CUDA > MPS > CPU (in terms of speed)
+ 4. **Batch processing**: Process multiple small images faster than one large image
+ 5. **Sensitivity tuning**: Start at 50, adjust based on results
+
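For tip 1, one way to bring an image under roughly 2 megapixels is to rescale it by the square root of the target-to-current pixel ratio. The snippet below is a generic scikit-image sketch under that assumption, not a plugin feature:

```python
# Illustrative sketch: downscale an image to roughly 2 megapixels for interactive use.
import numpy as np
from skimage.transform import rescale

image = np.random.rand(4096, 4096)                       # placeholder large image
target_pixels = 2_000_000
factor = min(1.0, (target_pixels / image.size) ** 0.5)   # isotropic scale factor
small = rescale(image, factor, anti_aliasing=True, preserve_range=True)
print(small.shape)                                       # about (1414, 1414)
```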
+ ## Dataset Examples
+
+ ### Confocal Microscopy
+
+ ```
+ confocal_images/
+ ├── sample1.tif (3D Z-stack)
+ ├── sample2.tif (3D Z-stack)
+ └── ...
+ ```
+ → Use **3D Mode**, select appropriate crop regions
+
+ ### Time-Lapse Video
+
+ ```
+ timelapse/
+ ├── embryo_t001.tif (2D)
+ ├── embryo_t002.tif (2D)
+ ├── embryo_t003.tif (2D)
+ └── ...
+ ```
+ → Process each timepoint with **2D Mode**, or stack into TYX format for 3D mode
+
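Stacking per-timepoint 2D TIFFs into a single TYX file, as suggested above, can be done with tifffile and NumPy. A minimal sketch, assuming all frames share the same shape and that the filenames sort chronologically:

```python
# Illustrative sketch: combine 2D timepoint TIFFs into one TYX stack for 3D mode.
# Assumes filenames sort chronologically (embryo_t001.tif, embryo_t002.tif, ...).
from pathlib import Path

import numpy as np
import tifffile

frames = sorted(Path("timelapse").glob("embryo_t*.tif"))
stack = np.stack([tifffile.imread(f) for f in frames], axis=0)  # shape (T, Y, X)
tifffile.imwrite("timelapse/embryo_TYX.tif", stack)
```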
+ ### Multi-Channel 4D Data
+
+ ```
+ multi_channel/
+ ├── raw_ch1_ch2.tif (4D: TZYX)
+ ├── raw_ch2_ch2.tif (4D: TZYX)
+ └── ...
+ ```
+ → Use **3D Mode**, plugin auto-detects dimensions
+
+ ## Related Features
+
+ - **[Basic Processing](basic_processing.md)**: Image preprocessing and filtering
+ - **[Cellpose Segmentation](cellpose_segmentation.md)**: Alternative segmentation method
+ - **[Grid View Overlay](grid_view_overlay.md)**: Visualize multiple processed images
+ - **[Label Inspection](label_inspection.md)**: Interactive label verification and editing
+
+ ## Technical Details
+
+ ### SAM2 Model
+
+ - **Model**: SAM2.1 Hiera Large
+ - **Input**: RGB images (0-1 range)
+ - **Output**: Binary mask for each object
+ - **Inference**: Single-pass prompting + optional propagation in videos
+
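Since the model expects RGB input in the 0-1 range, single-channel microscopy data has to be normalized and replicated across three channels before prompting. The snippet below is a generic NumPy sketch of that preparation, not the plugin's internal pipeline:

```python
# Illustrative sketch: convert a single-channel image to the RGB, 0-1 range input
# that SAM2 expects. Placeholder data; the plugin performs its own preparation.
import numpy as np

gray = np.random.randint(0, 65535, size=(512, 512), dtype=np.uint16)  # placeholder 16-bit image
norm = (gray - gray.min()) / max(gray.max() - gray.min(), 1)          # scale to 0-1
rgb = np.stack([norm, norm, norm], axis=-1)                           # shape (512, 512, 3)
print(rgb.dtype, rgb.min(), rgb.max())
```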
+ ### Device Detection
+
+ ```
+ macOS:
+ - Check for Apple Silicon (MPS) → Use MPS
+ - Otherwise → Use CPU
+
+ Linux/Windows:
+ - Check for CUDA → Use CUDA
+ - Otherwise → Use CPU
+ ```
+
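The selection logic above maps directly onto PyTorch's availability checks. A minimal sketch, assuming PyTorch is installed (e.g. via the deep-learning extra):

```python
# Illustrative sketch of the device-selection logic described above, using PyTorch.
import torch

if torch.cuda.is_available():
    device = torch.device("cuda")       # NVIDIA GPU
elif torch.backends.mps.is_available():
    device = torch.device("mps")        # Apple Silicon
else:
    device = torch.device("cpu")        # fallback
print(f"Using device: {device}")
```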
+ ## References
+
+ - [SAM2 Paper](https://arxiv.org/abs/2406.07399)
+ - [napari Documentation](https://napari.org/)
+ - [napari-tmidas Repository](https://github.com/MercaderLabAnatomy/napari-tmidas)
+
+ ## Citation
+
+ If you use Batch Crop Anything in your research, please cite:
+
+ ```bibtex
+ @software{napari_tmidas_2024,
+   title = {napari-tmidas: Batch Image Processing for Microscopy},
+   author = {Mercader Lab},
+   year = {2024},
+   url = {https://github.com/MercaderLabAnatomy/napari-tmidas}
+ }
+ ```