napari-tmidas 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,322 @@
+ #!/usr/bin/env python3
+ """
+ TrackAstra Cell Tracking Module for napari-tmidas
+
+ This module integrates TrackAstra deep learning-based cell tracking into the
+ napari-tmidas batch processing framework. It uses a dedicated conda environment
+ to manage TrackAstra dependencies separately from the main environment.
+ """
+
+ import os
+ import shutil
+ import subprocess
+ from pathlib import Path
+
+ import numpy as np
+ from skimage.io import imread
+
+ # Add the registry import
+ from napari_tmidas._registry import BatchProcessingRegistry
+
+
+ class TrackAstraEnvManager:
+     """Manages the TrackAstra conda environment."""
+
+     @staticmethod
+     def get_conda_cmd():
+         """Get the conda/mamba command available on the system."""
+         # Try mamba first (faster)
+         if shutil.which("mamba"):
+             return "mamba"
+         elif shutil.which("conda"):
+             return "conda"
+         else:
+             raise RuntimeError(
+                 "Neither conda nor mamba found. Please install Anaconda/Miniconda/Miniforge."
+             )
+
+     @staticmethod
+     def check_env_exists():
+         conda_cmd = TrackAstraEnvManager.get_conda_cmd()
+         try:
+             # Try running python --version in the env
+             result = subprocess.run(
+                 [conda_cmd, "run", "-n", "trackastra", "python", "--version"],
+                 capture_output=True,
+                 text=True,
+                 timeout=10,
+             )
+             return result.returncode == 0
+         except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
+             return False
+
+     @staticmethod
+     def create_env():
+         """Create the TrackAstra conda environment if it doesn't exist."""
+         if TrackAstraEnvManager.check_env_exists():
+             print("TrackAstra environment already exists.")
+             return True
+
+         print("Creating TrackAstra conda environment...")
+         conda_cmd = TrackAstraEnvManager.get_conda_cmd()
+
+         # Create environment with Python 3.10 (required for TrackAstra)
+         env_create_cmd = [
+             conda_cmd,
+             "create",
+             "-n",
+             "trackastra",
+             "python=3.10",
+             "--no-default-packages",
+             "-y",
+         ]
+
+         try:
+             subprocess.run(env_create_cmd, check=True)
+
+             # Install ilpy first from conda-forge
+             ilpy_cmd = [
+                 conda_cmd,
+                 "install",
+                 "-n",
+                 "trackastra",
+                 "-c",
+                 "conda-forge",
+                 "-c",
+                 "gurobi",
+                 "-c",
+                 "funkelab",
+                 "ilpy",
+                 "-y",
+             ]
+             subprocess.run(ilpy_cmd, check=True)
+
+             # Install TrackAstra and other dependencies via pip
+             pip_packages = [
+                 "trackastra[napari]",
+                 "scikit-image",
+                 "tifffile",
+                 "torch",
+                 "torchvision",
+             ]
+
+             pip_cmd = [
+                 conda_cmd,
+                 "run",
+                 "-n",
+                 "trackastra",
+                 "pip",
+                 "install",
+             ] + pip_packages
+
+             subprocess.run(pip_cmd, check=True)
+
+             print("TrackAstra environment created successfully!")
+             return True
+
+         except subprocess.CalledProcessError as e:
+             print(f"Error creating TrackAstra environment: {e}")
+             return False
+
+
+ def create_trackastra_script(img_path, mask_path, model, mode, output_path):
+     """Create a Python script to run TrackAstra in the dedicated environment."""
+     script_content = f"""
+ import sys
+ import numpy as np
+ from skimage.io import imread
+ from tifffile import imwrite
+ import torch
+ from trackastra.model import Trackastra
+ from trackastra.tracking import graph_to_ctc, graph_to_napari_tracks
+
+
+ # Load images
+ print('Loading images...')
+ img = imread('{img_path}')
+ mask = imread('{mask_path}')
+ print(f'Img shape: {{img.shape}}, Mask shape: {{mask.shape}}')
+
+
+ # Validate dimensions
+ if mask.ndim not in [3, 4]:
+     raise ValueError(f'Expected 3D (TYX) or 4D (TZYX) mask, got {{mask.ndim}}D')
+
+ if mask.shape[0] < 2:
+     raise ValueError(f'Need at least 2 timepoints, got {{mask.shape[0]}}')
+
+ model = Trackastra.from_pretrained('{model}', device="automatic")
+ track_graph = model.track(img, mask, mode='{mode}')
+ _, masks_tracked = graph_to_ctc(track_graph, mask, outdir=None)
+
+ # Save the tracked masks
+ imwrite('{output_path}', masks_tracked.astype(np.uint32), compression='zlib')
+ print(f'Saved tracked masks to: {output_path}')
+
+ """
+
+     return script_content
+
+
+ @BatchProcessingRegistry.register(
+     name="Track Cells with Trackastra",
+     suffix="_tracked",
+     description="Track cells across time using TrackAstra deep learning (expects TYX or TZYX label images)",
+     parameters={
+         "model": {
+             "type": str,
+             "default": "ctc",
+             "description": "general_2d (nuclei/cells/particles) or ctc (Cell Tracking Challenge; 2D/3D)",
+         },
+         "mode": {
+             "type": str,
+             "default": "greedy",
+             "description": "greedy (fast), ilp (accurate with divisions), greedy_nodiv",
+         },
+         "label_pattern": {
+             "type": str,
+             "default": "_labels.tif",
+             "description": "Filename suffix that identifies label images",
+         },
+     },
+ )
+ def trackastra_tracking(
+     image: np.ndarray,
+     model: str = "ctc",
+     mode: str = "greedy",
+     label_pattern: str = "_labels.tif",
+ ) -> np.ndarray:
+     """
+     Track cells in time-lapse label images using TrackAstra.
+
+     This function takes a time series of segmentation masks and performs
+     automatic cell tracking using the TrackAstra deep learning framework.
+
+     Expected input dimensions:
+     - TYX: Time series of 2D label images
+     - TZYX: Time series of 3D label images (will process each Z-slice separately)
+
+     Parameters:
+     -----------
+     image : np.ndarray
+         Input label image array with time as first dimension
+     model : str
+         TrackAstra model: 'general_2d' or 'ctc' (default: "ctc")
+     mode : str
+         Tracking mode: 'greedy', 'ilp', or 'greedy_nodiv' (default: "greedy")
+     label_pattern : str
+         Filename suffix used to identify label images
+
+     Returns:
+     --------
+     np.ndarray
+         Tracked label image with consistent IDs across time
+     """
+     print(f"Input shape: {image.shape}, dtype: {image.dtype}")
+
+     # Validate input
+     if image.ndim < 3:
+         print(
+             "Input is not a time series (needs at least 3 dimensions). Returning unchanged."
+         )
+         return image
+
+     if image.shape[0] < 2:
+         print(
+             "Input has only one timepoint. Need at least 2 for tracking. Returning unchanged."
+         )
+         return image
+
+     # Ensure TrackAstra environment exists
+     if not TrackAstraEnvManager.check_env_exists():
+         print("TrackAstra environment not found. Creating it now...")
+         if not TrackAstraEnvManager.create_env():
+             print(
+                 "Failed to create TrackAstra environment. Returning unchanged."
+             )
+             return image
+
+     # Get the current file path from the processing context
+     import inspect
+
+     img_path = None
+
+     for frame_info in inspect.stack():
+         frame_locals = frame_info.frame.f_locals
+         if "filepath" in frame_locals:
+             img_path = frame_locals["filepath"]
+             break
+
+     if img_path is None:
+         print("Could not determine input file path. Returning unchanged.")
+         return image
+
+     temp_dir = Path(os.path.dirname(img_path))
+
+     # Create the tracking script
+     script_path = temp_dir / "run_tracking.py"
+
+     # For label images, use the original path as mask_path
+     if label_pattern in os.path.basename(img_path):
+         mask_path = img_path
+         # Find corresponding raw image by removing the label pattern
+         raw_base = os.path.basename(img_path).replace(label_pattern, "")
+         raw_path = os.path.join(os.path.dirname(img_path), raw_base + ".tif")
+         if not os.path.exists(raw_path):
+             print(f"Warning: Could not find raw image for {img_path}")
+             raw_path = img_path  # Fallback to using label as input
+     else:
+         # For raw images, find the corresponding label image
+         raw_path = img_path
+         base_name = os.path.basename(img_path).replace(".tif", "")
+         mask_path = os.path.join(
+             os.path.dirname(img_path), base_name + label_pattern
+         )
+         if not os.path.exists(mask_path):
+             print(f"No label file found for {img_path}")
+             return image
+
+     output_path = temp_dir / os.path.basename(mask_path).replace(
+         label_pattern, "_tracked.tif"
+     )
+
+     script_content = create_trackastra_script(
+         str(raw_path), str(mask_path), model, mode, str(output_path)
+     )
+
+     with open(script_path, "w") as f:
+         f.write(script_content)
+
+     if label_pattern not in img_path:
+         # Run TrackAstra in the dedicated environment
+         conda_cmd = TrackAstraEnvManager.get_conda_cmd()
+         cmd = [
+             conda_cmd,
+             "run",
+             "-n",
+             "trackastra",
+             "python",
+             str(script_path),
+         ]
+         print(f"Running TrackAstra with model='{model}', mode='{mode}'...")
+         result = subprocess.run(cmd, capture_output=True, text=True)
+
+         if result.returncode != 0:
+             print("TrackAstra error:")
+             print(result.stdout)
+             print(result.stderr)
+             print("Returning original image unchanged.")
+             return image
+
+         print(result.stdout)
+
+     # Load and return the tracked result
+     if output_path.exists():
+         tracked = imread(str(output_path))
+         print(f"Tracking completed. Output shape: {tracked.shape}")
+         os.remove(script_path)
+         return tracked
+     else:
+         print("TrackAstra did not produce output. Returning unchanged.")
+         return image
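
For orientation, the script that `create_trackastra_script` generates (and that the plugin runs via `conda run -n trackastra python run_tracking.py`) reduces to the following TrackAstra calls. This is a minimal sketch with placeholder file names, not code shipped in the wheel:

    import numpy as np
    from skimage.io import imread
    from tifffile import imwrite
    from trackastra.model import Trackastra
    from trackastra.tracking import graph_to_ctc

    img = imread("movie.tif")            # raw intensity time series (TYX), placeholder name
    mask = imread("movie_labels.tif")    # segmentation labels, same shape, placeholder name

    # Same calls as in the generated run_tracking.py
    model = Trackastra.from_pretrained("ctc", device="automatic")
    track_graph = model.track(img, mask, mode="greedy")
    _, masks_tracked = graph_to_ctc(track_graph, mask, outdir=None)

    # Tracked labels have consistent IDs across timepoints
    imwrite("movie_tracked.tif", masks_tracked.astype(np.uint32), compression="zlib")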
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: napari-tmidas
- Version: 0.2.0
+ Version: 0.2.2
  Summary: A plugin for batch processing of confocal and whole-slide microscopy images of biological tissues
  Author: Marco Meer
  Author-email: marco.meer@pm.me
@@ -48,13 +48,13 @@ Classifier: Programming Language :: Python :: 3 :: Only
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
- Classifier: Programming Language :: Python :: 3.12
  Classifier: Topic :: Scientific/Engineering :: Image Processing
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: numpy
  Requires-Dist: magicgui
+ Requires-Dist: tqdm
  Requires-Dist: qtpy
  Requires-Dist: scikit-image
  Requires-Dist: pyqt5
@@ -66,6 +66,14 @@ Requires-Dist: torch
  Requires-Dist: torchvision
  Requires-Dist: timm
  Requires-Dist: opencv-python
+ Requires-Dist: cmake
+ Requires-Dist: nd2
+ Requires-Dist: pylibCZIrw
+ Requires-Dist: readlif
+ Requires-Dist: tiffslide
+ Requires-Dist: hydra-core
+ Requires-Dist: eva-decord
+ Requires-Dist: acquifer-napari
  Provides-Extra: testing
  Requires-Dist: tox; extra == "testing"
  Requires-Dist: pytest; extra == "testing"
@@ -90,9 +98,11 @@ Currently, napari-tmidas provides pipelines as widgets for batch image conversio

  ## Installation

+ (Video installation guides: https://www.youtube.com/@macromeer/videos)
+
  First, install Napari in a virtual environment:

- mamba create -y -n napari-tmidas -c conda-forge python=3.11 tqdm
+ mamba create -y -n napari-tmidas -c conda-forge python=3.11
  mamba activate napari-tmidas
  python -m pip install "napari[all]"

@@ -100,27 +110,28 @@ Now you can install `napari-tmidas` via [pip]:

  pip install napari-tmidas

- It is recommended to install the latest development version:
+ However, it is recommended to install the **latest development version**. Also re-run this command from time to time in the activated environment to benefit from newly added features:

  pip install git+https://github.com/macromeer/napari-tmidas.git

- ### Dependencies
-
- To use the Batch Microscopy Image Conversion pipeline, we need some libraries to read microscopy formats:
+ To use the Batch Crop Anything pipeline, we need to install **Segment Anything 2** (2D/3D):

- pip install nd2 readlif tiffslide pylibCZIrw acquifer-napari
+ cd /opt # if the folder does not exist: mkdir /opt && cd /opt
+ git clone https://github.com/facebookresearch/sam2.git && cd sam2
+ pip install -e .
+ curl -L https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt -o checkpoints/sam2.1_hiera_large.pt
+ mamba install -c conda-forge ffmpeg # we also need ffmpeg

- For the Batch Crop Anything pipeline, we need to install MobileSAM and its dependencies:
+ If you want to batch compress image data using [Zstandard](https://github.com/facebook/zstd), use the package manager of your operating system to install it:

- pip install git+https://github.com/ChaoningZhang/MobileSAM.git
+ ~~sudo apt-get install zstd~~ # Pre-installed on Linux :man_shrugging:

+ brew install zstd # for macOS (requires [Homebrew](https://brew.sh/))
+ pip install zstandard # Windows with Python >= 3.7

- If you want to batch compress images using [Zstandard](https://github.com/facebook/zstd), use the package manager of your operating system to install it:

- sudo apt-get install zstd # for Linux
- brew install zstd # for macOS
- choco install zstandard # for Windows

+ And you are done!
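
For illustration, batch compression can also be done from Python with the `zstandard` package mentioned above; this is a minimal sketch (the `images/` folder name is a placeholder), not the plugin's own compression routine:

    import pathlib
    import zstandard

    # Compress every TIFF in a folder to a .tif.zst sidecar file
    cctx = zstandard.ZstdCompressor(level=19)  # high compression level
    for tif in pathlib.Path("images").glob("*.tif"):
        compressed = cctx.compress(tif.read_bytes())
        tif.with_name(tif.name + ".zst").write_bytes(compressed)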

  ## Usage

@@ -153,24 +164,33 @@ You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conv
  ![image](https://github.com/user-attachments/assets/05929660-6672-4f76-89da-4f17749ccfad)

  4. You can click on the images in the table to show them in the viewer. For example first click on one of the `Original Files`, and then the corresponding `Processed File` to see an overlay.
-
+
  <img src="https://github.com/user-attachments/assets/cfe84828-c1cc-4196-9a53-5dfb82d5bfce" alt="Image Processing Widget" style="width:75%; height:auto;">


  Note that whenever you click on an `Original File` or `Processed File` in the table, it will replace the one that is currently shown in the viewer. So naturally, you'd first select the original image, and then the processed image to correctly see the image pair that you want to inspect.

+
+ #### Processing Function Credits
+
+ The image processing capabilities are powered by several excellent open-source tools:
+ - [Cellpose 4](https://github.com/MouseLand/cellpose): Advanced cell segmentation
+ - [Trackastra](https://github.com/weigertlab/trackastra): Cell tracking and analysis
+ - [CAREamics](https://github.com/CAREamics/careamics): Content-aware image restoration and enhancement
+
  ### Batch Label Inspection
  If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Batch Label Inspection`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).

  <img src="https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e" alt="Batch Label Inspection Widget" style="width:75%; height:auto;">

-
  ### Crop Anything
- This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Click the image below to see a video demo.
+ This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Cropping works like this: enter the 2D view and go to the first z slice in which the object to be cropped appears. Activate/select the points layer and click on the object; the terminal shows progress. You can then proceed to select another object (always do this in 2D mode).

  <img src="https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443" alt="Crop Anything Widget" style="width:75%; height:auto;">


+
+
  ### ROI Colocalization
  This pipeline quantifies colocalization between labeled regions of interest (ROIs) across multiple image channels. It determines the extent of overlap between ROIs in a reference channel and those in one or two other channels. The output is a table of colocalization counts. Optionally, the size of reference channel ROIs, as well as the total or median size of colocalizing ROIs in the other channels, can be included. Colocalization is determined using Boolean masking. The number of colocalizing instances is determined by counting unique label IDs within the overlapping regions. Typically, the reference channel contains larger structures, while other channels contain smaller, potentially nested, structures. For example, the reference channel might contain cell bodies, with the second and third channels containing nuclei and sub-nuclear objects, respectively.
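
The counting logic described above (Boolean masking plus unique label IDs in the overlap) can be pictured with a short numpy sketch; the function below is illustrative only and is not the implementation shipped in `napari_tmidas/_roi_colocalization.py`:

    import numpy as np

    def count_colocalizing_rois(ref_labels: np.ndarray, other_labels: np.ndarray) -> dict:
        """For each reference ROI, count distinct label IDs of the other
        channel that fall inside it (Boolean masking + unique IDs)."""
        counts = {}
        for ref_id in np.unique(ref_labels):
            if ref_id == 0:  # 0 is background
                continue
            # Boolean mask selects the other channel's pixels inside this ROI
            overlap_ids = np.unique(other_labels[ref_labels == ref_id])
            counts[int(ref_id)] = int((overlap_ids != 0).sum())
        return counts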
@@ -1,33 +1,40 @@
  napari_tmidas/__init__.py,sha256=YNBLESwk8jr_TlDdkSC1CwH0tf0CKHF1i2_efzLjdpk,589
- napari_tmidas/_crop_anything.py,sha256=NItpE6uzfeKujh8a53TDDkFN2thpKC5NGiXMpAmSnnM,45446
+ napari_tmidas/_crop_anything.py,sha256=2UMUddQ6mdX96FiJPKpQnG1P1atFrxlz41JYWM3933k,103088
  napari_tmidas/_file_conversion.py,sha256=V6evJmggUwOFzJO203Y5ltboHXEWNJQckZPedGRkrLI,72203
- napari_tmidas/_file_selector.py,sha256=sZOY0QNwyAgugsEzG5pqZLfrVeCHwjPEW2C_BHndzyI,39595
+ napari_tmidas/_file_selector.py,sha256=tuWxfmHuvILp70cGl6bsEyhRxKNum45GKD6-st-1hnM,43056
  napari_tmidas/_label_inspection.py,sha256=74V36y5EnGs0vWK1FC7Kui4CPLBW_SIg885PSKeZsJ4,9184
  napari_tmidas/_reader.py,sha256=A9_hdDxtVkVGmbOsbqgnARCSvpEh7GGPo7ylzmbnu8o,2485
- napari_tmidas/_registry.py,sha256=Oz9HFJh41MKRLeKxRuc7x7yzc-OrmoTdRFnfngFU_XE,2007
+ napari_tmidas/_registry.py,sha256=fxBPLFCvXtjSHfcVIRb6KI9DkqIWRUpPPg_3pD8sXns,2110
  napari_tmidas/_roi_colocalization.py,sha256=OVjdHvtFN07DgrtTX8uqbrxZL6jVwl2L3klorgW2C9k,43196
  napari_tmidas/_sample_data.py,sha256=khuv1jemz_fCjqNwEKMFf83Ju0EN4S89IKydsUMmUxw,645
- napari_tmidas/_version.py,sha256=iB5DfB5V6YB5Wo4JmvS-txT42QtmGaWcWp3udRT7zCI,511
+ napari_tmidas/_version.py,sha256=OjGGK5TcHVG44Y62aAqeJH4CskkZoY9ydbHOtCDew50,511
  napari_tmidas/_widget.py,sha256=u9uf9WILAwZg_InhFyjWInY4ej1TV1a59dR8Fe3vNF8,4794
  napari_tmidas/_writer.py,sha256=wbVfHFjjHdybSg37VR4lVmL-kdCkDZsUPDJ66AVLaFQ,1941
  napari_tmidas/napari.yaml,sha256=1Am1dA0-ZtCXk6veIT6jrMz3zwQ7dF8_p9tZTFx_vTg,2641
  napari_tmidas/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ napari_tmidas/_tests/test_file_selector.py,sha256=Sbu0BCXTaQAeUJLtOVjIC87AUARbi8J0bXBlMJe53ew,2687
  napari_tmidas/_tests/test_reader.py,sha256=gN_2StATLZYUL56X27ImJTVru_qSoFiY4vtgajcx3H0,975
+ napari_tmidas/_tests/test_registry.py,sha256=DSI6NdmuIS1sYAa3LrVge0rOS5Ycb3TXFXxol3vDyRA,2061
  napari_tmidas/_tests/test_sample_data.py,sha256=D1HU_C3hWpO3mlSW_7Z94xaYHDtxz0XUrMjQoYop9Ag,104
  napari_tmidas/_tests/test_widget.py,sha256=I_d-Cra_CTcS0QdMItg_HMphvhj0XCx81JnFyCHk9lg,2204
  napari_tmidas/_tests/test_writer.py,sha256=4_MlZM9a5So74J16_4tIOJc6pwTOw9R0-oAE_YioIx4,122
  napari_tmidas/processing_functions/__init__.py,sha256=osXY9jSgDsrwFaS6ShPHP0wGRxMuX1mHRN9EDa9l41g,1891
- napari_tmidas/processing_functions/basic.py,sha256=kQcuA0_yPh6MwzkopcsBghcf3wMSR7uA1QDksS5SG2o,8761
- napari_tmidas/processing_functions/cellpose_env_manager.py,sha256=zngS5eborsJUimFn_g1Lm_YOZk2ZNIKxceWNxOjpxEg,4885
- napari_tmidas/processing_functions/cellpose_segmentation.py,sha256=7BCHj_QA1QJEl1NrsuoIFAJ040_SCWw0U-U_xjCyk18,16187
+ napari_tmidas/processing_functions/basic.py,sha256=NJj7pjVPGZwH2H8lnDtxxK-p3JLpcayAqfmTduuPFDw,26777
+ napari_tmidas/processing_functions/careamics_denoising.py,sha256=DFE_6lefeqckAvx-1EqwzJSU3iR3g3ujBGRnF_fnpoM,11638
+ napari_tmidas/processing_functions/careamics_env_manager.py,sha256=QfmhY5CaeFboUGTxeDlQDPi9WSfeBWp56Zz_qc2luew,11219
+ napari_tmidas/processing_functions/cellpose_env_manager.py,sha256=EWNuHuY0PPw8_mL61ElZ58M0-DTduPKuWUdvsrmKV8I,6191
+ napari_tmidas/processing_functions/cellpose_segmentation.py,sha256=miRPIsrkv0jL1jNdUFwlTkmr6-m9g7U7k9ijyeatUY0,13410
  napari_tmidas/processing_functions/colocalization.py,sha256=AiTTVAcVhKuuHZhrj5IHwbzns7-GE6ewvFqhYy1L-do,7657
  napari_tmidas/processing_functions/file_compression.py,sha256=mxR-yqBdc-T1XI3StIXpW8h5xGdCOtLQjt8uoRFpDSY,6859
  napari_tmidas/processing_functions/sam2_env_manager.py,sha256=WzKOLFeu1KZRRBryKdWkDm6QJolhs3rCj-KD6Q-z9dE,2897
+ napari_tmidas/processing_functions/sam2_mp4.py,sha256=NF0dWar2uyP_yQWxC8e08J6198C2qxEIzQccSI_5g40,10352
  napari_tmidas/processing_functions/scipy_filters.py,sha256=kKpDAlQQ0ZNbkt77QUWi-Bwolk6MMDvtG_bZJV3MjOo,1612
- napari_tmidas/processing_functions/skimage_filters.py,sha256=8UiXp5Wi7V-5prPZO-NgfkVi_kEYs7RUyINiCMxqTl0,15306
- napari_tmidas-0.2.0.dist-info/licenses/LICENSE,sha256=tSjiOqj57exmEIfP2YVPCEeQf0cH49S6HheQR8IiY3g,1485
- napari_tmidas-0.2.0.dist-info/METADATA,sha256=yUMPnhgtxQ3nN_uHvazNs8k7iEc9zUZxhTxCuF2Q9Jg,11560
- napari_tmidas-0.2.0.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
- napari_tmidas-0.2.0.dist-info/entry_points.txt,sha256=fbVjzbJTm4aDMIBtel1Lyqvq-CwXY7wmCOo_zJ-jtRY,60
- napari_tmidas-0.2.0.dist-info/top_level.txt,sha256=63ybdxCZ4SeT13f_Ou4TsivGV_2Gtm_pJOXToAt30_E,14
- napari_tmidas-0.2.0.dist-info/RECORD,,
+ napari_tmidas/processing_functions/skimage_filters.py,sha256=tSBx0nal88ixxVbu5o7ojTn90HgsUTt-aA_T6XLvmyY,16320
+ napari_tmidas/processing_functions/timepoint_merger.py,sha256=DwL5vZBSplXt9dBBrKtMm9aH_NvT3mY7cdbeGg2OU_Y,16567
+ napari_tmidas/processing_functions/trackastra_tracking.py,sha256=IkFk5HoEZmKdcu5jXri4WMhHN1KTADDMxSpeYfPgSbo,9976
+ napari_tmidas-0.2.2.dist-info/licenses/LICENSE,sha256=tSjiOqj57exmEIfP2YVPCEeQf0cH49S6HheQR8IiY3g,1485
+ napari_tmidas-0.2.2.dist-info/METADATA,sha256=cY8vgH2bnjC9elLZ0uxIBVJeTckVbg7UxnV2L5u4wTc,12742
+ napari_tmidas-0.2.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ napari_tmidas-0.2.2.dist-info/entry_points.txt,sha256=fbVjzbJTm4aDMIBtel1Lyqvq-CwXY7wmCOo_zJ-jtRY,60
+ napari_tmidas-0.2.2.dist-info/top_level.txt,sha256=63ybdxCZ4SeT13f_Ou4TsivGV_2Gtm_pJOXToAt30_E,14
+ napari_tmidas-0.2.2.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.3.1)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any