napari-tmidas 0.1.7.1__py3-none-any.whl → 0.1.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napari_tmidas/_crop_anything.py +137 -5
- napari_tmidas/_file_conversion.py +40 -18
- napari_tmidas/_file_selector.py +120 -13
- napari_tmidas/_version.py +2 -2
- napari_tmidas/processing_functions/basic.py +104 -0
- napari_tmidas/processing_functions/cellpose_env_manager.py +172 -0
- napari_tmidas/processing_functions/cellpose_segmentation.py +511 -0
- napari_tmidas/processing_functions/colocalization.py +17 -19
- napari_tmidas/processing_functions/file_compression.py +205 -0
- napari_tmidas/processing_functions/skimage_filters.py +25 -6
- {napari_tmidas-0.1.7.1.dist-info → napari_tmidas-0.1.8.5.dist-info}/METADATA +33 -9
- {napari_tmidas-0.1.7.1.dist-info → napari_tmidas-0.1.8.5.dist-info}/RECORD +16 -13
- {napari_tmidas-0.1.7.1.dist-info → napari_tmidas-0.1.8.5.dist-info}/WHEEL +1 -1
- {napari_tmidas-0.1.7.1.dist-info → napari_tmidas-0.1.8.5.dist-info}/entry_points.txt +0 -0
- {napari_tmidas-0.1.7.1.dist-info → napari_tmidas-0.1.8.5.dist-info}/licenses/LICENSE +0 -0
- {napari_tmidas-0.1.7.1.dist-info → napari_tmidas-0.1.8.5.dist-info}/top_level.txt +0 -0
@@ -11,8 +11,6 @@ returns statistics about their colocalization.
 import numpy as np
 from skimage import measure
 
-from napari_tmidas._registry import BatchProcessingRegistry
-
 
 def get_nonzero_labels(image):
     """Get unique, non-zero labels from an image."""
@@ -133,23 +131,23 @@ def process_single_roi(
     return result
 
 
-@BatchProcessingRegistry.register(
-    name="ROI Colocalization",
-    suffix="_coloc",
-    description="Analyze colocalization between ROIs in multiple channel label images",
-    parameters={
-        "get_sizes": {
-            "type": bool,
-            "default": False,
-            "description": "Calculate size statistics",
-        },
-        "size_method": {
-            "type": str,
-            "default": "median",
-            "description": "Method for size calculation (median or sum)",
-        },
-    },
-)
+# @BatchProcessingRegistry.register(
+#     name="ROI Colocalization",
+#     suffix="_coloc",
+#     description="Analyze colocalization between ROIs in multiple channel label images",
+#     parameters={
+#         "get_sizes": {
+#             "type": bool,
+#             "default": False,
+#             "description": "Calculate size statistics",
+#         },
+#         "size_method": {
+#             "type": str,
+#             "default": "median",
+#             "description": "Method for size calculation (median or sum)",
+#         },
+#     },
+# )
 def roi_colocalization(image, get_sizes=False, size_method="median"):
     """
     Calculate colocalization between channels for a multi-channel label image.
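For orientation: `BatchProcessingRegistry.register` is the decorator that exposes plain functions as batch-processing steps throughout this release. Inferred from the calls visible in this diff (the registry internals themselves are not shown), a custom step would be registered roughly as below; `invert_intensities` is a hypothetical example, not part of the package:

```python
# Hypothetical registration sketch, mirroring the decorator calls in this diff.
import numpy as np

from napari_tmidas._registry import BatchProcessingRegistry


@BatchProcessingRegistry.register(
    name="Invert Intensities",  # display name shown in the plugin UI
    suffix="_inverted",  # appended to output filenames
    description="Invert image intensities",
    parameters={
        "offset": {
            "type": int,
            "default": 0,
            "description": "Value added after inversion",
        },
    },
)
def invert_intensities(image: np.ndarray, offset: int = 0) -> np.ndarray:
    """Return the intensity-inverted image."""
    return image.max() - image + offset
```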
@@ -0,0 +1,205 @@
+# processing_functions/file_compression.py
+"""
+Processing functions for compressing files using pzstd.
+
+This module provides a function to compress files using the Zstandard compression algorithm
+via the pzstd tool. It compresses image files after they have been processed by other functions
+in the batch processing pipeline.
+
+Note: This requires the pzstd tool to be installed on the system.
+"""
+import subprocess
+
+import numpy as np
+
+from napari_tmidas._file_selector import ProcessingWorker
+from napari_tmidas._registry import BatchProcessingRegistry
+
+
+def check_pzstd_installed():
+    """Check if pzstd is installed on the system."""
+    try:
+        subprocess.run(["pzstd", "--version"], capture_output=True, text=True)
+        return True
+    except (subprocess.SubprocessError, FileNotFoundError):
+        return False
+
+
+def compress_file(file_path, remove_source=False, compression_level=3):
+    """
+    Compress a file using pzstd.
+
+    Parameters:
+    -----------
+    file_path : str
+        Path to the file to compress
+    remove_source : bool
+        Whether to remove the source file after compression
+    compression_level : int
+        Compression level (1-22)
+
+    Returns:
+    --------
+    tuple
+        (success, compressed_file_path)
+    """
+    compressed_file = f"{file_path}.zst"
+    command = ["pzstd", "--quiet"]
+
+    # Set compression level
+    if compression_level >= 20:
+        command.extend(["--ultra", f"-{compression_level}"])
+    else:
+        command.append(f"-{compression_level}")
+
+    # Remove source if requested
+    if remove_source:
+        command.append("--rm")
+
+    command.append(file_path)
+
+    try:
+        result = subprocess.run(command, capture_output=True, text=True)
+        return result.returncode == 0, compressed_file
+    except (subprocess.SubprocessError, FileNotFoundError):
+        return False, None
+
+
+@BatchProcessingRegistry.register(
+    name="Compress with Zstandard",
+    suffix="_compressed",
+    description="Compress the processed image file using Zstandard (requires pzstd to be installed)",
+    parameters={
+        "remove_source": {
+            "type": bool,
+            "default": False,
+            "description": "Remove the source file after compression",
+        },
+        "compression_level": {
+            "type": int,
+            "default": 3,
+            "min": 1,
+            "max": 22,
+            "description": "Compression level (1-22, higher = better compression but slower)",
+        },
+    },
+)
+def compress_with_zstandard(
+    image: np.ndarray, remove_source: bool = False, compression_level: int = 3
+) -> np.ndarray:
+    """
+    Process an image and compress the output file using Zstandard.
+
+    This function:
+    1. Takes an image array as input
+    2. Returns the original image unchanged (compression happens to the saved file)
+    3. The batch processing system saves the file
+    4. This function then compresses the saved file using pzstd
+
+    Parameters:
+    -----------
+    image : numpy.ndarray
+        Input image array
+    remove_source : bool
+        Whether to remove the source file after compression (default: False)
+    compression_level : int
+        Compression level (1-22) (default: 3)
+
+    Returns:
+    --------
+    numpy.ndarray
+        The original image (unchanged)
+    """
+    # Check if pzstd is installed
+    if not check_pzstd_installed():
+        print("Warning: pzstd is not installed. Compression will be skipped.")
+        return image
+
+    # Instead of trying to modify the array, set attributes on the processing function itself
+    compress_with_zstandard.compress_after_save = True
+    compress_with_zstandard.remove_source = remove_source
+    compress_with_zstandard.compression_level = compression_level
+
+    # Return the image unchanged - compression happens after saving
+    return image
+
+
+# Monkey patch the batch processing system to compress files after saving
+# This is a bit of a hack, but it allows us to compress files after they've been saved
+# by the batch processing system
+
+# Store the original save_file function
+
+
+original_process_file = ProcessingWorker.process_file
+
+
+# Replace it with our modified version that compresses after saving
+def process_file_with_compression(self, filepath):
+    """Modified process_file function that compresses files after saving."""
+    result = original_process_file(self, filepath)
+
+    # Check if there's a result and if we should compress it
+    if isinstance(result, dict):
+        # Single output file
+        if "processed_file" in result:
+            output_path = result["processed_file"]
+            # Check if the processed image had compression metadata
+            if (
+                hasattr(self.processing_func, "compress_after_save")
+                and self.processing_func.compress_after_save
+            ):
+                # Get compression parameters
+                remove_source = getattr(
+                    self.processing_func, "remove_source", False
+                )
+                compression_level = getattr(
+                    self.processing_func, "compression_level", 3
+                )
+
+                # Compress the file
+                success, compressed_path = compress_file(
+                    output_path, remove_source, compression_level
+                )
+
+                if success:
+                    # Update the result with the compressed file path
+                    result["processed_file"] = compressed_path
+
+        # Multiple output files
+        elif "processed_files" in result:
+            output_paths = result["processed_files"]
+            # Check if the processed image had compression metadata
+            if (
+                hasattr(self.processing_func, "compress_after_save")
+                and self.processing_func.compress_after_save
+            ):
+                # Get compression parameters
+                remove_source = getattr(
+                    self.processing_func, "remove_source", False
+                )
+                compression_level = getattr(
+                    self.processing_func, "compression_level", 3
+                )
+
+                # Compress each file
+                compressed_paths = []
+                for output_path in output_paths:
+                    success, compressed_path = compress_file(
+                        output_path, remove_source, compression_level
+                    )
+
+                    if success:
+                        compressed_paths.append(compressed_path)
+                    else:
+                        compressed_paths.append(output_path)
+
+                # Update the result with the compressed file paths
+                result["processed_files"] = compressed_paths
+
+    return result
+
+
+# Apply the monkey patch if pzstd is available
+if check_pzstd_installed():
+    ProcessingWorker.process_file = process_file_with_compression
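The `compress_file` helper can be exercised on its own; a minimal sketch, assuming `pzstd` is on the PATH (note that merely importing this module applies the `ProcessingWorker` monkey patch as a side effect):

```python
# Standalone check of compress_file; assumes pzstd is installed.
import os
import tempfile

from napari_tmidas.processing_functions.file_compression import (
    check_pzstd_installed,
    compress_file,
)

if check_pzstd_installed():
    # Write some throwaway bytes to a temporary file
    fd, path = tempfile.mkstemp(suffix=".tif")
    with os.fdopen(fd, "wb") as f:
        f.write(os.urandom(1024))

    # Compress it, keeping the source file
    ok, zst_path = compress_file(path, remove_source=False, compression_level=3)
    print(ok, zst_path)  # (True, "<path>.tif.zst") on success
```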
@@ -39,7 +39,7 @@ if SKIMAGE_AVAILABLE:
     @BatchProcessingRegistry.register(
         name="Otsu Thresholding (semantic)",
         suffix="_otsu_semantic",
-        description="Threshold image using Otsu's method",
+        description="Threshold image using Otsu's method to obtain a binary image",
     )
     def otsu_thresholding(image: np.ndarray) -> np.ndarray:
         """
@@ -54,7 +54,7 @@ if SKIMAGE_AVAILABLE:
     @BatchProcessingRegistry.register(
         name="Otsu Thresholding (instance)",
         suffix="_otsu_labels",
-        description="Threshold image using Otsu's method",
+        description="Threshold image using Otsu's method to obtain a multi-label image",
     )
     def otsu_thresholding_instance(image: np.ndarray) -> np.ndarray:
         """
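These two description changes spell out the semantic/instance distinction: Otsu thresholding yields a binary (two-class) mask, and labeling its connected components yields per-object instances. A minimal scikit-image sketch of that distinction (illustrative, not code from the plugin):

```python
# Semantic vs. instance output from the same Otsu threshold.
import numpy as np
from skimage import measure
from skimage.filters import threshold_otsu

rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)

binary = image > threshold_otsu(image)  # semantic: boolean foreground mask
labels = measure.label(binary)          # instance: one integer ID per blob
print(binary.dtype, int(labels.max()))  # bool, number of detected objects
```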
@@ -68,7 +68,7 @@ if SKIMAGE_AVAILABLE:
     @BatchProcessingRegistry.register(
         name="Manual Thresholding (8-bit)",
         suffix="_thresh",
-        description="Threshold image using a fixed threshold",
+        description="Threshold image using a fixed threshold to obtain a binary image",
         parameters={
             "threshold": {
                 "type": int,
@@ -91,9 +91,9 @@ if SKIMAGE_AVAILABLE:
 
     # remove small objects
     @BatchProcessingRegistry.register(
-        name="Remove Small
+        name="Remove Small Labels",
         suffix="_rm_small",
-        description="Remove small
+        description="Remove small labels from label images",
         parameters={
             "min_size": {
                 "type": int,
@@ -108,8 +108,27 @@ if SKIMAGE_AVAILABLE:
         image: np.ndarray, min_size: int = 100
     ) -> np.ndarray:
         """
-        Remove small
+        Remove small labels from label images
         """
         return skimage.morphology.remove_small_objects(
             image, min_size=min_size
         )
+
+
+    # binary to labels
+    @BatchProcessingRegistry.register(
+        name="Binary to Labels",
+        suffix="_labels",
+        description="Convert binary images to label images (connected components)",
+    )
+    def binary_to_labels(image: np.ndarray) -> np.ndarray:
+        """
+        Convert binary images to label images (connected components)
+        """
+        # Make a copy of the input image to avoid modifying the original
+        label_image = image.copy()
+
+        # Convert binary image to label image using connected components
+        label_image = skimage.measure.label(label_image, connectivity=2)
+
+        return label_image
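The new `binary_to_labels` function is essentially a thin wrapper around `skimage.measure.label` with full (diagonal) connectivity; a small worked example of the expected behavior:

```python
# Connected-component labeling as performed by "Binary to Labels".
import numpy as np
from skimage import measure

binary = np.array(
    [
        [1, 1, 0, 0],
        [1, 0, 0, 1],
        [0, 0, 0, 1],
    ],
    dtype=bool,
)

# connectivity=2 in 2D means diagonal neighbors join the same component
labels = measure.label(binary, connectivity=2)
print(labels)
# [[1 1 0 0]
#  [1 0 0 2]
#  [0 0 0 2]]
```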
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: napari-tmidas
-Version: 0.1.7.1
-Summary:
+Version: 0.1.8.5
+Summary: A plugin for batch processing of confocal microscopy images
 Author: Marco Meer
 Author-email: marco.meer@pm.me
 License:
@@ -58,6 +58,14 @@ Requires-Dist: magicgui
 Requires-Dist: qtpy
 Requires-Dist: scikit-image
 Requires-Dist: pyqt5
+Requires-Dist: tqdm
+Requires-Dist: scikit-image
+Requires-Dist: ome-zarr
+Requires-Dist: napari-ome-zarr
+Requires-Dist: torch
+Requires-Dist: torchvision
+Requires-Dist: timm
+Requires-Dist: opencv-python
 Provides-Extra: testing
 Requires-Dist: tox; extra == "testing"
 Requires-Dist: pytest; extra == "testing"
@@ -80,7 +88,15 @@ The `napari-tmidas` plugin consists of a growing collection of pipelines for fas
 ## Feature Overview
 
 1. **Image Processing**
-   - Process image folders with:
+   - Process image folders with:
+     - Gamma correction & histogram equalization
+     - Z-projection and channel splitting
+     - Gaussian & median filters
+     - Thresholding (Otsu/manual)
+     - Label cleaning & binary conversion
+     - RGB to labels conversion
+     - Cellpose 3.0 automated segmentation
+     - File compression (Zstandard)
 
 2. **Label Inspection**
    - Review and edit label images with auto-save
@@ -112,19 +128,27 @@ Now you can install `napari-tmidas` via [pip]:
 
     pip install napari-tmidas
 
-
+It is recommended to install the latest development version:
 
     pip install git+https://github.com/macromeer/napari-tmidas.git
 
 ### Dependencies
-To use the Batch Microscopy Image Conversion pipeline, we need some libraries to read microscopy formats and to write ome-zarr:
 
-
+To use the Batch Microscopy Image Conversion pipeline, we need some libraries to read microscopy formats:
+
+    pip install nd2 readlif tiffslide pylibCZIrw acquifer-napari
 
 For the Batch Crop Anything pipeline, we need to install MobileSAM and its dependencies:
 
     pip install git+https://github.com/ChaoningZhang/MobileSAM.git
-
+
+
+If you want to batch compress images using [Zstandard](https://github.com/facebook/zstd), use the package manager of your operating system to install it:
+
+    sudo apt-get install zstd # for Linux
+    brew install zstd # for macOS
+    choco install zstandard # for Windows
+
 
 ## Usage
 
@@ -169,9 +193,9 @@ If you have already segmented a folder full of images and now you want to maybe
 
 
 ### Crop Anything
-This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything
+This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Click the image below to see a video demo.
 
-
+[](https://youtu.be/xPh0dRD_FbE)
 
 ### ROI Colocalization
 This pipeline quantifies colocalization between labeled regions of interest (ROIs) across multiple image channels. It determines the extent of overlap between ROIs in a reference channel and those in one or two other channels. The output is a table of colocalization counts. Optionally, the size of reference channel ROIs, as well as the total or median size of colocalizing ROIs in the other channels, can be included. Colocalization is determined using Boolean masking. The number of colocalizing instances is determined by counting unique label IDs within the overlapping regions. Typically, the reference channel contains larger structures, while other channels contain smaller, potentially nested, structures. For example, the reference channel might contain cell bodies, with the second and third channels containing nuclei and sub-nuclear objects, respectively.
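To make the Boolean-masking step concrete, here is a minimal numpy sketch of how colocalizing label IDs can be counted per reference ROI (illustrative only; the arrays and names are hypothetical, not taken from the plugin code):

```python
# Count labels in a second channel overlapping each reference ROI
# via Boolean masking; names here are hypothetical.
import numpy as np

reference = np.array(
    [
        [1, 1, 0],
        [1, 1, 0],
        [0, 0, 2],
    ]
)
other = np.array(
    [
        [3, 0, 0],
        [0, 4, 0],
        [0, 0, 0],
    ]
)

for ref_id in np.unique(reference[reference > 0]):
    mask = reference == ref_id   # Boolean mask of one reference ROI
    ids = np.unique(other[mask]) # label IDs under that mask
    ids = ids[ids > 0]           # drop background
    print(ref_id, len(ids))      # ROI 1 -> 2 colocalizing ROIs, ROI 2 -> 0
```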
@@ -1,13 +1,13 @@
 napari_tmidas/__init__.py,sha256=YNBLESwk8jr_TlDdkSC1CwH0tf0CKHF1i2_efzLjdpk,589
-napari_tmidas/_crop_anything.py,sha256=
-napari_tmidas/_file_conversion.py,sha256=
-napari_tmidas/_file_selector.py,sha256=
+napari_tmidas/_crop_anything.py,sha256=NItpE6uzfeKujh8a53TDDkFN2thpKC5NGiXMpAmSnnM,45446
+napari_tmidas/_file_conversion.py,sha256=V6evJmggUwOFzJO203Y5ltboHXEWNJQckZPedGRkrLI,72203
+napari_tmidas/_file_selector.py,sha256=sZOY0QNwyAgugsEzG5pqZLfrVeCHwjPEW2C_BHndzyI,39595
 napari_tmidas/_label_inspection.py,sha256=hCxKE0zYk-qBh4ohqiZcEGLXa-3lL8p88y45p2WnE1g,7329
 napari_tmidas/_reader.py,sha256=A9_hdDxtVkVGmbOsbqgnARCSvpEh7GGPo7ylzmbnu8o,2485
 napari_tmidas/_registry.py,sha256=Oz9HFJh41MKRLeKxRuc7x7yzc-OrmoTdRFnfngFU_XE,2007
 napari_tmidas/_roi_colocalization.py,sha256=OVjdHvtFN07DgrtTX8uqbrxZL6jVwl2L3klorgW2C9k,43196
 napari_tmidas/_sample_data.py,sha256=khuv1jemz_fCjqNwEKMFf83Ju0EN4S89IKydsUMmUxw,645
-napari_tmidas/_version.py,sha256=
+napari_tmidas/_version.py,sha256=c4rCUXy1zrstQ6YUlnpyulFDT5n9rZNVdxW9dQYyGe0,516
 napari_tmidas/_widget.py,sha256=u9uf9WILAwZg_InhFyjWInY4ej1TV1a59dR8Fe3vNF8,4794
 napari_tmidas/_writer.py,sha256=wbVfHFjjHdybSg37VR4lVmL-kdCkDZsUPDJ66AVLaFQ,1941
 napari_tmidas/napari.yaml,sha256=1Am1dA0-ZtCXk6veIT6jrMz3zwQ7dF8_p9tZTFx_vTg,2641
@@ -17,13 +17,16 @@ napari_tmidas/_tests/test_sample_data.py,sha256=D1HU_C3hWpO3mlSW_7Z94xaYHDtxz0XU
 napari_tmidas/_tests/test_widget.py,sha256=I_d-Cra_CTcS0QdMItg_HMphvhj0XCx81JnFyCHk9lg,2204
 napari_tmidas/_tests/test_writer.py,sha256=4_MlZM9a5So74J16_4tIOJc6pwTOw9R0-oAE_YioIx4,122
 napari_tmidas/processing_functions/__init__.py,sha256=osXY9jSgDsrwFaS6ShPHP0wGRxMuX1mHRN9EDa9l41g,1891
-napari_tmidas/processing_functions/basic.py,sha256=
-napari_tmidas/processing_functions/
+napari_tmidas/processing_functions/basic.py,sha256=TJFvJ9AfUp7MBseUAgryLJXdqj0gSLSKqlEPxE3s1n0,6694
+napari_tmidas/processing_functions/cellpose_env_manager.py,sha256=zngS5eborsJUimFn_g1Lm_YOZk2ZNIKxceWNxOjpxEg,4885
+napari_tmidas/processing_functions/cellpose_segmentation.py,sha256=7BCHj_QA1QJEl1NrsuoIFAJ040_SCWw0U-U_xjCyk18,16187
+napari_tmidas/processing_functions/colocalization.py,sha256=AiTTVAcVhKuuHZhrj5IHwbzns7-GE6ewvFqhYy1L-do,7657
+napari_tmidas/processing_functions/file_compression.py,sha256=mxR-yqBdc-T1XI3StIXpW8h5xGdCOtLQjt8uoRFpDSY,6859
 napari_tmidas/processing_functions/scipy_filters.py,sha256=kKpDAlQQ0ZNbkt77QUWi-Bwolk6MMDvtG_bZJV3MjOo,1612
-napari_tmidas/processing_functions/skimage_filters.py,sha256=
-napari_tmidas-0.1.
-napari_tmidas-0.1.
-napari_tmidas-0.1.
-napari_tmidas-0.1.
-napari_tmidas-0.1.
-napari_tmidas-0.1.
+napari_tmidas/processing_functions/skimage_filters.py,sha256=6wSROKH71zwSFBOZ22zgp-4Nrq79GNd7znOitiH3Z3c,4030
+napari_tmidas-0.1.8.5.dist-info/licenses/LICENSE,sha256=tSjiOqj57exmEIfP2YVPCEeQf0cH49S6HheQR8IiY3g,1485
+napari_tmidas-0.1.8.5.dist-info/METADATA,sha256=bo5HfzWOBpQME6b-kkw7wkg71oQUjbU2hhN2D9lWwbA,11812
+napari_tmidas-0.1.8.5.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+napari_tmidas-0.1.8.5.dist-info/entry_points.txt,sha256=fbVjzbJTm4aDMIBtel1Lyqvq-CwXY7wmCOo_zJ-jtRY,60
+napari_tmidas-0.1.8.5.dist-info/top_level.txt,sha256=63ybdxCZ4SeT13f_Ou4TsivGV_2Gtm_pJOXToAt30_E,14
+napari_tmidas-0.1.8.5.dist-info/RECORD,,
The remaining files (entry_points.txt, licenses/LICENSE, top_level.txt) are unchanged between versions.