napari-tmidas 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
napari_tmidas/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
17
17
  __version_tuple__: VERSION_TUPLE
18
18
  version_tuple: VERSION_TUPLE
19
19
 
20
- __version__ = version = '0.1.6'
21
- __version_tuple__ = version_tuple = (0, 1, 6)
20
+ __version__ = version = '0.1.7'
21
+ __version_tuple__ = version_tuple = (0, 1, 7)
napari_tmidas/napari.yaml CHANGED
@@ -27,6 +27,12 @@ contributions:
27
27
  - id: napari-tmidas._file_conversion
28
28
  python_name: napari_tmidas._file_conversion:napari_experimental_provide_dock_widget
29
29
  title: Microscopy Image Converter
30
+ - id: napari-tmidas._crop_anything
31
+ python_name: napari_tmidas._crop_anything:batch_crop_anything_widget
32
+ title: Batch Crop Anything
33
+ - id: napari-tmidas._roi_colocalization
34
+ python_name: napari_tmidas._roi_colocalization:roi_colocalization_analyzer
35
+ title: Batch ROI Colocalization Analysis
30
36
  readers:
31
37
  - command: napari-tmidas.get_reader
32
38
  accepts_directories: false
@@ -49,3 +55,7 @@ contributions:
49
55
  display_name: Batch Label inspection
50
56
  - command: napari-tmidas._file_conversion
51
57
  display_name: Batch Microscopy Image Conversion
58
+ - command: napari-tmidas._crop_anything
59
+ display_name: Batch Crop Anything
60
+ - command: napari-tmidas._roi_colocalization
61
+ display_name: Batch ROI Colocalization Analysis
@@ -40,3 +40,86 @@ def gamma_correction(image: np.ndarray, gamma: float = 1.0) -> np.ndarray:
40
40
 
41
41
  # Scale back to original range and dtype
42
42
  return (corrected * max_val).clip(0, max_val).astype(image.dtype)
43
+
44
+
45
+ @BatchProcessingRegistry.register(
46
+ name="Max Z Projection",
47
+ suffix="_max_z",
48
+ description="Maximum intensity projection along the z-axis",
49
+ parameters={},
50
+ )
51
+ def max_z_projection(image: np.ndarray) -> np.ndarray:
52
+ """
53
+ Maximum intensity projection along the z-axis
54
+ """
55
+ # Determine maximum value based on dtype
56
+ max_val = (
57
+ np.iinfo(image.dtype).max
58
+ if np.issubdtype(image.dtype, np.integer)
59
+ else 1.0
60
+ )
61
+
62
+ # Normalize image to [0, 1]
63
+ normalized = image.astype(np.float32) / max_val
64
+
65
+ # Apply max z projection
66
+ projection = np.max(normalized, axis=0)
67
+
68
+ # Scale back to original range and dtype
69
+ return (projection * max_val).clip(0, max_val).astype(image.dtype)
70
+
71
+
72
+ @BatchProcessingRegistry.register(
73
+ name="Split Channels",
74
+ suffix="_split_channels",
75
+ description="Splits the color channels of the image",
76
+ parameters={
77
+ "num_channels": {
78
+ "type": "integer",
79
+ "default": 3,
80
+ "description": "Number of color channels in the image",
81
+ }
82
+ },
83
+ )
84
+ def split_channels(image: np.ndarray, num_channels: int = 3) -> np.ndarray:
85
+ """
86
+ Split the image into separate channels based on the specified number of channels.
87
+
88
+ Args:
89
+ image: Input image array (at least 3D: XYC or higher dimensions)
90
+ num_channels: Number of channels in the image (default: 3)
91
+
92
+ Returns:
93
+ Stacked array of channels with shape (num_channels, ...)
94
+ """
95
+ # Validate input
96
+ if image.ndim < 3:
97
+ raise ValueError(
98
+ "Input must be an array with at least 3 dimensions (XYC or higher)"
99
+ )
100
+
101
+ print(f"Image shape: {image.shape}")
102
+ num_channels = int(num_channels)
103
+ # Identify the channel axis
104
+ possible_axes = [
105
+ axis
106
+ for axis, dim_size in enumerate(image.shape)
107
+ if dim_size == num_channels
108
+ ]
109
+ # print(f"Possible axes: {possible_axes}")
110
+ if len(possible_axes) != 1:
111
+
112
+ raise ValueError(
113
+ f"Could not uniquely identify a channel axis with {num_channels} channels. "
114
+ f"Found {len(possible_axes)} possible axes: {possible_axes}. "
115
+ f"Image shape: {image.shape}"
116
+ )
117
+
118
+ channel_axis = possible_axes[0]
119
+ print(f"Channel axis identified: {channel_axis}")
120
+
121
+ # Split and process channels
122
+ channels = np.split(image, num_channels, axis=channel_axis)
123
+ # channels = [np.squeeze(ch, axis=channel_axis) for ch in channels]
124
+
125
+ return np.stack(channels, axis=0)
@@ -0,0 +1,242 @@
1
+ """
2
+ ROI Colocalization Processing Function
3
+
4
+ This module provides a function for batch processing to analyze colocalization
5
+ between multiple labeled regions in image stacks.
6
+
7
+ The function accepts a multi-channel input image with labeled regions and
8
+ returns statistics about their colocalization.
9
+ """
10
+
11
+ import numpy as np
12
+ from skimage import measure
13
+
14
+ from napari_tmidas._registry import BatchProcessingRegistry
15
+
16
+
17
+ def get_nonzero_labels(image):
18
+ """Get unique, non-zero labels from an image."""
19
+ mask = image != 0
20
+ labels = np.unique(image[mask])
21
+ return [int(x) for x in labels]
22
+
23
+
24
+ def count_unique_nonzero(array, mask):
25
+ """Count unique non-zero values in array where mask is True."""
26
+ unique_vals = np.unique(array[mask])
27
+ count = len(unique_vals)
28
+
29
+ # Remove 0 from count if present
30
+ if count > 0 and 0 in unique_vals:
31
+ count -= 1
32
+
33
+ return count
34
+
35
+
36
+ def calculate_coloc_size(
37
+ image_c1, image_c2, label_id, mask_c2=None, image_c3=None
38
+ ):
39
+ """Calculate the size of colocalization between channels."""
40
+ # Create mask for current ROI
41
+ mask = image_c1 == int(label_id)
42
+
43
+ # Handle mask_c2 parameter
44
+ if mask_c2 is not None:
45
+ if mask_c2:
46
+ # sizes where c2 is present
47
+ mask = mask & (image_c2 != 0)
48
+ target_image = image_c3 if image_c3 is not None else image_c2
49
+ else:
50
+ # sizes where c2 is NOT present
51
+ mask = mask & (image_c2 == 0)
52
+ if image_c3 is None:
53
+ # If no image_c3, just return count of mask pixels
54
+ return np.count_nonzero(mask)
55
+ target_image = image_c3
56
+ else:
57
+ target_image = image_c2
58
+
59
+ # Calculate size of overlap
60
+ masked_image = target_image * mask
61
+ size = np.count_nonzero(masked_image)
62
+
63
+ return int(size)
64
+
65
+
66
+ def process_single_roi(
67
+ label_id,
68
+ image_c1,
69
+ image_c2,
70
+ image_c3=None,
71
+ get_sizes=False,
72
+ roi_sizes=None,
73
+ ):
74
+ """Process a single ROI for colocalization analysis."""
75
+ # Create masks once
76
+ mask_roi = image_c1 == label_id
77
+ mask_c2 = image_c2 != 0
78
+
79
+ # Calculate counts
80
+ c2_in_c1_count = count_unique_nonzero(image_c2, mask_roi & mask_c2)
81
+
82
+ # Build the result dictionary
83
+ result = {"label_id": int(label_id), "ch2_in_ch1_count": c2_in_c1_count}
84
+
85
+ # Add size information if requested
86
+ if get_sizes:
87
+ if roi_sizes is None:
88
+ roi_sizes = {}
89
+ # Calculate sizes for current label only
90
+ area = np.sum(mask_roi)
91
+ roi_sizes[label_id] = area
92
+
93
+ size = roi_sizes.get(int(label_id), 0)
94
+ c2_in_c1_size = calculate_coloc_size(image_c1, image_c2, label_id)
95
+
96
+ result.update({"ch1_size": size, "ch2_in_ch1_size": c2_in_c1_size})
97
+
98
+ # Handle third channel if present
99
+ if image_c3 is not None:
100
+ mask_c3 = image_c3 != 0
101
+
102
+ # Calculate third channel statistics
103
+ c3_in_c2_in_c1_count = count_unique_nonzero(
104
+ image_c3, mask_roi & mask_c2 & mask_c3
105
+ )
106
+ c3_not_in_c2_but_in_c1_count = count_unique_nonzero(
107
+ image_c3, mask_roi & ~mask_c2 & mask_c3
108
+ )
109
+
110
+ result.update(
111
+ {
112
+ "ch3_in_ch2_in_ch1_count": c3_in_c2_in_c1_count,
113
+ "ch3_not_in_ch2_but_in_ch1_count": c3_not_in_c2_but_in_c1_count,
114
+ }
115
+ )
116
+
117
+ # Add size information for third channel if requested
118
+ if get_sizes:
119
+ c3_in_c2_in_c1_size = calculate_coloc_size(
120
+ image_c1, image_c2, label_id, mask_c2=True, image_c3=image_c3
121
+ )
122
+ c3_not_in_c2_but_in_c1_size = calculate_coloc_size(
123
+ image_c1, image_c2, label_id, mask_c2=False, image_c3=image_c3
124
+ )
125
+
126
+ result.update(
127
+ {
128
+ "ch3_in_ch2_in_ch1_size": c3_in_c2_in_c1_size,
129
+ "ch3_not_in_ch2_but_in_ch1_size": c3_not_in_c2_but_in_c1_size,
130
+ }
131
+ )
132
+
133
+ return result
134
+
135
+
136
+ @BatchProcessingRegistry.register(
137
+ name="ROI Colocalization",
138
+ suffix="_coloc",
139
+ description="Analyze colocalization between ROIs in multiple channel label images",
140
+ parameters={
141
+ "get_sizes": {
142
+ "type": bool,
143
+ "default": False,
144
+ "description": "Calculate size statistics",
145
+ },
146
+ "size_method": {
147
+ "type": str,
148
+ "default": "median",
149
+ "description": "Method for size calculation (median or sum)",
150
+ },
151
+ },
152
+ )
153
+ def roi_colocalization(image, get_sizes=False, size_method="median"):
154
+ """
155
+ Calculate colocalization between channels for a multi-channel label image.
156
+
157
+ This function takes a multi-channel image where each channel contains
158
+ labeled objects (segmentation masks). It analyzes how objects in one channel
159
+ overlap with objects in the other channels, and returns detailed statistics
160
+ about their colocalization relationships.
161
+
162
+ Parameters:
163
+ -----------
164
+ image : numpy.ndarray
165
+ Input image array, should have shape corresponding to a multichannel
166
+ label image (e.g., [n_channels, height, width]).
167
+ get_sizes : bool, optional
168
+ Whether to calculate size statistics for overlapping regions.
169
+ size_method : str, optional
170
+ Method for calculating size statistics ('median' or 'sum').
171
+
172
+ Returns:
173
+ --------
174
+ numpy.ndarray
175
+ Multi-channel array with colocalization results
176
+ """
177
+ # Ensure image is a stack of label images (assume first dimension is channels)
178
+ if image.ndim < 3:
179
+ # Handle single channel image - not enough for colocalization
180
+ print("Input must have multiple channels for colocalization analysis")
181
+ # Return a copy of the input with markings
182
+ return image.copy()
183
+
184
+ # Extract channels
185
+ channels = [image[i] for i in range(min(3, image.shape[0]))]
186
+ n_channels = len(channels)
187
+
188
+ if n_channels < 2:
189
+ print("Need at least 2 channels for colocalization analysis")
190
+ return image.copy()
191
+
192
+ # Assign channels
193
+ image_c1, image_c2 = channels[:2]
194
+ image_c3 = channels[2] if n_channels > 2 else None
195
+
196
+ # Get unique label IDs in image_c1
197
+ label_ids = get_nonzero_labels(image_c1)
198
+
199
+ # Process each label
200
+ results = []
201
+ roi_sizes = {}
202
+
203
+ # Pre-calculate sizes for image_c1 if needed
204
+ if get_sizes:
205
+ for prop in measure.regionprops(image_c1.astype(np.uint32)):
206
+ label = int(prop.label)
207
+ roi_sizes[label] = int(prop.area)
208
+
209
+ for label_id in label_ids:
210
+ result = process_single_roi(
211
+ label_id, image_c1, image_c2, image_c3, get_sizes, roi_sizes
212
+ )
213
+ results.append(result)
214
+
215
+ # Create a new multi-channel output image with colocalization results
216
+ # Each channel will highlight different colocalization results
217
+ out_shape = image_c1.shape
218
+
219
+ # For 2 channels: [original ch1, ch2 overlap]
220
+ # For 3 channels: [original ch1, ch2 overlap, ch3 overlap]
221
+ output_channels = n_channels
222
+
223
+ # Create output array
224
+ output = np.zeros((output_channels,) + out_shape, dtype=np.uint32)
225
+
226
+ # Fill first channel with original labels
227
+ output[0] = image_c1
228
+
229
+ # Fill second channel with ch1 labels where ch2 overlaps
230
+ for label_id in label_ids:
231
+ mask = (image_c1 == label_id) & (image_c2 != 0)
232
+ if np.any(mask):
233
+ output[1][mask] = label_id
234
+
235
+ # Fill third channel with ch1 labels where ch3 overlaps (if applicable)
236
+ if image_c3 is not None and output_channels > 2:
237
+ for label_id in label_ids:
238
+ mask = (image_c1 == label_id) & (image_c3 != 0)
239
+ if np.any(mask):
240
+ output[2][mask] = label_id
241
+
242
+ return output
@@ -20,38 +20,20 @@ from napari_tmidas._registry import BatchProcessingRegistry
20
20
 
21
21
  if SKIMAGE_AVAILABLE:
22
22
 
23
- # @BatchProcessingRegistry.register(
24
- # name="Adaptive Histogram Equalization",
25
- # suffix="_clahe",
26
- # description="Enhance contrast using Contrast Limited Adaptive Histogram Equalization",
27
- # parameters={
28
- # "kernel_size": {
29
- # "type": int,
30
- # "default": 8,
31
- # "min": 4,
32
- # "max": 64,
33
- # "description": "Size of local region for histogram equalization",
34
- # },
35
- # "clip_limit": {
36
- # "type": float,
37
- # "default": 0.01,
38
- # "min": 0.001,
39
- # "max": 0.1,
40
- # "description": "Clipping limit for contrast enhancement",
41
- # },
42
- # },
43
- # )
44
- # def adaptive_hist_eq(
45
- # image: np.ndarray, kernel_size: int = 8, clip_limit: float = 0.01
46
- # ) -> np.ndarray:
47
- # """
48
- # Apply Contrast Limited Adaptive Histogram Equalization
49
- # """
50
- # # CLAHE expects image in [0, 1] range
51
- # img_norm = skimage.exposure.rescale_intensity(image, out_range=(0, 1))
52
- # return skimage.exposure.equalize_adapthist(
53
- # img_norm, kernel_size=kernel_size, clip_limit=clip_limit
54
- # )
23
+ # Equalize histogram
24
+ @BatchProcessingRegistry.register(
25
+ name="Equalize Histogram",
26
+ suffix="_equalized",
27
+ description="Equalize histogram of image",
28
+ )
29
+ def equalize_histogram(
30
+ image: np.ndarray, clip_limit: float = 0.01
31
+ ) -> np.ndarray:
32
+ """
33
+ Equalize histogram of image
34
+ """
35
+
36
+ return skimage.exposure.equalize_hist(image)
55
37
 
56
38
  # simple otsu thresholding
57
39
  @BatchProcessingRegistry.register(
@@ -63,6 +45,8 @@ if SKIMAGE_AVAILABLE:
63
45
  """
64
46
  Threshold image using Otsu's method
65
47
  """
48
+
49
+ image = skimage.img_as_ubyte(image) # convert to 8-bit
66
50
  thresh = skimage.filters.threshold_otsu(image)
67
51
  return (image > thresh).astype(np.uint32)
68
52
 
@@ -76,6 +60,7 @@ if SKIMAGE_AVAILABLE:
76
60
  """
77
61
  Threshold image using Otsu's method
78
62
  """
63
+ image = skimage.img_as_ubyte(image) # convert to 8-bit
79
64
  thresh = skimage.filters.threshold_otsu(image)
80
65
  return skimage.measure.label(image > thresh).astype(np.uint32)
81
66
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: napari-tmidas
3
- Version: 0.1.6
3
+ Version: 0.1.7
4
4
  Summary: Tissue Microscopy Image Data Analysis Suite
5
5
  Author: Marco Meer
6
6
  Author-email: marco.meer@pm.me
@@ -75,11 +75,31 @@ Dynamic: license-file
75
75
  [![tests](https://github.com/macromeer/napari-tmidas/workflows/tests/badge.svg)](https://github.com/macromeer/napari-tmidas/actions)
76
76
  [![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-tmidas)](https://napari-hub.org/plugins/napari-tmidas)
77
77
  <!-- [![codecov](https://codecov.io/gh/macromeer/napari-tmidas/branch/main/graph/badge.svg)](https://codecov.io/gh/macromeer/napari-tmidas) -->
78
- This Napari plugin allows you to perform batch image processing without a graphics processing unit (GPU). It will still be fast because computations will run in parallel on your central processing unit (CPU).
78
+ The `napari-tmidas` plugin consists of a growing collection of pipelines for fast batch processing of microscopy images. It is a work in progress, based on the CLI version of [T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
79
79
 
80
- This plugin provides you with a growing collection of pipelines for batch image preprocessing, segmentation, regions-of-interest (ROI) analysis and other useful features.
80
+ ## Feature Overview
81
+
82
+ ### Current Pipelines
83
+ 1. **Batch Image Processing**
84
+ - Process image folders with: Gamma correction, Z-projection, channel splitting, Gaussian/median filters, thresholding (Otsu/manual), and label cleaning
85
+
86
+ 2. **Batch Label Inspection**
87
+ - Review and edit label images with auto-save
88
+
89
+ 3. **Batch Microscopy Image Conversion**
90
+ - Convert .nd2/.lif/.ndpi/.czi/acquifer → .tif/.zarr with metadata preservation
91
+
92
+ 4. **Batch Crop Anything**
93
+ - Interactive ROI selection via click interface
94
+
95
+ 5. **Batch ROI Colocalization**
96
+ - Count colocalized labels across multiple channels
97
+
98
+
99
+
100
+ ### Coming Soon
101
+ New features arriving April 2025.
81
102
 
82
- `napari-tmidas` is a work in progress (WIP) and an evolutionary step away from the [terminal / command-line version of T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
83
103
 
84
104
  ## Installation
85
105
 
@@ -98,29 +118,35 @@ To install the latest development version:
98
118
  pip install git+https://github.com/macromeer/napari-tmidas.git
99
119
 
100
120
  ### Dependencies
101
- For the File converter, we need some libraries to read some microscopy formats and to write ome-zarr:
121
+ To use the Batch Microscopy Image Conversion pipeline, we need some libraries to read microscopy formats and to write ome-zarr:
122
+
123
+ pip install nd2 readlif tiffslide pylibCZIrw acquifer-napari ome-zarr napari-ome-zarr
102
124
 
103
- pip install nd2 readlif tiffslide pylibCZIrw acquifer-napari ome-zarr
125
+ For the Batch Crop Anything pipeline, we need to install MobileSAM and its dependencies:
104
126
 
127
+ pip install git+https://github.com/ChaoningZhang/MobileSAM.git
128
+ pip install torch torchvision timm opencv-python
105
129
 
106
130
  ## Usage
107
131
 
132
+ To use the plugin, start napari in the activated virtual environment with this terminal command:
133
+
134
+ mamba run -n napari-tmidas napari
135
+
108
136
  You can find the installed plugin here:
109
137
 
110
138
  ![image](https://github.com/user-attachments/assets/504db09a-d66e-49eb-90cd-3237024d9d7a)
111
139
 
112
140
 
113
- ### File converter
141
+ ### Batch Microscopy Image Conversion
114
142
 
115
- You might first want to batch convert microscopy image data. Currently, this plugin supports `.nd2, .lif, .ndpi, .czi` and acquifer data. After launching the file converter, you can scan a folder of your choice for microscopy image data. It will also detect series images that you can preview. Start by selecting an original image in the first column of the table. This allows you to preview or convert.
143
+ You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conversion`. Currently, this pipeline supports the conversion of `.nd2, .lif, .ndpi, .czi` and acquifer data. After scanning a folder of your choice for microscopy image data, select a file in the first column of the table and preview and export any image data it contains.
116
144
 
117
145
  ![image](https://github.com/user-attachments/assets/e377ca71-2f30-447d-825e-d2feebf7061b)
118
146
 
147
+ ### Batch File Processing
119
148
 
120
-
121
- ### File inspector
122
-
123
- 1. After opening `Plugins > T-MIDAS > File selector`, enter the path to the folder containing the images to be processed (currently supports TIF, later also ZARR). You can also filter for filename suffix.
149
+ 1. After opening `Plugins > T-MIDAS > Batch Image Processing`, enter the path to the folder containing the images to be processed (currently supports TIF, later also ZARR). You can also filter for filename suffix.
124
150
 
125
151
  ![image](https://github.com/user-attachments/assets/41ecb689-9abe-4371-83b5-9c5eb37069f9)
126
152
 
@@ -138,11 +164,15 @@ You might first want to batch convert microscopy image data. Currently, this plu
138
164
 
139
165
  Note that whenever you click on an `Original File` or `Processed File` in the table, it will replace the one that is currently shown in the viewer. So naturally, you'd first select the original image, and then the processed image to correctly see the image pair that you want to inspect.
140
166
 
141
- ### Label inspector
142
- If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Label inspector`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).
167
+ ### Batch Label Inspection
168
+ If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Batch Label Inspection`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).
143
169
 
144
170
  ![image](https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e)
145
171
 
172
+ ### Batch Crop Anything
173
+ This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`.
174
+
175
+ ![image](https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443)
146
176
 
147
177
 
148
178
  ## Contributing
@@ -0,0 +1,29 @@
1
+ napari_tmidas/__init__.py,sha256=YNBLESwk8jr_TlDdkSC1CwH0tf0CKHF1i2_efzLjdpk,589
2
+ napari_tmidas/_crop_anything.py,sha256=UxC0FbktgBPvxNMJtpXEATzIk0UFUO1DqRfrKy7bf30,39982
3
+ napari_tmidas/_file_conversion.py,sha256=RnEOVavzApkeJfYb0_TmH6KWca0kpgxHZ517E65OQVI,71398
4
+ napari_tmidas/_file_selector.py,sha256=8Plkoofa9nG5hSyOpQd6qjZlc8sFgBErnTwT5C24stg,34993
5
+ napari_tmidas/_label_inspection.py,sha256=hCxKE0zYk-qBh4ohqiZcEGLXa-3lL8p88y45p2WnE1g,7329
6
+ napari_tmidas/_reader.py,sha256=A9_hdDxtVkVGmbOsbqgnARCSvpEh7GGPo7ylzmbnu8o,2485
7
+ napari_tmidas/_registry.py,sha256=Oz9HFJh41MKRLeKxRuc7x7yzc-OrmoTdRFnfngFU_XE,2007
8
+ napari_tmidas/_roi_colocalization.py,sha256=OVjdHvtFN07DgrtTX8uqbrxZL6jVwl2L3klorgW2C9k,43196
9
+ napari_tmidas/_sample_data.py,sha256=khuv1jemz_fCjqNwEKMFf83Ju0EN4S89IKydsUMmUxw,645
10
+ napari_tmidas/_version.py,sha256=W_EoL8cAL4KhujvbYWEpb9NqRLbbrH0T024lJvRRWHI,511
11
+ napari_tmidas/_widget.py,sha256=u9uf9WILAwZg_InhFyjWInY4ej1TV1a59dR8Fe3vNF8,4794
12
+ napari_tmidas/_writer.py,sha256=wbVfHFjjHdybSg37VR4lVmL-kdCkDZsUPDJ66AVLaFQ,1941
13
+ napari_tmidas/napari.yaml,sha256=1Am1dA0-ZtCXk6veIT6jrMz3zwQ7dF8_p9tZTFx_vTg,2641
14
+ napari_tmidas/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
15
+ napari_tmidas/_tests/test_reader.py,sha256=gN_2StATLZYUL56X27ImJTVru_qSoFiY4vtgajcx3H0,975
16
+ napari_tmidas/_tests/test_sample_data.py,sha256=D1HU_C3hWpO3mlSW_7Z94xaYHDtxz0XUrMjQoYop9Ag,104
17
+ napari_tmidas/_tests/test_widget.py,sha256=I_d-Cra_CTcS0QdMItg_HMphvhj0XCx81JnFyCHk9lg,2204
18
+ napari_tmidas/_tests/test_writer.py,sha256=4_MlZM9a5So74J16_4tIOJc6pwTOw9R0-oAE_YioIx4,122
19
+ napari_tmidas/processing_functions/__init__.py,sha256=osXY9jSgDsrwFaS6ShPHP0wGRxMuX1mHRN9EDa9l41g,1891
20
+ napari_tmidas/processing_functions/basic.py,sha256=5v-_nRQK1Dssu8f3LFJIW73urmTEm3BX8lf9lrb9BBM,3690
21
+ napari_tmidas/processing_functions/colocalization.py,sha256=O0-gJFVq62lfpOWNu0bJVYKZcsO6Z3HG6FsFkYho_j4,7684
22
+ napari_tmidas/processing_functions/scipy_filters.py,sha256=kKpDAlQQ0ZNbkt77QUWi-Bwolk6MMDvtG_bZJV3MjOo,1612
23
+ napari_tmidas/processing_functions/skimage_filters.py,sha256=kOwdYVUUqIYz9h-EBrYu0sFsmKkmqoTegkDoDywFwmE,3373
24
+ napari_tmidas-0.1.7.dist-info/licenses/LICENSE,sha256=tSjiOqj57exmEIfP2YVPCEeQf0cH49S6HheQR8IiY3g,1485
25
+ napari_tmidas-0.1.7.dist-info/METADATA,sha256=I9BacR_RRXFooM5PqbTaLb-zwf8OEfw9DJkM974s59I,10174
26
+ napari_tmidas-0.1.7.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
27
+ napari_tmidas-0.1.7.dist-info/entry_points.txt,sha256=fbVjzbJTm4aDMIBtel1Lyqvq-CwXY7wmCOo_zJ-jtRY,60
28
+ napari_tmidas-0.1.7.dist-info/top_level.txt,sha256=63ybdxCZ4SeT13f_Ou4TsivGV_2Gtm_pJOXToAt30_E,14
29
+ napari_tmidas-0.1.7.dist-info/RECORD,,
@@ -1,26 +0,0 @@
1
- napari_tmidas/__init__.py,sha256=Z9mznblUlUsRyH3d4k8SxUo4iXLMwJXURbq41QzhPpo,459
2
- napari_tmidas/_file_conversion.py,sha256=Al59izNadYiqYmG-JT4TxXNifMt796x5yrvQbLc-8Eo,62547
3
- napari_tmidas/_file_selector.py,sha256=XLbqeQ4fG86gLHgmPZzrcmMTir5gpneO32KumJY8ZbM,27369
4
- napari_tmidas/_label_inspection.py,sha256=5p0heCX1xCQVYDGHe_R2gPwbZpl6sXIqLBwqbZLJKqo,6983
5
- napari_tmidas/_reader.py,sha256=A9_hdDxtVkVGmbOsbqgnARCSvpEh7GGPo7ylzmbnu8o,2485
6
- napari_tmidas/_registry.py,sha256=Oz9HFJh41MKRLeKxRuc7x7yzc-OrmoTdRFnfngFU_XE,2007
7
- napari_tmidas/_sample_data.py,sha256=khuv1jemz_fCjqNwEKMFf83Ju0EN4S89IKydsUMmUxw,645
8
- napari_tmidas/_version.py,sha256=ESbJO0YD7TYfOUv_WDIJJgWELGepEWsoyhqVifEcXPA,511
9
- napari_tmidas/_widget.py,sha256=u9uf9WILAwZg_InhFyjWInY4ej1TV1a59dR8Fe3vNF8,4794
10
- napari_tmidas/_writer.py,sha256=wbVfHFjjHdybSg37VR4lVmL-kdCkDZsUPDJ66AVLaFQ,1941
11
- napari_tmidas/napari.yaml,sha256=mY40xnwVygEjHpYGVVtGT2rkoUj-lNHsAg5y51yZbfE,2135
12
- napari_tmidas/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
- napari_tmidas/_tests/test_reader.py,sha256=gN_2StATLZYUL56X27ImJTVru_qSoFiY4vtgajcx3H0,975
14
- napari_tmidas/_tests/test_sample_data.py,sha256=D1HU_C3hWpO3mlSW_7Z94xaYHDtxz0XUrMjQoYop9Ag,104
15
- napari_tmidas/_tests/test_widget.py,sha256=I_d-Cra_CTcS0QdMItg_HMphvhj0XCx81JnFyCHk9lg,2204
16
- napari_tmidas/_tests/test_writer.py,sha256=4_MlZM9a5So74J16_4tIOJc6pwTOw9R0-oAE_YioIx4,122
17
- napari_tmidas/processing_functions/__init__.py,sha256=osXY9jSgDsrwFaS6ShPHP0wGRxMuX1mHRN9EDa9l41g,1891
18
- napari_tmidas/processing_functions/basic.py,sha256=m_Q1LwwmQ8Nto2eM7SbMw2o1wolbTr9ZqCnYzxEZy7I,1182
19
- napari_tmidas/processing_functions/scipy_filters.py,sha256=kKpDAlQQ0ZNbkt77QUWi-Bwolk6MMDvtG_bZJV3MjOo,1612
20
- napari_tmidas/processing_functions/skimage_filters.py,sha256=IsfMJTtd9Vwb47UlTEcLlXYv2CX6uTARV1CZCHY0HBw,4094
21
- napari_tmidas-0.1.6.dist-info/licenses/LICENSE,sha256=tSjiOqj57exmEIfP2YVPCEeQf0cH49S6HheQR8IiY3g,1485
22
- napari_tmidas-0.1.6.dist-info/METADATA,sha256=1SE1ALvJTwtYIBbDX0qRNFk028P3pn-38ElO2ZRDEEI,9114
23
- napari_tmidas-0.1.6.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
24
- napari_tmidas-0.1.6.dist-info/entry_points.txt,sha256=fbVjzbJTm4aDMIBtel1Lyqvq-CwXY7wmCOo_zJ-jtRY,60
25
- napari_tmidas-0.1.6.dist-info/top_level.txt,sha256=63ybdxCZ4SeT13f_Ou4TsivGV_2Gtm_pJOXToAt30_E,14
26
- napari_tmidas-0.1.6.dist-info/RECORD,,