napari-tmidas 0.1.4__tar.gz → 0.1.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/PKG-INFO +10 -6
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/README.md +7 -4
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_file_conversion.py +398 -169
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_version.py +2 -2
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/napari.yaml +6 -6
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas.egg-info/PKG-INFO +10 -6
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/tox.ini +8 -1
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/.github/dependabot.yml +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/.github/workflows/test_and_deploy.yml +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/.gitignore +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/.napari-hub/DESCRIPTION.md +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/.napari-hub/config.yml +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/.pre-commit-config.yaml +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/LICENSE +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/MANIFEST.in +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/pyproject.toml +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/setup.cfg +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/__init__.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_file_selector.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_label_inspection.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_reader.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_registry.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_sample_data.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_tests/__init__.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_tests/test_reader.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_tests/test_sample_data.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_tests/test_widget.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_tests/test_writer.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_widget.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_writer.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/processing_functions/__init__.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/processing_functions/basic.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/processing_functions/scipy_filters.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/processing_functions/skimage_filters.py +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas.egg-info/SOURCES.txt +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas.egg-info/dependency_links.txt +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas.egg-info/entry_points.txt +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas.egg-info/requires.txt +0 -0
- {napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas.egg-info/top_level.txt +0 -0
{napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/PKG-INFO
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: napari-tmidas
-Version: 0.1.4
+Version: 0.1.6
 Summary: Tissue Microscopy Image Data Analysis Suite
 Author: Marco Meer
 Author-email: marco.meer@pm.me
@@ -65,6 +65,7 @@ Requires-Dist: pytest-cov; extra == "testing"
 Requires-Dist: pytest-qt; extra == "testing"
 Requires-Dist: napari; extra == "testing"
 Requires-Dist: pyqt5; extra == "testing"
+Dynamic: license-file
 
 # napari-tmidas
 
@@ -74,14 +75,17 @@ Requires-Dist: pyqt5; extra == "testing"
 [](https://github.com/macromeer/napari-tmidas/actions)
 [](https://napari-hub.org/plugins/napari-tmidas)
 <!-- [](https://codecov.io/gh/macromeer/napari-tmidas) -->
+This Napari plugin allows you to perform batch image processing without a graphics processing unit (GPU). It will still be fast because computations will run in parallel on your central processing unit (CPU).
 
-
+This plugin provides you with a growing collection of pipelines for batch image preprocessing, segmentation, regions-of-interest (ROI) analysis and other useful features.
+
+`napari-tmidas` is a work in progress (WIP) and an evolutionary step away from the [terminal / command-line version of T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
 
 ## Installation
 
 First install Napari in a virtual environment:
 
-mamba create -y -n napari-tmidas -c conda-forge python=3.11
+mamba create -y -n napari-tmidas -c conda-forge python=3.11 tqdm
 mamba activate napari-tmidas
 python -m pip install "napari[all]"
 
@@ -96,7 +100,7 @@ To install the latest development version:
 ### Dependencies
 For the File converter, we need some libraries to read some microscopy formats and to write ome-zarr:
 
-pip install nd2 readlif tiffslide pylibCZIrw ome-zarr
+pip install nd2 readlif tiffslide pylibCZIrw acquifer-napari ome-zarr
 
 
 ## Usage
@@ -108,7 +112,7 @@ You can find the installed plugin here:
 
 ### File converter
 
-You might first want to batch convert microscopy image data. Currently, this plugin supports `.nd2, .lif, .ndpi, .czi
+You might first want to batch convert microscopy image data. Currently, this plugin supports `.nd2, .lif, .ndpi, .czi` and acquifer data. After launching the file converter, you can scan a folder of your choice for microscopy image data. It will also detect series images that you can preview. Start by selecting an original image in the first column of the table. This allows you to preview or convert.
 
 
 
{napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/README.md
@@ -6,14 +6,17 @@
 [](https://github.com/macromeer/napari-tmidas/actions)
 [](https://napari-hub.org/plugins/napari-tmidas)
 <!-- [](https://codecov.io/gh/macromeer/napari-tmidas) -->
+This Napari plugin allows you to perform batch image processing without a graphics processing unit (GPU). It will still be fast because computations will run in parallel on your central processing unit (CPU).
 
-
+This plugin provides you with a growing collection of pipelines for batch image preprocessing, segmentation, regions-of-interest (ROI) analysis and other useful features.
+
+`napari-tmidas` is a work in progress (WIP) and an evolutionary step away from the [terminal / command-line version of T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
 
 ## Installation
 
 First install Napari in a virtual environment:
 
-mamba create -y -n napari-tmidas -c conda-forge python=3.11
+mamba create -y -n napari-tmidas -c conda-forge python=3.11 tqdm
 mamba activate napari-tmidas
 python -m pip install "napari[all]"
 
@@ -28,7 +31,7 @@ To install the latest development version:
 ### Dependencies
 For the File converter, we need some libraries to read some microscopy formats and to write ome-zarr:
 
-pip install nd2 readlif tiffslide pylibCZIrw ome-zarr
+pip install nd2 readlif tiffslide pylibCZIrw acquifer-napari ome-zarr
 
 
 ## Usage
@@ -40,7 +43,7 @@ You can find the installed plugin here:
 
 ### File converter
 
-You might first want to batch convert microscopy image data. Currently, this plugin supports `.nd2, .lif, .ndpi, .czi
+You might first want to batch convert microscopy image data. Currently, this plugin supports `.nd2, .lif, .ndpi, .czi` and acquifer data. After launching the file converter, you can scan a folder of your choice for microscopy image data. It will also detect series images that you can preview. Start by selecting an original image in the first column of the table. This allows you to preview or convert.
 
 
 
{napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/_file_conversion.py
@@ -131,6 +131,11 @@ class SeriesDetailWidget(QWidget):
         self.series_selector = QComboBox()
         layout.addWidget(self.series_selector)
 
+        # Add "Export All Series" checkbox
+        self.export_all_checkbox = QCheckBox("Export All Series")
+        self.export_all_checkbox.toggled.connect(self.toggle_export_all)
+        layout.addWidget(self.export_all_checkbox)
+
         # Connect series selector
         self.series_selector.currentIndexChanged.connect(self.series_selected)
 
@@ -143,11 +148,30 @@ class SeriesDetailWidget(QWidget):
         self.info_label = QLabel("")
         layout.addWidget(self.info_label)
 
+    def toggle_export_all(self, checked):
+        """Handle toggle of export all checkbox"""
+        if self.current_file and checked:
+            # Disable series selector when exporting all
+            self.series_selector.setEnabled(not checked)
+            # Update parent with export all setting
+            self.parent.set_export_all_series(self.current_file, checked)
+        elif self.current_file:
+            # Re-enable series selector
+            self.series_selector.setEnabled(True)
+            # Update parent with currently selected series only
+            self.series_selected(self.series_selector.currentIndex())
+            # Update parent to not export all
+            self.parent.set_export_all_series(self.current_file, False)
+
     def set_file(self, filepath: str):
         """Set the current file and update series list"""
         self.current_file = filepath
         self.series_selector.clear()
 
+        # Reset export all checkbox
+        self.export_all_checkbox.setChecked(False)
+        self.series_selector.setEnabled(True)
+
         # Try to get series information
         file_loader = self.parent.get_file_loader(filepath)
         if file_loader:
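The `toggle_export_all` slot above is driven by Qt's `toggled(bool)` signal. A minimal, self-contained sketch of the same wiring outside the plugin — widget names here are made up, and `qtpy` is assumed as the Qt binding, as is common for napari plugins:

```python
from qtpy.QtWidgets import QApplication, QCheckBox, QComboBox, QVBoxLayout, QWidget

app = QApplication([])

panel = QWidget()
layout = QVBoxLayout(panel)
series_selector = QComboBox()
export_all = QCheckBox("Export All Series")

# toggled(bool) delivers the new checked state to the slot:
# checking the box disables per-series selection, unchecking re-enables it.
export_all.toggled.connect(lambda checked: series_selector.setEnabled(not checked))

layout.addWidget(series_selector)
layout.addWidget(export_all)
panel.show()
```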
@@ -311,11 +335,6 @@ class LIFLoader(FormatLoader):
                 f"Warning: {missing_frames} frames were missing and filled with zeros."
             )
 
-        # Squeeze out singleton dimensions but preserve the order of remaining dimensions
-        # This can change dimension ordering if dimensions of size 1 are eliminated
-        # For example, if timepoints=1, the resulting array will have dimensions (z_stacks, channels, y_dim, x_dim)
-        series_data = np.squeeze(series_data)
-
         return series_data
 
     @staticmethod
@@ -323,15 +342,22 @@ class LIFLoader(FormatLoader):
         try:
             lif_file = LifFile(filepath)
             image = lif_file.get_image(series_index)
+            axes = "".join(image.dims._fields).upper()
+            channels = image.channels
+            if channels > 1:
+                # add C to end of string
+                axes += "C"
 
             metadata = {
-                "channels": image.channels,
-                "z_stacks": image.nz,
-                "timepoints": image.nt,
-                "
-                "
-                "
+                # "channels": image.channels,
+                # "z_stacks": image.nz,
+                # "timepoints": image.nt,
+                "axes": "TZCYX",
+                "unit": "um",
+                "resolution": image.scale[:2],
             }
+            if image.scale[2] is not None:
+                metadata["spacing"] = image.scale[2]
             return metadata
         except (ValueError, FileNotFoundError):
             return {}
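The metadata dict the loaders now return ("axes", "unit", "resolution", and optionally "spacing") is shaped for tifffile's ImageJ-hyperstack path, which the converter uses further down in this diff. A minimal sketch with made-up values; note that whether "resolution" holds pixels-per-micron or microns-per-pixel differs between loaders, so it may need inverting before use:

```python
import numpy as np
import tifffile

image = np.zeros((3, 5, 2, 64, 64), dtype=np.uint16)  # TZCYX
meta = {"axes": "TZCYX", "unit": "um", "resolution": (3.3, 3.3), "spacing": 1.0}

tifffile.imwrite(
    "example.tif",
    image,
    imagej=True,                    # write an ImageJ-compatible hyperstack
    resolution=meta["resolution"],  # pixels per unit for the X/Y resolution tags
    metadata={"axes": meta["axes"], "unit": meta["unit"], "spacing": meta["spacing"]},
)
```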
@@ -366,20 +392,23 @@ class ND2Loader(FormatLoader):
 
         with nd2.ND2File(filepath) as nd2_file:
             return {
-
-                "
-
-
+                "axes": "".join(nd2_file.sizes.keys()),
+                "resolution": (
+                    1 / nd2_file.voxel_size().x,
+                    1 / nd2_file.voxel_size().y,
+                ),
+                "unit": "um",
+                "spacing": 1 / nd2_file.voxel_size().z,
             }
 
 
 class TIFFSlideLoader(FormatLoader):
-    """Loader for whole slide TIFF images (NDPI,
+    """Loader for whole slide TIFF images (NDPI, etc.)"""
 
     @staticmethod
     def can_load(filepath: str) -> bool:
         ext = filepath.lower()
-        return ext.endswith(
+        return ext.endswith(".ndpi")
 
     @staticmethod
     def get_series_count(filepath: str) -> int:
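For context on the new ND2 metadata: nd2's `voxel_size()` reports the physical pixel size in microns, so the stored "resolution" is its reciprocal (pixels per micron). A small sketch with a made-up file path:

```python
import nd2

with nd2.ND2File("example.nd2") as f:
    vs = f.voxel_size()                # e.g. VoxelSize(x=0.32, y=0.32, z=1.0), in microns
    axes = "".join(f.sizes.keys())     # e.g. "TZCYX", depending on the acquisition
    resolution = (1 / vs.x, 1 / vs.y)  # pixels per micron, as stored above
    print(axes, resolution, 1 / vs.z)
```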
@@ -433,9 +462,12 @@ class TIFFSlideLoader(FormatLoader):
                 return {}
 
             return {
-                "
-                "
-
+                "axes": slide.properties["tiffslide.series-axes"],
+                "resolution": (
+                    slide.properties["tiffslide.mpp-x"],
+                    slide.properties["tiffslide.mpp-y"],
+                ),
+                "unit": "um",
             }
         except (ValueError, FileNotFoundError):
             # Fall back to tifffile
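The slide properties used above come from tiffslide's OpenSlide-style property map; "tiffslide.mpp-x"/"tiffslide.mpp-y" are microns per pixel. A short sketch with a made-up file path:

```python
import tiffslide

slide = tiffslide.TiffSlide("example.ndpi")
props = slide.properties
print(props["tiffslide.series-axes"], props["tiffslide.mpp-x"], props["tiffslide.mpp-y"])
slide.close()
```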
@@ -517,81 +549,37 @@ class CZILoader(FormatLoader):
             if series_index < 0 or series_index >= len(scenes):
                 return {}
 
-            scene_keys = list(scenes.keys())
-            scene_index = scene_keys[series_index]
-            scene = scenes[scene_index]
+            # scene_keys = list(scenes.keys())
+            # scene_index = scene_keys[series_index]
+            # scene = scenes[scene_index]
 
             dims = czi_file.total_bounding_box
 
             # Extract the raw metadata as an XML string
             metadata_xml = czi_file.raw_metadata
 
-            metadata = {
-                "scene_index": scene_index,
-                "scene_rect": (scene[0], scene[1], scene[2], scene[3]),
-            }
-
-            # Add scale information
+            # Initialize metadata with default values
             try:
-
-
-
-
-
-
-
-
-
-
+                # scales are in meters, convert to microns
+                scale_x = CZILoader.get_scales(metadata_xml, "X") * 1e6
+                scale_y = CZILoader.get_scales(metadata_xml, "Y") * 1e6
+
+                filtered_dims = {
+                    k: v for k, v in dims.items() if v != (0, 1)
+                }
+                axes = "".join(filtered_dims.keys())
+                metadata = {
+                    "axes": axes,
+                    "resolution": (scale_x, scale_y),
+                    "unit": "um",
+                }
 
                 if dims["Z"] != (0, 1):
                     scale_z = CZILoader.get_scales(metadata_xml, "Z")
-                    metadata["
+                    metadata["spacing"] = scale_z
             except ValueError as e:
                 print(f"Error getting scale metadata: {e}")
 
-            # metadata = {
-            #     "scene_index": scene_index,
-            #     "scene_rect": (scene[0], scene[1], scene[2], scene[3]),
-            # }
-
-            # try:
-
-            #     metadata.update(
-            #         {
-            #             "dimensions": czi_file.total_bounding_box,
-            #             # "axes": czi_file.axes,
-            #             # "shape": czi_file.shape,
-            #             # #"size": czi_file.size,
-            #             "pixel_types": czi_file.pixel_types,
-            #         }
-            #     )
-            # except (ValueError, FileNotFoundError) as e:
-            #     print(f"Error getting full metadata: {e}")
-
-            # try:
-            #     metadata["channel_count"] = czi_file.get_dims_channels()[0]
-            #     metadata["channel_names"] = czi_file.channel_names
-            # except (ValueError, FileNotFoundError) as e:
-            #     print(f"Error getting channel metadata: {e}")
-            # try:
-            #     metadata.update(
-            #         {
-            #             "scale_x": czi_file.scale_x,
-            #             "scale_y": czi_file.scale_y,
-            #             "scale_z": czi_file.scale_z,
-            #             "scale_unit": czi_file.scale_unit,
-            #         }
-            #     )
-            # except (ValueError, FileNotFoundError) as e:
-            #     print(f"Error getting scale metadata: {e}")
-            # try:
-            #     xml_metadata = czi_file.meta
-            #     if xml_metadata:
-            #         metadata["xml_metadata"] = xml_metadata
-            # except (ValueError, FileNotFoundError) as e:
-            #     print(f"Error getting XML metadata: {e}")
-
             return metadata
 
         except (ValueError, FileNotFoundError, RuntimeError) as e:
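`CZILoader.get_scales()` itself is not part of this hunk, so the following is only an assumption of what such a helper does: pull the per-axis `<Distance>` scaling (in meters) out of the raw CZI metadata XML, matching the metres-to-microns conversion above. Names here are hypothetical.

```python
import xml.etree.ElementTree as ET


def get_scales_sketch(metadata_xml: str, axis: str) -> float:
    """Hypothetical stand-in for CZILoader.get_scales: scaling in meters for one axis."""
    root = ET.fromstring(metadata_xml)
    for distance in root.iter("Distance"):
        if distance.get("Id") == axis:
            value = distance.findtext("Value")
            if value is not None:
                return float(value)
    raise ValueError(f"No scaling found for axis {axis}")
```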
@@ -625,6 +613,188 @@ class CZILoader(FormatLoader):
             return {}
 
 
+class AcquiferLoader(FormatLoader):
+    """Loader for Acquifer datasets using the acquifer_napari_plugin utility"""
+
+    # Cache for loaded datasets to avoid reloading the same directory multiple times
+    _dataset_cache = {}  # {directory_path: xarray_dataset}
+
+    @staticmethod
+    def can_load(filepath: str) -> bool:
+        """
+        Check if this is a directory that can be loaded as an Acquifer dataset
+        """
+        if not os.path.isdir(filepath):
+            return False
+
+        try:
+
+            # Check if directory contains files
+            image_files = []
+            for root, _, files in os.walk(filepath):
+                for file in files:
+                    if file.lower().endswith(
+                        (".tif", ".tiff", ".png", ".jpg", ".jpeg")
+                    ):
+                        image_files.append(os.path.join(root, file))
+
+            return bool(image_files)
+        except (ValueError, FileNotFoundError) as e:
+            print(f"Error checking Acquifer dataset: {e}")
+            return False
+
+    @staticmethod
+    def _load_dataset(directory):
+        """Load the dataset using array_from_directory and cache it"""
+        if directory in AcquiferLoader._dataset_cache:
+            return AcquiferLoader._dataset_cache[directory]
+
+        try:
+            from acquifer_napari_plugin.utils import array_from_directory
+
+            # Check if directory contains files before trying to load
+            image_files = []
+            for root, _, files in os.walk(directory):
+                for file in files:
+                    if file.lower().endswith(
+                        (".tif", ".tiff", ".png", ".jpg", ".jpeg")
+                    ):
+                        image_files.append(os.path.join(root, file))
+
+            if not image_files:
+                raise ValueError(
+                    f"No image files found in directory: {directory}"
+                )
+
+            dataset = array_from_directory(directory)
+            AcquiferLoader._dataset_cache[directory] = dataset
+            return dataset
+        except (ValueError, FileNotFoundError) as e:
+            print(f"Error loading Acquifer dataset: {e}")
+            raise ValueError(f"Failed to load Acquifer dataset: {e}") from e
+
+    @staticmethod
+    def get_series_count(filepath: str) -> int:
+        """
+        Return the number of wells as series count
+        """
+        try:
+            dataset = AcquiferLoader._load_dataset(filepath)
+
+            # Check for Well dimension
+            if "Well" in dataset.dims:
+                return len(dataset.coords["Well"])
+            else:
+                # Single series for the whole dataset
+                return 1
+        except (ValueError, FileNotFoundError) as e:
+            print(f"Error getting series count: {e}")
+            return 0
+
+    @staticmethod
+    def load_series(filepath: str, series_index: int) -> np.ndarray:
+        """
+        Load a specific well as a series
+        """
+        try:
+            dataset = AcquiferLoader._load_dataset(filepath)
+
+            # If the dataset has a Well dimension, select the specific well
+            if "Well" in dataset.dims:
+                if series_index < 0 or series_index >= len(
+                    dataset.coords["Well"]
+                ):
+                    raise ValueError(
+                        f"Series index {series_index} out of range"
+                    )
+
+                # Get the well value at this index
+                well_value = dataset.coords["Well"].values[series_index]
+
+                # Select the data for this well
+                well_data = dataset.sel(Well=well_value)
+                # squeeze out singleton dimensions
+                well_data = well_data.squeeze()
+                # Convert to numpy array and return
+                return well_data.values
+            else:
+                # No Well dimension, return the entire dataset
+                return dataset.values
+
+        except (ValueError, FileNotFoundError) as e:
+            print(f"Error loading series: {e}")
+            import traceback
+
+            traceback.print_exc()
+            raise ValueError(f"Failed to load series: {e}") from e
+
+    @staticmethod
+    def get_metadata(filepath: str, series_index: int) -> Dict:
+        """
+        Extract metadata for a specific well
+        """
+        try:
+            dataset = AcquiferLoader._load_dataset(filepath)
+
+            # Initialize with default values
+            axes = ""
+            resolution = (1.0, 1.0)  # Default resolution
+
+            if "Well" in dataset.dims:
+                well_value = dataset.coords["Well"].values[series_index]
+                well_data = dataset.sel(Well=well_value)
+                well_data = well_data.squeeze()  # remove singleton dimensions
+
+                # Get dimensions
+                dims = list(well_data.dims)
+                dims = [
+                    item.replace("Channel", "C").replace("Time", "T")
+                    for item in dims
+                ]
+                axes = "".join(dims)
+
+                # Try to get the first image file in the directory for metadata
+                image_files = []
+                for root, _, files in os.walk(filepath):
+                    for file in files:
+                        if file.lower().endswith((".tif", ".tiff")):
+                            image_files.append(os.path.join(root, file))
+
+                if image_files:
+                    sample_file = image_files[0]
+                    try:
+                        # acquifer_metadata.getPixelSize_um(sample_file) is deprecated, get values after --PX in filename
+                        pattern = re.compile(r"--PX(\d+)")
+                        match = pattern.search(sample_file)
+                        if match:
+                            pixel_size = float(match.group(1)) * 10**-4
+
+                            resolution = (pixel_size, pixel_size)
+                    except (ValueError, FileNotFoundError) as e:
+                        print(f"Warning: Could not get pixel size: {e}")
+            else:
+                # If no Well dimension, use dimensions from the dataset
+                dims = list(dataset.dims)
+                dims = [
+                    item.replace("Channel", "C").replace("Time", "T")
+                    for item in dims
+                ]
+                axes = "".join(dims)
+
+            metadata = {
+                "axes": axes,
+                "resolution": resolution,
+                "unit": "um",
+                "filepath": filepath,
+            }
+            print(f"Extracted metadata: {metadata}")
+            return metadata
+
+        except (ValueError, FileNotFoundError) as e:
+            print(f"Error getting metadata: {e}")
+            return {}
+
+
 class ScanFolderWorker(QThread):
     """Worker thread for scanning folders"""
 
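A minimal sketch of how the new AcquiferLoader is meant to be driven (the directory path is made up; the acquifer-napari package mentioned in the README changes above must be installed):

```python
plate_dir = "/data/acquifer_plate"  # hypothetical Acquifer plate directory

if AcquiferLoader.can_load(plate_dir):
    for i in range(AcquiferLoader.get_series_count(plate_dir)):
        well = AcquiferLoader.load_series(plate_dir, i)   # numpy array for one well
        meta = AcquiferLoader.get_metadata(plate_dir, i)  # axes / resolution / unit
        print(i, well.shape, meta.get("axes"))
```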
@@ -640,22 +810,37 @@ class ScanFolderWorker(QThread):
     def run(self):
         try:
             found_files = []
-
+            all_items = []
 
-            #
-
-
+            # Get both files and potential Acquifer directories
+            include_directories = "acquifer" in [
+                f.lower() for f in self.filters
+            ]
 
-            #
-
-            for root, _, files in os.walk(self.folder):
+            # Count items to scan
+            for root, dirs, files in os.walk(self.folder):
                 for file in files:
-
-
-                    self.
+                    if any(
+                        file.lower().endswith(f)
+                        for f in self.filters
+                        if f.lower() != "acquifer"
+                    ):
+                        all_items.append(os.path.join(root, file))
 
-
-
+                # Add potential Acquifer directories
+                if include_directories:
+                    for dir_name in dirs:
+                        dir_path = os.path.join(root, dir_name)
+                        if AcquiferLoader.can_load(dir_path):
+                            all_items.append(dir_path)
+
+            # Scan all items
+            total_items = len(all_items)
+            for i, item_path in enumerate(all_items):
+                if i % 10 == 0:
+                    self.progress.emit(i, total_items)
+
+                found_files.append(item_path)
 
             self.finished.emit(found_files)
         except (ValueError, FileNotFoundError) as e:
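The scanning rule introduced above in plain form: ordinary filter entries match file extensions, while the special "acquifer" entry switches on directory detection. A standalone sketch of the same logic (the function name is made up):

```python
import os


def collect_items(folder: str, filters: list) -> list:
    include_directories = "acquifer" in [f.lower() for f in filters]
    items = []
    for root, dirs, files in os.walk(folder):
        for file in files:
            if any(file.lower().endswith(f) for f in filters if f.lower() != "acquifer"):
                items.append(os.path.join(root, file))
        if include_directories:
            for dir_name in dirs:
                dir_path = os.path.join(root, dir_name)
                if AcquiferLoader.can_load(dir_path):  # directory full of image files
                    items.append(dir_path)
    return items
```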
@@ -812,82 +997,83 @@ class ConversionWorker(QThread):
     ):
         """Save image data as TIFF with optional metadata"""
         try:
-            #
+            # Basic save without metadata
            if metadata is None:
                 tifffile.imwrite(output_path, image_data, compression="zstd")
-                print(f"Saved TIFF file without metadata: {output_path}")
                 return
 
-            #
-            tiff_metadata = {}
+            # Always preserve resolution if it exists
             resolution = None
-
+            if "resolution" in metadata:
+                resolution = tuple(float(r) for r in metadata["resolution"])
 
-
-            # Extract resolution information for TIFF tags
-            scale_x = metadata.get("scale_x")
-            scale_y = metadata.get("scale_y")
-            # scale_unit = metadata.get("scale_unit")
-
-            if all([scale_x, scale_y, scale_x > 0, scale_y > 0]):
-                # For TIFF, resolution is specified as pixels per resolution unit
-                # So we need to invert the scale (which is microns/pixel)
-                # Convert from microns/pixel to pixels/cm
-                x_res = 10000 / scale_x  # 10000 microns = 1 cm
-                y_res = 10000 / scale_y
-                resolution = (x_res, y_res)
-                resolution_unit = "CENTIMETER"
-
-            # Include all other metadata
-            for key, value in metadata.items():
-                if (
-                    isinstance(value, (str, int, float, bool))
-                    or isinstance(value, (list, tuple))
-                    and all(
-                        isinstance(x, (str, int, float, bool))
-                        for x in value
-                    )
-                ):
-                    tiff_metadata[key] = value
-                elif isinstance(value, dict):
-                    # For dictionaries, convert to a simple JSON string
-                    try:
-                        import json
+            axes = metadata.get("axes", "")
 
-
-
-
-
-                        print(f"Warning: Error processing metadata for TIFF: {str(e)}")
-
-            # Save with metadata, resolution, and compression
-            save_args = {"compression": "zstd", "metadata": tiff_metadata}
-
-            # Add resolution parameters if available
-            if resolution is not None:
-                save_args["resolution"] = resolution
-
-            if resolution_unit is not None:
-                save_args["resolutionunit"] = resolution_unit
+            # Handle different dimension cases appropriately
+            if len(image_data.shape) > 2 and any(ax in axes for ax in "ZC"):
+                # Hyperstack case (3D+ with channels or z-slices)
+                imagej_order = "TZCYX"
 
-
-
-
-
+                if axes != imagej_order:
+                    print(
+                        f"Original axes: {axes}, Target order: {imagej_order}"
+                    )
 
-
-
-
+                    # Filter to valid axes
+                    valid_axes = [ax for ax in axes if ax in imagej_order]
+                    if len(valid_axes) < len(axes):
+                        print(f"Dropping axes: {set(axes)-set(imagej_order)}")
+                        source_idx = [
+                            i
+                            for i, ax in enumerate(axes)
+                            if ax in imagej_order
+                        ]
+                        image_data = np.moveaxis(
+                            image_data, source_idx, range(len(valid_axes))
+                        )
+                        axes = "".join(valid_axes)
+
+                    # Add missing dims
+                    for ax in reversed(imagej_order):
+                        if ax not in axes:
+                            print(f"Adding {ax} dimension")
+                            axes = ax + axes
+                            image_data = np.expand_dims(image_data, axis=0)
+
+                    # Final reordering
+                    source_idx = [axes.index(ax) for ax in imagej_order]
+                    image_data = np.moveaxis(
+                        image_data, source_idx, range(len(imagej_order))
+                    )
+                    metadata["axes"] = imagej_order
 
-
+                tifffile.imwrite(
+                    output_path,
+                    image_data,
+                    metadata=metadata,
+                    resolution=resolution,
+                    imagej=True,
+                    compression="zstd",
+                )
+            else:
+                # 2D case - save without hyperstack metadata but keep resolution
+                save_metadata = (
+                    {"resolution": metadata["resolution"]}
+                    if "resolution" in metadata
+                    else None
+                )
+                tifffile.imwrite(
+                    output_path,
+                    image_data,
+                    metadata=save_metadata,
+                    resolution=resolution,
+                    imagej=False,
+                    compression="zstd",
+                )
 
-            tifffile.imwrite(output_path, image_data, **save_args)
-            print(f"Saved TIFF file with metadata: {output_path}")
         except (ValueError, FileNotFoundError) as e:
-            print(f"Error
-            # Try a last resort, basic save without any options
+            print(f"Error: {str(e)}")
             tifffile.imwrite(output_path, image_data)
-            print(f"Saved TIFF file with fallback method: {output_path}")
 
     def _save_zarr(
         self, image_data: np.ndarray, output_path: str, metadata: dict = None
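The axis handling in the new `_save_tiff()`, reduced to a standalone helper: pad any TZCYX axis that is missing with a length-1 dimension and reorder to ImageJ's TZCYX convention. This sketch assumes every incoming axis label is already one of T, Z, C, Y, X (the dropping of unknown axes is omitted):

```python
import numpy as np


def to_tzcyx(image: np.ndarray, axes: str) -> np.ndarray:
    target = "TZCYX"
    # prepend missing axes as singleton dimensions
    for ax in reversed(target):
        if ax not in axes:
            axes = ax + axes
            image = np.expand_dims(image, axis=0)
    # reorder to TZCYX
    return np.moveaxis(image, [axes.index(ax) for ax in target], range(len(target)))


print(to_tzcyx(np.zeros((4, 2, 32, 32)), "ZCYX").shape)  # (1, 4, 2, 32, 32)
```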
@@ -1003,7 +1189,7 @@ class ConversionWorker(QThread):
                 print(f"Warning: Could not add metadata to Zarr: {str(e)}")
 
             return output_path
-        except
+        except (ValueError, FileNotFoundError) as e:
             print(f"Error in _save_zarr: {str(e)}")
             # For zarr, we don't have a simpler fallback method, so re-raise
             raise
@@ -1046,11 +1232,20 @@ class MicroscopyImageConverterWidget(QWidget):
         self.viewer = viewer
 
         # Register format loaders
-        self.loaders = [
+        self.loaders = [
+            LIFLoader,
+            ND2Loader,
+            TIFFSlideLoader,
+            CZILoader,
+            AcquiferLoader,
+        ]
 
         # Selected series for conversion
         self.selected_series = {}  # {filepath: series_index}
 
+        # Track files that should export all series
+        self.export_all_series = {}  # {filepath: boolean}
+
         # Working threads
         self.scan_worker = None
         self.conversion_worker = None
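`get_file_loader()` is not shown in this diff, so the following is only an assumption of how the loader registry above is likely consulted: the first registered loader whose `can_load()` accepts the path wins. The function name is hypothetical.

```python
def get_file_loader_sketch(widget, filepath):
    """Hypothetical lookup over widget.loaders = [LIFLoader, ND2Loader, TIFFSlideLoader, CZILoader, AcquiferLoader]."""
    for loader in widget.loaders:
        if loader.can_load(filepath):
            return loader
    return None
```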
@@ -1076,9 +1271,9 @@ class MicroscopyImageConverterWidget(QWidget):
         filter_label = QLabel("File Filter:")
         self.filter_edit = QLineEdit()
         self.filter_edit.setPlaceholderText(
-            ".lif, .nd2, .ndpi, .czi (comma separated)"
+            ".lif, .nd2, .ndpi, .czi, acquifer (comma separated)"
         )
-        self.filter_edit.setText(".lif,.nd2,.ndpi,.czi")
+        self.filter_edit.setText(".lif,.nd2,.ndpi,.czi, acquifer")
         scan_button = QPushButton("Scan Folder")
         scan_button.clicked.connect(self.scan_folder)
 
@@ -1288,7 +1483,9 @@ class MicroscopyImageConverterWidget(QWidget):
         QMessageBox.critical(self, "Error", error_message)
 
     def get_file_type(self, filepath: str) -> str:
-        """Determine the file type based on extension"""
+        """Determine the file type based on extension or directory type"""
+        if os.path.isdir(filepath) and AcquiferLoader.can_load(filepath):
+            return "Acquifer"
         ext = filepath.lower()
         if ext.endswith(".lif"):
             return "LIF"
@@ -1298,8 +1495,6 @@ class MicroscopyImageConverterWidget(QWidget):
             return "Slide"
         elif ext.endswith(".czi"):
             return "CZI"
-        elif ext.endswith((".tif", ".tiff")):
-            return "TIFF"
         return "Unknown"
 
     def get_file_loader(self, filepath: str) -> Optional[FormatLoader]:
@@ -1317,6 +1512,15 @@ class MicroscopyImageConverterWidget(QWidget):
         """Set the selected series for a file"""
         self.selected_series[filepath] = series_index
 
+    def set_export_all_series(self, filepath: str, export_all: bool):
+        """Set whether to export all series for a file"""
+        self.export_all_series[filepath] = export_all
+
+        # If exporting all, we still need a default series in selected_series
+        # for files that are marked for export all
+        if export_all and filepath not in self.selected_series:
+            self.selected_series[filepath] = 0
+
     def load_image(self, filepath: str):
         """Load an image file into the viewer"""
         loader = self.get_file_loader(filepath)
@@ -1381,10 +1585,35 @@ class MicroscopyImageConverterWidget(QWidget):
             return
 
         # Create files to convert list
-        files_to_convert = [
-
-
-
+        files_to_convert = []
+
+        for filepath, series_index in self.selected_series.items():
+            # Check if we should export all series for this file
+            if self.export_all_series.get(filepath, False):
+                # Get the number of series for this file
+                loader = self.get_file_loader(filepath)
+                if loader:
+                    try:
+                        series_count = loader.get_series_count(filepath)
+                        # Add all series for this file
+                        for i in range(series_count):
+                            files_to_convert.append((filepath, i))
+                    except (ValueError, FileNotFoundError) as e:
+                        self.status_label.setText(
+                            f"Error getting series count: {str(e)}"
+                        )
+                        QMessageBox.warning(
+                            self,
+                            "Error",
+                            f"Could not get series count for {Path(filepath).name}: {str(e)}",
+                        )
+            else:
+                # Just add the selected series
+                files_to_convert.append((filepath, series_index))
+
+        if not files_to_convert:
+            self.status_label.setText("No valid files to convert")
+            return
 
         # Set up and start the conversion worker thread
         self.conversion_worker = ConversionWorker(
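The work list built above, illustrated with made-up paths and series counts: one (filepath, series_index) pair per conversion job, with "export all" expanding a file into one entry per series.

```python
selected_series = {"/data/a.lif": 2, "/data/b.czi": 0}
export_all_series = {"/data/a.lif": True}   # a.lif is marked "Export All Series"
series_counts = {"/data/a.lif": 4}          # pretend its loader reported 4 series

files_to_convert = []
for path, idx in selected_series.items():
    if export_all_series.get(path, False):
        files_to_convert.extend((path, i) for i in range(series_counts[path]))
    else:
        files_to_convert.append((path, idx))

print(files_to_convert)
# [('/data/a.lif', 0), ('/data/a.lif', 1), ('/data/a.lif', 2), ('/data/a.lif', 3), ('/data/b.czi', 0)]
```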
@@ -1408,7 +1637,7 @@ class MicroscopyImageConverterWidget(QWidget):
         self.conversion_progress.setValue(0)
         self.cancel_button.setVisible(True)
         self.status_label.setText(
-            f"Starting conversion of {len(files_to_convert)} files..."
+            f"Starting conversion of {len(files_to_convert)} files/series..."
         )
 
         # Start conversion
{napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas/napari.yaml
@@ -20,13 +20,13 @@ contributions:
       title: Load sample data from T-MIDAS
     - id: napari-tmidas._label_inspection # hyphen!
       python_name: napari_tmidas._label_inspection:label_inspector_widget # underscore!
-      title: Label
+      title: Label Inspector
     - id: napari-tmidas.file_selector
       python_name: napari_tmidas._file_selector:napari_experimental_provide_dock_widget
-      title:
+      title: Batch Image Processing
    - id: napari-tmidas._file_conversion
       python_name: napari_tmidas._file_conversion:napari_experimental_provide_dock_widget
-      title:
+      title: Microscopy Image Converter
   readers:
     - command: napari-tmidas.get_reader
       accepts_directories: false
@@ -44,8 +44,8 @@ contributions:
         key: unique_id.1
   widgets:
     - command: napari-tmidas.file_selector
-      display_name:
+      display_name: Batch Image Processing
     - command: napari-tmidas._label_inspection
-      display_name: Label
+      display_name: Batch Label inspection
    - command: napari-tmidas._file_conversion
-      display_name:
+      display_name: Batch Microscopy Image Conversion
{napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/src/napari_tmidas.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: napari-tmidas
-Version: 0.1.4
+Version: 0.1.6
 Summary: Tissue Microscopy Image Data Analysis Suite
 Author: Marco Meer
 Author-email: marco.meer@pm.me
@@ -65,6 +65,7 @@ Requires-Dist: pytest-cov; extra == "testing"
 Requires-Dist: pytest-qt; extra == "testing"
 Requires-Dist: napari; extra == "testing"
 Requires-Dist: pyqt5; extra == "testing"
+Dynamic: license-file
 
 # napari-tmidas
 
@@ -74,14 +75,17 @@ Requires-Dist: pyqt5; extra == "testing"
 [](https://github.com/macromeer/napari-tmidas/actions)
 [](https://napari-hub.org/plugins/napari-tmidas)
 <!-- [](https://codecov.io/gh/macromeer/napari-tmidas) -->
+This Napari plugin allows you to perform batch image processing without a graphics processing unit (GPU). It will still be fast because computations will run in parallel on your central processing unit (CPU).
 
-
+This plugin provides you with a growing collection of pipelines for batch image preprocessing, segmentation, regions-of-interest (ROI) analysis and other useful features.
+
+`napari-tmidas` is a work in progress (WIP) and an evolutionary step away from the [terminal / command-line version of T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
 
 ## Installation
 
 First install Napari in a virtual environment:
 
-mamba create -y -n napari-tmidas -c conda-forge python=3.11
+mamba create -y -n napari-tmidas -c conda-forge python=3.11 tqdm
 mamba activate napari-tmidas
 python -m pip install "napari[all]"
 
@@ -96,7 +100,7 @@ To install the latest development version:
 ### Dependencies
 For the File converter, we need some libraries to read some microscopy formats and to write ome-zarr:
 
-pip install nd2 readlif tiffslide pylibCZIrw ome-zarr
+pip install nd2 readlif tiffslide pylibCZIrw acquifer-napari ome-zarr
 
 
 ## Usage
@@ -108,7 +112,7 @@ You can find the installed plugin here:
 
 ### File converter
 
-You might first want to batch convert microscopy image data. Currently, this plugin supports `.nd2, .lif, .ndpi, .czi
+You might first want to batch convert microscopy image data. Currently, this plugin supports `.nd2, .lif, .ndpi, .czi` and acquifer data. After launching the file converter, you can scan a folder of your choice for microscopy image data. It will also detect series images that you can preview. Start by selecting an original image in the first column of the table. This allows you to preview or convert.
 
 
 
{napari_tmidas-0.1.4 → napari_tmidas-0.1.6}/tox.ini
@@ -17,6 +17,8 @@ PLATFORM =
     windows-latest: windows
 
 [testenv]
+setenv =
+    PYTHONPATH = {toxinidir}/src
 platform =
     macos: darwin
     linux: linux
@@ -30,4 +32,9 @@ passenv =
     PYVISTA_OFF_SCREEN
 extras =
     testing
-
+deps =
+    pytest
+    pytest-cov
+    napari
+    numpy
+commands = pytest --cov=napari_tmidas --cov-report=xml