napari-tmidas 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napari_tmidas/_file_conversion.py +1706 -0
- napari_tmidas/_file_selector.py +357 -60
- napari_tmidas/_label_inspection.py +87 -26
- napari_tmidas/_version.py +2 -2
- napari_tmidas/napari.yaml +9 -4
- napari_tmidas/processing_functions/basic.py +24 -42
- napari_tmidas/processing_functions/skimage_filters.py +60 -43
- {napari_tmidas-0.1.3.dist-info → napari_tmidas-0.1.5.dist-info}/METADATA +35 -12
- {napari_tmidas-0.1.3.dist-info → napari_tmidas-0.1.5.dist-info}/RECORD +13 -12
- {napari_tmidas-0.1.3.dist-info → napari_tmidas-0.1.5.dist-info}/WHEEL +1 -1
- {napari_tmidas-0.1.3.dist-info → napari_tmidas-0.1.5.dist-info}/entry_points.txt +0 -0
- {napari_tmidas-0.1.3.dist-info → napari_tmidas-0.1.5.dist-info/licenses}/LICENSE +0 -0
- {napari_tmidas-0.1.3.dist-info → napari_tmidas-0.1.5.dist-info}/top_level.txt +0 -0
napari_tmidas/_file_conversion.py (new file)
@@ -0,0 +1,1706 @@
import concurrent.futures
import os
import re
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import napari
import nd2  # https://github.com/tlambert03/nd2
import numpy as np
import tifffile
import zarr
from magicgui import magicgui
from ome_zarr.writer import write_image  # https://github.com/ome/ome-zarr-py
from pylibCZIrw import czi  # https://github.com/ZEISS/pylibczirw
from qtpy.QtCore import Qt, QThread, Signal
from qtpy.QtWidgets import (
    QApplication,
    QCheckBox,
    QComboBox,
    QFileDialog,
    QHBoxLayout,
    QHeaderView,
    QLabel,
    QLineEdit,
    QMessageBox,
    QProgressBar,
    QPushButton,
    QTableWidget,
    QTableWidgetItem,
    QVBoxLayout,
    QWidget,
)

# Format-specific readers
from readlif.reader import (
    LifFile,  # https://github.com/Arcadia-Science/readlif
)
from tiffslide import TiffSlide  # https://github.com/Bayer-Group/tiffslide


class SeriesTableWidget(QTableWidget):
    """
    Custom table widget to display original files and their series
    """

    def __init__(self, viewer: napari.Viewer):
        super().__init__()
        self.viewer = viewer

        # Configure table
        self.setColumnCount(2)
        self.setHorizontalHeaderLabels(["Original Files", "Series"])
        self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)

        # Track file mappings
        self.file_data = (
            {}
        )  # {filepath: {type: file_type, series: [list_of_series]}}

        # Currently loaded images
        self.current_file = None
        self.current_series = None

        # Connect selection signals
        self.cellClicked.connect(self.handle_cell_click)

    def add_file(self, filepath: str, file_type: str, series_count: int):
        """Add a file to the table with series information"""
        row = self.rowCount()
        self.insertRow(row)

        # Original file item
        original_item = QTableWidgetItem(os.path.basename(filepath))
        original_item.setData(Qt.UserRole, filepath)
        self.setItem(row, 0, original_item)

        # Series info
        series_info = (
            f"{series_count} series"
            if series_count >= 0
            else "Not a series file"
        )
        series_item = QTableWidgetItem(series_info)
        self.setItem(row, 1, series_item)

        # Store file info
        self.file_data[filepath] = {
            "type": file_type,
            "series_count": series_count,
            "row": row,
        }

    def handle_cell_click(self, row: int, column: int):
        """Handle cell click to show series details or load image"""
        if column == 0:
            # Get filepath from the clicked cell
            item = self.item(row, 0)
            if item:
                filepath = item.data(Qt.UserRole)
                file_info = self.file_data.get(filepath)

                if file_info and file_info["series_count"] > 0:
                    # Update the current file
                    self.current_file = filepath

                    # Signal to show series details
                    self.parent().show_series_details(filepath)
                else:
                    # Not a series file, just load the image
                    self.parent().load_image(filepath)


class SeriesDetailWidget(QWidget):
    """Widget to display and select series from a file"""

    def __init__(self, parent, viewer: napari.Viewer):
        super().__init__()
        self.parent = parent
        self.viewer = viewer
        self.current_file = None
        self.max_series = 0

        # Create layout
        layout = QVBoxLayout()
        self.setLayout(layout)

        # Series selection widgets
        self.series_label = QLabel("Select Series:")
        layout.addWidget(self.series_label)

        self.series_selector = QComboBox()
        layout.addWidget(self.series_selector)

        # Add "Export All Series" checkbox
        self.export_all_checkbox = QCheckBox("Export All Series")
        self.export_all_checkbox.toggled.connect(self.toggle_export_all)
        layout.addWidget(self.export_all_checkbox)

        # Connect series selector
        self.series_selector.currentIndexChanged.connect(self.series_selected)

        # Add preview button
        preview_button = QPushButton("Preview Selected Series")
        preview_button.clicked.connect(self.preview_series)
        layout.addWidget(preview_button)

        # Add info label
        self.info_label = QLabel("")
        layout.addWidget(self.info_label)

    def toggle_export_all(self, checked):
        """Handle toggle of export all checkbox"""
        if self.current_file and checked:
            # Disable series selector when exporting all
            self.series_selector.setEnabled(not checked)
            # Update parent with export all setting
            self.parent.set_export_all_series(self.current_file, checked)
        elif self.current_file:
            # Re-enable series selector
            self.series_selector.setEnabled(True)
            # Update parent with currently selected series only
            self.series_selected(self.series_selector.currentIndex())
            # Update parent to not export all
            self.parent.set_export_all_series(self.current_file, False)

    def set_file(self, filepath: str):
        """Set the current file and update series list"""
        self.current_file = filepath
        self.series_selector.clear()

        # Reset export all checkbox
        self.export_all_checkbox.setChecked(False)
        self.series_selector.setEnabled(True)

        # Try to get series information
        file_loader = self.parent.get_file_loader(filepath)
        if file_loader:
            try:
                series_count = file_loader.get_series_count(filepath)
                self.max_series = series_count
                for i in range(series_count):
                    self.series_selector.addItem(f"Series {i}", i)

                # Set info text
                if series_count > 0:
                    metadata = file_loader.get_metadata(filepath, 0)
                    if metadata:
                        self.info_label.setText(
                            f"File contains {series_count} series."
                        )
                    else:
                        self.info_label.setText(
                            f"File contains {series_count} series. No additional metadata available."
                        )
                else:
                    self.info_label.setText("No series found in this file.")
            except FileNotFoundError:
                self.info_label.setText("File not found.")
            except PermissionError:
                self.info_label.setText(
                    "Permission denied when accessing the file."
                )
            except ValueError as e:
                self.info_label.setText(f"Invalid data in file: {str(e)}")
            except OSError as e:
                self.info_label.setText(f"I/O error occurred: {str(e)}")

    def series_selected(self, index: int):
        """Handle series selection"""
        if index >= 0 and self.current_file:
            series_index = self.series_selector.itemData(index)

            # Validate series index
            if series_index >= self.max_series:
                self.info_label.setText(
                    f"Error: Series index {series_index} out of range (max: {self.max_series-1})"
                )
                return

            # Update parent with selected series
            self.parent.set_selected_series(self.current_file, series_index)

    def preview_series(self):
        """Preview the selected series in Napari"""
        if self.current_file and self.series_selector.currentIndex() >= 0:
            series_index = self.series_selector.itemData(
                self.series_selector.currentIndex()
            )

            # Validate series index
            if series_index >= self.max_series:
                self.info_label.setText(
                    f"Error: Series index {series_index} out of range (max: {self.max_series-1})"
                )
                return

            file_loader = self.parent.get_file_loader(self.current_file)

            try:
                # Load the series
                image_data = file_loader.load_series(
                    self.current_file, series_index
                )

                # Clear existing layers and display the image
                self.viewer.layers.clear()
                self.viewer.add_image(
                    image_data,
                    name=f"{Path(self.current_file).stem} - Series {series_index}",
                )

                # Update status
                self.viewer.status = f"Previewing {Path(self.current_file).name} - Series {series_index}"
            except (ValueError, FileNotFoundError) as e:
                self.viewer.status = f"Error loading series: {str(e)}"
                QMessageBox.warning(
                    self, "Error", f"Could not load series: {str(e)}"
                )


class FormatLoader:
    """Base class for format loaders"""

    @staticmethod
    def can_load(filepath: str) -> bool:
        raise NotImplementedError()

    @staticmethod
    def get_series_count(filepath: str) -> int:
        raise NotImplementedError()

    @staticmethod
    def load_series(filepath: str, series_index: int) -> np.ndarray:
        raise NotImplementedError()

    @staticmethod
    def get_metadata(filepath: str, series_index: int) -> Dict:
        return {}


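# --- Illustrative sketch, not part of the released file: the FormatLoader
# contract above is everything the converter relies on, so supporting a new
# format only means implementing these four static methods. The PNG loader
# below is a hypothetical example of such a subclass (it assumes imageio is
# available); it is shown only to make the interface concrete.
class PNGLoaderExample(FormatLoader):
    """Hypothetical loader illustrating the FormatLoader interface."""

    @staticmethod
    def can_load(filepath: str) -> bool:
        return filepath.lower().endswith(".png")

    @staticmethod
    def get_series_count(filepath: str) -> int:
        return 1  # a plain PNG holds a single image

    @staticmethod
    def load_series(filepath: str, series_index: int) -> np.ndarray:
        import imageio.v3 as iio  # assumed dependency, for illustration only

        return iio.imread(filepath)

    @staticmethod
    def get_metadata(filepath: str, series_index: int) -> Dict:
        return {"axes": "YX", "unit": "um"}

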
class LIFLoader(FormatLoader):
    """Loader for Leica LIF files"""

    @staticmethod
    def can_load(filepath: str) -> bool:
        return filepath.lower().endswith(".lif")

    @staticmethod
    def get_series_count(filepath: str) -> int:
        try:
            lif_file = LifFile(filepath)
            # Directly use the iterator, no need to load all images into a list
            return sum(1 for _ in lif_file.get_iter_image())
        except (ValueError, FileNotFoundError):
            return 0

    @staticmethod
    def load_series(filepath: str, series_index: int) -> np.ndarray:
        lif_file = LifFile(filepath)
        image = lif_file.get_image(series_index)

        # Extract dimensions
        channels = image.channels
        z_stacks = image.nz
        timepoints = image.nt
        x_dim, y_dim = image.dims[0], image.dims[1]

        # Create an array to hold the entire series
        series_shape = (
            timepoints,
            z_stacks,
            channels,
            y_dim,
            x_dim,
        )  # Corrected shape
        series_data = np.zeros(series_shape, dtype=np.uint16)

        # Populate the array
        missing_frames = 0
        for t in range(timepoints):
            for z in range(z_stacks):
                for c in range(channels):
                    # Get the frame and convert to numpy array
                    frame = image.get_frame(z=z, t=t, c=c)
                    if frame:
                        series_data[t, z, c, :, :] = np.array(frame)
                    else:
                        missing_frames += 1
                        series_data[t, z, c, :, :] = np.zeros(
                            (y_dim, x_dim), dtype=np.uint16
                        )

        if missing_frames > 0:
            print(
                f"Warning: {missing_frames} frames were missing and filled with zeros."
            )

        return series_data

    @staticmethod
    def get_metadata(filepath: str, series_index: int) -> Dict:
        try:
            lif_file = LifFile(filepath)
            image = lif_file.get_image(series_index)
            axes = "".join(image.dims._fields).upper()
            channels = image.channels
            if channels > 1:
                # add C to end of string
                axes += "C"

            metadata = {
                # "channels": image.channels,
                # "z_stacks": image.nz,
                # "timepoints": image.nt,
                "axes": "TZCYX",
                "unit": "um",
                "resolution": image.scale[:2],
            }
            if image.scale[2] is not None:
                metadata["spacing"] = image.scale[2]
            return metadata
        except (ValueError, FileNotFoundError):
            return {}


class ND2Loader(FormatLoader):
    """Loader for Nikon ND2 files"""

    @staticmethod
    def can_load(filepath: str) -> bool:
        return filepath.lower().endswith(".nd2")

    @staticmethod
    def get_series_count(filepath: str) -> int:

        # ND2 files typically have a single series with multiple channels/dimensions
        return 1

    @staticmethod
    def load_series(filepath: str, series_index: int) -> np.ndarray:
        if series_index != 0:
            raise ValueError("ND2 files only support series index 0")

        with nd2.ND2File(filepath) as nd2_file:
            # Convert to numpy array
            return nd2_file.asarray()

    @staticmethod
    def get_metadata(filepath: str, series_index: int) -> Dict:
        if series_index != 0:
            return {}

        with nd2.ND2File(filepath) as nd2_file:
            return {
                "axes": "".join(nd2_file.sizes.keys()),
                "resolution": (
                    1 / nd2_file.voxel_size().x,
                    1 / nd2_file.voxel_size().y,
                ),
                "unit": "um",
                "spacing": 1 / nd2_file.voxel_size().z,
            }


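# --- Illustrative note, not part of the released file: nd2's voxel_size()
# reports the physical size of one pixel in micrometers, while the
# "resolution" entries used throughout this module (and by tifffile) are
# pixels per unit, hence the reciprocals above. A quick sanity check,
# assuming a hypothetical 0.325 um/px calibration:
def _example_nd2_resolution(voxel_size_um: float = 0.325) -> float:
    """Convert a pixel size in micrometers to pixels per micrometer."""
    return 1 / voxel_size_um  # ~3.08 px/um for a 0.325 um pixel

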
class TIFFSlideLoader(FormatLoader):
    """Loader for whole slide TIFF images (NDPI, etc.)"""

    @staticmethod
    def can_load(filepath: str) -> bool:
        ext = filepath.lower()
        return ext.endswith(".ndpi")

    @staticmethod
    def get_series_count(filepath: str) -> int:
        try:
            with TiffSlide(filepath) as slide:
                # NDPI typically has a main image and several levels (pyramid)
                return len(slide.level_dimensions)
        except (ValueError, FileNotFoundError):
            # Try standard tifffile if TiffSlide fails
            try:
                with tifffile.TiffFile(filepath) as tif:
                    return len(tif.series)
            except (ValueError, FileNotFoundError):
                return 0

    @staticmethod
    def load_series(filepath: str, series_index: int) -> np.ndarray:
        try:
            # First try TiffSlide for whole slide images
            with TiffSlide(filepath) as slide:
                if series_index < 0 or series_index >= len(
                    slide.level_dimensions
                ):
                    raise ValueError(
                        f"Series index {series_index} out of range"
                    )

                # Get dimensions for the level
                width, height = slide.level_dimensions[series_index]
                # Read the entire level
                return np.array(
                    slide.read_region((0, 0), series_index, (width, height))
                )
        except (ValueError, FileNotFoundError):
            # Fall back to tifffile
            with tifffile.TiffFile(filepath) as tif:
                if series_index < 0 or series_index >= len(tif.series):
                    raise ValueError(
                        f"Series index {series_index} out of range"
                    ) from None

                return tif.series[series_index].asarray()

    @staticmethod
    def get_metadata(filepath: str, series_index: int) -> Dict:
        try:
            with TiffSlide(filepath) as slide:
                if series_index < 0 or series_index >= len(
                    slide.level_dimensions
                ):
                    return {}

                return {
                    "axes": slide.properties["tiffslide.series-axes"],
                    "resolution": (
                        slide.properties["tiffslide.mpp-x"],
                        slide.properties["tiffslide.mpp-y"],
                    ),
                    "unit": "um",
                }
        except (ValueError, FileNotFoundError):
            # Fall back to tifffile
            with tifffile.TiffFile(filepath) as tif:
                if series_index < 0 or series_index >= len(tif.series):
                    return {}

                series = tif.series[series_index]
                return {
                    "shape": series.shape,
                    "dtype": str(series.dtype),
                    "axes": series.axes,
                }


class CZILoader(FormatLoader):
    """Loader for Zeiss CZI files
    https://github.com/ZEISS/pylibczirw
    """

    @staticmethod
    def can_load(filepath: str) -> bool:
        return filepath.lower().endswith(".czi")

    @staticmethod
    def get_series_count(filepath: str) -> int:
        try:
            with czi.open_czi(filepath) as czi_file:
                scenes = czi_file.scenes_bounding_rectangle
                return len(scenes)
        except (ValueError, FileNotFoundError):
            return 0

    @staticmethod
    def load_series(filepath: str, series_index: int) -> np.ndarray:
        try:
            with czi.open_czi(filepath) as czi_file:
                scenes = czi_file.scenes_bounding_rectangle

                if series_index < 0 or series_index >= len(scenes):
                    raise ValueError(
                        f"Scene index {series_index} out of range"
                    )

                scene_keys = list(scenes.keys())
                scene_index = scene_keys[series_index]

                # You might need to specify pixel_type if automatic detection fails
                image = czi_file.read(scene=scene_index)
                return image
        except (ValueError, FileNotFoundError) as e:
            print(f"Error loading series: {e}")
            raise  # Re-raise the exception after logging

    @staticmethod
    def get_scales(metadata_xml, dim):
        pattern = re.compile(
            r'<Distance[^>]*Id="'
            + re.escape(dim)
            + r'"[^>]*>.*?<Value[^>]*>(.*?)</Value>',
            re.DOTALL,
        )
        match = pattern.search(metadata_xml)

        if match:
            scale = float(match.group(1))
            # convert to microns
            scale = scale * 1e6
            return scale
        else:
            return None  # Fixed: return a single None value instead of (None, None, None)

    @staticmethod
    def get_metadata(filepath: str, series_index: int) -> Dict:
        try:
            with czi.open_czi(filepath) as czi_file:
                scenes = czi_file.scenes_bounding_rectangle

                if series_index < 0 or series_index >= len(scenes):
                    return {}

                # scene_keys = list(scenes.keys())
                # scene_index = scene_keys[series_index]
                # scene = scenes[scene_index]

                dims = czi_file.total_bounding_box

                # Extract the raw metadata as an XML string
                metadata_xml = czi_file.raw_metadata

                # Initialize metadata with default values
                try:
                    # scales are in meters, convert to microns
                    scale_x = CZILoader.get_scales(metadata_xml, "X") * 1e6
                    scale_y = CZILoader.get_scales(metadata_xml, "Y") * 1e6

                    filtered_dims = {
                        k: v for k, v in dims.items() if v != (0, 1)
                    }
                    axes = "".join(filtered_dims.keys())
                    metadata = {
                        "axes": axes,
                        "resolution": (scale_x, scale_y),
                        "unit": "um",
                    }

                    if dims["Z"] != (0, 1):
                        scale_z = CZILoader.get_scales(metadata_xml, "Z")
                        metadata["spacing"] = scale_z
                except ValueError as e:
                    print(f"Error getting scale metadata: {e}")

                return metadata

        except (ValueError, FileNotFoundError, RuntimeError) as e:
            print(f"Error getting metadata: {e}")
            return {}

    @staticmethod
    def get_physical_pixel_size(
        filepath: str, series_index: int
    ) -> Dict[str, float]:
        try:
            with czi.open_czi(filepath) as czi_file:
                scenes = czi_file.scenes_bounding_rectangle

                if series_index < 0 or series_index >= len(scenes):
                    raise ValueError(
                        f"Scene index {series_index} out of range"
                    )

                # scene_keys = list(scenes.keys())
                # scene_index = scene_keys[series_index]

                # Get scale information
                scale_x = czi_file.scale_x
                scale_y = czi_file.scale_y
                scale_z = czi_file.scale_z

                return {"X": scale_x, "Y": scale_y, "Z": scale_z}
        except (ValueError, FileNotFoundError) as e:
            print(f"Error getting pixel size: {str(e)}")
            return {}


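# --- Illustrative sketch, not part of the released file: the XML fragment
# below is a made-up example of the <Distance> entries that get_scales()
# extracts from czi_file.raw_metadata. With the 1e6 factor applied inside
# get_scales(), a value of 2.04e-07 metres comes back as roughly 0.204
# micrometres per pixel.
_EXAMPLE_CZI_METADATA = """
<Metadata>
  <Scaling>
    <Items>
      <Distance Id="X"><Value>2.04e-07</Value></Distance>
      <Distance Id="Y"><Value>2.04e-07</Value></Distance>
    </Items>
  </Scaling>
</Metadata>
"""

_example_scale_um = CZILoader.get_scales(_EXAMPLE_CZI_METADATA, "X")  # ~0.204

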
class AcquiferLoader(FormatLoader):
    """Loader for Acquifer datasets using the acquifer_napari_plugin utility"""

    # Cache for loaded datasets to avoid reloading the same directory multiple times
    _dataset_cache = {}  # {directory_path: xarray_dataset}

    @staticmethod
    def can_load(filepath: str) -> bool:
        """
        Check if this is a directory that can be loaded as an Acquifer dataset
        """
        if not os.path.isdir(filepath):
            return False

        try:

            # Check if directory contains files
            image_files = []
            for root, _, files in os.walk(filepath):
                for file in files:
                    if file.lower().endswith(
                        (".tif", ".tiff", ".png", ".jpg", ".jpeg")
                    ):
                        image_files.append(os.path.join(root, file))

            return bool(image_files)
        except (ValueError, FileNotFoundError) as e:
            print(f"Error checking Acquifer dataset: {e}")
            return False

    @staticmethod
    def _load_dataset(directory):
        """Load the dataset using array_from_directory and cache it"""
        if directory in AcquiferLoader._dataset_cache:
            return AcquiferLoader._dataset_cache[directory]

        try:
            from acquifer_napari_plugin.utils import array_from_directory

            # Check if directory contains files before trying to load
            image_files = []
            for root, _, files in os.walk(directory):
                for file in files:
                    if file.lower().endswith(
                        (".tif", ".tiff", ".png", ".jpg", ".jpeg")
                    ):
                        image_files.append(os.path.join(root, file))

            if not image_files:
                raise ValueError(
                    f"No image files found in directory: {directory}"
                )

            dataset = array_from_directory(directory)
            AcquiferLoader._dataset_cache[directory] = dataset
            return dataset
        except (ValueError, FileNotFoundError) as e:
            print(f"Error loading Acquifer dataset: {e}")
            raise ValueError(f"Failed to load Acquifer dataset: {e}") from e

    @staticmethod
    def get_series_count(filepath: str) -> int:
        """
        Return the number of wells as series count
        """
        try:
            dataset = AcquiferLoader._load_dataset(filepath)

            # Check for Well dimension
            if "Well" in dataset.dims:
                return len(dataset.coords["Well"])
            else:
                # Single series for the whole dataset
                return 1
        except (ValueError, FileNotFoundError) as e:
            print(f"Error getting series count: {e}")
            return 0

    @staticmethod
    def load_series(filepath: str, series_index: int) -> np.ndarray:
        """
        Load a specific well as a series
        """
        try:
            dataset = AcquiferLoader._load_dataset(filepath)

            # If the dataset has a Well dimension, select the specific well
            if "Well" in dataset.dims:
                if series_index < 0 or series_index >= len(
                    dataset.coords["Well"]
                ):
                    raise ValueError(
                        f"Series index {series_index} out of range"
                    )

                # Get the well value at this index
                well_value = dataset.coords["Well"].values[series_index]

                # Select the data for this well
                well_data = dataset.sel(Well=well_value)
                # squeeze out singleton dimensions
                well_data = well_data.squeeze()
                # Convert to numpy array and return
                return well_data.values
            else:
                # No Well dimension, return the entire dataset
                return dataset.values

        except (ValueError, FileNotFoundError) as e:
            print(f"Error loading series: {e}")
            import traceback

            traceback.print_exc()
            raise ValueError(f"Failed to load series: {e}") from e

    @staticmethod
    def get_metadata(filepath: str, series_index: int) -> Dict:
        """
        Extract metadata for a specific well
        """
        try:
            dataset = AcquiferLoader._load_dataset(filepath)

            # Initialize with default values
            axes = ""
            resolution = (1.0, 1.0)  # Default resolution

            if "Well" in dataset.dims:
                well_value = dataset.coords["Well"].values[series_index]
                well_data = dataset.sel(Well=well_value)
                well_data = well_data.squeeze()  # remove singleton dimensions

                # Get dimensions
                dims = list(well_data.dims)
                dims = [
                    item.replace("Channel", "C").replace("Time", "T")
                    for item in dims
                ]
                axes = "".join(dims)

                # Try to get the first image file in the directory for metadata
                image_files = []
                for root, _, files in os.walk(filepath):
                    for file in files:
                        if file.lower().endswith((".tif", ".tiff")):
                            image_files.append(os.path.join(root, file))

                if image_files:
                    sample_file = image_files[0]
                    try:
                        # acquifer_metadata.getPixelSize_um(sample_file) is deprecated, get values after --PX in filename
                        pattern = re.compile(r"--PX(\d+)")
                        match = pattern.search(sample_file)
                        if match:
                            pixel_size = float(match.group(1)) * 10**-4

                            resolution = (pixel_size, pixel_size)
                    except (ValueError, FileNotFoundError) as e:
                        print(f"Warning: Could not get pixel size: {e}")
            else:
                # If no Well dimension, use dimensions from the dataset
                dims = list(dataset.dims)
                dims = [
                    item.replace("Channel", "C").replace("Time", "T")
                    for item in dims
                ]
                axes = "".join(dims)

            metadata = {
                "axes": axes,
                "resolution": resolution,
                "unit": "um",
                "filepath": filepath,
            }
            print(f"Extracted metadata: {metadata}")
            return metadata

        except (ValueError, FileNotFoundError) as e:
            print(f"Error getting metadata: {e}")
            return {}


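# --- Illustrative sketch, not part of the released file: Acquifer exports
# encode the pixel size in each image filename after the "--PX" token, which
# AcquiferLoader.get_metadata() parses with the regex above. The filename
# below is made up purely to show the parse; with the 10**-4 factor used
# above, "--PX3250" yields a pixel size of 0.325.
_EXAMPLE_ACQUIFER_NAME = "-A001--PO01--LO001--CO6--SL010--PX3250--WE00001.tif"

_px_match = re.search(r"--PX(\d+)", _EXAMPLE_ACQUIFER_NAME)
if _px_match:
    _example_pixel_size = float(_px_match.group(1)) * 10**-4  # 0.325

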
class ScanFolderWorker(QThread):
    """Worker thread for scanning folders"""

    progress = Signal(int, int)  # current, total
    finished = Signal(list)  # list of found files
    error = Signal(str)  # error message

    def __init__(self, folder: str, filters: List[str]):
        super().__init__()
        self.folder = folder
        self.filters = filters

    def run(self):
        try:
            found_files = []
            all_items = []

            # Get both files and potential Acquifer directories
            include_directories = "acquifer" in [
                f.lower() for f in self.filters
            ]

            # Count items to scan
            for root, dirs, files in os.walk(self.folder):
                for file in files:
                    if any(
                        file.lower().endswith(f)
                        for f in self.filters
                        if f.lower() != "acquifer"
                    ):
                        all_items.append(os.path.join(root, file))

                # Add potential Acquifer directories
                if include_directories:
                    for dir_name in dirs:
                        dir_path = os.path.join(root, dir_name)
                        if AcquiferLoader.can_load(dir_path):
                            all_items.append(dir_path)

            # Scan all items
            total_items = len(all_items)
            for i, item_path in enumerate(all_items):
                if i % 10 == 0:
                    self.progress.emit(i, total_items)

                found_files.append(item_path)

            self.finished.emit(found_files)
        except (ValueError, FileNotFoundError) as e:
            self.error.emit(str(e))


class ConversionWorker(QThread):
    """Worker thread for file conversion"""

    progress = Signal(int, int, str)  # current, total, filename
    file_done = Signal(str, bool, str)  # filepath, success, error message
    finished = Signal(int)  # number of successfully converted files

    def __init__(
        self,
        files_to_convert: List[Tuple[str, int]],
        output_folder: str,
        use_zarr: bool,
        file_loader_func,
    ):
        super().__init__()
        self.files_to_convert = files_to_convert
        self.output_folder = output_folder
        self.use_zarr = use_zarr
        self.get_file_loader = file_loader_func
        self.running = True

    def run(self):
        success_count = 0
        for i, (filepath, series_index) in enumerate(self.files_to_convert):
            if not self.running:
                break

            # Update progress
            self.progress.emit(
                i + 1, len(self.files_to_convert), Path(filepath).name
            )

            try:
                # Get loader
                loader = self.get_file_loader(filepath)
                if not loader:
                    self.file_done.emit(
                        filepath, False, "Unsupported file format"
                    )
                    continue

                # Load series - this is the critical part that must succeed
                try:
                    image_data = loader.load_series(filepath, series_index)
                except (ValueError, FileNotFoundError) as e:
                    self.file_done.emit(
                        filepath, False, f"Failed to load image: {str(e)}"
                    )
                    continue

                # Try to extract metadata - but don't fail if this doesn't work
                metadata = None
                try:
                    metadata = (
                        loader.get_metadata(filepath, series_index) or {}
                    )
                    print(f"Extracted metadata keys: {list(metadata.keys())}")
                except (ValueError, FileNotFoundError) as e:
                    print(f"Warning: Failed to extract metadata: {str(e)}")
                    metadata = {}

                # Generate output filename
                base_name = Path(filepath).stem

                # Determine format based on size and settings
                estimated_size_bytes = (
                    np.prod(image_data.shape) * image_data.itemsize
                )
                use_zarr = self.use_zarr or (
                    estimated_size_bytes > 4 * 1024 * 1024 * 1024
                )

                # Set up the output path
                if use_zarr:
                    output_path = os.path.join(
                        self.output_folder,
                        f"{base_name}_series{series_index}.zarr",
                    )
                else:
                    output_path = os.path.join(
                        self.output_folder,
                        f"{base_name}_series{series_index}.tif",
                    )

                # The crucial part - save the file with separate try/except for each save method
                save_success = False
                error_message = ""

                try:
                    if use_zarr:
                        # First try with metadata
                        try:
                            if metadata:
                                self._save_zarr(
                                    image_data, output_path, metadata
                                )
                            else:
                                self._save_zarr(image_data, output_path)
                        except (ValueError, FileNotFoundError) as e:
                            print(
                                f"Warning: Failed to save with metadata, trying without: {str(e)}"
                            )
                            # If that fails, try without metadata
                            self._save_zarr(image_data, output_path, None)
                    else:
                        # First try with metadata
                        try:
                            if metadata:
                                self._save_tif(
                                    image_data, output_path, metadata
                                )
                            else:
                                self._save_tif(image_data, output_path)
                        except (ValueError, FileNotFoundError) as e:
                            print(
                                f"Warning: Failed to save with metadata, trying without: {str(e)}"
                            )
                            # If that fails, try without metadata
                            self._save_tif(image_data, output_path, None)

                    save_success = True
                except (ValueError, FileNotFoundError) as e:
                    error_message = f"Failed to save file: {str(e)}"
                    print(f"Error in save operation: {error_message}")
                    save_success = False

                if save_success:
                    success_count += 1
                    self.file_done.emit(
                        filepath, True, f"Saved to {output_path}"
                    )
                else:
                    self.file_done.emit(filepath, False, error_message)

            except (ValueError, FileNotFoundError) as e:
                print(f"Unexpected error during conversion: {str(e)}")
                self.file_done.emit(
                    filepath, False, f"Unexpected error: {str(e)}"
                )

        self.finished.emit(success_count)

    def stop(self):
        self.running = False

    def _save_tif(
        self, image_data: np.ndarray, output_path: str, metadata: dict = None
    ):
        """Save image data as TIFF with optional metadata"""
        try:
            # Basic save without metadata
            if metadata is None:
                tifffile.imwrite(output_path, image_data, compression="zstd")
                return

            # Always preserve resolution if it exists
            resolution = None
            if "resolution" in metadata:
                resolution = tuple(float(r) for r in metadata["resolution"])

            axes = metadata.get("axes", "")

            # Handle different dimension cases appropriately
            if len(image_data.shape) > 2 and any(ax in axes for ax in "ZC"):
                # Hyperstack case (3D+ with channels or z-slices)
                imagej_order = "TZCYX"

                if axes != imagej_order:
                    print(
                        f"Original axes: {axes}, Target order: {imagej_order}"
                    )

                    # Filter to valid axes
                    valid_axes = [ax for ax in axes if ax in imagej_order]
                    if len(valid_axes) < len(axes):
                        print(f"Dropping axes: {set(axes)-set(imagej_order)}")
                        source_idx = [
                            i
                            for i, ax in enumerate(axes)
                            if ax in imagej_order
                        ]
                        image_data = np.moveaxis(
                            image_data, source_idx, range(len(valid_axes))
                        )
                        axes = "".join(valid_axes)

                    # Add missing dims
                    for ax in reversed(imagej_order):
                        if ax not in axes:
                            print(f"Adding {ax} dimension")
                            axes = ax + axes
                            image_data = np.expand_dims(image_data, axis=0)

                    # Final reordering
                    source_idx = [axes.index(ax) for ax in imagej_order]
                    image_data = np.moveaxis(
                        image_data, source_idx, range(len(imagej_order))
                    )
                    metadata["axes"] = imagej_order

                tifffile.imwrite(
                    output_path,
                    image_data,
                    metadata=metadata,
                    resolution=resolution,
                    imagej=True,
                    compression="zstd",
                )
            else:
                # 2D case - save without hyperstack metadata but keep resolution
                save_metadata = (
                    {"resolution": metadata["resolution"]}
                    if "resolution" in metadata
                    else None
                )
                tifffile.imwrite(
                    output_path,
                    image_data,
                    metadata=save_metadata,
                    resolution=resolution,
                    imagej=False,
                    compression="zstd",
                )

        except (ValueError, FileNotFoundError) as e:
            print(f"Error: {str(e)}")
            tifffile.imwrite(output_path, image_data)

    def _save_zarr(
        self, image_data: np.ndarray, output_path: str, metadata: dict = None
    ):
        """Save image data as OME-Zarr format with optional metadata."""
        try:
            # Determine optimal chunk size
            target_chunk_size = 1024 * 1024  # 1MB
            item_size = image_data.itemsize
            chunks = self._calculate_chunks(
                image_data.shape, target_chunk_size, item_size
            )

            # Determine appropriate axes based on dimensions
            ndim = len(image_data.shape)
            default_axes = "TCZYX"
            axes = (
                default_axes[-ndim:]
                if ndim <= 5
                else "".join([f"D{i}" for i in range(ndim)])
            )

            # Create the zarr store
            store = zarr.DirectoryStore(output_path)

            # Set up transformations if possible
            coordinate_transformations = None
            if metadata:
                try:
                    # Extract scale information if present
                    scales = []
                    for _i, ax in enumerate(axes):
                        scale = 1.0  # Default scale

                        # Try to find scale for this axis
                        scale_key = f"scale_{ax.lower()}"
                        if scale_key in metadata:
                            try:
                                scale_value = float(metadata[scale_key])
                                if scale_value > 0:  # Only use valid values
                                    scale = scale_value
                            except (ValueError, TypeError):
                                pass

                        scales.append(scale)

                    # Only create transformations if we have non-default scales
                    if any(s != 1.0 for s in scales):
                        coordinate_transformations = [
                            {"type": "scale", "scale": scales}
                        ]
                except (ValueError, FileNotFoundError) as e:
                    print(
                        f"Warning: Could not process coordinate transformations: {str(e)}"
                    )
                    coordinate_transformations = None

            # Write the image data using the OME-Zarr writer
            write_options = {
                "image": image_data,
                "group": store,
                "axes": axes,
                "chunks": chunks,
                "compression": "zstd",
                "compression_opts": {"level": 3},
            }

            # Add transformations if available
            if coordinate_transformations:
                write_options["coordinate_transformations"] = (
                    coordinate_transformations
                )

            write_image(**write_options)
            print(f"Saved OME-Zarr image data: {output_path}")

            # Try to add metadata
            if metadata:
                try:
                    # Access the root group
                    root = zarr.group(store)

                    # Add OMERO metadata
                    if "omero" not in root:
                        root.create_group("omero")
                    omero_metadata = root["omero"]
                    omero_metadata.attrs["version"] = "0.4"

                    # Add original metadata in a separate group
                    if "original_metadata" not in root:
                        metadata_group = root.create_group("original_metadata")
                    else:
                        metadata_group = root["original_metadata"]

                    # Add metadata as attributes, safely converting types
                    for key, value in metadata.items():
                        try:
                            # Try to store directly if it's a simple type
                            if isinstance(
                                value, (str, int, float, bool, type(None))
                            ):
                                metadata_group.attrs[key] = value
                            else:
                                # Otherwise convert to string
                                metadata_group.attrs[key] = str(value)
                        except (ValueError, TypeError) as e:
                            print(
                                f"Warning: Could not store metadata key '{key}': {str(e)}"
                            )

                    print(f"Added metadata to OME-Zarr file: {output_path}")
                except (ValueError, FileNotFoundError) as e:
                    print(f"Warning: Could not add metadata to Zarr: {str(e)}")

            return output_path
        except (ValueError, FileNotFoundError) as e:
            print(f"Error in _save_zarr: {str(e)}")
            # For zarr, we don't have a simpler fallback method, so re-raise
            raise

    def _calculate_chunks(self, shape, target_size, item_size):
        """Calculate appropriate chunk sizes for zarr storage"""
        try:
            total_elements = np.prod(shape)
            elements_per_chunk = target_size // item_size

            chunk_shape = list(shape)
            for i in reversed(range(len(chunk_shape))):
                # Guard against division by zero
                if total_elements > 0 and chunk_shape[i] > 0:
                    chunk_shape[i] = max(
                        1,
                        int(
                            elements_per_chunk
                            / (total_elements / chunk_shape[i])
                        ),
                    )
                    break

            # Ensure chunks aren't larger than dimensions
            for i in range(len(chunk_shape)):
                chunk_shape[i] = min(chunk_shape[i], shape[i])

            return tuple(chunk_shape)
        except (ValueError, FileNotFoundError) as e:
            print(f"Warning: Error calculating chunks: {str(e)}")
            # Return a default chunk size that's safe
            return tuple(min(512, d) for d in shape)


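# --- Illustrative sketch, not part of the released file: the worker above
# falls back to OME-Zarr whenever the in-memory size of a series exceeds
# 4 GiB, regardless of the TIF setting. For a hypothetical uint16 stack of
# shape (T, Z, C, Y, X) = (10, 50, 2, 2048, 2048):
_example_shape = (10, 50, 2, 2048, 2048)
_example_itemsize = np.dtype(np.uint16).itemsize  # 2 bytes per element
_example_size = np.prod(_example_shape) * _example_itemsize  # ~8.4e9 bytes
_would_use_zarr = _example_size > 4 * 1024 * 1024 * 1024  # True

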
class MicroscopyImageConverterWidget(QWidget):
    """Main widget for microscopy image conversion to TIF/ZARR"""

    def __init__(self, viewer: napari.Viewer):
        super().__init__()
        self.viewer = viewer

        # Register format loaders
        self.loaders = [
            LIFLoader,
            ND2Loader,
            TIFFSlideLoader,
            CZILoader,
            AcquiferLoader,
        ]

        # Selected series for conversion
        self.selected_series = {}  # {filepath: series_index}

        # Track files that should export all series
        self.export_all_series = {}  # {filepath: boolean}

        # Working threads
        self.scan_worker = None
        self.conversion_worker = None

        # Create layout
        main_layout = QVBoxLayout()
        self.setLayout(main_layout)

        # File selection widgets
        folder_layout = QHBoxLayout()
        folder_label = QLabel("Input Folder:")
        self.folder_edit = QLineEdit()
        browse_button = QPushButton("Browse...")
        browse_button.clicked.connect(self.browse_folder)

        folder_layout.addWidget(folder_label)
        folder_layout.addWidget(self.folder_edit)
        folder_layout.addWidget(browse_button)
        main_layout.addLayout(folder_layout)

        # File filter widgets
        filter_layout = QHBoxLayout()
        filter_label = QLabel("File Filter:")
        self.filter_edit = QLineEdit()
        self.filter_edit.setPlaceholderText(
            ".lif, .nd2, .ndpi, .czi, acquifer (comma separated)"
        )
        self.filter_edit.setText(".lif,.nd2,.ndpi,.czi, acquifer")
        scan_button = QPushButton("Scan Folder")
        scan_button.clicked.connect(self.scan_folder)

        filter_layout.addWidget(filter_label)
        filter_layout.addWidget(self.filter_edit)
        filter_layout.addWidget(scan_button)
        main_layout.addLayout(filter_layout)

        # Progress bar for scanning
        self.scan_progress = QProgressBar()
        self.scan_progress.setVisible(False)
        main_layout.addWidget(self.scan_progress)

        # Files and series tables
        tables_layout = QHBoxLayout()

        # Files table
        self.files_table = SeriesTableWidget(viewer)
        tables_layout.addWidget(self.files_table)

        # Series details widget
        self.series_widget = SeriesDetailWidget(self, viewer)
        tables_layout.addWidget(self.series_widget)

        main_layout.addLayout(tables_layout)

        # Conversion options
        options_layout = QVBoxLayout()

        # Output format selection
        format_layout = QHBoxLayout()
        format_label = QLabel("Output Format:")
        self.tif_radio = QCheckBox("TIF (< 4GB)")
        self.tif_radio.setChecked(True)
        self.zarr_radio = QCheckBox("ZARR (> 4GB)")

        # Make checkboxes mutually exclusive like radio buttons
        self.tif_radio.toggled.connect(
            lambda checked: (
                self.zarr_radio.setChecked(not checked) if checked else None
            )
        )
        self.zarr_radio.toggled.connect(
            lambda checked: (
                self.tif_radio.setChecked(not checked) if checked else None
            )
        )

        format_layout.addWidget(format_label)
        format_layout.addWidget(self.tif_radio)
        format_layout.addWidget(self.zarr_radio)
        options_layout.addLayout(format_layout)

        # Output folder selection
        output_layout = QHBoxLayout()
        output_label = QLabel("Output Folder:")
        self.output_edit = QLineEdit()
        output_browse = QPushButton("Browse...")
        output_browse.clicked.connect(self.browse_output)

        output_layout.addWidget(output_label)
        output_layout.addWidget(self.output_edit)
        output_layout.addWidget(output_browse)
        options_layout.addLayout(output_layout)

        main_layout.addLayout(options_layout)

        # Conversion progress bar
        self.conversion_progress = QProgressBar()
        self.conversion_progress.setVisible(False)
        main_layout.addWidget(self.conversion_progress)

        # Conversion and cancel buttons
        button_layout = QHBoxLayout()
        convert_button = QPushButton("Convert Selected Files")
        convert_button.clicked.connect(self.convert_files)
        self.cancel_button = QPushButton("Cancel")
        self.cancel_button.clicked.connect(self.cancel_operation)
        self.cancel_button.setVisible(False)

        button_layout.addWidget(convert_button)
        button_layout.addWidget(self.cancel_button)
        main_layout.addLayout(button_layout)

        # Status label
        self.status_label = QLabel("")
        main_layout.addWidget(self.status_label)

    def cancel_operation(self):
        """Cancel current operation"""
        if self.scan_worker and self.scan_worker.isRunning():
            self.scan_worker.terminate()
            self.scan_worker = None
            self.status_label.setText("Scanning cancelled")

        if self.conversion_worker and self.conversion_worker.isRunning():
            self.conversion_worker.stop()
            self.status_label.setText("Conversion cancelled")

        self.scan_progress.setVisible(False)
        self.conversion_progress.setVisible(False)
        self.cancel_button.setVisible(False)

    def browse_folder(self):
        """Open a folder browser dialog"""
        folder = QFileDialog.getExistingDirectory(self, "Select Input Folder")
        if folder:
            self.folder_edit.setText(folder)

    def browse_output(self):
        """Open a folder browser dialog for output folder"""
        folder = QFileDialog.getExistingDirectory(self, "Select Output Folder")
        if folder:
            self.output_edit.setText(folder)

    def scan_folder(self):
        """Scan the selected folder for image files"""
        folder = self.folder_edit.text()
        if not folder or not os.path.isdir(folder):
            self.status_label.setText("Please select a valid folder")
            return

        # Get file filters
        filters = [
            f.strip() for f in self.filter_edit.text().split(",") if f.strip()
        ]
        if not filters:
            filters = [".lif", ".nd2", ".ndpi", ".czi"]

        # Clear existing files
        self.files_table.setRowCount(0)
        self.files_table.file_data.clear()

        # Set up and start the worker thread
        self.scan_worker = ScanFolderWorker(folder, filters)
        self.scan_worker.progress.connect(self.update_scan_progress)
        self.scan_worker.finished.connect(self.process_found_files)
        self.scan_worker.error.connect(self.show_error)

        # Show progress bar and start worker
        self.scan_progress.setVisible(True)
        self.scan_progress.setValue(0)
        self.cancel_button.setVisible(True)
        self.status_label.setText("Scanning folder...")
        self.scan_worker.start()

    def update_scan_progress(self, current, total):
        """Update the scan progress bar"""
        if total > 0:
            self.scan_progress.setValue(int(current * 100 / total))

    def process_found_files(self, found_files):
        """Process the list of found files after scanning is complete"""
        # Hide progress bar
        self.scan_progress.setVisible(False)
        self.cancel_button.setVisible(False)

        # Process files
        with concurrent.futures.ThreadPoolExecutor() as executor:
            # Process files in parallel to get series counts
            futures = {}
            for filepath in found_files:
                file_type = self.get_file_type(filepath)
                if file_type:
                    loader = self.get_file_loader(filepath)
                    if loader:
                        future = executor.submit(
                            loader.get_series_count, filepath
                        )
                        futures[future] = (filepath, file_type)

            # Process results as they complete
            file_count = len(found_files)
            processed = 0

            for i, future in enumerate(
                concurrent.futures.as_completed(futures)
            ):
                processed = i + 1
                filepath, file_type = futures[future]

                try:
                    series_count = future.result()
                    # Add file to table
                    self.files_table.add_file(
                        filepath, file_type, series_count
                    )
                except (ValueError, FileNotFoundError) as e:
                    print(f"Error processing {filepath}: {str(e)}")
                    # Add file with error indication
                    self.files_table.add_file(filepath, file_type, -1)

                # Update status periodically
                if processed % 5 == 0 or processed == file_count:
                    self.status_label.setText(
                        f"Processed {processed}/{file_count} files..."
                    )
                    QApplication.processEvents()

        self.status_label.setText(f"Found {len(found_files)} files")

    def show_error(self, error_message):
        """Show error message"""
        self.status_label.setText(f"Error: {error_message}")
        self.scan_progress.setVisible(False)
        self.cancel_button.setVisible(False)
        QMessageBox.critical(self, "Error", error_message)

    def get_file_type(self, filepath: str) -> str:
        """Determine the file type based on extension or directory type"""
        if os.path.isdir(filepath) and AcquiferLoader.can_load(filepath):
            return "Acquifer"
        ext = filepath.lower()
        if ext.endswith(".lif"):
            return "LIF"
        elif ext.endswith(".nd2"):
            return "ND2"
        elif ext.endswith((".ndpi", ".svs")):
            return "Slide"
        elif ext.endswith(".czi"):
            return "CZI"
        return "Unknown"

    def get_file_loader(self, filepath: str) -> Optional[FormatLoader]:
        """Get the appropriate loader for the file type"""
        for loader in self.loaders:
            if loader.can_load(filepath):
                return loader
        return None

    def show_series_details(self, filepath: str):
        """Show details for the series in the selected file"""
        self.series_widget.set_file(filepath)

    def set_selected_series(self, filepath: str, series_index: int):
        """Set the selected series for a file"""
        self.selected_series[filepath] = series_index

    def set_export_all_series(self, filepath: str, export_all: bool):
        """Set whether to export all series for a file"""
        self.export_all_series[filepath] = export_all

        # If exporting all, we still need a default series in selected_series
        # for files that are marked for export all
        if export_all and filepath not in self.selected_series:
            self.selected_series[filepath] = 0

    def load_image(self, filepath: str):
        """Load an image file into the viewer"""
        loader = self.get_file_loader(filepath)
        if not loader:
            self.viewer.status = f"Unsupported file format: {filepath}"
            return

        try:
            # For non-series files, just load the first series
            series_index = 0
            image_data = loader.load_series(filepath, series_index)

            # Clear existing layers and display the image
            self.viewer.layers.clear()
            self.viewer.add_image(image_data, name=f"{Path(filepath).stem}")

            # Update status
            self.viewer.status = f"Loaded {Path(filepath).name}"
        except (ValueError, FileNotFoundError) as e:
            self.viewer.status = f"Error loading image: {str(e)}"
            QMessageBox.warning(
                self, "Error", f"Could not load image: {str(e)}"
            )

    def is_output_folder_valid(self, folder):
        """Check if the output folder is valid and writable"""
        if not folder:
            self.status_label.setText("Please specify an output folder")
            return False

        # Check if folder exists, if not try to create it
        if not os.path.exists(folder):
            try:
                os.makedirs(folder)
            except (FileNotFoundError, PermissionError) as e:
                self.status_label.setText(
                    f"Cannot create output folder: {str(e)}"
                )
                return False

        # Check if folder is writable
        if not os.access(folder, os.W_OK):
            self.status_label.setText("Output folder is not writable")
            return False
|
|
1568
|
+
|
|
1569
|
+
return True
|
|
1570
|
+
|
|
1571
|
+
def convert_files(self):
|
|
1572
|
+
"""Convert selected files to TIF or ZARR"""
|
|
1573
|
+
# Check if any files are selected
|
|
1574
|
+
if not self.selected_series:
|
|
1575
|
+
self.status_label.setText("No files selected for conversion")
|
|
1576
|
+
return
|
|
1577
|
+
|
|
1578
|
+
# Check output folder
|
|
1579
|
+
output_folder = self.output_edit.text()
|
|
1580
|
+
if not output_folder:
|
|
1581
|
+
output_folder = os.path.join(self.folder_edit.text(), "converted")
|
|
1582
|
+
|
|
1583
|
+
# Validate output folder
|
|
1584
|
+
if not self.is_output_folder_valid(output_folder):
|
|
1585
|
+
return
|
|
1586
|
+
|
|
1587
|
+
# Create files to convert list
|
|
1588
|
+
files_to_convert = []
|
|
1589
|
+
|
|
1590
|
+
for filepath, series_index in self.selected_series.items():
|
|
1591
|
+
# Check if we should export all series for this file
|
|
1592
|
+
if self.export_all_series.get(filepath, False):
|
|
1593
|
+
# Get the number of series for this file
|
|
1594
|
+
loader = self.get_file_loader(filepath)
|
|
1595
|
+
if loader:
|
|
1596
|
+
try:
|
|
1597
|
+
series_count = loader.get_series_count(filepath)
|
|
1598
|
+
# Add all series for this file
|
|
1599
|
+
for i in range(series_count):
|
|
1600
|
+
files_to_convert.append((filepath, i))
|
|
1601
|
+
except (ValueError, FileNotFoundError) as e:
|
|
1602
|
+
self.status_label.setText(
|
|
1603
|
+
f"Error getting series count: {str(e)}"
|
|
1604
|
+
)
|
|
1605
|
+
QMessageBox.warning(
|
|
1606
|
+
self,
|
|
1607
|
+
"Error",
|
|
1608
|
+
f"Could not get series count for {Path(filepath).name}: {str(e)}",
|
|
1609
|
+
)
|
|
1610
|
+
else:
|
|
1611
|
+
# Just add the selected series
|
|
1612
|
+
files_to_convert.append((filepath, series_index))
|
|
1613
|
+
|
|
1614
|
+
if not files_to_convert:
|
|
1615
|
+
self.status_label.setText("No valid files to convert")
|
|
1616
|
+
return
|
|
1617
|
+
|
|
1618
|
+
# Set up and start the conversion worker thread
|
|
1619
|
+
self.conversion_worker = ConversionWorker(
|
|
1620
|
+
files_to_convert=files_to_convert,
|
|
1621
|
+
output_folder=output_folder,
|
|
1622
|
+
use_zarr=self.zarr_radio.isChecked(),
|
|
1623
|
+
file_loader_func=self.get_file_loader,
|
|
1624
|
+
)
|
|
1625
|
+
|
|
1626
|
+
# Connect signals
|
|
1627
|
+
self.conversion_worker.progress.connect(
|
|
1628
|
+
self.update_conversion_progress
|
|
1629
|
+
)
|
|
1630
|
+
self.conversion_worker.file_done.connect(
|
|
1631
|
+
self.handle_file_conversion_result
|
|
1632
|
+
)
|
|
1633
|
+
self.conversion_worker.finished.connect(self.conversion_completed)
|
|
1634
|
+
|
|
1635
|
+
# Show progress bar and start worker
|
|
1636
|
+
self.conversion_progress.setVisible(True)
|
|
1637
|
+
self.conversion_progress.setValue(0)
|
|
1638
|
+
self.cancel_button.setVisible(True)
|
|
1639
|
+
self.status_label.setText(
|
|
1640
|
+
f"Starting conversion of {len(files_to_convert)} files/series..."
|
|
1641
|
+
)
|
|
1642
|
+
|
|
1643
|
+
# Start conversion
|
|
1644
|
+
self.conversion_worker.start()
|
|
1645
|
+
|
|
1646
|
+
def update_conversion_progress(self, current, total, filename):
|
|
1647
|
+
"""Update conversion progress bar and status"""
|
|
1648
|
+
if total > 0:
|
|
1649
|
+
self.conversion_progress.setValue(int(current * 100 / total))
|
|
1650
|
+
self.status_label.setText(
|
|
1651
|
+
f"Converting {filename} ({current}/{total})..."
|
|
1652
|
+
)
|
|
1653
|
+
|
|
1654
|
+
def handle_file_conversion_result(self, filepath, success, message):
|
|
1655
|
+
"""Handle result of a single file conversion"""
|
|
1656
|
+
filename = Path(filepath).name
|
|
1657
|
+
if success:
|
|
1658
|
+
print(f"Successfully converted: {filename} - {message}")
|
|
1659
|
+
else:
|
|
1660
|
+
print(f"Failed to convert: {filename} - {message}")
|
|
1661
|
+
QMessageBox.warning(
|
|
1662
|
+
self,
|
|
1663
|
+
"Conversion Warning",
|
|
1664
|
+
f"Error converting {filename}: {message}",
|
|
1665
|
+
)
|
|
1666
|
+
|
|
1667
|
+
def conversion_completed(self, success_count):
|
|
1668
|
+
"""Handle completion of all conversions"""
|
|
1669
|
+
self.conversion_progress.setVisible(False)
|
|
1670
|
+
self.cancel_button.setVisible(False)
|
|
1671
|
+
|
|
1672
|
+
output_folder = self.output_edit.text()
|
|
1673
|
+
if not output_folder:
|
|
1674
|
+
output_folder = os.path.join(self.folder_edit.text(), "converted")
|
|
1675
|
+
if success_count > 0:
|
|
1676
|
+
self.status_label.setText(
|
|
1677
|
+
f"Successfully converted {success_count} files to {output_folder}"
|
|
1678
|
+
)
|
|
1679
|
+
else:
|
|
1680
|
+
self.status_label.setText("No files were converted")
|
|
1681
|
+
|
|
1682
|
+
|
|
1683
|
+
# Create a MagicGUI widget that creates and returns the converter widget
|
|
1684
|
+
@magicgui(
|
|
1685
|
+
call_button="Start Microscopy Image Converter",
|
|
1686
|
+
layout="vertical",
|
|
1687
|
+
)
|
|
1688
|
+
def microscopy_converter(viewer: napari.Viewer):
|
|
1689
|
+
"""
|
|
1690
|
+
Start the microscopy image converter tool
|
|
1691
|
+
"""
|
|
1692
|
+
# Create the converter widget
|
|
1693
|
+
converter_widget = MicroscopyImageConverterWidget(viewer)
|
|
1694
|
+
|
|
1695
|
+
# Add to viewer
|
|
1696
|
+
viewer.window.add_dock_widget(
|
|
1697
|
+
converter_widget, name="Microscopy Image Converter", area="right"
|
|
1698
|
+
)
|
|
1699
|
+
|
|
1700
|
+
return converter_widget
|
|
1701
|
+
|
|
1702
|
+
|
|
1703
|
+
# This is what napari calls to get the widget
|
|
1704
|
+
def napari_experimental_provide_dock_widget():
|
|
1705
|
+
"""Provide the converter widget to Napari"""
|
|
1706
|
+
return microscopy_converter
|
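For orientation, a minimal usage sketch follows (an editor's illustration, not part of the packaged diff). It assumes napari-tmidas 0.1.5 is installed so that MicroscopyImageConverterWidget from napari_tmidas/_file_conversion.py is importable, and it launches the converter as a dock widget in a standalone napari session, mirroring what microscopy_converter() does when invoked from within napari:

import napari

from napari_tmidas._file_conversion import MicroscopyImageConverterWidget

# Create a viewer and dock the converter widget on the right-hand side.
viewer = napari.Viewer()
converter = MicroscopyImageConverterWidget(viewer)
viewer.window.add_dock_widget(
    converter, name="Microscopy Image Converter", area="right"
)

napari.run()  # start the Qt event loop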