napari-tmidas 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1175 @@
+ """
+ ROI Colocalization Analysis for Napari
+ -------------------------------------
+ This module provides a GUI for analyzing colocalization between ROIs in multiple channel label images.
+ It can process images with 2 or 3 channels and generate statistics about their overlap.
+
+ The colocalization analysis counts how many labels from one channel overlap with regions in another channel,
+ and can optionally calculate sizes of these overlapping regions.
+ """
+
+ import concurrent.futures
+
+ # contextlib is used to suppress exceptions
+ import contextlib
+ import csv
+ import os
+ from collections import defaultdict
+ from difflib import SequenceMatcher
+
+ import numpy as np
+ import tifffile
+ from magicgui import magic_factory
+ from napari.viewer import Viewer
+ from qtpy.QtCore import Qt, QThread, Signal
+ from qtpy.QtWidgets import (
+     QCheckBox,
+     QFileDialog,
+     QFormLayout,
+     QHBoxLayout,
+     QHeaderView,
+     QLabel,
+     QLineEdit,
+     QProgressBar,
+     QPushButton,
+     QSpinBox,
+     QTableWidget,
+     QTableWidgetItem,
+     QVBoxLayout,
+     QWidget,
+ )
+ from skimage import measure
+
+
+ def longest_common_substring(s1, s2):
+     """Finds the longest common substring between two strings."""
+     matcher = SequenceMatcher(None, s1, s2)
+     match = matcher.find_longest_match(0, len(s1), 0, len(s2))
+     substring = s1[match.a : match.a + match.size]
+     print(f"Longest common substring between '{s1}' and '{s2}': '{substring}'")
+     return substring
+
+
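# Not part of the released file — a quick check of the helper above with two
# hypothetical filenames. SequenceMatcher returns the longest common block and,
# when two blocks tie, the one that starts earliest in the first string:
stem = longest_common_substring(
    "sample01_CH1_labels.tif", "sample01_CH2_labels.tif"
)
assert stem == "sample01_CH"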
+ def group_files_by_common_substring(file_lists, channels):
+     """
+     Groups files across channels based on the longest common substring in their filenames.
+
+     Args:
+         file_lists (dict): A dictionary where keys are channel names and values are lists of file paths.
+         channels (list): A list of channel names corresponding to the keys in file_lists.
+
+     Returns:
+         dict: A dictionary where keys are common substrings (without suffixes) and values are lists of file paths grouped by substring.
+     """
+     # Extract the base filenames for each channel
+     base_files = {
+         channel: [os.path.basename(file) for file in file_lists[channel]]
+         for channel in channels
+     }
+
+     # Create a dictionary to store groups
+     groups = defaultdict(lambda: {channel: None for channel in channels})
+
+     # Iterate over all files in the first channel
+     for file1 in base_files[channels[0]]:
+         # Start with the first file as the "common substring"
+         common_substring = file1
+
+         # Iterate over the other channels to find matching files
+         matched_files = {channels[0]: file1}
+         for channel in channels[1:]:
+             best_match = None
+             best_common = ""
+
+             # Compare the current common substring with files in the current channel
+             for file2 in base_files[channel]:
+                 current_common = longest_common_substring(
+                     common_substring, file2
+                 )
+                 if len(current_common) > len(best_common):
+                     best_match = file2
+                     best_common = current_common
+
+             # If a match is found, update the common substring and store the match
+             if best_match:
+                 common_substring = best_common
+                 matched_files[channel] = best_match
+             else:
+                 # If no match is found, skip this file
+                 break
+
+         # If matches were found for all channels, add them to the group
+         if len(matched_files) == len(channels):
+             # Strip suffixes from the common substring
+             stripped_common_substring = common_substring.rsplit("_", 1)[0]
+             groups[stripped_common_substring] = {
+                 channel: file_lists[channel][
+                     base_files[channel].index(matched_files[channel])
+                 ]
+                 for channel in channels
+             }
+
+     # Filter out incomplete groups (e.g., missing files for required channels)
+     valid_groups = {
+         key: list(group.values())
+         for key, group in groups.items()
+         if all(group[channel] for channel in channels)
+     }
+
+     return valid_groups
+
+
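# Not part of the released file — a sketch of the grouping with hypothetical
# two-channel file lists (all paths are made up):
files = {
    "CH1": [
        "/data/ch1/experimentA_s01_CH1_labels.tif",
        "/data/ch1/experimentA_s02_CH1_labels.tif",
    ],
    "CH2": [
        "/data/ch2/experimentA_s01_CH2_labels.tif",
        "/data/ch2/experimentA_s02_CH2_labels.tif",
    ],
}
groups = group_files_by_common_substring(files, ["CH1", "CH2"])
# The shared stem "experimentA_s01_CH" is stripped at its last underscore, so
# the keys become "experimentA_s01" and "experimentA_s02", each mapping to the
# matched [CH1 path, CH2 path] pair.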
+ class ColocalizationWorker(QThread):
+     """Worker thread for processing label images"""
+
+     progress_updated = Signal(int)  # Current progress
+     file_processed = Signal(dict)  # Results for a processed file
+     processing_finished = Signal()  # Signal when all processing is done
+     error_occurred = Signal(str, str)  # filepath, error message
+
+     def __init__(
+         self,
+         file_pairs,
+         channel_names,
+         get_sizes=False,
+         size_method="median",
+         output_folder=None,
+     ):
+         super().__init__()
+         self.file_pairs = file_pairs
+         self.channel_names = channel_names
+         self.get_sizes = get_sizes
+         self.size_method = size_method
+         self.output_folder = output_folder
+         self.stop_requested = False
+         self.thread_count = max(1, (os.cpu_count() or 4) - 1)  # Default value
+
+     def run(self):
+         """Process files in a separate thread"""
+         # Track processed files
+         processed_files_info = []
+         total_files = len(self.file_pairs)
+
+         # Create output folder if it doesn't exist
+         csv_path = None
+         if self.output_folder:
+             try:
+                 # Make sure the directory exists with all parent directories
+                 os.makedirs(self.output_folder, exist_ok=True)
+
+                 # Set up CSV path
+                 channels_str = "_".join(self.channel_names)
+                 csv_path = os.path.join(
+                     self.output_folder, f"{channels_str}_colocalization.csv"
+                 )
+
+                 # Create CSV header
+                 header = [
+                     "Filename",
+                     f"{self.channel_names[0]}_label_id",
+                     f"{self.channel_names[1]}_in_{self.channel_names[0]}_count",
+                 ]
+
+                 if self.get_sizes:
+                     header.extend(
+                         [
+                             f"{self.channel_names[0]}_size",
+                             f"{self.channel_names[1]}_in_{self.channel_names[0]}_size",
+                         ]
+                     )
+
+                 if len(self.channel_names) == 3:
+                     header.extend(
+                         [
+                             f"{self.channel_names[2]}_in_{self.channel_names[1]}_in_{self.channel_names[0]}_count",
+                             f"{self.channel_names[2]}_not_in_{self.channel_names[1]}_but_in_{self.channel_names[0]}_count",
+                         ]
+                     )
+
+                     if self.get_sizes:
+                         header.extend(
+                             [
+                                 f"{self.channel_names[2]}_in_{self.channel_names[1]}_in_{self.channel_names[0]}_size",
+                                 f"{self.channel_names[2]}_not_in_{self.channel_names[1]}_but_in_{self.channel_names[0]}_size",
+                             ]
+                         )
+
+                 # print(f"CSV Header: {header}")
+
+                 # Check whether the CSV file already exists and overwrite it
+                 if os.path.exists(csv_path):
+                     # If it exists, remove it
+                     os.remove(csv_path)
+                     # If removal fails, tell the user to delete it manually:
+                     if os.path.exists(csv_path):
+                         raise Exception(
+                             f"Failed to remove existing CSV file: {csv_path}"
+                         )
+
+                 # Try to create and initialize CSV file
+                 with open(csv_path, "w", newline="") as csvfile:
+                     writer = csv.writer(csvfile)
+                     writer.writerow(header)
+
+             except (Exception, FileNotFoundError) as e:
+                 import traceback
+
+                 traceback.print_exc()
+                 csv_path = None
+                 self.error_occurred.emit(
+                     "CSV file", f"Failed to set up CSV file: {str(e)}"
+                 )
+
+         # Create a thread pool for concurrent processing
+         with concurrent.futures.ThreadPoolExecutor(
+             max_workers=self.thread_count
+         ) as executor:
+             # Submit tasks
+             future_to_file = {
+                 executor.submit(self.process_file_pair, file_pair): file_pair
+                 for file_pair in self.file_pairs
+             }
+
+             # Process as they complete
+             for i, future in enumerate(
+                 concurrent.futures.as_completed(future_to_file)
+             ):
+                 # Check if cancellation was requested
+                 if self.stop_requested:
+                     break
+
+                 file_pair = future_to_file[future]
+                 try:
+                     result = future.result()
+                     if result:
+                         processed_files_info.append(result)
+                         self.file_processed.emit(result)
+
+                         # Write to CSV if output folder is specified and CSV setup worked
+                         if csv_path and "csv_rows" in result:
+                             try:
+                                 with open(
+                                     csv_path, "a", newline=""
+                                 ) as csvfile:
+                                     writer = csv.writer(csvfile)
+                                     writer.writerows(result["csv_rows"])
+                             except (Exception, FileNotFoundError) as e:
+                                 # Log the error but continue processing
+                                 print(f"Error writing to CSV file: {str(e)}")
+
+                 except (Exception, ValueError) as e:
+                     import traceback
+
+                     traceback.print_exc()
+                     self.error_occurred.emit(str(file_pair), str(e))
+
+                 # Update progress
+                 self.progress_updated.emit(int((i + 1) / total_files * 100))
+
+         # Signal that processing is complete
+         self.processing_finished.emit()
+
+     def process_file_pair(self, file_pair):
+         """Process a pair of files containing label images"""
+         try:
+             # Extract file paths
+             filepath_c1 = file_pair[0]  # Channel 1
+             filepath_c2 = file_pair[1]  # Channel 2
+             filepath_c3 = (
+                 file_pair[2] if len(file_pair) > 2 else None
+             )  # Channel 3 (optional)
+
+             # Load label images
+             image_c1 = tifffile.imread(filepath_c1)
+             image_c2 = tifffile.imread(filepath_c2)
+             image_c3 = tifffile.imread(filepath_c3) if filepath_c3 else None
+
+             # Ensure all images have the same shape
+             if image_c1.shape != image_c2.shape:
+                 raise ValueError(
+                     f"Image shapes don't match: {image_c1.shape} vs {image_c2.shape}"
+                 )
+             if filepath_c3 and image_c1.shape != image_c3.shape:
+                 raise ValueError(
+                     f"Image shapes don't match: {image_c1.shape} vs {image_c3.shape}"
+                 )
+
+             # Get base filename for the output
+             base_filename = os.path.basename(filepath_c1)
+
+             # Process colocalization
+             results = self.process_colocalization(
+                 base_filename, image_c1, image_c2, image_c3
+             )
+
+             # Generate output image if needed
+             if self.output_folder:
+                 self.save_output_image(results, file_pair)
+
+             return results
+
+         except (Exception, ValueError) as e:
+             import traceback
+
+             traceback.print_exc()
+             raise ValueError(f"Error processing {file_pair}: {str(e)}") from e
+
+     def process_colocalization(
+         self, filename, image_c1, image_c2, image_c3=None
+     ):
+         """Process colocalization between channels"""
+         # Get unique label IDs in image_c1
+         label_ids = self.get_nonzero_labels(image_c1)
+
+         # Pre-calculate sizes for image_c1 if needed
+         roi_sizes = {}
+         if self.get_sizes:
+             roi_sizes = self.calculate_all_rois_size(image_c1)
+
+         # Process each label
+         csv_rows = []
+         results = []
+
+         for label_id in label_ids:
+             row = self.process_single_roi(
+                 filename, label_id, image_c1, image_c2, image_c3, roi_sizes
+             )
+             csv_rows.append(row)
+
+             # Extract results as dictionary
+             result_dict = {"label_id": label_id, "ch2_in_ch1_count": row[2]}
+
+             idx = 3
+             if self.get_sizes:
+                 result_dict["ch1_size"] = row[idx]
+                 result_dict["ch2_in_ch1_size"] = row[idx + 1]
+                 idx += 2
+
+             if image_c3 is not None:
+                 result_dict["ch3_in_ch2_in_ch1_count"] = row[idx]
+                 result_dict["ch3_not_in_ch2_but_in_ch1_count"] = row[idx + 1]
+                 idx += 2
+
+                 if self.get_sizes:
+                     result_dict["ch3_in_ch2_in_ch1_size"] = row[idx]
+                     result_dict["ch3_not_in_ch2_but_in_ch1_size"] = row[
+                         idx + 1
+                     ]
+
+             results.append(result_dict)
+
+         # Create output
+         output = {
+             "filename": filename,
+             "results": results,
+             "csv_rows": csv_rows,
+         }
+
+         return output
+
+     def process_single_roi(
+         self, filename, label_id, image_c1, image_c2, image_c3, roi_sizes
+     ):
+         """Process a single ROI for colocalization analysis."""
+         # Create masks once
+         mask_roi = image_c1 == label_id
+         mask_c2 = image_c2 != 0
+
+         # Calculate counts
+         c2_in_c1_count = self.count_unique_nonzero(
+             image_c2, mask_roi & mask_c2
+         )
+
+         # Build the result row
+         row = [filename, int(label_id), c2_in_c1_count]
+
+         # Add size information if requested
+         if self.get_sizes:
+             size = roi_sizes.get(int(label_id), 0)
+             c2_in_c1_size = self.calculate_coloc_size(
+                 image_c1, image_c2, label_id
+             )
+             row.extend([size, c2_in_c1_size])
+
+         # Handle third channel if present
+         if image_c3 is not None:
+             mask_c3 = image_c3 != 0
+
+             # Calculate third channel statistics
+             c3_in_c2_in_c1_count = self.count_unique_nonzero(
+                 image_c3, mask_roi & mask_c2 & mask_c3
+             )
+             c3_not_in_c2_but_in_c1_count = self.count_unique_nonzero(
+                 image_c3, mask_roi & ~mask_c2 & mask_c3
+             )
+
+             row.extend([c3_in_c2_in_c1_count, c3_not_in_c2_but_in_c1_count])
+
+             # Add size information for third channel if requested
+             if self.get_sizes:
+                 c3_in_c2_in_c1_size = self.calculate_coloc_size(
+                     image_c1,
+                     image_c2,
+                     label_id,
+                     mask_c2=True,
+                     image_c3=image_c3,
+                 )
+                 c3_not_in_c2_but_in_c1_size = self.calculate_coloc_size(
+                     image_c1,
+                     image_c2,
+                     label_id,
+                     mask_c2=False,
+                     image_c3=image_c3,
+                 )
+                 row.extend([c3_in_c2_in_c1_size, c3_not_in_c2_but_in_c1_size])
+
+         return row
+
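# Not part of the released file — the positional layout of one CSV row built
# above, for a hypothetical 3-channel run with "Calculate Region Sizes" enabled
# (names follow the header written in run() for channels CH1/CH2/CH3):
#     ["experimentA_s01_CH1_labels.tif",  # Filename
#      7,     # CH1_label_id
#      3,     # CH2_in_CH1_count
#      1520,  # CH1_size
#      410,   # CH2_in_CH1_size
#      2,     # CH3_in_CH2_in_CH1_count
#      1,     # CH3_not_in_CH2_but_in_CH1_count
#      96,    # CH3_in_CH2_in_CH1_size
#      30]    # CH3_not_in_CH2_but_in_CH1_size
# All numeric values are invented for illustration.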
+     def save_output_image(self, results, file_pair):
+         """Generate and save visualization of colocalization results"""
+         if not self.output_folder:
+             return
+
+         try:
+             # Load images again to avoid memory issues
+             filepath_c1 = file_pair[0]  # Channel 1
+             image_c1 = tifffile.imread(filepath_c1)
+
+             # Channel 2 is always present in a file pair, so its overlay can be
+             # generated without reloading the image here
+             has_c2 = True
+
+             # The channel 3 overlay is only produced when a third file was matched
+             has_c3 = len(file_pair) > 2
+
+             # Create output filename
+             channels_str = "_".join(self.channel_names)
+             base_name = os.path.splitext(os.path.basename(filepath_c1))[0]
+             output_path = os.path.join(
+                 self.output_folder, f"{base_name}_{channels_str}_coloc.tif"
+             )
+
+             # Create a more informative visualization
+             # Start with the original first channel labels
+             output_image = np.zeros((3,) + image_c1.shape, dtype=np.uint32)
+
+             # First layer: original labels from channel 1
+             output_image[0] = image_c1.copy()
+
+             # Process results to create visualization
+             if "results" in results:
+                 # Second layer: labels that have overlap with channel 2
+                 if has_c2:
+                     ch2_overlap = np.zeros_like(image_c1)
+                     for result in results["results"]:
+                         label_id = result["label_id"]
+                         if result["ch2_in_ch1_count"] > 0:
+                             # This label has overlap with channel 2
+                             mask = image_c1 == label_id
+                             ch2_overlap[mask] = label_id
+                     output_image[1] = ch2_overlap
+
+                 # Third layer: labels that have overlap with channel 3
+                 if has_c3:
+                     ch3_overlap = np.zeros_like(image_c1)
+                     for result in results["results"]:
+                         label_id = result["label_id"]
+                         if (
+                             "ch3_in_ch2_in_ch1_count" in result
+                             and result["ch3_in_ch2_in_ch1_count"] > 0
+                         ):
+                             # This label has overlap with channel 3
+                             mask = image_c1 == label_id
+                             ch3_overlap[mask] = label_id
+                     output_image[2] = ch3_overlap
+
+             # Save the visualization output
+             tifffile.imwrite(output_path, output_image, compression="zlib")
+
+             # Add the output path to the results
+             results["output_path"] = output_path
+
+         except (Exception, FileNotFoundError) as e:
+             print(f"Error saving output image: {str(e)}")
+             import traceback
+
+             traceback.print_exc()
+
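# Not part of the released file — a note on the "*_coloc.tif" stack saved above:
# it holds three label layers with the same shape as the channel-1 image,
#     [0] all channel-1 labels,
#     [1] channel-1 labels overlapping at least one channel-2 object,
#     [2] channel-1 labels overlapping channel 3 via channel 2 (3-channel runs only).
# A quick way to inspect one (path is hypothetical):
#     vis = tifffile.imread("/output/experimentA_s01_CH1_CH2_coloc.tif")
#     assert vis.shape[0] == 3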
+     # Helper functions
+     def get_nonzero_labels(self, image):
+         """Get unique, non-zero labels from an image."""
+         mask = image != 0
+         labels = np.unique(image[mask])
+         return [int(x) for x in labels]
+
+     def count_unique_nonzero(self, array, mask):
+         """Count unique non-zero values in array where mask is True."""
+         unique_vals = np.unique(array[mask])
+         count = len(unique_vals)
+
+         # Remove 0 from count if present
+         if count > 0 and 0 in unique_vals:
+             count -= 1
+
+         return count
+
+     def calculate_all_rois_size(self, image):
+         """Calculate sizes of all ROIs in the given image."""
+         sizes = {}
+         try:
+             # Convert to uint32 to avoid potential overflow issues with regionprops
+             image_int = image.astype(np.uint32)
+             for prop in measure.regionprops(image_int):
+                 label = int(prop.label)
+                 sizes[label] = int(prop.area)
+         except (Exception, ValueError) as e:
+             print(f"Error calculating ROI sizes: {str(e)}")
+         return sizes
+
+     def calculate_coloc_size(
+         self, image_c1, image_c2, label_id, mask_c2=None, image_c3=None
+     ):
+         """Calculate the size of colocalization between channels."""
+         # Create mask for current ROI
+         mask = image_c1 == int(label_id)
+
+         # Handle mask_c2 parameter
+         if mask_c2 is not None:
+             if mask_c2:
+                 # sizes where c2 is present
+                 mask = mask & (image_c2 != 0)
+                 target_image = image_c3 if image_c3 is not None else image_c2
+             else:
+                 # sizes where c2 is NOT present
+                 mask = mask & (image_c2 == 0)
+                 if image_c3 is None:
+                     # If no image_c3, just return count of mask pixels
+                     return np.count_nonzero(mask)
+                 target_image = image_c3
+         else:
+             target_image = image_c2
+
+         # Calculate size of overlap
+         masked_image = target_image * mask
+         size = np.count_nonzero(masked_image)
+
+         return int(size)
+
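# Not part of the released file — the three ways calculate_coloc_size is called
# above, all returning pixel counts:
#     calculate_coloc_size(c1, c2, label)
#         -> c2 pixels inside ROI `label`
#     calculate_coloc_size(c1, c2, label, mask_c2=True, image_c3=c3)
#         -> c3 pixels inside the ROI and inside c2
#     calculate_coloc_size(c1, c2, label, mask_c2=False, image_c3=c3)
#         -> c3 pixels inside the ROI but outside c2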
+     def stop(self):
+         """Request worker to stop processing"""
+         self.stop_requested = True
+
+
+ class ColocalizationResultsWidget(QWidget):
+     """Widget to display colocalization results"""
+
+     def __init__(self, viewer, channel_names):
+         super().__init__()
+         self.viewer = viewer
+         self.channel_names = channel_names
+         self.file_results = {}  # Store results by filename
+
+         # Create layout
+         self.layout = QVBoxLayout()
+         self.setLayout(self.layout)
+
+         # Add information label at top
+         info_label = QLabel(
+             "Click on a result to view it in the viewer. For more detailed results please check the generated CSV file."
+         )
+         info_label.setWordWrap(True)
+         info_label.setStyleSheet("font-style: italic;")
+         self.layout.addWidget(info_label)
+
+         # Create results table
+         self.table = QTableWidget()
+         self.table.setColumnCount(2)  # Just two columns
+         self.table.setHorizontalHeaderLabels(["Identifier", "Coloc Count"])
+         self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
+         self.table.cellClicked.connect(
+             self.on_table_clicked
+         )  # Connect cell click event
+         self.layout.addWidget(self.table)
+
+         # Add explanation for coloc count
+         count_explanation = QLabel(
+             "Coloc Count: Number of objects with colocalization"
+         )
+         count_explanation.setStyleSheet("font-style: italic;")
+         self.layout.addWidget(count_explanation)
+
+     def add_result(self, result):
+         """Add a result to the table."""
+         filename = result["filename"]
+         self.file_results[filename] = result
+
+         # Add to table
+         row = self.table.rowCount()
+         self.table.insertRow(row)
+
+         # Use the common substring as the identifier
+         identifier = result.get("common_substring", filename)
+         id_item = QTableWidgetItem(identifier)
+         id_item.setToolTip(filename)  # Show full filename on hover
+         id_item.setData(Qt.UserRole, filename)  # Store for reference
+         self.table.setItem(row, 0, id_item)
+
+         # Label count for colocalization
+         if "csv_rows" in result and result["csv_rows"]:
+             ch2_in_ch1_counts = [r[2] for r in result["csv_rows"]]
+             total_coloc = sum(1 for c in ch2_in_ch1_counts if c > 0)
+             count_item = QTableWidgetItem(f"{total_coloc} ")
+         else:
+             count_item = QTableWidgetItem("0 ")
+         self.table.setItem(row, 1, count_item)
+
+         # If there's an output file, store it with the row
+         if "output_path" in result:
+             # Store output path as data in all cells
+             for col in range(2):
+                 item = self.table.item(row, col)
+                 if item:
+                     item.setData(Qt.UserRole + 1, result["output_path"])
+
+     def _extract_identifier(self, filename):
+         """
+         Extract the identifier for the given filename.
+
+         This method assumes that the longest common substring (used as the key in
+         `group_files_by_common_substring`) is already available in the results.
+         """
+         # Check if the filename exists in the results
+         if filename in self.file_results:
+             # Use the common substring (key) as the identifier
+             return self.file_results[filename].get(
+                 "common_substring", filename
+             )
+
+         # Fallback to the base filename if no common substring is available
+         return os.path.splitext(os.path.basename(filename))[0]
+
+     def on_table_clicked(self, row, column):
+         """Handle clicking on a table cell"""
+         # Get the filename from the row
+         filename_item = self.table.item(row, 0)
+         if not filename_item:
+             return
+
+         filename = filename_item.data(Qt.UserRole)
+         if filename not in self.file_results:
+             return
+
+         # Get the result object
+         # result = self.file_results[filename]
+
+         # Get output path if available (stored in UserRole+1)
+         item = self.table.item(row, column)
+         output_path = item.data(Qt.UserRole + 1) if item else None
+
+         # Display result visualization
+         if output_path and os.path.exists(output_path):
+             # Clear existing layers
+             self.viewer.layers.clear()
+
+             # Load and display the visualization
+             try:
+                 image = tifffile.imread(output_path)
+                 self.viewer.add_labels(
+                     image,
+                     name=f"Colocalization: {os.path.basename(output_path)}",
+                 )
+                 self.viewer.status = (
+                     f"Loaded visualization for {os.path.basename(filename)}"
+                 )
+             except (Exception, FileNotFoundError) as e:
+                 self.viewer.status = f"Error loading visualization: {str(e)}"
+         else:
+             self.viewer.status = "No visualization available for this result"
+
+
+ class ColocalizationAnalysisWidget(QWidget):
+     """
+     Widget for ROI colocalization analysis
+     """
+
+     def __init__(
+         self, viewer: Viewer, channel_folders=None, channel_patterns=None
+     ):
+         super().__init__()
+         self.viewer = viewer
+         self.channel_folders = channel_folders or []
+         self.channel_patterns = channel_patterns or []
+         self.file_pairs = []  # Will hold matched files for analysis
+         self.file_results = {}  # Store results by filename
+         self.worker = None
+
+         # Ensure default channel names are set
+         self.channel_names = ["CH1", "CH2", "CH3"][
+             : len(self.channel_folders) or 3
+         ]
+
+         # Create main layout
+         layout = QVBoxLayout()
+         self.setLayout(layout)
+
+         # Channel selection section
+         # channels_layout = QFormLayout()
+
+         # Channel 1 (primary/reference channel)
+         self.ch1_label = QLabel("Channel 1 (Reference):")
+         self.ch1_folder = QLineEdit()
+         self.ch1_pattern = QLineEdit()
+         self.ch1_pattern.setPlaceholderText("*_labels.tif")
+         self.ch1_browse = QPushButton("Browse...")
+         self.ch1_browse.clicked.connect(lambda: self.browse_folder(0))
+
+         ch1_layout = QHBoxLayout()
+         ch1_layout.addWidget(self.ch1_label)
+         ch1_layout.addWidget(self.ch1_folder)
+         ch1_layout.addWidget(self.ch1_pattern)
+         ch1_layout.addWidget(self.ch1_browse)
+         layout.addLayout(ch1_layout)
+
+         # Channel 2
+         self.ch2_label = QLabel("Channel 2:")
+         self.ch2_folder = QLineEdit()
+         self.ch2_pattern = QLineEdit()
+         self.ch2_pattern.setPlaceholderText("*_labels.tif")
+         self.ch2_browse = QPushButton("Browse...")
+         self.ch2_browse.clicked.connect(lambda: self.browse_folder(1))
+
+         ch2_layout = QHBoxLayout()
+         ch2_layout.addWidget(self.ch2_label)
+         ch2_layout.addWidget(self.ch2_folder)
+         ch2_layout.addWidget(self.ch2_pattern)
+         ch2_layout.addWidget(self.ch2_browse)
+         layout.addLayout(ch2_layout)
+
+         # Channel 3 (optional)
+         self.ch3_label = QLabel("Channel 3 (Optional):")
+         self.ch3_folder = QLineEdit()
+         self.ch3_pattern = QLineEdit()
+         self.ch3_pattern.setPlaceholderText("*_labels.tif")
+         self.ch3_browse = QPushButton("Browse...")
+         self.ch3_browse.clicked.connect(lambda: self.browse_folder(2))
+
+         ch3_layout = QHBoxLayout()
+         ch3_layout.addWidget(self.ch3_label)
+         ch3_layout.addWidget(self.ch3_folder)
+         ch3_layout.addWidget(self.ch3_pattern)
+         ch3_layout.addWidget(self.ch3_browse)
+         layout.addLayout(ch3_layout)
+
+         # Analysis options
+         options_layout = QFormLayout()
+
+         # Get sizes option
+         self.get_sizes_checkbox = QCheckBox("Calculate Region Sizes")
+         options_layout.addRow(self.get_sizes_checkbox)
+
+         # Size calculation method
+         self.size_method_layout = QHBoxLayout()
+         self.size_method_label = QLabel("Size Calculation Method:")
+         self.size_method_median = QCheckBox("Median")
+         self.size_method_median.setChecked(True)
+         self.size_method_sum = QCheckBox("Sum")
+
+         # Connect to make them mutually exclusive
+         self.size_method_median.toggled.connect(
+             lambda checked: (
+                 self.size_method_sum.setChecked(not checked)
+                 if checked
+                 else None
+             )
+         )
+         self.size_method_sum.toggled.connect(
+             lambda checked: (
+                 self.size_method_median.setChecked(not checked)
+                 if checked
+                 else None
+             )
+         )
+
+         self.size_method_layout.addWidget(self.size_method_label)
+         self.size_method_layout.addWidget(self.size_method_median)
+         self.size_method_layout.addWidget(self.size_method_sum)
+         options_layout.addRow(self.size_method_layout)
+
+         layout.addLayout(options_layout)
+
+         # Output folder selection
+         output_layout = QHBoxLayout()
+         output_label = QLabel("Output Folder:")
+         self.output_folder = QLineEdit()
+         output_browse = QPushButton("Browse...")
+         output_browse.clicked.connect(self.browse_output)
+
+         output_layout.addWidget(output_label)
+         output_layout.addWidget(self.output_folder)
+         output_layout.addWidget(output_browse)
+         layout.addLayout(output_layout)
+
+         # Thread count selector
+         thread_layout = QHBoxLayout()
+         thread_label = QLabel("Number of threads:")
+         thread_layout.addWidget(thread_label)
+
+         self.thread_count = QSpinBox()
+         self.thread_count.setMinimum(1)
+         self.thread_count.setMaximum(os.cpu_count() or 4)
+         self.thread_count.setValue(max(1, (os.cpu_count() or 4) - 1))
+         thread_layout.addWidget(self.thread_count)
+
+         layout.addLayout(thread_layout)
+
+         # Find matching files button
+         find_button = QPushButton("Find Matching Files")
+         find_button.clicked.connect(self.find_matching_files)
+         layout.addWidget(find_button)
+
+         # Match results label
+         self.match_label = QLabel("No files matched yet")
+         layout.addWidget(self.match_label)
+
+         # Progress bar (hidden initially)
+         self.progress_bar = QProgressBar()
+         self.progress_bar.setRange(0, 100)
+         self.progress_bar.setValue(0)
+         self.progress_bar.setVisible(False)
+         layout.addWidget(self.progress_bar)
+
+         # Start/cancel buttons
+         button_layout = QHBoxLayout()
+
+         self.analyze_button = QPushButton("Start Colocalization Analysis")
+         self.analyze_button.clicked.connect(self.start_analysis)
+         self.analyze_button.setEnabled(False)  # Disabled until files are found
+
+         self.cancel_button = QPushButton("Cancel Analysis")
+         self.cancel_button.clicked.connect(self.cancel_analysis)
+         self.cancel_button.setEnabled(False)  # Disabled initially
+
+         button_layout.addWidget(self.analyze_button)
+         button_layout.addWidget(self.cancel_button)
+         layout.addLayout(button_layout)
+
+         # Status label
+         self.status_label = QLabel("")
+         layout.addWidget(self.status_label)
+
+         # Results widget (will be created when needed)
+         self.results_widget = None
+
+         # Fill in values if provided
+         if self.channel_folders:
+             if len(self.channel_folders) > 0:
+                 self.ch1_folder.setText(self.channel_folders[0])
+             if len(self.channel_folders) > 1:
+                 self.ch2_folder.setText(self.channel_folders[1])
+             if len(self.channel_folders) > 2:
+                 self.ch3_folder.setText(self.channel_folders[2])
+
+         if self.channel_patterns:
+             if len(self.channel_patterns) > 0:
+                 self.ch1_pattern.setText(self.channel_patterns[0])
+             if len(self.channel_patterns) > 1:
+                 self.ch2_pattern.setText(self.channel_patterns[1])
+             if len(self.channel_patterns) > 2:
+                 self.ch3_pattern.setText(self.channel_patterns[2])
+
+     def browse_folder(self, channel_index):
+         """Browse for a channel folder"""
+         folder = QFileDialog.getExistingDirectory(
+             self,
+             "Select Channel Folder",
+             os.path.expanduser("~"),
+             QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks,
+         )
+         if folder:
+             if channel_index == 0:
+                 self.ch1_folder.setText(folder)
+             elif channel_index == 1:
+                 self.ch2_folder.setText(folder)
+             elif channel_index == 2:
+                 self.ch3_folder.setText(folder)
+
+     def browse_output(self):
+         """Browse for output folder"""
+         folder = QFileDialog.getExistingDirectory(
+             self,
+             "Select Output Folder",
+             os.path.expanduser("~"),
+             QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks,
+         )
+         if folder:
+             self.output_folder.setText(folder)
+
+     def find_matching_files(self):
+         """Find matching files across channels using the updated grouping function."""
+         # Get channel folders and patterns
+         ch1_folder = self.ch1_folder.text().strip()
+         ch1_pattern = self.ch1_pattern.text().strip() or "*_labels.tif"
+
+         ch2_folder = self.ch2_folder.text().strip()
+         ch2_pattern = self.ch2_pattern.text().strip() or "*_labels.tif"
+
+         ch3_folder = self.ch3_folder.text().strip()
+         ch3_pattern = self.ch3_pattern.text().strip() or "*_labels.tif"
+
+         # Validate required folders
+         if not ch1_folder or not os.path.isdir(ch1_folder):
+             self.status_label.setText(
+                 "Channel 1 folder is required and must exist"
+             )
+             return
+
+         if not ch2_folder or not os.path.isdir(ch2_folder):
+             self.status_label.setText(
+                 "Channel 2 folder is required and must exist"
+             )
+             return
+
+         # Find files in each folder
+         import glob
+
+         ch1_files = sorted(glob.glob(os.path.join(ch1_folder, ch1_pattern)))
+         ch2_files = sorted(glob.glob(os.path.join(ch2_folder, ch2_pattern)))
+
+         # Check if third channel is provided
+         use_ch3 = bool(ch3_folder and os.path.isdir(ch3_folder))
+         if use_ch3:
+             ch3_files = sorted(
+                 glob.glob(os.path.join(ch3_folder, ch3_pattern))
+             )
+         else:
+             ch3_files = []
+
+         # Prepare file lists for grouping
+         file_lists = {
+             "CH1": ch1_files,
+             "CH2": ch2_files,
+         }
+         if use_ch3:
+             file_lists["CH3"] = ch3_files
+
+         # Group files by common substring
+         grouped_files = group_files_by_common_substring(
+             file_lists, list(file_lists.keys())
+         )
+
+         # Convert grouped files into file pairs/triplets and store the common substring
+         self.file_pairs = []
+         for common_substring, files in grouped_files.items():
+             print(f"Group key (common substring): {common_substring}")
+             self.file_pairs.append(tuple(files))
+             for file in files:
+                 # Store the stripped common substring in the results
+                 self.file_results[file] = {
+                     "common_substring": common_substring
+                 }
+                 print(f"Stored {file} with group key: {common_substring}")
+
+         # Update status
+         if self.file_pairs:
+             count = len(self.file_pairs)
+             channels = 3 if use_ch3 else 2
+             self.match_label.setText(
+                 f"Found {count} matching file sets across {channels} channels"
+             )
+             self.analyze_button.setEnabled(True)
+             self.status_label.setText("Ready to analyze")
+         else:
+             self.match_label.setText("No matching files found across channels")
+             self.analyze_button.setEnabled(False)
+             self.status_label.setText("No files to analyze")
+
+     def start_analysis(self):
+         """Start the colocalization analysis"""
+         if not self.file_pairs:
+             self.status_label.setText("No file pairs to analyze")
+             return
+
+         # Get settings
+         get_sizes = self.get_sizes_checkbox.isChecked()
+         size_method = (
+             "median" if self.size_method_median.isChecked() else "sum"
+         )
+         output_folder = self.output_folder.text().strip()
+
+         # Create output folder if it doesn't exist and is specified
+         if output_folder:
+             try:
+                 # Create all necessary directories
+                 os.makedirs(output_folder, exist_ok=True)
+
+                 # Try to create a test file to check write permissions
+                 test_path = os.path.join(output_folder, ".test_write")
+                 try:
+                     with open(test_path, "w") as f:
+                         f.write("test")
+                     os.remove(test_path)  # Clean up after test
+                 except (PermissionError, OSError) as e:
+                     self.status_label.setText(
+                         f"Cannot write to output folder: {str(e)}"
+                     )
+                     return
+
+             except (OSError, PermissionError) as e:
+                 self.status_label.setText(
+                     f"Error creating output folder: {str(e)}"
+                 )
+                 return
+
+         # Update UI
+         self.progress_bar.setValue(0)
+         self.progress_bar.setVisible(True)
+         self.analyze_button.setEnabled(False)
+         self.cancel_button.setEnabled(True)
+
+         # Create worker thread
+         self.worker = ColocalizationWorker(
+             self.file_pairs,
+             self.channel_names,
+             get_sizes,
+             size_method,
+             output_folder,
+         )
+
+         # Set thread count
+         self.worker.thread_count = self.thread_count.value()
+
+         # Connect signals
+         self.worker.progress_updated.connect(self.update_progress)
+         self.worker.file_processed.connect(self.file_processed)
+         self.worker.processing_finished.connect(self.processing_finished)
+         self.worker.error_occurred.connect(self.processing_error)
+
+         # Start processing
+         self.worker.start()
+
+         # Update status
+         self.status_label.setText(
+             f"Processing {len(self.file_pairs)} file pairs with {self.thread_count.value()} threads"
+         )
+
+         # Create results widget if needed
+         if not self.results_widget:
+             self.results_widget = ColocalizationResultsWidget(
+                 self.viewer, self.channel_names
+             )
+             self.viewer.window.add_dock_widget(
+                 self.results_widget,
+                 name="Colocalization Results",
+                 area="right",
+             )
+
+     def update_progress(self, value):
+         """Update the progress bar"""
+         self.progress_bar.setValue(value)
+
+     def file_processed(self, result):
+         """Handle a processed file result"""
+         if self.results_widget:
+             self.results_widget.add_result(result)
+
+     def processing_finished(self):
+         """Handle processing completion"""
+         # Update UI
+         self.progress_bar.setValue(100)
+         self.analyze_button.setEnabled(True)
+         self.cancel_button.setEnabled(False)
+
+         # Clean up worker - safely
+         if self.worker:
+             if self.worker.isRunning():
+                 # This shouldn't happen, but just in case
+                 self.worker.stop()
+                 self.worker.wait()
+             self.worker = None
+
+         # Update status
+         self.status_label.setText("Analysis complete")
+
+         # Hide progress bar after a delay - use QTimer instead of threading
+         from qtpy.QtCore import QTimer
+
+         QTimer.singleShot(2000, lambda: self.progress_bar.setVisible(False))
+
+     def processing_error(self, filepath, error_msg):
+         """Handle processing errors"""
+         print(f"Error processing {filepath}: {error_msg}")
+         self.status_label.setText(f"Error: {error_msg}")
+
+     def cancel_analysis(self):
+         """Cancel the current processing operation"""
+         if self.worker and self.worker.isRunning():
+             self.worker.stop()
+             # Wait for the worker to finish with timeout
+             if not self.worker.wait(1000):  # Wait up to 1 second
+                 # Force termination if it doesn't respond
+                 self.worker.terminate()
+                 self.worker.wait()
+
+             # Clear the worker reference
+             self.worker = None
+
+         # Update UI
+         self.analyze_button.setEnabled(True)
+         self.cancel_button.setEnabled(False)
+         self.status_label.setText("Analysis cancelled")
+         self.progress_bar.setVisible(False)
+
+
+ # This is the key change: use magic_factory to create a widget that Napari can understand
+ @magic_factory(call_button="Start ROI Colocalization Analysis")
+ def roi_colocalization_analyzer(viewer: Viewer):
+     """
+     Analyze colocalization between ROIs in multiple channel label images.
+
+     This tool helps find and measure overlaps between labeled regions across
+     different channels, generating statistics such as overlap counts and sizes.
+     """
+     # Create the analysis widget
+     analysis_widget = ColocalizationAnalysisWidget(viewer)
+
+     # Add to viewer
+     viewer.window.add_dock_widget(
+         analysis_widget, name="ROI Colocalization Analysis", area="right"
+     )
+
+     # Instead of using destroyed signal which doesn't exist,
+     # we can use the removed event from napari's dock widget
+     def _on_widget_removed(event):
+         if hasattr(analysis_widget, "closeEvent"):
+             # Call closeEvent to properly clean up
+             analysis_widget.closeEvent(None)
+
+     # Make sure we clean up on our own closeEvent as well
+     original_close = getattr(analysis_widget, "closeEvent", lambda x: None)
+
+     def enhanced_close_event(event):
+         # Make sure worker threads are stopped
+         if (
+             hasattr(analysis_widget, "worker")
+             and analysis_widget.worker
+             and analysis_widget.worker.isRunning()
+         ):
+             analysis_widget.worker.stop()
+             if not analysis_widget.worker.wait(1000):
+                 analysis_widget.worker.terminate()
+                 analysis_widget.worker.wait()
+             analysis_widget.worker = None
+
+         # Call original closeEvent
+         original_close(event)
+
+     # Replace the closeEvent
+     analysis_widget.closeEvent = enhanced_close_event
+
+     return analysis_widget
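# Not part of the released file — a rough sketch of driving the factory above
# from a script, assuming napari and the plugin's dependencies are installed
# (when the plugin is installed, napari typically exposes the widget through
# its Plugins menu instead):
#
#     import napari
#
#     viewer = napari.Viewer()
#     widget = roi_colocalization_analyzer()  # magic_factory returns a FunctionGui
#     widget(viewer=viewer)                   # builds and docks the analysis widget
#     napari.run()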