napari-tmidas 0.2.2__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. napari_tmidas/__init__.py +35 -5
  2. napari_tmidas/_crop_anything.py +1520 -609
  3. napari_tmidas/_env_manager.py +76 -0
  4. napari_tmidas/_file_conversion.py +1646 -1131
  5. napari_tmidas/_file_selector.py +1455 -216
  6. napari_tmidas/_label_inspection.py +83 -8
  7. napari_tmidas/_processing_worker.py +309 -0
  8. napari_tmidas/_reader.py +6 -10
  9. napari_tmidas/_registry.py +2 -2
  10. napari_tmidas/_roi_colocalization.py +1221 -84
  11. napari_tmidas/_tests/test_crop_anything.py +123 -0
  12. napari_tmidas/_tests/test_env_manager.py +89 -0
  13. napari_tmidas/_tests/test_grid_view_overlay.py +193 -0
  14. napari_tmidas/_tests/test_init.py +98 -0
  15. napari_tmidas/_tests/test_intensity_label_filter.py +222 -0
  16. napari_tmidas/_tests/test_label_inspection.py +86 -0
  17. napari_tmidas/_tests/test_processing_basic.py +500 -0
  18. napari_tmidas/_tests/test_processing_worker.py +142 -0
  19. napari_tmidas/_tests/test_regionprops_analysis.py +547 -0
  20. napari_tmidas/_tests/test_registry.py +70 -2
  21. napari_tmidas/_tests/test_scipy_filters.py +168 -0
  22. napari_tmidas/_tests/test_skimage_filters.py +259 -0
  23. napari_tmidas/_tests/test_split_channels.py +217 -0
  24. napari_tmidas/_tests/test_spotiflow.py +87 -0
  25. napari_tmidas/_tests/test_tyx_display_fix.py +142 -0
  26. napari_tmidas/_tests/test_ui_utils.py +68 -0
  27. napari_tmidas/_tests/test_widget.py +30 -0
  28. napari_tmidas/_tests/test_windows_basic.py +66 -0
  29. napari_tmidas/_ui_utils.py +57 -0
  30. napari_tmidas/_version.py +16 -3
  31. napari_tmidas/_widget.py +41 -4
  32. napari_tmidas/processing_functions/basic.py +557 -20
  33. napari_tmidas/processing_functions/careamics_env_manager.py +72 -99
  34. napari_tmidas/processing_functions/cellpose_env_manager.py +415 -112
  35. napari_tmidas/processing_functions/cellpose_segmentation.py +132 -191
  36. napari_tmidas/processing_functions/colocalization.py +513 -56
  37. napari_tmidas/processing_functions/grid_view_overlay.py +703 -0
  38. napari_tmidas/processing_functions/intensity_label_filter.py +422 -0
  39. napari_tmidas/processing_functions/regionprops_analysis.py +1280 -0
  40. napari_tmidas/processing_functions/sam2_env_manager.py +53 -69
  41. napari_tmidas/processing_functions/sam2_mp4.py +274 -195
  42. napari_tmidas/processing_functions/scipy_filters.py +403 -8
  43. napari_tmidas/processing_functions/skimage_filters.py +424 -212
  44. napari_tmidas/processing_functions/spotiflow_detection.py +949 -0
  45. napari_tmidas/processing_functions/spotiflow_env_manager.py +591 -0
  46. napari_tmidas/processing_functions/timepoint_merger.py +334 -86
  47. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/METADATA +71 -30
  48. napari_tmidas-0.2.5.dist-info/RECORD +63 -0
  49. napari_tmidas/_tests/__init__.py +0 -0
  50. napari_tmidas-0.2.2.dist-info/RECORD +0 -40
  51. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/WHEEL +0 -0
  52. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/entry_points.txt +0 -0
  53. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/licenses/LICENSE +0 -0
  54. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/top_level.txt +0 -0
napari_tmidas/processing_functions/regionprops_analysis.py
@@ -0,0 +1,1280 @@
1
+ # processing_functions/regionprops_analysis.py
2
+ # ruff: noqa: SIM105, BLE001
3
+ """
4
+ Processing function for calculating region properties of label images.
5
+
6
+ This module provides functionality to extract region properties (regionprops) from
7
+ label images in a folder and save them to a single CSV file. The function is
8
+ dimension-agnostic and treats dimensions like T (time) or C (channel) as grouping
9
+ variables, adding corresponding columns to the output.
10
+ """
11
+
12
+ import inspect
13
+ import os
14
+ from pathlib import Path
15
+ from typing import Dict, List, Optional, Tuple
16
+
17
+ import numpy as np
18
+ from skimage import measure
19
+
20
+ from napari_tmidas._registry import BatchProcessingRegistry
21
+
22
+ # Optional dependency: pandas (needed only for the CSV writers)
23
+ try: # noqa: SIM105
24
+ import pandas as pd
25
+
26
+ _HAS_PANDAS = True
27
+ except ImportError:
28
+ pd = None
29
+ _HAS_PANDAS = False
30
+
31
+ # Global dict to track CSV files created per folder (for header management)
32
+ _REGIONPROPS_CSV_FILES = {}
33
+
34
+
35
+ def reset_regionprops_cache():
36
+ """
37
+ Reset the cache of CSV files.
38
+
39
+ Call this function if you want to reprocess folders that were already
40
+ processed in the current session.
41
+ """
42
+ global _REGIONPROPS_CSV_FILES
43
+ _REGIONPROPS_CSV_FILES.clear()
44
+ print("🔄 Regionprops analysis cache cleared")
45
+
46
+
47
+ def get_current_filepath() -> Optional[str]:
48
+ """
49
+ Extract the current file path from the call stack.
50
+
51
+ Returns:
52
+ str or None: The filepath being processed, or None if not found
53
+ """
54
+ for frame_info in inspect.stack():
55
+ frame_locals = frame_info.frame.f_locals
56
+ if "filepath" in frame_locals:
57
+ return frame_locals["filepath"]
58
+ return None
59
+
60
+
61
+ def load_label_image(filepath: str) -> np.ndarray:
62
+ """
63
+ Load a label image from file.
64
+
65
+ Parameters:
66
+ -----------
67
+ filepath : str
68
+ Path to the label image file
69
+
70
+ Returns:
71
+ --------
72
+ np.ndarray
73
+ Loaded label image
74
+ """
75
+ ext = os.path.splitext(filepath)[1].lower()
76
+
77
+ if ext == ".npy":
78
+ return np.load(filepath)
79
+ elif ext in {".tif", ".tiff", ".ome.tif", ".ome.tiff"}:
80
+ try: # noqa: SIM105
81
+ import tifffile
82
+
83
+ return tifffile.imread(filepath)
84
+ except ImportError:
85
+ from skimage.io import imread
86
+
87
+ return imread(filepath)
88
+ else:
89
+ from skimage.io import imread
90
+
91
+ return imread(filepath)
92
+
93
+
94
+ def find_label_images(
95
+ folder_path: str,
96
+ extensions: List[str] = None,
97
+ intensity_suffix: str = None,
98
+ ) -> List[str]:
99
+ """
100
+ Find all label image files in a folder.
101
+
102
+ Parameters:
103
+ -----------
104
+ folder_path : str
105
+ Path to the folder containing label images
106
+ extensions : List[str], optional
107
+ List of file extensions to look for
108
+ intensity_suffix : str, optional
109
+ If provided, only return files that contain this suffix in their name.
110
+ This prevents picking up both label and intensity images when they are in the same folder.
111
+
112
+ Returns:
113
+ --------
114
+ List[str]
115
+ Sorted list of label image file paths
116
+ """
117
+ if extensions is None:
118
+ extensions = [".tif", ".tiff", ".npy", ".png"]
119
+
120
+ folder = Path(folder_path)
121
+ if not folder.exists():
122
+ raise ValueError(f"Folder does not exist: {folder_path}")
123
+
124
+ # Find all label image files
125
+ label_files = []
126
+ for ext in extensions:
127
+ label_files.extend(folder.glob(f"*{ext}"))
128
+
129
+ # Filter to only label images if intensity_suffix provided
130
+ if intensity_suffix:
131
+ label_files = [f for f in label_files if intensity_suffix in f.name]
132
+
133
+ if not label_files:
134
+ raise ValueError(
135
+ f"No label image files found in folder: {folder_path}"
136
+ )
137
+
138
+ # Sort files by name
139
+ label_files.sort(key=lambda x: x.name)
140
+
141
+ return [str(f) for f in label_files]
142
+
143
+
144
+ def parse_dimensions_from_shape(
145
+ shape: Tuple[int, ...], ndim: int
146
+ ) -> Dict[str, int]:
147
+ """
148
+ Parse dimension information from image shape.
149
+
150
+ For images with more than 3 dimensions, tries to infer which dimensions
151
+ correspond to T (time), C (channel), Z (depth), Y (height), X (width).
152
+
153
+ Parameters:
154
+ -----------
155
+ shape : Tuple[int, ...]
156
+ Shape of the image array
157
+ ndim : int
158
+ Number of dimensions
159
+
160
+ Returns:
161
+ --------
162
+ Dict[str, int]
163
+ Dictionary mapping dimension names to their sizes
164
+ """
165
+ dim_info = {}
166
+
167
+ if ndim == 2:
168
+ # YX
169
+ dim_info["Y"] = shape[0]
170
+ dim_info["X"] = shape[1]
171
+ elif ndim == 3:
172
+ # Assume ZYX (could also be TYX or CYX)
173
+ dim_info["Z"] = shape[0]
174
+ dim_info["Y"] = shape[1]
175
+ dim_info["X"] = shape[2]
176
+ elif ndim == 4:
177
+ # Assume TZYX or CZYX
178
+ dim_info["T"] = shape[0]
179
+ dim_info["Z"] = shape[1]
180
+ dim_info["Y"] = shape[2]
181
+ dim_info["X"] = shape[3]
182
+ elif ndim == 5:
183
+ # Assume TCZYX
184
+ dim_info["T"] = shape[0]
185
+ dim_info["C"] = shape[1]
186
+ dim_info["Z"] = shape[2]
187
+ dim_info["Y"] = shape[3]
188
+ dim_info["X"] = shape[4]
189
+ else:
190
+ # For other dimensions, just number them
191
+ for i, size in enumerate(shape):
192
+ dim_info[f"dim_{i}"] = size
193
+
194
+ return dim_info
195
+
196
+
197
+ def extract_regionprops_recursive(
198
+ image: np.ndarray,
199
+ intensity_image: np.ndarray = None,
200
+ prefix_dims: Dict[str, int] = None,
201
+ current_dim: int = 0,
202
+ max_spatial_dims: int = 3,
203
+ dimension_order: str = "Auto",
204
+ properties: List[str] = None,
205
+ ) -> List[Dict]:
206
+ """
207
+ Recursively extract regionprops from a multi-dimensional label image.
208
+
209
+ This function handles images with arbitrary dimensions by recursively
210
+ processing each slice along non-spatial dimensions.
211
+
212
+ Parameters:
213
+ -----------
214
+ image : np.ndarray
215
+ Label image array
216
+ intensity_image : np.ndarray, optional
217
+ Intensity image for measuring mean/max/min intensity values
218
+ prefix_dims : Dict[str, int], optional
219
+ Dictionary of dimension indices processed so far (for grouping)
220
+ current_dim : int
221
+ Current dimension being processed
222
+ max_spatial_dims : int
223
+ Maximum number of spatial dimensions to process as a single unit
224
+ dimension_order : str
225
+ Dimension order string (e.g., "TZYX", "CZYX", "Auto")
226
+ properties : List[str], optional
227
+ List of properties to extract
228
+
229
+ Returns:
230
+ --------
231
+ List[Dict]
232
+ List of dictionaries containing regionprops for each label
233
+ """
234
+ if prefix_dims is None:
235
+ prefix_dims = {}
236
+
237
+ if properties is None:
238
+ properties = [
239
+ "label",
240
+ "area",
241
+ "centroid",
242
+ "bbox",
243
+ "mean_intensity",
244
+ "median_intensity",
245
+ "std_intensity",
246
+ "max_intensity",
247
+ "min_intensity",
248
+ ]
249
+
250
+ results = []
251
+
252
+ # Determine if we should process this as spatial data
253
+ ndim = image.ndim
254
+
255
+ # If we have 2 or 3 dimensions left, treat as spatial and extract regionprops
256
+ if ndim <= max_spatial_dims:
257
+ # Skip empty images (huge performance boost)
258
+ if image.max() == 0:
259
+ return results
260
+
261
+ # Extract regionprops for this spatial slice
262
+ # Use cache=False for better memory efficiency with large datasets
263
+ try: # noqa: SIM105
264
+ # Pass intensity image if provided
265
+ regions = measure.regionprops(
266
+ image.astype(int), intensity_image=intensity_image, cache=False
267
+ )
268
+
269
+ for region in regions:
270
+ props = prefix_dims.copy()
271
+
272
+ # Always include label and area (renamed to 'size' for output)
273
+ props["label"] = int(region.label)
274
+ if "area" in properties:
275
+ props["size"] = int(region.area)
276
+
277
+ # Add centroid coordinates if requested
278
+ if "centroid" in properties:
279
+ centroid = region.centroid
280
+ if ndim == 2:
281
+ props["centroid_y"] = float(centroid[0])
282
+ props["centroid_x"] = float(centroid[1])
283
+ elif ndim == 3:
284
+ props["centroid_z"] = float(centroid[0])
285
+ props["centroid_y"] = float(centroid[1])
286
+ props["centroid_x"] = float(centroid[2])
287
+
288
+ # Add bounding box if requested
289
+ if "bbox" in properties:
290
+ bbox = region.bbox
291
+ if ndim == 2:
292
+ props["bbox_min_y"] = int(bbox[0])
293
+ props["bbox_min_x"] = int(bbox[1])
294
+ props["bbox_max_y"] = int(bbox[2])
295
+ props["bbox_max_x"] = int(bbox[3])
296
+ elif ndim == 3:
297
+ props["bbox_min_z"] = int(bbox[0])
298
+ props["bbox_min_y"] = int(bbox[1])
299
+ props["bbox_min_x"] = int(bbox[2])
300
+ props["bbox_max_z"] = int(bbox[3])
301
+ props["bbox_max_y"] = int(bbox[4])
302
+ props["bbox_max_x"] = int(bbox[5])
303
+
304
+ # Add other properties if requested (only for 2D, as some aren't available for 3D)
305
+ if ndim == 2:
306
+ if "perimeter" in properties:
307
+ try: # noqa: SIM105
308
+ props["perimeter"] = float(region.perimeter)
309
+ except (
310
+ NotImplementedError,
311
+ AttributeError,
312
+ ):
313
+ pass
314
+
315
+ if "eccentricity" in properties:
316
+ try: # noqa: SIM105
317
+ props["eccentricity"] = float(region.eccentricity)
318
+ except (
319
+ NotImplementedError,
320
+ AttributeError,
321
+ ):
322
+ pass
323
+
324
+ if "solidity" in properties:
325
+ try: # noqa: SIM105
326
+ props["solidity"] = float(region.solidity)
327
+ except (
328
+ NotImplementedError,
329
+ AttributeError,
330
+ ):
331
+ pass
332
+
333
+ if "major_axis_length" in properties:
334
+ try: # noqa: SIM105
335
+ props["major_axis_length"] = float(
336
+ region.major_axis_length
337
+ )
338
+ except (
339
+ NotImplementedError,
340
+ AttributeError,
341
+ ):
342
+ pass
343
+
344
+ if "minor_axis_length" in properties:
345
+ try: # noqa: SIM105
346
+ props["minor_axis_length"] = float(
347
+ region.minor_axis_length
348
+ )
349
+ except (
350
+ NotImplementedError,
351
+ AttributeError,
352
+ ):
353
+ pass
354
+
355
+ if "orientation" in properties:
356
+ try: # noqa: SIM105
357
+ props["orientation"] = float(region.orientation)
358
+ except (
359
+ NotImplementedError,
360
+ AttributeError,
361
+ ):
362
+ pass
363
+
364
+ # Add extent if requested (available for both 2D and 3D)
365
+ if "extent" in properties:
366
+ try: # noqa: SIM105
367
+ props["extent"] = float(region.extent)
368
+ except (
369
+ NotImplementedError,
370
+ AttributeError,
371
+ ):
372
+ pass
373
+
374
+ # Add intensity measurements if intensity image was provided and requested
375
+ if intensity_image is not None:
376
+ if "mean_intensity" in properties:
377
+ try: # noqa: SIM105
378
+ props["mean_intensity"] = float(
379
+ region.mean_intensity
380
+ )
381
+ except (NotImplementedError, AttributeError) as e:
382
+ print(f"⚠️ Could not extract mean_intensity: {e}")
383
+
384
+ if "median_intensity" in properties:
385
+ try: # noqa: SIM105
386
+ # Median intensity requires accessing the intensity values
387
+ props["median_intensity"] = float(
388
+ np.median(region.intensity_image[region.image])
389
+ )
390
+ except (
391
+ NotImplementedError,
392
+ AttributeError,
393
+ Exception,
394
+ ) as e:
395
+ print(
396
+ f"⚠️ Could not extract median_intensity: {e}"
397
+ )
398
+
399
+ if "std_intensity" in properties:
400
+ try: # noqa: SIM105
401
+ # Standard deviation of intensity
402
+ props["std_intensity"] = float(
403
+ np.std(region.intensity_image[region.image])
404
+ )
405
+ except (
406
+ NotImplementedError,
407
+ AttributeError,
408
+ Exception,
409
+ ) as e:
410
+ print(f"⚠️ Could not extract std_intensity: {e}")
411
+
412
+ if "max_intensity" in properties:
413
+ try: # noqa: SIM105
414
+ props["max_intensity"] = float(
415
+ region.max_intensity
416
+ )
417
+ except (NotImplementedError, AttributeError) as e:
418
+ print(f"⚠️ Could not extract max_intensity: {e}")
419
+
420
+ if "min_intensity" in properties:
421
+ try: # noqa: SIM105
422
+ props["min_intensity"] = float(
423
+ region.min_intensity
424
+ )
425
+ except (NotImplementedError, AttributeError) as e:
426
+ print(f"⚠️ Could not extract min_intensity: {e}")
427
+
428
+ results.append(props)
429
+ except Exception as e: # noqa: BLE001
430
+ print(f"Warning: Error extracting regionprops: {e}")
431
+
432
+ else:
433
+ # Recurse along the first dimension
434
+ dim_name = None
435
+
436
+ # Use dimension_order to determine dimension names
437
+ if dimension_order != "Auto":
438
+ # Parse the dimension order string to identify T, C, Z dimensions
439
+ # The dimension_order describes the FULL shape of the original image
440
+ # We need to map current_dim to the correct position in that string
441
+ dim_order_upper = dimension_order.upper()
442
+
443
+ # The current_dim tells us which dimension index we're at
444
+ # Simply use it to index into the dimension_order string
445
+ if current_dim < len(dim_order_upper):
446
+ dim_char = dim_order_upper[current_dim]
447
+ # Use the actual dimension character (T, C, Z, Y, X)
448
+ if dim_char in "TCZ":
449
+ dim_name = dim_char
450
+ else:
451
+ # Y or X - shouldn't reach here as these are spatial
452
+ dim_name = f"dim_{current_dim}"
453
+ else:
454
+ dim_name = f"dim_{current_dim}"
455
+ else:
456
+ # Auto mode: Try to infer dimension name based on position
457
+ # Assume common conventions: TYX, ZYX, TZYX, CZYX, TCZYX, CYX
458
+ total_dims = current_dim + ndim
459
+ if total_dims == 3:
460
+ # 3D: likely TYX, ZYX, or CYX - assume T at position 0
461
+ # (T is most common for timelapse data)
462
+ dim_name = "T" if current_dim == 0 else f"dim_{current_dim}"
463
+ elif total_dims == 4:
464
+ # 4D: likely TZYX or CZYX - assume T at position 0
465
+ dim_name = "T" if current_dim == 0 else f"dim_{current_dim}"
466
+ elif total_dims == 5:
467
+ # 5D: likely TCZYX
468
+ if current_dim == 0:
469
+ dim_name = "T"
470
+ elif current_dim == 1:
471
+ dim_name = "C"
472
+ else:
473
+ dim_name = f"dim_{current_dim}"
474
+ else:
475
+ dim_name = f"dim_{current_dim}"
476
+
477
+ # Process each slice along this dimension
478
+ for idx in range(image.shape[0]):
479
+ slice_dims = prefix_dims.copy()
480
+ slice_dims[dim_name] = idx
481
+
482
+ slice_data = image[idx]
483
+ slice_intensity = (
484
+ intensity_image[idx] if intensity_image is not None else None
485
+ )
486
+
487
+ slice_results = extract_regionprops_recursive(
488
+ slice_data,
489
+ intensity_image=slice_intensity,
490
+ prefix_dims=slice_dims,
491
+ current_dim=current_dim + 1,
492
+ max_spatial_dims=max_spatial_dims,
493
+ dimension_order=dimension_order,
494
+ properties=properties,
495
+ )
496
+ results.extend(slice_results)
497
+
498
+ return results
499
+
500
+
501
+ def analyze_folder_regionprops(
502
+ folder_path: str,
503
+ output_csv: str,
504
+ max_spatial_dims: int = 3,
505
+ dimension_order: str = "Auto",
506
+ properties: List[str] = None,
507
+ intensity_suffix: str = None,
508
+ ):
509
+ """
510
+ Analyze all label images in a folder and save regionprops to CSV.
511
+
512
+ Parameters:
513
+ -----------
514
+ folder_path : str
515
+ Path to folder containing label images
516
+ output_csv : str
517
+ Path to output CSV file
518
+ max_spatial_dims : int
519
+ Maximum number of spatial dimensions (2 or 3)
520
+ dimension_order : str
521
+ Dimension order string (e.g., "TZYX", "CZYX", "Auto")
522
+ properties : List[str], optional
523
+ List of properties to extract
524
+ intensity_suffix : str, optional
525
+ Suffix to replace in label filename to find matching intensity image.
526
+ E.g., if label is "image_semantic_otsu.tif" and intensity is "image.tif",
527
+ use intensity_suffix="_semantic_otsu.tif" (replaces with ".tif")
528
+
529
+ Returns:
530
+ --------
531
+ DataFrame
532
+ DataFrame containing all regionprops
533
+ """
534
+ if not _HAS_PANDAS:
535
+ raise ImportError(
536
+ "pandas is required for regionprops analysis. "
537
+ "Install it with: pip install pandas"
538
+ )
539
+
540
+ print(f"🔍 Analyzing label images in: {folder_path}")
541
+ if dimension_order != "Auto":
542
+ print(f" Using dimension order: {dimension_order}")
543
+
544
+ # Find all label image files
545
+ # Pass intensity_suffix to filter only label images (not intensity images)
546
+ label_files = find_label_images(
547
+ folder_path, intensity_suffix=intensity_suffix
548
+ )
549
+ print(f"Found {len(label_files)} label image files")
550
+
551
+ all_results = []
552
+
553
+ for file_idx, filepath in enumerate(label_files):
554
+ filename = os.path.basename(filepath)
555
+ print(
556
+ f"Processing {file_idx + 1}/{len(label_files)}: {filename}",
557
+ end="",
558
+ flush=True,
559
+ )
560
+
561
+ try: # noqa: SIM105
562
+ # Load label image
563
+ label_image = load_label_image(filepath)
564
+
565
+ # Skip completely empty images
566
+ if label_image.max() == 0:
567
+ print(" - empty, skipped")
568
+ continue
569
+
570
+ # Load intensity image if suffix provided
571
+ intensity_image = None
572
+ if intensity_suffix:
573
+ # Find matching intensity image by replacing suffix
574
+ label_path = Path(filepath)
575
+ label_filename = label_path.name
576
+
577
+ if intensity_suffix in label_filename:
578
+ # Replace the suffix (e.g., "_semantic_otsu.tif" -> ".tif")
579
+ intensity_filename = label_filename.replace(
580
+ intensity_suffix, ".tif"
581
+ )
582
+ intensity_path = label_path.parent / intensity_filename
583
+
584
+ if intensity_path.exists():
585
+ try: # noqa: SIM105
586
+ intensity_image = load_label_image(
587
+ str(intensity_path)
588
+ )
589
+ # Verify shapes match
590
+ if intensity_image.shape != label_image.shape:
591
+ print(
592
+ f" - WARNING: intensity image shape {intensity_image.shape} != label shape {label_image.shape}, skipping intensity"
593
+ )
594
+ intensity_image = None
595
+ except Exception as e: # noqa: BLE001
596
+ print(
597
+ f" - WARNING: could not load intensity image: {e}"
598
+ )
599
+ else:
600
+ print(
601
+ f" - WARNING: intensity image not found: {intensity_path.name}"
602
+ )
603
+
604
+ # Extract regionprops recursively
605
+ import time
606
+
607
+ start_time = time.time()
608
+ file_results = extract_regionprops_recursive(
609
+ label_image,
610
+ intensity_image=intensity_image,
611
+ prefix_dims={"filename": filename},
612
+ current_dim=0,
613
+ max_spatial_dims=max_spatial_dims,
614
+ dimension_order=dimension_order,
615
+ properties=properties,
616
+ )
617
+ elapsed = time.time() - start_time
618
+
619
+ all_results.extend(file_results)
620
+ print(f" - {len(file_results)} regions in {elapsed:.2f}s")
621
+
622
+ except Exception as e: # noqa: BLE001
623
+ print(f"\n Error processing {filename}: {e}")
624
+ import traceback
625
+
626
+ traceback.print_exc()
627
+
628
+ # Convert to DataFrame
629
+ if all_results:
630
+ df = pd.DataFrame(all_results)
631
+
632
+ # Reorder columns to put identifiers first
633
+ id_cols = ["filename"]
634
+ if "T" in df.columns:
635
+ id_cols.append("T")
636
+ if "C" in df.columns:
637
+ id_cols.append("C")
638
+ if "Z" in df.columns:
639
+ id_cols.append("Z")
640
+
641
+ id_cols.append("label")
642
+
643
+ # Get remaining columns
644
+ other_cols = [col for col in df.columns if col not in id_cols]
645
+
646
+ # Reorder
647
+ df = df[id_cols + other_cols]
648
+
649
+ # Save to CSV
650
+ df.to_csv(output_csv, index=False)
651
+ print(f"\n✅ Saved regionprops to: {output_csv}")
652
+ print(f" Total regions: {len(df)}")
653
+ print(f" Columns: {', '.join(df.columns)}")
654
+
655
+ return df
656
+ else:
657
+ print("⚠️ No regions found in any label images")
658
+ # Create empty DataFrame with expected columns
659
+ df = pd.DataFrame(columns=["filename", "label", "area"])
660
+ df.to_csv(output_csv, index=False)
661
+ return df
662
+
663
+
664
+ @BatchProcessingRegistry.register(
665
+ name="Extract Regionprops to CSV",
666
+ suffix="_regionprops",
667
+ description="Extract region properties from label images to single CSV. Set label_suffix (e.g., '_otsu_semantic.tif') to filter only label files and pair with intensity images. All results saved to one CSV per folder.",
668
+ parameters={
669
+ "max_spatial_dims": {
670
+ "type": int,
671
+ "default": 2,
672
+ "min": 2,
673
+ "max": 3,
674
+ "description": "Spatial dimensions: 2=2D slices (YX), 3=3D volumes (ZYX)",
675
+ },
676
+ "overwrite_existing": {
677
+ "type": bool,
678
+ "default": False,
679
+ "description": "Overwrite existing CSV file if it exists",
680
+ },
681
+ "label_suffix": {
682
+ "type": str,
683
+ "default": "",
684
+ "description": "Label suffix to remove for finding intensity image (e.g., '_otsu_semantic.tif'). Only files with this suffix are processed. Removes suffix to find intensity image. Leave empty to skip intensity.",
685
+ },
686
+ "size": {
687
+ "type": bool,
688
+ "default": True,
689
+ "description": "Size (pixel count)",
690
+ },
691
+ "centroid": {
692
+ "type": bool,
693
+ "default": True,
694
+ "description": "Centroid Y,X coords",
695
+ },
696
+ "bbox": {
697
+ "type": bool,
698
+ "default": True,
699
+ "description": "Bounding box coords",
700
+ },
701
+ "perimeter": {
702
+ "type": bool,
703
+ "default": False,
704
+ "description": "Perimeter (2D)",
705
+ },
706
+ "eccentricity": {
707
+ "type": bool,
708
+ "default": False,
709
+ "description": "Eccentricity (2D)",
710
+ },
711
+ "extent": {
712
+ "type": bool,
713
+ "default": False,
714
+ "description": "Extent (area/bbox ratio)",
715
+ },
716
+ "solidity": {
717
+ "type": bool,
718
+ "default": False,
719
+ "description": "Solidity (2D, SLOW)",
720
+ },
721
+ "major_axis": {
722
+ "type": bool,
723
+ "default": False,
724
+ "description": "Major axis (2D)",
725
+ },
726
+ "minor_axis": {
727
+ "type": bool,
728
+ "default": False,
729
+ "description": "Minor axis (2D)",
730
+ },
731
+ "orientation": {
732
+ "type": bool,
733
+ "default": False,
734
+ "description": "Orientation angle (2D)",
735
+ },
736
+ "mean_intensity": {
737
+ "type": bool,
738
+ "default": True,
739
+ "description": "Mean intensity",
740
+ },
741
+ "median_intensity": {
742
+ "type": bool,
743
+ "default": True,
744
+ "description": "Median intensity",
745
+ },
746
+ "std_intensity": {
747
+ "type": bool,
748
+ "default": True,
749
+ "description": "Std intensity",
750
+ },
751
+ "max_intensity": {
752
+ "type": bool,
753
+ "default": False,
754
+ "description": "Max intensity",
755
+ },
756
+ "min_intensity": {
757
+ "type": bool,
758
+ "default": False,
759
+ "description": "Min intensity",
760
+ },
761
+ },
762
+ )
763
+ def extract_regionprops_folder(
764
+ image: np.ndarray,
765
+ max_spatial_dims: int = 2,
766
+ overwrite_existing: bool = False,
767
+ label_suffix: str = "",
768
+ size: bool = True,
769
+ centroid: bool = True,
770
+ bbox: bool = True,
771
+ perimeter: bool = False,
772
+ eccentricity: bool = False,
773
+ extent: bool = False,
774
+ solidity: bool = False,
775
+ major_axis: bool = False,
776
+ minor_axis: bool = False,
777
+ orientation: bool = False,
778
+ mean_intensity: bool = True,
779
+ median_intensity: bool = True,
780
+ std_intensity: bool = True,
781
+ max_intensity: bool = False,
782
+ min_intensity: bool = False,
783
+ dimension_order: str = "Auto",
784
+ ) -> None:
785
+ """
786
+ Extract region properties from a label image and append to CSV file.
787
+
788
+ This function processes a single label image and extracts region properties
789
+ (area, centroid, bounding box, etc.) for each labeled region. Results are
790
+ appended to a single CSV file per folder (created in parent directory).
791
+
792
+ **Output:** Creates ONLY a CSV file, no image files are generated.
793
+
794
+ The function uses dimension_order (from file selector dropdown) to properly identify
795
+ T (time) and C (channel) dimensions, which are treated as grouping variables in the
796
+ output CSV.
797
+
798
+ **Intensity Measurements:** If label_suffix is provided, the function will find
799
+ matching intensity images by replacing the suffix in label filenames. For example:
800
+ - Label: "image_otsu_semantic.tif", Intensity: "image.tif" → use label_suffix="_otsu_semantic.tif"
801
+ - This enables mean/max/min intensity measurements for each region.
802
+
803
+ Parameters:
804
+ -----------
805
+ image : numpy.ndarray
806
+ Input label image
807
+ max_spatial_dims : int
808
+ Maximum number of spatial dimensions to process as a unit (2=YX, 3=ZYX)
809
+ overwrite_existing : bool
810
+ Overwrite existing CSV file if it exists (only applies to first image)
811
+ label_suffix : str
812
+ Label file suffix to remove for finding matching intensity image
813
+ (e.g., "_otsu_semantic.tif"). Leave empty to skip intensity measurements.
814
+ size, centroid, bbox, ... : bool
815
+ Enable/disable specific region properties
816
+ dimension_order : str
817
+ Dimension order string (e.g., "TZYX", "CZYX", "TYX", "Auto")
818
+ This parameter is automatically provided by the file selector dropdown
819
+
820
+ Returns:
821
+ --------
822
+ None
823
+ This function only generates CSV output, no image is returned
824
+ """
825
+ global _REGIONPROPS_CSV_FILES
826
+
827
+ # Get the current file path from the call stack
828
+ current_file = get_current_filepath()
829
+
830
+ if current_file is None:
831
+ print("⚠️ Could not determine current file path")
832
+ return None
833
+
834
+ # This is the label image file
835
+ label_path = Path(current_file)
836
+
837
+ # IMPORTANT: Only process files that have the label_suffix (label images)
838
+ # Skip files without the suffix (those are the intensity images)
839
+ if label_suffix and label_suffix.strip():
840
+ # The suffix should include the extension, e.g., "_otsu_semantic.tif"
841
+ # Check if filename ends with this suffix
842
+ filename_str = str(label_path.name)
843
+ if not filename_str.endswith(label_suffix):
844
+ # This file doesn't have the label suffix - skip it (it's an intensity image)
845
+ return None
846
+
847
+ # Generate output CSV path (one per folder)
848
+ folder_name = label_path.parent.name
849
+ parent_dir = label_path.parent.parent
850
+ output_csv = str(parent_dir / f"{folder_name}_regionprops.csv")
851
+
852
+ # Convert checkbox parameters to properties list
853
+ properties_list = []
854
+ if size:
855
+ properties_list.append("area")
856
+ if centroid:
857
+ properties_list.append("centroid")
858
+ if bbox:
859
+ properties_list.append("bbox")
860
+ if perimeter:
861
+ properties_list.append("perimeter")
862
+ if eccentricity:
863
+ properties_list.append("eccentricity")
864
+ if extent:
865
+ properties_list.append("extent")
866
+ if solidity:
867
+ properties_list.append("solidity")
868
+ if major_axis:
869
+ properties_list.append("major_axis_length")
870
+ if minor_axis:
871
+ properties_list.append("minor_axis_length")
872
+ if orientation:
873
+ properties_list.append("orientation")
874
+ if mean_intensity:
875
+ properties_list.append("mean_intensity")
876
+ if median_intensity:
877
+ properties_list.append("median_intensity")
878
+ if std_intensity:
879
+ properties_list.append("std_intensity")
880
+ if max_intensity:
881
+ properties_list.append("max_intensity")
882
+ if min_intensity:
883
+ properties_list.append("min_intensity")
884
+
885
+ # Always include label
886
+ if "label" not in properties_list:
887
+ properties_list.insert(0, "label")
888
+
889
+ # Debug: Print properties to extract (only on first file)
890
+ if output_csv not in _REGIONPROPS_CSV_FILES:
891
+ print(f"📋 Properties to extract: {', '.join(properties_list)}")
892
+
893
+ # Check if CSV already exists and overwrite_existing is False
894
+ csv_path = Path(output_csv)
895
+ if (
896
+ csv_path.exists()
897
+ and not overwrite_existing
898
+ and output_csv not in _REGIONPROPS_CSV_FILES
899
+ ):
900
+ # CSV exists, don't overwrite, and we haven't tracked it yet in this session
901
+ # Skip processing (user wants to keep existing file)
902
+ return None
903
+
904
+ # Determine if this is the first image in this folder
905
+ write_header = False
906
+ if output_csv not in _REGIONPROPS_CSV_FILES:
907
+ # First time seeing this CSV in this session
908
+ if overwrite_existing or not csv_path.exists():
909
+ write_header = True
910
+ _REGIONPROPS_CSV_FILES[output_csv] = True
911
+
912
+ # Process this single image
913
+ try: # noqa: SIM105
914
+ # Load intensity image if suffix provided
915
+ intensity_image = None
916
+ if label_suffix and label_suffix.strip():
917
+ intensity_path_str = str(label_path).replace(
918
+ label_suffix, Path(label_path).suffix
919
+ )
920
+ intensity_path = Path(intensity_path_str)
921
+ if intensity_path.exists():
922
+ intensity_image = load_label_image(str(intensity_path))
923
+ print(f"📊 Loaded intensity image: {intensity_path.name}")
924
+ else:
925
+ print(f"⚠️ Intensity image not found: {intensity_path.name}")
926
+
927
+ # Extract regionprops for this image
928
+ results = extract_regionprops_recursive(
929
+ image=image,
930
+ intensity_image=intensity_image,
931
+ properties=properties_list,
932
+ max_spatial_dims=max_spatial_dims,
933
+ dimension_order=dimension_order,
934
+ )
935
+
936
+ # Add filename to each result
937
+ for row in results:
938
+ row["filename"] = label_path.name
939
+
940
+ # Convert to DataFrame
941
+ if results:
942
+ df = pd.DataFrame(results)
943
+
944
+ # Write to CSV (header only on first write)
945
+ if write_header:
946
+ df.to_csv(output_csv, index=False, mode="w")
947
+ print(f"✅ Created CSV with header: {output_csv}")
948
+ else:
949
+ df.to_csv(output_csv, index=False, mode="a", header=False)
950
+ print(f"✅ Appended {len(df)} rows to: {output_csv}")
951
+
952
+ return None
953
+ else:
954
+ print(f"⚠️ No regions found in {label_path.name}")
955
+ return None
956
+
957
+ except Exception as e: # noqa: BLE001
958
+ print(f"❌ Error processing {label_path.name}: {e}")
959
+ import traceback
960
+
961
+ traceback.print_exc()
962
+ return None
963
+
964
+
965
+ @BatchProcessingRegistry.register(
966
+ name="Regionprops Summary Statistics",
967
+ suffix="_regionprops_summary",
968
+ description="Calculate summary statistics (count, sum, mean, median, std) of regionprops per file. Groups labels by file and optional dimensions (T/C/Z). Results saved to single CSV per folder.",
969
+ parameters={
970
+ "max_spatial_dims": {
971
+ "type": int,
972
+ "default": 2,
973
+ "min": 2,
974
+ "max": 3,
975
+ "description": "Spatial dimensions: 2=2D slices (YX), 3=3D volumes (ZYX)",
976
+ },
977
+ "overwrite_existing": {
978
+ "type": bool,
979
+ "default": False,
980
+ "description": "Overwrite existing CSV file if it exists",
981
+ },
982
+ "label_suffix": {
983
+ "type": str,
984
+ "default": "",
985
+ "description": "Label suffix to remove for finding intensity image (e.g., '_otsu_semantic.tif'). Only files with this suffix are processed. Leave empty to skip intensity.",
986
+ },
987
+ "group_by_dimensions": {
988
+ "type": bool,
989
+ "default": False,
990
+ "description": "Group by T/C/Z dimensions (if present) in addition to filename",
991
+ },
992
+ "size": {
993
+ "type": bool,
994
+ "default": True,
995
+ "description": "Include size (area) statistics",
996
+ },
997
+ "mean_intensity": {
998
+ "type": bool,
999
+ "default": True,
1000
+ "description": "Include mean intensity statistics",
1001
+ },
1002
+ "median_intensity": {
1003
+ "type": bool,
1004
+ "default": True,
1005
+ "description": "Include median intensity statistics",
1006
+ },
1007
+ "std_intensity": {
1008
+ "type": bool,
1009
+ "default": True,
1010
+ "description": "Include std intensity statistics",
1011
+ },
1012
+ "max_intensity": {
1013
+ "type": bool,
1014
+ "default": False,
1015
+ "description": "Include max intensity statistics",
1016
+ },
1017
+ "min_intensity": {
1018
+ "type": bool,
1019
+ "default": False,
1020
+ "description": "Include min intensity statistics",
1021
+ },
1022
+ },
1023
+ )
1024
+ def extract_regionprops_summary_folder(
1025
+ image: np.ndarray,
1026
+ max_spatial_dims: int = 2,
1027
+ overwrite_existing: bool = False,
1028
+ label_suffix: str = "",
1029
+ group_by_dimensions: bool = False,
1030
+ size: bool = True,
1031
+ mean_intensity: bool = True,
1032
+ median_intensity: bool = True,
1033
+ std_intensity: bool = True,
1034
+ max_intensity: bool = False,
1035
+ min_intensity: bool = False,
1036
+ dimension_order: str = "Auto",
1037
+ ) -> None:
1038
+ """
1039
+ Extract summary statistics of region properties from label images.
1040
+
1041
+ This function calculates aggregate statistics (count, sum, mean, median, std)
1042
+ for selected regionprops across all labels in each file. Results are grouped
1043
+ by filename and optionally by dimensions (T/C/Z).
1044
+
1045
+ **Output:** Creates ONLY a CSV file with summary statistics, no image files.
1046
+
1047
+ The CSV contains:
1048
+ - filename (and T/C/Z if group_by_dimensions=True)
1049
+ - label_count: number of labels/regions
1050
+ - For each selected property (e.g., size, mean_intensity):
1051
+ - {property}_sum: sum across all labels
1052
+ - {property}_mean: mean across all labels
1053
+ - {property}_median: median across all labels
1054
+ - {property}_std: standard deviation across all labels
1055
+
1056
+ **Intensity Measurements:** If label_suffix is provided, the function will find
1057
+ matching intensity images by replacing the suffix in label filenames.
1058
+
1059
+ Parameters:
1060
+ -----------
1061
+ image : numpy.ndarray
1062
+ Input label image
1063
+ max_spatial_dims : int
1064
+ Maximum number of spatial dimensions to process as a unit (2=YX, 3=ZYX)
1065
+ overwrite_existing : bool
1066
+ Overwrite existing CSV file if it exists (only applies to first image)
1067
+ label_suffix : str
1068
+ Label file suffix to remove for finding matching intensity image
1069
+ group_by_dimensions : bool
1070
+ If True, group statistics by T/C/Z dimensions in addition to filename
1071
+ size, mean_intensity, ... : bool
1072
+ Enable/disable specific region properties for statistics
1073
+ dimension_order : str
1074
+ Dimension order string (e.g., "TZYX", "CZYX", "TYX", "Auto")
1075
+
1076
+ Returns:
1077
+ --------
1078
+ None
1079
+ This function only generates CSV output, no image is returned
1080
+ """
1081
+ global _REGIONPROPS_CSV_FILES
1082
+
1083
+ # Get the current file path from the call stack
1084
+ current_file = get_current_filepath()
1085
+
1086
+ if current_file is None:
1087
+ print("⚠️ Could not determine current file path")
1088
+ return None
1089
+
1090
+ # This is the label image file
1091
+ label_path = Path(current_file)
1092
+
1093
+ # Only process files that have the label_suffix (label images)
1094
+ if label_suffix and label_suffix.strip():
1095
+ filename_str = str(label_path.name)
1096
+ if not filename_str.endswith(label_suffix):
1097
+ return None
1098
+
1099
+ # Generate output CSV path (one per folder)
1100
+ folder_name = label_path.parent.name
1101
+ parent_dir = label_path.parent.parent
1102
+ output_csv = str(parent_dir / f"{folder_name}_regionprops_summary.csv")
1103
+
1104
+ # Build properties list for extraction
1105
+ properties_list = ["label", "area"] # Always need these
1106
+ if mean_intensity:
1107
+ properties_list.append("mean_intensity")
1108
+ if median_intensity:
1109
+ properties_list.append("median_intensity")
1110
+ if std_intensity:
1111
+ properties_list.append("std_intensity")
1112
+ if max_intensity:
1113
+ properties_list.append("max_intensity")
1114
+ if min_intensity:
1115
+ properties_list.append("min_intensity")
1116
+
1117
+ # Debug: Print on first file
1118
+ csv_key = f"{output_csv}_summary"
1119
+ if csv_key not in _REGIONPROPS_CSV_FILES:
1120
+ print(
1121
+ f"📋 Computing summary statistics for: {', '.join(properties_list)}"
1122
+ )
1123
+
1124
+ # Check if CSV already exists
1125
+ csv_path = Path(output_csv)
1126
+ write_header = False
1127
+ if csv_key not in _REGIONPROPS_CSV_FILES:
1128
+ if overwrite_existing or not csv_path.exists():
1129
+ write_header = True
1130
+ _REGIONPROPS_CSV_FILES[csv_key] = True
1131
+
1132
+ # Process this single image
1133
+ try: # noqa: SIM105
1134
+ # Load intensity image if suffix provided
1135
+ intensity_image = None
1136
+ if label_suffix and label_suffix.strip():
1137
+ intensity_path_str = str(label_path).replace(
1138
+ label_suffix, Path(label_path).suffix
1139
+ )
1140
+ intensity_path = Path(intensity_path_str)
1141
+ if intensity_path.exists():
1142
+ intensity_image = load_label_image(str(intensity_path))
1143
+ print(f"📊 Loaded intensity image: {intensity_path.name}")
1144
+ else:
1145
+ print(f"⚠️ Intensity image not found: {intensity_path.name}")
1146
+
1147
+ # Extract regionprops for this image
1148
+ results = extract_regionprops_recursive(
1149
+ image=image,
1150
+ intensity_image=intensity_image,
1151
+ properties=properties_list,
1152
+ max_spatial_dims=max_spatial_dims,
1153
+ dimension_order=dimension_order,
1154
+ )
1155
+
1156
+ if not results:
1157
+ print(f"⚠️ No regions found in {label_path.name}")
1158
+ return None
1159
+
1160
+ # Convert to DataFrame for easier aggregation
1161
+ df = pd.DataFrame(results)
1162
+
1163
+ # Determine grouping columns
1164
+ group_cols = []
1165
+ if group_by_dimensions:
1166
+ # Check which dimension columns are present
1167
+ for dim in ["T", "C", "Z"]:
1168
+ if dim in df.columns:
1169
+ group_cols.append(dim)
1170
+
1171
+ # Prepare summary statistics
1172
+ summary_rows = []
1173
+
1174
+ if group_cols:
1175
+ # Group by dimensions
1176
+ for group_key, group_df in df.groupby(group_cols):
1177
+ summary_row = {"filename": label_path.name}
1178
+
1179
+ # Add dimension values
1180
+ if len(group_cols) == 1:
1181
+ summary_row[group_cols[0]] = group_key
1182
+ else:
1183
+ for i, col in enumerate(group_cols):
1184
+ summary_row[col] = group_key[i]
1185
+
1186
+ # Calculate statistics
1187
+ summary_row["label_count"] = len(group_df)
1188
+
1189
+ # Size statistics (note: 'area' is renamed to 'size' by extract_regionprops_recursive)
1190
+ if size and "size" in group_df.columns:
1191
+ summary_row["size_sum"] = int(group_df["size"].sum())
1192
+ summary_row["size_mean"] = float(group_df["size"].mean())
1193
+ summary_row["size_median"] = float(
1194
+ group_df["size"].median()
1195
+ )
1196
+ summary_row["size_std"] = float(group_df["size"].std())
1197
+
1198
+ # Intensity statistics
1199
+ for prop in [
1200
+ "mean_intensity",
1201
+ "median_intensity",
1202
+ "std_intensity",
1203
+ "max_intensity",
1204
+ "min_intensity",
1205
+ ]:
1206
+ # Check if user enabled this property and it exists in data
1207
+ prop_enabled = locals().get(
1208
+ prop.replace("_intensity", "_intensity")
1209
+ )
1210
+ if prop_enabled and prop in group_df.columns:
1211
+ prop_name = prop.replace("_intensity", "_int")
1212
+ summary_row[f"{prop_name}_sum"] = float(
1213
+ group_df[prop].sum()
1214
+ )
1215
+ summary_row[f"{prop_name}_mean"] = float(
1216
+ group_df[prop].mean()
1217
+ )
1218
+ summary_row[f"{prop_name}_median"] = float(
1219
+ group_df[prop].median()
1220
+ )
1221
+ summary_row[f"{prop_name}_std"] = float(
1222
+ group_df[prop].std()
1223
+ )
1224
+
1225
+ summary_rows.append(summary_row)
1226
+ else:
1227
+ # No grouping by dimensions - single summary for whole file
1228
+ summary_row = {"filename": label_path.name}
1229
+ summary_row["label_count"] = len(df)
1230
+
1231
+ # Size statistics (note: 'area' is renamed to 'size' by extract_regionprops_recursive)
1232
+ if size and "size" in df.columns:
1233
+ summary_row["size_sum"] = int(df["size"].sum())
1234
+ summary_row["size_mean"] = float(df["size"].mean())
1235
+ summary_row["size_median"] = float(df["size"].median())
1236
+ summary_row["size_std"] = float(df["size"].std())
1237
+
1238
+ # Intensity statistics
1239
+ for prop in [
1240
+ "mean_intensity",
1241
+ "median_intensity",
1242
+ "std_intensity",
1243
+ "max_intensity",
1244
+ "min_intensity",
1245
+ ]:
1246
+ prop_enabled = locals().get(
1247
+ prop.replace("_intensity", "_intensity")
1248
+ )
1249
+ if prop_enabled and prop in df.columns:
1250
+ prop_name = prop.replace("_intensity", "_int")
1251
+ summary_row[f"{prop_name}_sum"] = float(df[prop].sum())
1252
+ summary_row[f"{prop_name}_mean"] = float(df[prop].mean())
1253
+ summary_row[f"{prop_name}_median"] = float(
1254
+ df[prop].median()
1255
+ )
1256
+ summary_row[f"{prop_name}_std"] = float(df[prop].std())
1257
+
1258
+ summary_rows.append(summary_row)
1259
+
1260
+ # Convert to DataFrame and save
1261
+ summary_df = pd.DataFrame(summary_rows)
1262
+
1263
+ # Write to CSV
1264
+ if write_header:
1265
+ summary_df.to_csv(output_csv, index=False, mode="w")
1266
+ print(f"✅ Created summary CSV: {output_csv}")
1267
+ else:
1268
+ summary_df.to_csv(output_csv, index=False, mode="a", header=False)
1269
+ print(
1270
+ f"✅ Appended {len(summary_df)} summary rows to: {output_csv}"
1271
+ )
1272
+
1273
+ return None
1274
+
1275
+ except Exception as e: # noqa: BLE001
1276
+ print(f"❌ Error processing {label_path.name}: {e}")
1277
+ import traceback
1278
+
1279
+ traceback.print_exc()
1280
+ return None
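
For orientation, here is a minimal usage sketch of the new extract_regionprops_recursive helper on a synthetic TYX label stack. It assumes napari-tmidas 0.2.5 together with numpy and scikit-image is installed; the labels array and its contents are made up for illustration.

    import numpy as np
    from napari_tmidas.processing_functions.regionprops_analysis import (
        extract_regionprops_recursive,
    )

    # Hypothetical TYX label stack with one labelled rectangle per timepoint.
    labels = np.zeros((2, 64, 64), dtype=np.uint16)
    labels[0, 10:20, 10:20] = 1
    labels[1, 30:40, 30:42] = 2

    rows = extract_regionprops_recursive(
        labels,
        max_spatial_dims=2,       # treat each YX slice as one spatial unit
        dimension_order="TYX",    # the leading axis becomes a "T" grouping column
        properties=["label", "area", "centroid"],
    )
    for row in rows:
        # e.g. {'T': 0, 'label': 1, 'size': 100, 'centroid_y': 14.5, 'centroid_x': 14.5}
        print(row)

The registered batch functions above build on exactly this helper: they collect such rows per file and append them to one CSV per folder via pandas, which is why pandas is required for the CSV output even though the helper itself does not need it.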