microlive 1.0.12__py3-none-any.whl → 1.0.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
microlive/imports.py CHANGED
@@ -23,7 +23,11 @@ from numba import njit, types
  from numba.typed import List as TypedList
  import cv2
  import io
- import fpdf
+ # Optional PDF generation support
+ try:
+     from fpdf import FPDF
+ except ImportError:
+     FPDF = None # PDF generation will be skipped if fpdf not installed
  import json

  # Import third-party libraries
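The try/except above turns fpdf into a soft dependency. A minimal sketch of the pattern, assuming a hypothetical `export_report` helper (not part of the package) that guards on `FPDF` being `None`, the same way the pipeline's `create_pdf` does later in this diff:

```python
# Sketch of the optional-dependency pattern; export_report is a hypothetical caller.
try:
    from fpdf import FPDF  # provided by the fpdf / fpdf2 package
except ImportError:
    FPDF = None

def export_report(image_paths, pdf_name):
    """Write a one-image-per-page PDF, or skip gracefully if fpdf is missing."""
    if FPDF is None:
        print(f"Warning: fpdf not installed. Skipping PDF creation: {pdf_name}")
        return None
    pdf = FPDF()
    pdf.set_auto_page_break(auto=True, margin=15)
    for path in image_paths:
        pdf.add_page()
        pdf.image(str(path), x=25, y=25, w=150)
    pdf.output(pdf_name)
```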
microlive/microscopy.py CHANGED
@@ -1423,6 +1423,13 @@ class Intensity():
  optimize_spot_size: Search for optimal spot size (5-11 px). Slower. Defaults to False.
  allow_subpixel_repositioning: Search ±2px for better center. Defaults to False.
  fast_gaussian_fit: Use moment-based (fast) vs full Gaussian fit. Defaults to True.
+ snr_method: Method for calculating signal-to-noise ratio. Options:
+     - 'peak' (default): Uses the maximum pixel value in the spot region as signal.
+       This is the standard definition of SNR: (max_spot - mean_bg) / std_bg.
+       Recommended for most applications.
+     - 'disk_doughnut': Uses mean disk intensity as signal instead of peak value.
+       Calculates SNR as: (mean_disk - mean_bg) / std_bg.
+       More robust when data is very noisy or spots are dim.

  Attributes:
  number_spots: Number of spots to measure.
@@ -1431,7 +1438,7 @@ class Intensity():

  def __init__(self, original_image, spot_size=5, array_spot_location_z_y_x=None,
  use_max_projection=False, optimize_spot_size=False, allow_subpixel_repositioning=False,
- fast_gaussian_fit=True):
+ fast_gaussian_fit=True, snr_method='peak'):
  self.original_image = original_image
  if array_spot_location_z_y_x is None:
  self.array_spot_location_z_y_x = np.array([[0, 0, 0]])
@@ -1449,6 +1456,7 @@ class Intensity():
  self.optimize_spot_size = optimize_spot_size
  self.allow_subpixel_repositioning = allow_subpixel_repositioning
  self.fast_gaussian_fit = fast_gaussian_fit
+ self.snr_method = snr_method

  def two_dimensional_gaussian(self, xy, amplitude, x0, y0, sigma_x, sigma_y, offset):
  """Evaluate 2D Gaussian at given coordinates."""
@@ -1551,11 +1559,23 @@ class Intensity():
  def calculate_intensity(self):
  """Calculate intensity metrics for all spots across all channels.

+ The signal-to-noise ratio (SNR) calculation method is controlled by the
+ `snr_method` parameter set during class initialization:
+
+ - **'peak'** (default): Uses the maximum pixel value in the spot region
+   as the signal. This is the standard definition of SNR commonly used
+   in microscopy: SNR = (max_spot - mean_background) / std_background.
+
+ - **'disk_doughnut'**: Uses the mean disk intensity as signal instead
+   of the peak value. This method is more robust for very noisy data
+   or dim spots where the maximum value may be unreliable due to noise.
+   SNR = (mean_disk - mean_background) / std_background.
+
  Returns:
  tuple: 8-element tuple of arrays, each with shape [N_spots, N_channels]:
  - intensities: Background-subtracted intensity (disk - doughnut mean).
  - intensities_std: Standard deviation within disk region.
- - intensities_snr: Signal-to-noise ratio (disk-bg) / std(bg).
+ - intensities_snr: Signal-to-noise ratio (calculation depends on snr_method).
  - intensities_background_mean: Mean background from doughnut.
  - intensities_background_std: Std of background from doughnut.
  - psfs_amplitude: PSF peak amplitude from Gaussian fit (or NaN).
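For reference, the two SNR definitions documented above reduce to simple NumPy expressions. A minimal sketch with synthetic pixel values, purely to illustrate the arithmetic:

```python
import numpy as np

# Synthetic pixel values for one spot: a bright disk and its background doughnut.
values_disk = np.array([120, 135, 150, 180, 140], dtype=float)
values_donut = np.array([100, 102, 98, 101, 99, 103], dtype=float)

mean_bg = values_donut.mean()
std_bg = values_donut.std()

# 'peak' method: signal is the maximum pixel in the disk.
snr_peak = (values_disk.max() - mean_bg) / std_bg

# 'disk_doughnut' method: signal is the mean disk intensity.
snr_disk_doughnut = (values_disk.mean() - mean_bg) / std_bg

print(f"peak SNR = {snr_peak:.2f}, disk_doughnut SNR = {snr_disk_doughnut:.2f}")
```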
@@ -1577,11 +1597,31 @@ class Intensity():
  donut_values = tem_img[~np.isnan(tem_img)].astype('uint16')
  return donut_values

- def signal_to_noise_ratio(values_disk, values_donut):
-     mean_disk = np.mean(values_disk.astype(float))
+ def signal_to_noise_ratio(values_disk, values_donut, snr_method='peak'):
+     """Calculate signal-to-noise ratio for a spot.
+
+     Args:
+         values_disk: Pixel values in the spot disk region.
+         values_donut: Pixel values in the background doughnut region.
+         snr_method: 'peak' uses max pixel value as signal (standard),
+             'disk_doughnut' uses mean disk intensity (robust for noisy data).
+
+     Returns:
+         tuple: (SNR, mean_background, std_background)
+     """
  mean_donut = np.mean(values_donut.astype(float))
  std_donut = np.std(values_donut.astype(float))
- SNR = (mean_disk - mean_donut) / std_donut if std_donut > 0 else 0
+
+     if snr_method == 'peak':
+         # Standard SNR: use peak (maximum) pixel value as signal
+         max_disk = np.max(values_disk.astype(float))
+         signal = max_disk - mean_donut
+     else:
+         # disk_doughnut method: use mean disk intensity as signal
+         mean_disk = np.mean(values_disk.astype(float))
+         signal = mean_disk - mean_donut
+
+     SNR = signal / std_donut if std_donut > 0 else 0
  return SNR, mean_donut, std_donut

  def disk_donut(values_disk, values_donut, spot_size):
@@ -1749,7 +1789,7 @@ class Intensity():
  # Use the updated integer positions for the final intensity crop
  crop_disk_and_donut = return_crop(frame_data[:,:,i], current_x_int, current_y_int, spot_range=crop_range)
  values_donut = return_donut(crop_disk_and_donut, spot_size=best_size)
- intensities_snr[sp,i], intensities_background_mean[sp,i], intensities_background_std[sp,i] = signal_to_noise_ratio(values_disk, values_donut)
+ intensities_snr[sp,i], intensities_background_mean[sp,i], intensities_background_std[sp,i] = signal_to_noise_ratio(values_disk, values_donut, self.snr_method)
  # disk_donut calculation
  intensities[sp,i], intensities_std[sp,i] = disk_donut(values_disk, values_donut, spot_size=best_size)
  intensities_total[sp,i] = np.sum(values_disk)
@@ -3908,7 +3948,8 @@ class BigFISH():

  # Select isolated spots (cluster_id < 0) and set cluster_size to 1
  spots_no_clusters = clusters_and_spots_big_fish[clusters_and_spots_big_fish[:,-1] < 0].copy()
- spots_no_clusters[:,-1] = 1 # Replace cluster_id with cluster_size=1
+ if len(spots_no_clusters) > 0:
+     spots_no_clusters[:,-1] = 1 # Replace cluster_id with cluster_size=1

  # Select cluster centroids with cluster_size > 1
  clusters_no_spots = clusters[clusters[:,-2] > 1]
@@ -4972,6 +5013,7 @@ class DataProcessing():
  self.fast_gaussian_fit = fast_gaussian_fit
  # This number represent the number of columns that doesnt change with the number of color channels in the image
  self.NUMBER_OF_CONSTANT_COLUMNS_IN_DATAFRAME = 18
+
  def get_dataframe(self):
  '''
  This method extracts data from the class SpotDetection and returns the data as a dataframe.
@@ -5122,7 +5164,7 @@ class DataProcessing():
  array_spots_nuc[:,10:13] = spots_nuc[:,:3] # populating coord
  array_spots_nuc[:,13] = 1 # is_nuc
  array_spots_nuc[:,14] = 0 # is_cluster
- array_spots_nuc[:,15] = 0 # cluster_size
+ array_spots_nuc[:,15] = spots_nuc[:,3] # cluster_size (use actual detected value)
  array_spots_nuc[:,16] = spot_type # spot_type
  array_spots_nuc[:,17] = is_cell_in_border # is_cell_fragmented

@@ -5131,7 +5173,7 @@ class DataProcessing():
  array_spots_cytosol_only[:,10:13] = spots_cytosol_only[:,:3] # populating coord
  array_spots_cytosol_only[:,13] = 0 # is_nuc
  array_spots_cytosol_only[:,14] = 0 # is_cluster
- array_spots_cytosol_only[:,15] = 1 # cluster_size
+ array_spots_cytosol_only[:,15] = spots_cytosol_only[:,3] # cluster_size (use actual detected value)
  array_spots_cytosol_only[:,16] = spot_type # spot_type
  array_spots_cytosol_only[:,17] = is_cell_in_border # is_cell_fragmented
  if (detected_cyto_clusters == True): #(detected_cyto == True) and
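The two fixes above stop hard-coding the cluster_size column and instead copy the value detected by Big-FISH. A small synthetic sketch of the column mapping, with the detection-array layout inferred from the indices used above (columns assumed to be [z, y, x, cluster_size]):

```python
import numpy as np

# Synthetic detections: columns assumed to be [z, y, x, cluster_size].
spots_nuc = np.array([
    [3, 120, 210, 1],   # isolated spot
    [5,  88,  40, 7],   # spot belonging to a 7-spot cluster
], dtype=float)

n_spots, n_cols = spots_nuc.shape[0], 18
array_spots_nuc = np.zeros((n_spots, n_cols))
array_spots_nuc[:, 10:13] = spots_nuc[:, :3]   # z, y, x coordinates
array_spots_nuc[:, 13] = 1                     # is_nuc
array_spots_nuc[:, 14] = 0                     # is_cluster
array_spots_nuc[:, 15] = spots_nuc[:, 3]       # cluster_size (was hard-coded before this change)
print(array_spots_nuc[:, 15])                  # -> [1. 7.]
```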
@@ -1,7 +1,14 @@
- """Pipeline module for MicroLive.
+ """Pipeline module for MicroLive FRAP analysis.

- This module is part of the microlive package.
+ This module is part of the microlive package and provides functions for
+ Fluorescence Recovery After Photobleaching (FRAP) analysis.
+
+ The pipeline uses a pretrained Cellpose model for nuclei segmentation that
+ is automatically downloaded from GitHub on first use.
  """
+ import os
+ import traceback
+
  from microlive.imports import *

  from skimage.feature import canny
@@ -12,6 +19,95 @@ from skimage.morphology import binary_opening, binary_closing
  from skimage.measure import label, regionprops
  from skimage.transform import hough_circle, hough_circle_peaks

+ # Import model downloader with graceful fallback
+ try:
+     from microlive.utils.model_downloader import get_frap_nuclei_model_path
+     _HAS_MODEL_DOWNLOADER = True
+ except ImportError:
+     _HAS_MODEL_DOWNLOADER = False
+
+ import logging
+ logger = logging.getLogger(__name__)
+
+
+ # =============================================================================
+ # GPU Detection and MPS Compatibility (aligned with microscopy.py)
+ # =============================================================================
+
+ class PatchMPSFloat64:
+     """
+     Context manager to safely monkeypatch torch.zeros on MPS devices
+     to force float32 instead of float64 (which is not supported).
+
+     Copied from microlive.microscopy for self-contained FRAP pipeline.
+     """
+     def __init__(self):
+         self.original_zeros = torch.zeros
+         self.is_mps = torch.backends.mps.is_available() and torch.backends.mps.is_built()
+
+     def __enter__(self):
+         if not self.is_mps:
+             return
+
+         def patched_zeros(*args, **kwargs):
+             # Check if device is MPS (either string or torch.device)
+             device = kwargs.get('device', None)
+             is_target_device = False
+             if device is not None:
+                 if isinstance(device, str) and 'mps' in device:
+                     is_target_device = True
+                 elif isinstance(device, torch.device) and device.type == 'mps':
+                     is_target_device = True
+
+             # Check if dtype is float64/double
+             dtype = kwargs.get('dtype', None)
+             is_target_dtype = (dtype == torch.float64 or dtype == torch.double)
+
+             if is_target_device and is_target_dtype:
+                 kwargs['dtype'] = torch.float32
+
+             return self.original_zeros(*args, **kwargs)
+
+         torch.zeros = patched_zeros
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if self.is_mps:
+             torch.zeros = self.original_zeros
+
+
+ def _detect_gpu():
+     """
+     Detect available GPU (CUDA or MPS) for Cellpose.
+
+     Returns:
+         bool: True if GPU is available (CUDA or MPS), False otherwise.
+     """
+     os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
+     return torch.cuda.is_available() or torch.backends.mps.is_available()
+
+
+ def _get_frap_nuclei_model():
+     """
+     Get the path to the pretrained FRAP nuclei segmentation model.
+
+     Downloads from GitHub on first use, caches locally in ~/.microlive/models/.
+     Returns None if download fails, allowing fallback to default Cellpose model.
+
+     Returns:
+         str or None: Path to the model file, or None if unavailable.
+     """
+     if not _HAS_MODEL_DOWNLOADER:
+         logger.debug("Model downloader not available, using default nuclei model")
+         return None
+
+     try:
+         model_path = get_frap_nuclei_model_path()
+         logger.info(f"Using pretrained FRAP nuclei model: {model_path}")
+         return model_path
+     except Exception as e:
+         logger.warning(f"Could not load FRAP nuclei model: {e}. Using default.")
+         return None
+
  def read_lif_files_in_folder(folder_path):
  # create funtion that read all the .lif files in a folder and return the list of images
  list_folders = list(folder_path.glob('*.lif'))
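As a quick illustration of what the new context manager does, the sketch below routes a float64 `torch.zeros` call through it; on an Apple Silicon MPS device the tensor comes back as float32, while on CPU or CUDA the call is untouched. This assumes `PatchMPSFloat64` (defined above) and `torch` are in scope:

```python
import torch

device = "mps" if torch.backends.mps.is_available() else "cpu"

# Inside the context manager, float64 allocations targeting MPS are downgraded to float32.
with PatchMPSFloat64():
    z = torch.zeros((2, 2), dtype=torch.float64, device=device)

print(z.dtype)  # torch.float32 on MPS, torch.float64 elsewhere
```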
@@ -160,43 +256,111 @@ def find_frap_coordinates(image_TXY, frap_time, stable_FRAP_channel, min_diamete
  return None, None


- def segment_image(image_TXY, step_size=5, pretrained_model_segmentation=None, frap_time=None, pixel_dilation_pseudo_cytosol=10,stable_FRAP_channel=0,min_diameter=10):
+ def segment_image(image_TXY, step_size=5, pretrained_model_segmentation='auto', frap_time=None, pixel_dilation_pseudo_cytosol=10,stable_FRAP_channel=0,min_diameter=10):
+     """
+     Segment nuclei in FRAP image stack using Cellpose.
+
+     Args:
+         image_TXY: 3D image array (Time, X, Y).
+         step_size: Number of frames between segmentations.
+         pretrained_model_segmentation: Model to use:
+             - 'auto' (default): Auto-download and use FRAP-optimized model from GitHub
+             - None or 'nuclei': Use default Cellpose nuclei model
+             - str path: Use custom pretrained model at given path
+         frap_time: Frame index of FRAP event.
+         pixel_dilation_pseudo_cytosol: Pixels to dilate for pseudo-cytosol.
+         stable_FRAP_channel: Channel index for stable signal.
+         min_diameter: Minimum ROI diameter in pixels.
+
+     Returns:
+         Tuple of (masks_TXY, background_mask, pseudo_cytosol_masks_TXY).
+     """
  num_pixels_to_dilate = 1
- use_gpu = False # or True if you want to try MPS on Apple Silicon
- if pretrained_model_segmentation is not None:
+
+ # GPU detection (aligned with microscopy.py)
+ use_gpu = _detect_gpu()
+ logger.debug(f"FRAP Pipeline: GPU available = {use_gpu}")
+
+ # Ensure image is float32 for MPS compatibility
+ image_TXY = image_TXY.astype(np.float32)
+
+ # Determine which model to use
+ if pretrained_model_segmentation == 'auto':
+     # Auto-download FRAP-optimized model from GitHub
+     pretrained_model_segmentation = _get_frap_nuclei_model()
+
+ # Helper function to run Cellpose with error handling
+ def _run_cellpose_eval(model, image, model_type_fallback=None, **kwargs):
+     """Run Cellpose evaluation with MPS error handling and CPU fallback."""
+     nonlocal use_gpu
+     try:
+         with PatchMPSFloat64():
+             return model.eval(image, **kwargs)[0]
+     except RuntimeError as e:
+         if "sparse" in str(e) and torch.backends.mps.is_available():
+             logger.warning(f"MPS sparse error detected: {e}. Retrying with resample=False.")
+             try:
+                 kwargs['resample'] = False
+                 with PatchMPSFloat64():
+                     return model.eval(image, **kwargs)[0]
+             except RuntimeError as e2:
+                 logger.warning(f"MPS error persisted: {e2}. Falling back to CPU.")
+                 # Reinitialize model on CPU
+                 if model_type_fallback is not None:
+                     model = models.CellposeModel(gpu=False, model_type=model_type_fallback)
+                 else:
+                     model = models.CellposeModel(gpu=False, pretrained_model=pretrained_model_segmentation)
+                 use_gpu = False
+                 kwargs.pop('resample', None) # Reset resample
+                 return model.eval(image, **kwargs)[0]
+         else:
+             logger.error(f"Cellpose RuntimeError: {e}")
+             logger.error(traceback.format_exc())
+             return np.zeros(image.shape[:2], dtype=np.uint16)
+     except Exception as e:
+         logger.error(f"Cellpose error: {e}")
+         logger.error(traceback.format_exc())
+         return np.zeros(image.shape[:2], dtype=np.uint16)
+
+ # Initialize models
+ if pretrained_model_segmentation is not None and pretrained_model_segmentation != 'nuclei':
+     logger.info(f"Using pretrained model for nuclei segmentation")
  model_nucleus = models.CellposeModel(
  gpu=use_gpu,
  pretrained_model=pretrained_model_segmentation
  )
  else:
+     logger.info("Using default Cellpose nuclei model")
  model_nucleus = models.CellposeModel(
  gpu=use_gpu,
  model_type='nuclei'
  )
- #model_cyto = models.Cellpose(gpu=False, model_type='cyto2')
- model_cyto = models.CellposeModel( gpu=use_gpu, model_type='cyto2')
+ model_cyto = models.CellposeModel(gpu=use_gpu, model_type='cyto2')
+
  num_steps = (image_TXY.shape[0] + step_size - 1) // step_size
  list_masks = []
  list_selected_mask_id = []
  list_selected_masks = []
  list_masks_cyto = []
+
  # If frap_time is provided, segment the FRAP images and select the mask with maximum intensity change
  if frap_time is not None:
  # Ensure frap_time is within valid range
  if frap_time < 1 or frap_time >= image_TXY.shape[0] - 1:
  raise ValueError("frap_time must be within the range of the image stack.")
  # Segment the image at frap_time
- #masks_frap = model_nucleus.eval(image_TXY[frap_time], normalize=True, channels=[0,0], flow_threshold=0.8, diameter=150, min_size=100)[0]
- masks_frap = model_nucleus.eval(
+ masks_frap = _run_cellpose_eval(
+     model_nucleus,
  image_TXY[frap_time],
- channels=[0, 0], # ← add this!
+     model_type_fallback='nuclei',
+     channels=[0, 0],
  normalize=True,
  flow_threshold=1,
  diameter=150,
  min_size=50
- )[0]
+ )
  # remove all the maks that are touching the border
- masks_frap =remove_border_masks(masks_frap,min_size=50)
+ masks_frap = remove_border_masks(masks_frap, min_size=50)
  # Get unique mask labels (excluding background)
  mask_labels = np.unique(masks_frap)
  mask_labels = mask_labels[mask_labels != 0]
@@ -210,13 +374,10 @@ def segment_image(image_TXY, step_size=5, pretrained_model_segmentation=None, fr
  selected_mask_frap = binary_dilation(selected_mask_frap, iterations=num_pixels_to_dilate).astype('int')
  break
  else:
- #selected_mask_id_frap = None
  selected_mask_frap = None
  else:
- #selected_mask_id_frap = None
  selected_mask_frap = None
  else:
- #selected_mask_id_frap = None
  selected_mask_frap = None
  if selected_mask_frap is None:
  return None, None, None
@@ -224,19 +385,29 @@ def segment_image(image_TXY, step_size=5, pretrained_model_segmentation=None, fr
  for step in range(num_steps):
  i = step * step_size
  # Detecting masks in i-th frame
- masks = model_nucleus.eval(
+ masks = _run_cellpose_eval(
+     model_nucleus,
  image_TXY[i],
- channels=[0, 0],
+     model_type_fallback='nuclei',
+     channels=[0, 0],
  normalize=True,
  flow_threshold=1,
  diameter=150,
  min_size=50
- )[0]
+ )
  list_masks.append(masks)
- masks =remove_border_masks(masks,min_size=50)
+ masks = remove_border_masks(masks, min_size=50)
  # Detect cytosol masks only every `step_size` frames
  if step % 2 == 0:
- masks_cyto = model_cyto.eval(image_TXY[i], normalize=True, flow_threshold=0.5, diameter=250, min_size=100)[0]
+ masks_cyto = _run_cellpose_eval(
+     model_cyto,
+     image_TXY[i],
+     model_type_fallback='cyto2',
+     normalize=True,
+     flow_threshold=0.5,
+     diameter=250,
+     min_size=100
+ )
  list_masks_cyto.append(masks_cyto)
  if frap_time is None:
  # Selecting the mask that is in the center of the image
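Putting the new model-selection options together, a hedged usage sketch of `segment_image` as documented above; the image stack and FRAP frame index are synthetic placeholders, and the function is assumed to be importable from this FRAP pipeline module:

```python
import numpy as np

# Synthetic (T, X, Y) stack and FRAP frame index, for illustration only.
image_TXY = (np.random.rand(60, 256, 256) * 255).astype(np.float32)
frap_frame = 20

# 'auto' (default): download/cache the FRAP-optimized Cellpose model from GitHub.
# Alternatives: pretrained_model_segmentation=None or 'nuclei' (stock Cellpose model),
# or a filesystem path to a custom pretrained model.
masks_TXY, background_mask, pseudo_cytosol_masks_TXY = segment_image(
    image_TXY,
    pretrained_model_segmentation='auto',
    frap_time=frap_frame,
)
```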
@@ -311,7 +482,7 @@ def segment_image(image_TXY, step_size=5, pretrained_model_segmentation=None, fr



- def create_image_arrays(list_concatenated_images, selected_image=0, FRAP_channel_to_quantify=0,pretrained_model_segmentation=None,frap_time=None, starting_changing_frame=40, step_size_increase=5,min_diameter=10):
+ def create_image_arrays(list_concatenated_images, selected_image=0, FRAP_channel_to_quantify=0,pretrained_model_segmentation='auto',frap_time=None, starting_changing_frame=40, step_size_increase=5,min_diameter=10, segmentation_step_size=5):
  image_TZXYC = list_concatenated_images[selected_image] # shape (T Z Y X C)
  print('Image with shape (T Z Y X C):\n ' ,list_concatenated_images[selected_image].shape) # TZYXC
  print('Original Image pixel ', 'min: {:.2f}, max: {:.2f}, mean: {:.2f}, std: {:.2f}'.format(np.min(image_TZXYC), np.max(image_TZXYC), np.mean(image_TZXYC), np.std(image_TZXYC)) )
@@ -328,7 +499,7 @@ def create_image_arrays(list_concatenated_images, selected_image=0, FRAP_channel
  image_TXY_stable_FRAP = image_TZXYC[:,0,:,:,stable_FRAP_channel] # shape (T X Y)
  image_TXY_stable_FRAP_8bit = (image_TXY_stable_FRAP - np.min(image_TXY_stable_FRAP)) / (np.max(image_TXY_stable_FRAP) - np.min(image_TXY_stable_FRAP)) * 255

- masks_TXY, background_mask, pseudo_cytosol_masks_TXY = segment_image(image_TXY_stable_FRAP_8bit, step_size=5, pretrained_model_segmentation=pretrained_model_segmentation,frap_time=frap_time,stable_FRAP_channel=FRAP_channel_to_quantify,min_diameter=min_diameter)
+ masks_TXY, background_mask, pseudo_cytosol_masks_TXY = segment_image(image_TXY_stable_FRAP_8bit, step_size=segmentation_step_size, pretrained_model_segmentation=pretrained_model_segmentation,frap_time=frap_time,stable_FRAP_channel=FRAP_channel_to_quantify,min_diameter=min_diameter)

  if masks_TXY is None:
  return None, None, None, None, None, None, None
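With the new `segmentation_step_size` parameter, the Cellpose cadence can now be tuned by the caller instead of the hard-coded step of 5. A hedged usage sketch; the input list and FRAP frame are synthetic, and the 7-element return tuple is left unpacked because its member names are not shown in this diff:

```python
import numpy as np

# Synthetic single-image list with shape (T, Z, Y, X, C), for illustration only.
list_concatenated_images = [np.random.rand(60, 1, 256, 256, 1).astype(np.float32)]

results = create_image_arrays(
    list_concatenated_images,
    selected_image=0,
    FRAP_channel_to_quantify=0,
    pretrained_model_segmentation='auto',  # or None/'nuclei', or a model path
    frap_time=20,
    min_diameter=10,
    segmentation_step_size=5,              # new: frames between Cellpose segmentations
)
```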
@@ -969,7 +1140,25 @@ def plot_frap_quantification_all_images(df_tracking_all, save_plot=False, plot_n
  return np.array(frames), np.array(mean_values), np.array(std_values)


- def create_pdf(list_combined_image_paths,pdf_name, remove_original_images=False):
+ def create_pdf(list_combined_image_paths, pdf_name, remove_original_images=False):
+     """Create a PDF document from a list of images.
+
+     Args:
+         list_combined_image_paths: List of Path objects to images.
+         pdf_name: Output PDF file path.
+         remove_original_images: If True, delete original images after adding to PDF.
+
+     Returns:
+         None
+
+     Note:
+         Requires fpdf/fpdf2 package. If not installed, prints a warning and skips PDF creation.
+     """
+     if FPDF is None:
+         print(f"Warning: fpdf not installed. Skipping PDF creation: {pdf_name}")
+         print("Install with: pip install fpdf2")
+         return None
+
  pdf = FPDF()
  pdf.set_auto_page_break(auto=True, margin=15)
  pdf.set_font("Arial", size=12)
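A short usage sketch of the guarded `create_pdf`, assuming it is imported from this pipeline module; the image paths below are placeholders for the combined result figures. When fpdf/fpdf2 is missing, the call now prints a warning and returns None instead of attempting to build the PDF:

```python
from pathlib import Path

# Placeholder paths; in the pipeline these point at the combined result figures.
figure_paths = [Path("cell_01_summary.png"), Path("cell_02_summary.png")]

# Writes report.pdf when fpdf/fpdf2 is installed; otherwise warns and returns None.
create_pdf(figure_paths, pdf_name="report.pdf", remove_original_images=False)
```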
@@ -239,31 +239,35 @@ def pipeline_folding_efficiency(original_lif_name, list_images,list_images_names
  plt.grid(axis='y', linestyle='--', alpha=0.7)
  plt.savefig(path_summary_wisker_plot, dpi=300, bbox_inches='tight')
  plt.show()
- # Create PDF with images and quality text
- pdf = FPDF()
- pdf.set_auto_page_break(auto=True, margin=15)
- pdf.set_font("Arial", size=12)
- for i, image_path in enumerate(list_image_paths_for_pdf):
-     pdf.add_page()
-     pdf.set_xy(10, 10)
-     pdf.cell(0, 10, list_quality_text[i], 0, 1, 'L')
-     if low_quality_pdf:
-         img = Image.open(image_path)
-         base_width = 150 # Desired width in mm in the PDF
-         w_percent = (base_width / float(img.size[0]))
-         h_size = int((float(img.size[1]) * float(w_percent))) # Height in mm to maintain aspect ratio
-         # Temporarily save resized image for quality adjustment
-         temp_path = Path(image_path).with_name(Path(image_path).stem + '_temp').with_suffix('.jpg')
-         img.save(temp_path, 'JPEG', quality=85) # You can adjust quality to manage file size
-         pdf.image(str(temp_path), x=25, y=25, w=base_width, h=h_size) # Now specifying both width and height
-         temp_path.unlink() # Delete the temporary file
-     else:
-         # Directly embed the image at specified dimensions without resizing beforehand
-         img = Image.open(image_path)
-         w_percent = (150 / float(img.size[0]))
-         h_size = int((float(img.size[1]) * float(w_percent))) # Calculate height to maintain aspect ratio
-         pdf.image(str(image_path), x=25, y=25, w=150, h=h_size)
- pdf.output(path_summary_pdf)
+ # Create PDF with images and quality text (optional - requires fpdf)
+ if FPDF is not None:
+     pdf = FPDF()
+     pdf.set_auto_page_break(auto=True, margin=15)
+     pdf.set_font("Arial", size=12)
+     for i, image_path in enumerate(list_image_paths_for_pdf):
+         pdf.add_page()
+         pdf.set_xy(10, 10)
+         pdf.cell(0, 10, list_quality_text[i], 0, 1, 'L')
+         if low_quality_pdf:
+             img = Image.open(image_path)
+             base_width = 150 # Desired width in mm in the PDF
+             w_percent = (base_width / float(img.size[0]))
+             h_size = int((float(img.size[1]) * float(w_percent))) # Height in mm to maintain aspect ratio
+             # Temporarily save resized image for quality adjustment
+             temp_path = Path(image_path).with_name(Path(image_path).stem + '_temp').with_suffix('.jpg')
+             img.save(temp_path, 'JPEG', quality=85) # You can adjust quality to manage file size
+             pdf.image(str(temp_path), x=25, y=25, w=base_width, h=h_size) # Now specifying both width and height
+             temp_path.unlink() # Delete the temporary file
+         else:
+             # Directly embed the image at specified dimensions without resizing beforehand
+             img = Image.open(image_path)
+             w_percent = (150 / float(img.size[0]))
+             h_size = int((float(img.size[1]) * float(w_percent))) # Calculate height to maintain aspect ratio
+             pdf.image(str(image_path), x=25, y=25, w=150, h=h_size)
+     pdf.output(path_summary_pdf)
+ else:
+     print(f"Warning: fpdf not installed. Skipping PDF creation: {path_summary_pdf}")
+     print("Install with: pip install fpdf2")

  # save metadata
  metadata_folding_efficiency(