microlive-1.0.12-py3-none-any.whl → microlive-1.0.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
microlive/__init__.py CHANGED
@@ -23,7 +23,7 @@ Authors:
  Nathan L. Nowling, Brian Munsky, Ning Zhao
  """
 
- __version__ = "1.0.12"
+ __version__ = "1.0.14"
  __author__ = "Luis U. Aguilera, William S. Raymond, Rhiannon M. Sears, Nathan L. Nowling, Brian Munsky, Ning Zhao"
 
  # Package name (for backward compatibility)
microlive/microscopy.py CHANGED
@@ -1423,6 +1423,13 @@ class Intensity():
  optimize_spot_size: Search for optimal spot size (5-11 px). Slower. Defaults to False.
  allow_subpixel_repositioning: Search ±2px for better center. Defaults to False.
  fast_gaussian_fit: Use moment-based (fast) vs full Gaussian fit. Defaults to True.
+ snr_method: Method for calculating signal-to-noise ratio. Options:
+ - 'peak' (default): Uses the maximum pixel value in the spot region as signal.
+ This is the standard definition of SNR: (max_spot - mean_bg) / std_bg.
+ Recommended for most applications.
+ - 'disk_doughnut': Uses mean disk intensity as signal instead of peak value.
+ Calculates SNR as: (mean_disk - mean_bg) / std_bg.
+ More robust when data is very noisy or spots are dim.
 
  Attributes:
  number_spots: Number of spots to measure.
@@ -1431,7 +1438,7 @@ class Intensity():
 
  def __init__(self, original_image, spot_size=5, array_spot_location_z_y_x=None,
  use_max_projection=False, optimize_spot_size=False, allow_subpixel_repositioning=False,
- fast_gaussian_fit=True):
+ fast_gaussian_fit=True, snr_method='peak'):
  self.original_image = original_image
  if array_spot_location_z_y_x is None:
  self.array_spot_location_z_y_x = np.array([[0, 0, 0]])
@@ -1449,6 +1456,7 @@ class Intensity():
  self.optimize_spot_size = optimize_spot_size
  self.allow_subpixel_repositioning = allow_subpixel_repositioning
  self.fast_gaussian_fit = fast_gaussian_fit
+ self.snr_method = snr_method
 
  def two_dimensional_gaussian(self, xy, amplitude, x0, y0, sigma_x, sigma_y, offset):
  """Evaluate 2D Gaussian at given coordinates."""
@@ -1551,11 +1559,23 @@ class Intensity():
  def calculate_intensity(self):
  """Calculate intensity metrics for all spots across all channels.
 
+ The signal-to-noise ratio (SNR) calculation method is controlled by the
+ `snr_method` parameter set during class initialization:
+
+ - **'peak'** (default): Uses the maximum pixel value in the spot region
+ as the signal. This is the standard definition of SNR commonly used
+ in microscopy: SNR = (max_spot - mean_background) / std_background.
+
+ - **'disk_doughnut'**: Uses the mean disk intensity as signal instead
+ of the peak value. This method is more robust for very noisy data
+ or dim spots where the maximum value may be unreliable due to noise.
+ SNR = (mean_disk - mean_background) / std_background.
+
  Returns:
  tuple: 8-element tuple of arrays, each with shape [N_spots, N_channels]:
  - intensities: Background-subtracted intensity (disk - doughnut mean).
  - intensities_std: Standard deviation within disk region.
- - intensities_snr: Signal-to-noise ratio (disk-bg) / std(bg).
+ - intensities_snr: Signal-to-noise ratio (calculation depends on snr_method).
  - intensities_background_mean: Mean background from doughnut.
  - intensities_background_std: Std of background from doughnut.
  - psfs_amplitude: PSF peak amplitude from Gaussian fit (or NaN).
@@ -1577,11 +1597,31 @@ class Intensity():
  donut_values = tem_img[~np.isnan(tem_img)].astype('uint16')
  return donut_values
 
- def signal_to_noise_ratio(values_disk, values_donut):
- mean_disk = np.mean(values_disk.astype(float))
+ def signal_to_noise_ratio(values_disk, values_donut, snr_method='peak'):
+ """Calculate signal-to-noise ratio for a spot.
+
+ Args:
+ values_disk: Pixel values in the spot disk region.
+ values_donut: Pixel values in the background doughnut region.
+ snr_method: 'peak' uses max pixel value as signal (standard),
+ 'disk_doughnut' uses mean disk intensity (robust for noisy data).
+
+ Returns:
+ tuple: (SNR, mean_background, std_background)
+ """
  mean_donut = np.mean(values_donut.astype(float))
  std_donut = np.std(values_donut.astype(float))
- SNR = (mean_disk - mean_donut) / std_donut if std_donut > 0 else 0
+
+ if snr_method == 'peak':
+ # Standard SNR: use peak (maximum) pixel value as signal
+ max_disk = np.max(values_disk.astype(float))
+ signal = max_disk - mean_donut
+ else:
+ # disk_doughnut method: use mean disk intensity as signal
+ mean_disk = np.mean(values_disk.astype(float))
+ signal = mean_disk - mean_donut
+
+ SNR = signal / std_donut if std_donut > 0 else 0
  return SNR, mean_donut, std_donut
 
  def disk_donut(values_disk, values_donut, spot_size):
@@ -1749,7 +1789,7 @@ class Intensity():
  # Use the updated integer positions for the final intensity crop
  crop_disk_and_donut = return_crop(frame_data[:,:,i], current_x_int, current_y_int, spot_range=crop_range)
  values_donut = return_donut(crop_disk_and_donut, spot_size=best_size)
- intensities_snr[sp,i], intensities_background_mean[sp,i], intensities_background_std[sp,i] = signal_to_noise_ratio(values_disk, values_donut)
+ intensities_snr[sp,i], intensities_background_mean[sp,i], intensities_background_std[sp,i] = signal_to_noise_ratio(values_disk, values_donut, self.snr_method)
  # disk_donut calculation
  intensities[sp,i], intensities_std[sp,i] = disk_donut(values_disk, values_donut, spot_size=best_size)
  intensities_total[sp,i] = np.sum(values_disk)
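
The new `snr_method` option only changes the signal term of the ratio: 'peak' uses the brightest disk pixel, 'disk_doughnut' uses the mean disk value, and both are normalized by the doughnut background. A minimal sketch of the arithmetic, not part of the diff; the pixel values below are made-up:

    import numpy as np

    rng = np.random.default_rng(0)
    values_donut = rng.normal(100, 10, size=200)  # hypothetical background (doughnut) pixels
    values_disk = rng.normal(140, 10, size=25)    # hypothetical spot (disk) pixels

    mean_bg, std_bg = values_donut.mean(), values_donut.std()

    # 'peak' (default): signal is the brightest pixel in the disk
    snr_peak = (values_disk.max() - mean_bg) / std_bg

    # 'disk_doughnut': signal is the mean disk intensity, more robust for dim or noisy spots
    snr_disk = (values_disk.mean() - mean_bg) / std_bg

    print(round(snr_peak, 2), round(snr_disk, 2))  # peak-based SNR is always >= disk-based SNR

In the package itself the choice is made once at construction time, e.g. Intensity(image, spot_size=5, array_spot_location_z_y_x=spots, snr_method='disk_doughnut'), and calculate_intensity() then reports the intensities_snr array using that method.
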
microlive/pipelines/pipeline_FRAP.py CHANGED
@@ -1,7 +1,14 @@
- """Pipeline module for MicroLive.
+ """Pipeline module for MicroLive FRAP analysis.
 
- This module is part of the microlive package.
+ This module is part of the microlive package and provides functions for
+ Fluorescence Recovery After Photobleaching (FRAP) analysis.
+
+ The pipeline uses a pretrained Cellpose model for nuclei segmentation that
+ is automatically downloaded from GitHub on first use.
  """
+ import os
+ import traceback
+
  from microlive.imports import *
 
  from skimage.feature import canny
@@ -12,6 +19,95 @@ from skimage.morphology import binary_opening, binary_closing
  from skimage.measure import label, regionprops
  from skimage.transform import hough_circle, hough_circle_peaks
 
+ # Import model downloader with graceful fallback
+ try:
+ from microlive.utils.model_downloader import get_frap_nuclei_model_path
+ _HAS_MODEL_DOWNLOADER = True
+ except ImportError:
+ _HAS_MODEL_DOWNLOADER = False
+
+ import logging
+ logger = logging.getLogger(__name__)
+
+
+ # =============================================================================
+ # GPU Detection and MPS Compatibility (aligned with microscopy.py)
+ # =============================================================================
+
+ class PatchMPSFloat64:
+ """
+ Context manager to safely monkeypatch torch.zeros on MPS devices
+ to force float32 instead of float64 (which is not supported).
+
+ Copied from microlive.microscopy for self-contained FRAP pipeline.
+ """
+ def __init__(self):
+ self.original_zeros = torch.zeros
+ self.is_mps = torch.backends.mps.is_available() and torch.backends.mps.is_built()
+
+ def __enter__(self):
+ if not self.is_mps:
+ return
+
+ def patched_zeros(*args, **kwargs):
+ # Check if device is MPS (either string or torch.device)
+ device = kwargs.get('device', None)
+ is_target_device = False
+ if device is not None:
+ if isinstance(device, str) and 'mps' in device:
+ is_target_device = True
+ elif isinstance(device, torch.device) and device.type == 'mps':
+ is_target_device = True
+
+ # Check if dtype is float64/double
+ dtype = kwargs.get('dtype', None)
+ is_target_dtype = (dtype == torch.float64 or dtype == torch.double)
+
+ if is_target_device and is_target_dtype:
+ kwargs['dtype'] = torch.float32
+
+ return self.original_zeros(*args, **kwargs)
+
+ torch.zeros = patched_zeros
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if self.is_mps:
+ torch.zeros = self.original_zeros
+
+
+ def _detect_gpu():
+ """
+ Detect available GPU (CUDA or MPS) for Cellpose.
+
+ Returns:
+ bool: True if GPU is available (CUDA or MPS), False otherwise.
+ """
+ os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
+ return torch.cuda.is_available() or torch.backends.mps.is_available()
+
+
+ def _get_frap_nuclei_model():
+ """
+ Get the path to the pretrained FRAP nuclei segmentation model.
+
+ Downloads from GitHub on first use, caches locally in ~/.microlive/models/.
+ Returns None if download fails, allowing fallback to default Cellpose model.
+
+ Returns:
+ str or None: Path to the model file, or None if unavailable.
+ """
+ if not _HAS_MODEL_DOWNLOADER:
+ logger.debug("Model downloader not available, using default nuclei model")
+ return None
+
+ try:
+ model_path = get_frap_nuclei_model_path()
+ logger.info(f"Using pretrained FRAP nuclei model: {model_path}")
+ return model_path
+ except Exception as e:
+ logger.warning(f"Could not load FRAP nuclei model: {e}. Using default.")
+ return None
+
  def read_lif_files_in_folder(folder_path):
  # create funtion that read all the .lif files in a folder and return the list of images
  list_folders = list(folder_path.glob('*.lif'))
@@ -160,43 +256,111 @@ def find_frap_coordinates(image_TXY, frap_time, stable_FRAP_channel, min_diamete
  return None, None
 
 
- def segment_image(image_TXY, step_size=5, pretrained_model_segmentation=None, frap_time=None, pixel_dilation_pseudo_cytosol=10,stable_FRAP_channel=0,min_diameter=10):
+ def segment_image(image_TXY, step_size=5, pretrained_model_segmentation='auto', frap_time=None, pixel_dilation_pseudo_cytosol=10,stable_FRAP_channel=0,min_diameter=10):
+ """
+ Segment nuclei in FRAP image stack using Cellpose.
+
+ Args:
+ image_TXY: 3D image array (Time, X, Y).
+ step_size: Number of frames between segmentations.
+ pretrained_model_segmentation: Model to use:
+ - 'auto' (default): Auto-download and use FRAP-optimized model from GitHub
+ - None or 'nuclei': Use default Cellpose nuclei model
+ - str path: Use custom pretrained model at given path
+ frap_time: Frame index of FRAP event.
+ pixel_dilation_pseudo_cytosol: Pixels to dilate for pseudo-cytosol.
+ stable_FRAP_channel: Channel index for stable signal.
+ min_diameter: Minimum ROI diameter in pixels.
+
+ Returns:
+ Tuple of (masks_TXY, background_mask, pseudo_cytosol_masks_TXY).
+ """
  num_pixels_to_dilate = 1
- use_gpu = False # or True if you want to try MPS on Apple Silicon
- if pretrained_model_segmentation is not None:
+
+ # GPU detection (aligned with microscopy.py)
+ use_gpu = _detect_gpu()
+ logger.debug(f"FRAP Pipeline: GPU available = {use_gpu}")
+
+ # Ensure image is float32 for MPS compatibility
+ image_TXY = image_TXY.astype(np.float32)
+
+ # Determine which model to use
+ if pretrained_model_segmentation == 'auto':
+ # Auto-download FRAP-optimized model from GitHub
+ pretrained_model_segmentation = _get_frap_nuclei_model()
+
+ # Helper function to run Cellpose with error handling
+ def _run_cellpose_eval(model, image, model_type_fallback=None, **kwargs):
+ """Run Cellpose evaluation with MPS error handling and CPU fallback."""
+ nonlocal use_gpu
+ try:
+ with PatchMPSFloat64():
+ return model.eval(image, **kwargs)[0]
+ except RuntimeError as e:
+ if "sparse" in str(e) and torch.backends.mps.is_available():
+ logger.warning(f"MPS sparse error detected: {e}. Retrying with resample=False.")
+ try:
+ kwargs['resample'] = False
+ with PatchMPSFloat64():
+ return model.eval(image, **kwargs)[0]
+ except RuntimeError as e2:
+ logger.warning(f"MPS error persisted: {e2}. Falling back to CPU.")
+ # Reinitialize model on CPU
+ if model_type_fallback is not None:
+ model = models.CellposeModel(gpu=False, model_type=model_type_fallback)
+ else:
+ model = models.CellposeModel(gpu=False, pretrained_model=pretrained_model_segmentation)
+ use_gpu = False
+ kwargs.pop('resample', None) # Reset resample
+ return model.eval(image, **kwargs)[0]
+ else:
+ logger.error(f"Cellpose RuntimeError: {e}")
+ logger.error(traceback.format_exc())
+ return np.zeros(image.shape[:2], dtype=np.uint16)
+ except Exception as e:
+ logger.error(f"Cellpose error: {e}")
+ logger.error(traceback.format_exc())
+ return np.zeros(image.shape[:2], dtype=np.uint16)
+
+ # Initialize models
+ if pretrained_model_segmentation is not None and pretrained_model_segmentation != 'nuclei':
+ logger.info(f"Using pretrained model for nuclei segmentation")
  model_nucleus = models.CellposeModel(
  gpu=use_gpu,
  pretrained_model=pretrained_model_segmentation
  )
  else:
+ logger.info("Using default Cellpose nuclei model")
  model_nucleus = models.CellposeModel(
  gpu=use_gpu,
  model_type='nuclei'
  )
- #model_cyto = models.Cellpose(gpu=False, model_type='cyto2')
- model_cyto = models.CellposeModel( gpu=use_gpu, model_type='cyto2')
+ model_cyto = models.CellposeModel(gpu=use_gpu, model_type='cyto2')
+
  num_steps = (image_TXY.shape[0] + step_size - 1) // step_size
  list_masks = []
  list_selected_mask_id = []
  list_selected_masks = []
  list_masks_cyto = []
+
  # If frap_time is provided, segment the FRAP images and select the mask with maximum intensity change
  if frap_time is not None:
  # Ensure frap_time is within valid range
  if frap_time < 1 or frap_time >= image_TXY.shape[0] - 1:
  raise ValueError("frap_time must be within the range of the image stack.")
  # Segment the image at frap_time
- #masks_frap = model_nucleus.eval(image_TXY[frap_time], normalize=True, channels=[0,0], flow_threshold=0.8, diameter=150, min_size=100)[0]
- masks_frap = model_nucleus.eval(
+ masks_frap = _run_cellpose_eval(
+ model_nucleus,
  image_TXY[frap_time],
- channels=[0, 0], # ← add this!
+ model_type_fallback='nuclei',
+ channels=[0, 0],
  normalize=True,
  flow_threshold=1,
  diameter=150,
  min_size=50
- )[0]
+ )
  # remove all the maks that are touching the border
- masks_frap =remove_border_masks(masks_frap,min_size=50)
+ masks_frap = remove_border_masks(masks_frap, min_size=50)
  # Get unique mask labels (excluding background)
  mask_labels = np.unique(masks_frap)
  mask_labels = mask_labels[mask_labels != 0]
@@ -210,13 +374,10 @@ def segment_image(image_TXY, step_size=5, pretrained_model_segmentation=None, fr
  selected_mask_frap = binary_dilation(selected_mask_frap, iterations=num_pixels_to_dilate).astype('int')
  break
  else:
- #selected_mask_id_frap = None
  selected_mask_frap = None
  else:
- #selected_mask_id_frap = None
  selected_mask_frap = None
  else:
- #selected_mask_id_frap = None
  selected_mask_frap = None
  if selected_mask_frap is None:
  return None, None, None
@@ -224,19 +385,29 @@ def segment_image(image_TXY, step_size=5, pretrained_model_segmentation=None, fr
  for step in range(num_steps):
  i = step * step_size
  # Detecting masks in i-th frame
- masks = model_nucleus.eval(
+ masks = _run_cellpose_eval(
+ model_nucleus,
  image_TXY[i],
- channels=[0, 0],
+ model_type_fallback='nuclei',
+ channels=[0, 0],
  normalize=True,
  flow_threshold=1,
  diameter=150,
  min_size=50
- )[0]
+ )
  list_masks.append(masks)
- masks =remove_border_masks(masks,min_size=50)
+ masks = remove_border_masks(masks, min_size=50)
  # Detect cytosol masks only every `step_size` frames
  if step % 2 == 0:
- masks_cyto = model_cyto.eval(image_TXY[i], normalize=True, flow_threshold=0.5, diameter=250, min_size=100)[0]
+ masks_cyto = _run_cellpose_eval(
+ model_cyto,
+ image_TXY[i],
+ model_type_fallback='cyto2',
+ normalize=True,
+ flow_threshold=0.5,
+ diameter=250,
+ min_size=100
+ )
  list_masks_cyto.append(masks_cyto)
  if frap_time is None:
  # Selecting the mask that is in the center of the image
@@ -311,7 +482,7 @@ def segment_image(image_TXY, step_size=5, pretrained_model_segmentation=None, fr
 
 
 
- def create_image_arrays(list_concatenated_images, selected_image=0, FRAP_channel_to_quantify=0,pretrained_model_segmentation=None,frap_time=None, starting_changing_frame=40, step_size_increase=5,min_diameter=10):
+ def create_image_arrays(list_concatenated_images, selected_image=0, FRAP_channel_to_quantify=0,pretrained_model_segmentation='auto',frap_time=None, starting_changing_frame=40, step_size_increase=5,min_diameter=10):
  image_TZXYC = list_concatenated_images[selected_image] # shape (T Z Y X C)
  print('Image with shape (T Z Y X C):\n ' ,list_concatenated_images[selected_image].shape) # TZYXC
  print('Original Image pixel ', 'min: {:.2f}, max: {:.2f}, mean: {:.2f}, std: {:.2f}'.format(np.min(image_TZXYC), np.max(image_TZXYC), np.mean(image_TZXYC), np.std(image_TZXYC)) )
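
With 'auto' as the new default, segment_image and create_image_arrays pick up the FRAP-optimized Cellpose model without any extra arguments. A hedged usage sketch, not part of the diff; the synthetic stack and frap_time below are placeholders standing in for a real acquisition:

    import numpy as np
    from microlive.pipelines.pipeline_FRAP import segment_image

    # Placeholder (Time, X, Y) stack; replace with a loaded FRAP stack.
    image_TXY = np.random.poisson(50, size=(80, 256, 256)).astype(np.float32)

    # 'auto' (default): download and cache the FRAP-optimized nuclei model on first use
    masks_TXY, background_mask, pseudo_cytosol_masks_TXY = segment_image(image_TXY, frap_time=40)

    # Force the stock Cellpose 'nuclei' model instead
    masks_TXY, background_mask, pseudo_cytosol_masks_TXY = segment_image(
        image_TXY, frap_time=40, pretrained_model_segmentation='nuclei')

    # Or point at a custom pretrained model on disk (path is illustrative)
    masks_TXY, background_mask, pseudo_cytosol_masks_TXY = segment_image(
        image_TXY, frap_time=40, pretrained_model_segmentation='/path/to/custom_cellpose_model')

If the FRAP model cannot be downloaded, _get_frap_nuclei_model() returns None and the function falls back to the default 'nuclei' model, so the 'auto' path degrades gracefully when offline.
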
microlive/utils/__init__.py CHANGED
@@ -2,6 +2,12 @@
 
  from .device import get_device, is_gpu_available, get_device_info, check_gpu_status
  from .resources import get_icon_path, get_model_path
+ from .model_downloader import (
+ get_frap_nuclei_model_path,
+ cache_model,
+ list_cached_models,
+ MODEL_DIR,
+ )
 
  __all__ = [
  "get_device",
@@ -10,4 +16,9 @@ __all__ = [
  "check_gpu_status",
  "get_icon_path",
  "get_model_path",
+ # Model downloader
+ "get_frap_nuclei_model_path",
+ "cache_model",
+ "list_cached_models",
+ "MODEL_DIR",
  ]
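
With these re-exports the downloader is part of the public microlive.utils surface, so callers no longer need to reach into the submodule. A short sketch of the new names, grounded in the __all__ entries above:

    from microlive.utils import (
        get_frap_nuclei_model_path,  # download-on-first-use path to the FRAP nuclei model
        cache_model,                 # generic "ensure this model is cached" helper
        list_cached_models,          # inspect what is already on disk
        MODEL_DIR,                   # cache directory, defaults to ~/.microlive/models
    )

    print(MODEL_DIR)
    print(list_cached_models())
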
microlive/utils/model_downloader.py ADDED
@@ -0,0 +1,293 @@
+ """
+ Model download utilities for MicroLive.
+
+ This module provides functions to download and cache pretrained models from
+ the MicroLive GitHub repository. It follows the same patterns used by Cellpose
+ for robust model provisioning.
+
+ Models are downloaded on first use and cached locally in ~/.microlive/models/
+ to avoid repeated downloads.
+ """
+
+ import os
+ import ssl
+ import shutil
+ import tempfile
+ import logging
+ from pathlib import Path
+ from urllib.request import urlopen
+ from urllib.error import URLError, HTTPError
+
+ logger = logging.getLogger(__name__)
+
+ # =============================================================================
+ # Configuration
+ # =============================================================================
+
+ # Base URL for raw GitHub content
+ _GITHUB_RAW_BASE = "https://raw.githubusercontent.com/ningzhaoAnschutz/microlive/main"
+
+ # Model URLs - Add new models here
+ MODEL_URLS = {
+ "frap_nuclei": f"{_GITHUB_RAW_BASE}/modeling/cellpose_models/cellpose_models/FRAP_nuclei_model/models/cellpose_1728581750.581418",
+ }
+
+ # Local cache directory (similar to Cellpose's ~/.cellpose/models/)
+ _MODEL_DIR_ENV = os.environ.get("MICROLIVE_LOCAL_MODELS_PATH")
+ _MODEL_DIR_DEFAULT = Path.home() / ".microlive" / "models"
+ MODEL_DIR = Path(_MODEL_DIR_ENV) if _MODEL_DIR_ENV else _MODEL_DIR_DEFAULT
+
+
+ # =============================================================================
+ # Download Utilities (adapted from Cellpose)
+ # =============================================================================
+
+ def download_url_to_file(url: str, dst: str, progress: bool = True) -> None:
+ """
+ Download object at the given URL to a local path.
+
+ Adapted from Cellpose/torch implementation for robustness.
+
+ Args:
+ url: URL of the object to download.
+ dst: Full path where object will be saved.
+ progress: Whether to display a progress bar. Default: True.
+
+ Raises:
+ HTTPError: If the server returns an error status.
+ URLError: If the URL cannot be reached.
+ """
+ try:
+ from tqdm import tqdm
+ HAS_TQDM = True
+ except ImportError:
+ HAS_TQDM = False
+ progress = False
+
+ file_size = None
+
+ # Handle SSL certificate verification issues
+ ssl_context = ssl.create_default_context()
+ ssl_context.check_hostname = False
+ ssl_context.verify_mode = ssl.CERT_NONE
+
+ try:
+ u = urlopen(url, context=ssl_context)
+ except URLError as e:
+ raise URLError(f"Failed to connect to {url}: {e}")
+
+ meta = u.info()
+ if hasattr(meta, "getheaders"):
+ content_length = meta.getheaders("Content-Length")
+ else:
+ content_length = meta.get_all("Content-Length")
+
+ if content_length is not None and len(content_length) > 0:
+ file_size = int(content_length[0])
+
+ # Save to temp file first, then move (atomic operation)
+ dst = os.path.expanduser(dst)
+ dst_dir = os.path.dirname(dst)
+ os.makedirs(dst_dir, exist_ok=True)
+
+ f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
+ try:
+ if HAS_TQDM and progress:
+ with tqdm(total=file_size, disable=not progress, unit="B",
+ unit_scale=True, unit_divisor=1024,
+ desc=f"Downloading {Path(dst).name}") as pbar:
+ while True:
+ buffer = u.read(8192)
+ if len(buffer) == 0:
+ break
+ f.write(buffer)
+ pbar.update(len(buffer))
+ else:
+ # Simple download without progress bar
+ while True:
+ buffer = u.read(8192)
+ if len(buffer) == 0:
+ break
+ f.write(buffer)
+
+ f.close()
+ shutil.move(f.name, dst)
+ logger.info(f"Successfully downloaded model to {dst}")
+
+ except Exception as e:
+ f.close()
+ if os.path.exists(f.name):
+ os.remove(f.name)
+ raise RuntimeError(f"Download failed: {e}")
+ finally:
+ if os.path.exists(f.name):
+ try:
+ os.remove(f.name)
+ except OSError:
+ pass
+
+
+ # =============================================================================
+ # Model Cache Functions
+ # =============================================================================
+
+ def get_model_path(model_name: str) -> Path:
+ """
+ Get the local cache path for a model.
+
+ Args:
+ model_name: Name of the model (e.g., "frap_nuclei").
+
+ Returns:
+ Path to the cached model file.
+ """
+ return MODEL_DIR / model_name
+
+
+ def is_model_cached(model_name: str) -> bool:
+ """
+ Check if a model is already cached locally.
+
+ Args:
+ model_name: Name of the model.
+
+ Returns:
+ True if the model exists locally, False otherwise.
+ """
+ return get_model_path(model_name).exists()
+
+
+ def cache_model(model_name: str, force_download: bool = False) -> str:
+ """
+ Ensure a model is cached locally, downloading if necessary.
+
+ This function follows the Cellpose pattern:
+ 1. Check if model exists in local cache
+ 2. If not (or force_download=True), download from GitHub
+ 3. Return the local path
+
+ Args:
+ model_name: Name of the model (must be in MODEL_URLS).
+ force_download: If True, re-download even if cached.
+
+ Returns:
+ String path to the cached model file.
+
+ Raises:
+ ValueError: If model_name is not recognized.
+ RuntimeError: If download fails.
+ """
+ if model_name not in MODEL_URLS:
+ available = ", ".join(MODEL_URLS.keys())
+ raise ValueError(f"Unknown model '{model_name}'. Available: {available}")
+
+ MODEL_DIR.mkdir(parents=True, exist_ok=True)
+ cached_file = get_model_path(model_name)
+
+ if not cached_file.exists() or force_download:
+ url = MODEL_URLS[model_name]
+ logger.info(f"Downloading model '{model_name}' from {url}")
+ print(f"Downloading MicroLive model '{model_name}' (first time only)...")
+
+ try:
+ download_url_to_file(url, str(cached_file), progress=True)
+ except (HTTPError, URLError) as e:
+ raise RuntimeError(
+ f"Failed to download model '{model_name}' from GitHub. "
+ f"Error: {e}\n\n"
+ f"If this persists, you can manually download from:\n"
+ f" {url}\n"
+ f"And place it at:\n"
+ f" {cached_file}"
+ )
+ else:
+ logger.debug(f"Model '{model_name}' already cached at {cached_file}")
+
+ return str(cached_file)
+
+
+ # =============================================================================
+ # Convenience Functions for Specific Models
+ # =============================================================================
+
+ def get_frap_nuclei_model_path() -> str:
+ """
+ Get the path to the FRAP nuclei segmentation model.
+
+ Downloads the model from GitHub if not already cached locally.
+ The model is stored in ~/.microlive/models/frap_nuclei
+
+ Returns:
+ String path to the FRAP nuclei model file.
+
+ Example:
+ >>> from microlive.utils.model_downloader import get_frap_nuclei_model_path
+ >>> model_path = get_frap_nuclei_model_path()
+ >>> # Use with Cellpose
+ >>> from cellpose import models
+ >>> model = models.CellposeModel(pretrained_model=model_path)
+ """
+ return cache_model("frap_nuclei")
+
+
+ # =============================================================================
+ # Verification and Diagnostics
+ # =============================================================================
+
+ def verify_model_integrity(model_name: str) -> bool:
+ """
+ Verify that a cached model file exists and has non-zero size.
+
+ Args:
+ model_name: Name of the model to verify.
+
+ Returns:
+ True if the model file exists and is valid.
+ """
+ model_path = get_model_path(model_name)
+ if not model_path.exists():
+ return False
+
+ # Check file size (should be > 1MB for a real model)
+ size_bytes = model_path.stat().st_size
+ if size_bytes < 1_000_000:
+ logger.warning(f"Model file seems too small ({size_bytes} bytes): {model_path}")
+ return False
+
+ return True
+
+
+ def list_cached_models() -> dict:
+ """
+ List all cached models and their status.
+
+ Returns:
+ Dictionary mapping model names to their cache status and size.
+ """
+ result = {}
+ for name in MODEL_URLS:
+ path = get_model_path(name)
+ if path.exists():
+ size_mb = path.stat().st_size / (1024 * 1024)
+ result[name] = {"cached": True, "size_mb": round(size_mb, 2), "path": str(path)}
+ else:
+ result[name] = {"cached": False, "size_mb": 0, "path": str(path)}
+ return result
+
+
+ def clear_model_cache(model_name: str = None) -> None:
+ """
+ Clear cached models.
+
+ Args:
+ model_name: Specific model to clear, or None to clear all.
+ """
+ if model_name:
+ path = get_model_path(model_name)
+ if path.exists():
+ path.unlink()
+ logger.info(f"Cleared cached model: {model_name}")
+ else:
+ if MODEL_DIR.exists():
+ shutil.rmtree(MODEL_DIR)
+ logger.info(f"Cleared all cached models from {MODEL_DIR}")
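
Taken together, the new module is a small cache-management API: cache_model() downloads once and returns a local path, verify_model_integrity() and list_cached_models() inspect the cache, and clear_model_cache() removes it. A hedged usage sketch, not part of the diff; note that MODEL_DIR is resolved at import time, so the MICROLIVE_LOCAL_MODELS_PATH override (the path below is a placeholder) must be set before microlive is imported:

    import os

    # Optional: relocate the cache before importing microlive.
    os.environ["MICROLIVE_LOCAL_MODELS_PATH"] = "/data/microlive_models"

    from microlive.utils.model_downloader import (
        cache_model, list_cached_models, verify_model_integrity, clear_model_cache)

    path = cache_model("frap_nuclei")             # downloads on first call, then reuses the cache
    print(verify_model_integrity("frap_nuclei"))  # True once the file exists and passes the 1 MB sanity check
    print(list_cached_models())                   # {'frap_nuclei': {'cached': True, ...}}

    clear_model_cache("frap_nuclei")              # drop one model; clear_model_cache() wipes the whole cache
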
microlive-1.0.14.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: microlive
- Version: 1.0.12
+ Version: 1.0.14
  Summary: Live-cell microscopy image analysis and single-molecule measurements
  Project-URL: Homepage, https://github.com/ningzhaoAnschutz/microlive
  Project-URL: Documentation, https://github.com/ningzhaoAnschutz/microlive/blob/main/docs/user_guide.md
microlive-1.0.14.dist-info/RECORD CHANGED
@@ -1,26 +1,28 @@
- microlive/__init__.py,sha256=vZTlNASAIJaUQc4HNaNRd7RdcqrKkyNkycE7tRd_5fQ,1385
+ microlive/__init__.py,sha256=ZAui2VsXaQAYA4fnc1FhYO1v3lfGGEFrmTJyZeBAY9E,1385
  microlive/imports.py,sha256=VAAMavSLIKO0LooadTXfCdZiv8LQbV_wITeIv8IHwxM,7531
- microlive/microscopy.py,sha256=D8mq7ssP7yw0tGCw2EESxH82LbGqYFKfVx3pDjky_pY,708504
+ microlive/microscopy.py,sha256=97T9tEOVwBhEbAZujlDSeC3jt5xSQSCGJ8kboI6ucho,710732
  microlive/ml_spot_detection.py,sha256=pVbOSGNJ0WWMuPRML42rFwvjKVZ0B1fJux1179OIbAg,10603
  microlive/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  microlive/data/icons/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  microlive/data/icons/icon_micro.png,sha256=b5tFv4E6vUmLwYmYeM4PJuxLV_XqEzN14ueolekTFW0,370236
  microlive/data/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ microlive/data/models/spot_detection_cnn.pth,sha256=Np7vpPJIbKQmuKY0Hx-4IkeEDsnks_QEgs7TqaYgZmI,8468580
  microlive/gui/__init__.py,sha256=tB-CdDC7x5OwYFAQxLOUvfVnUThaXKXVRsB68YP0Y6Q,28
  microlive/gui/app.py,sha256=GTl2Iwe5uG603Ja6ykwfSG2kB7YaZJYQukLpC0DOurw,787890
  microlive/gui/main.py,sha256=b66W_2V-pclGKOozfs75pwrCGbL_jkVU3kFt8RFMZIc,2520
  microlive/gui/micro_mac.command,sha256=TkxYOO_5A2AiNJMz3_--1geBYfl77THpOLFZnV4J2ac,444
  microlive/gui/micro_windows.bat,sha256=DJUKPhDbCO4HToLwSMT-QTYRe9Kr1wn5A2Ijy2klIrw,773
  microlive/pipelines/__init__.py,sha256=VimchYrIWalFs_edRmjR1zBHIg2CcpRceZoRmB1e8kA,764
- microlive/pipelines/pipeline_FRAP.py,sha256=GH5CqX6so1eWE07PXRCJZLAAqCzOqW-AoCqGXifapYE,56280
+ microlive/pipelines/pipeline_FRAP.py,sha256=jBGzb7m3RzbuKtmD-KCrpSZCbypuLHeUacm88-XlUUU,62691
  microlive/pipelines/pipeline_folding_efficiency.py,sha256=0PTogfXHRtO2kXOeQXb5-VBb46DQsj6namGVEkMGI0g,22550
  microlive/pipelines/pipeline_particle_tracking.py,sha256=euPTLH6O9I66HkUb4Izah8ZF_aOdQLRyyR8vo1jSkFA,28245
  microlive/pipelines/pipeline_spot_detection_no_tracking.py,sha256=t-p1xCQvThnVKMJZgk3Xhk3k6cvp1VgwTJ0ZIbfzNG0,19087
- microlive/utils/__init__.py,sha256=5Ut2PeA0V5dM0VysmPpGH9OB-nmWDydzDkpRUwXfMHw,323
+ microlive/utils/__init__.py,sha256=metAf2zPS8w23d8dyM7-ld1ovrOKBdx3y3zu5IVrzIg,564
  microlive/utils/device.py,sha256=tcPMU8UiXL-DuGwhudUgrbjW1lgIK_EUKIOeOn0U6q4,2533
+ microlive/utils/model_downloader.py,sha256=EruviTEh75YBekpznn1RZ1Nj8lnDmeC4TKEnFLOow6Y,9448
  microlive/utils/resources.py,sha256=Jz7kPI75xMLCBJMyX7Y_3ixKi_UgydfQkF0BlFtLCKs,1753
- microlive-1.0.12.dist-info/METADATA,sha256=FyV736N0WZJxVael-NLlX4up2tvwdPXAKreS2ulDkcg,12434
- microlive-1.0.12.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- microlive-1.0.12.dist-info/entry_points.txt,sha256=Zqp2vixyD8lngcfEmOi8fkCj7vPhesz5xlGBI-EubRw,54
- microlive-1.0.12.dist-info/licenses/LICENSE,sha256=ixuiBLtpoK3iv89l7ylKkg9rs2GzF9ukPH7ynZYzK5s,35148
- microlive-1.0.12.dist-info/RECORD,,
+ microlive-1.0.14.dist-info/METADATA,sha256=mxn3h5atEOVG_u04F48N-H2ORlMJLC0c4aJQ9bGbB5c,12434
+ microlive-1.0.14.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ microlive-1.0.14.dist-info/entry_points.txt,sha256=Zqp2vixyD8lngcfEmOi8fkCj7vPhesz5xlGBI-EubRw,54
+ microlive-1.0.14.dist-info/licenses/LICENSE,sha256=ixuiBLtpoK3iv89l7ylKkg9rs2GzF9ukPH7ynZYzK5s,35148
+ microlive-1.0.14.dist-info/RECORD,,