microlive-1.0.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,368 @@
+ """Pipeline module for MicroLive.
+
+ This module is part of the microlive package.
+ """
+ from microlive.imports import *
+
+ def pipeline_particle_detection(data_folder_path, selected_image, channels_spots, max_spots_for_threshold=100000,
+                                 show_plot=True, channels_cytosol=None, channels_nucleus=None,
+                                 min_length_trajectory=5, yx_spot_size_in_px=3, z_spot_size_in_px=2, maximum_spots_cluster=4,
+                                 MINIMAL_SNR=0.5, diameter_cytosol=300, diameter_nucleus=200, particle_detection_threshold=None,
+                                 segmentation_selection_metric='max_area', recalculate_mask=False, optimization_segmentation_method='diameter_segmentation',
+                                 pretrained_model_cyto_segmentation=None, use_watershed=False, list_images_to_process=None, save_3d_visualization=False,
+                                 apply_photobleaching_correction=False, use_maximum_projection=False, link_particles=False):
+     # Read the LIF file and extract the images together with the acquisition metadata.
+     list_images, list_names, pixel_xy_um, voxel_z_um, channel_names, number_color_channels, list_time_intervals, bit_depth = \
+         mi.ReadLif(data_folder_path, show_metadata=False, save_tif=False, save_png=False, format='TZYXC').read()
+     # If list_images_to_process is given, keep only the images whose names appear in that list.
+     if list_images_to_process is not None:
+         selected_indices = [i for i in range(len(list_names)) if list_names[i] in list_images_to_process]
+         # Filter all lists using the selected indices
+         list_images = [list_images[i] for i in selected_indices]
+         list_names = [list_names[i] for i in selected_indices]
+         list_time_intervals = [list_time_intervals[i] for i in selected_indices]
+
+     # If selected_image is None, process all images
+     if selected_image is None:
+         list_df, list_masks, list_images_tested = [], [], []
+         for idx in range(len(list_images)):
+             df, masks, image = process_single_image(
+                 data_folder_path=data_folder_path,
+                 selected_image=idx,
+                 channels_spots=channels_spots,
+                 max_spots_for_threshold=max_spots_for_threshold,
+                 show_plot=show_plot,
+                 channels_cytosol=channels_cytosol,
+                 channels_nucleus=channels_nucleus,
+                 min_length_trajectory=min_length_trajectory,
+                 yx_spot_size_in_px=yx_spot_size_in_px,
+                 z_spot_size_in_px=z_spot_size_in_px,
+                 maximum_spots_cluster=maximum_spots_cluster,
+                 MINIMAL_SNR=MINIMAL_SNR,
+                 diameter_cytosol=diameter_cytosol,
+                 diameter_nucleus=diameter_nucleus,
+                 segmentation_selection_metric=segmentation_selection_metric,
+                 list_images=list_images,
+                 list_names=list_names[idx],
+                 pixel_xy_um=pixel_xy_um,
+                 voxel_z_um=voxel_z_um,
+                 channel_names=channel_names,
+                 list_time_intervals=list_time_intervals[idx],
+                 recalculate_mask=recalculate_mask,
+                 optimization_segmentation_method=optimization_segmentation_method,
+                 pretrained_model_cyto_segmentation=pretrained_model_cyto_segmentation,
+                 use_watershed=use_watershed,
+                 save_3d_visualization=save_3d_visualization,
+                 apply_photobleaching_correction=apply_photobleaching_correction,
+                 use_maximum_projection=use_maximum_projection,
+                 link_particles=link_particles,
+                 particle_detection_threshold=particle_detection_threshold,
+             )
+             # Record the index of the processed image in the image_id column
+             df['image_id'] = idx
+             list_df.append(df)
+             list_masks.append(masks)
+             list_images_tested.append(image)
+         if len(list_df) > 1:
+             final_df = pd.concat(list_df, ignore_index=True)
+         else:
+             final_df = list_df[0]
+         return final_df, list_df, list_masks, list_images_tested
+     else:
+         # Process a single image
+         df, masks, image = process_single_image(
+             data_folder_path=data_folder_path,
+             selected_image=selected_image,
+             channels_spots=channels_spots,
+             max_spots_for_threshold=max_spots_for_threshold,
+             show_plot=show_plot,
+             channels_cytosol=channels_cytosol,
+             channels_nucleus=channels_nucleus,
+             min_length_trajectory=min_length_trajectory,
+             yx_spot_size_in_px=yx_spot_size_in_px,
+             z_spot_size_in_px=z_spot_size_in_px,  # forwarded here as well, matching the batch branch above
+             maximum_spots_cluster=maximum_spots_cluster,
+             MINIMAL_SNR=MINIMAL_SNR,
+             diameter_cytosol=diameter_cytosol,
+             diameter_nucleus=diameter_nucleus,
+             segmentation_selection_metric=segmentation_selection_metric,
+             list_images=list_images,
+             list_names=list_names[selected_image],
+             pixel_xy_um=pixel_xy_um,
+             voxel_z_um=voxel_z_um,
+             channel_names=channel_names,
+             list_time_intervals=list_time_intervals[selected_image],
+             recalculate_mask=recalculate_mask,
+             optimization_segmentation_method=optimization_segmentation_method,
+             pretrained_model_cyto_segmentation=pretrained_model_cyto_segmentation,
+             use_watershed=use_watershed,
+             save_3d_visualization=save_3d_visualization,
+             apply_photobleaching_correction=apply_photobleaching_correction,
+             use_maximum_projection=use_maximum_projection,
+             link_particles=link_particles,
+             particle_detection_threshold=particle_detection_threshold,
+         )
+         return df, [df], [masks], [image]
+
+
+ @mi.Utilities().metadata_decorator(metadata_folder_func=mi.Utilities().get_metadata_folder, exclude_args=['list_images'])
+ def process_single_image(data_folder_path, selected_image, channels_spots, max_spots_for_threshold=100000,
+                          show_plot=True, channels_cytosol=None, channels_nucleus=None,
+                          min_length_trajectory=5, yx_spot_size_in_px=3, z_spot_size_in_px=2, maximum_spots_cluster=4,
+                          MINIMAL_SNR=0.5, diameter_cytosol=300, diameter_nucleus=200, segmentation_selection_metric='area',
+                          list_images=None, list_names=None, pixel_xy_um=None, voxel_z_um=None,
+                          channel_names=None, optimization_segmentation_method='diameter_segmentation',
+                          recalculate_mask=False, use_watershed=False, pretrained_model_cyto_segmentation=None, particle_detection_threshold=None,
+                          list_time_intervals=None, save_3d_visualization=False, apply_photobleaching_correction=False, use_maximum_projection=False, link_particles=False):
+     # Ensure lists are properly formatted
+     channels_spots = [channels_spots] if not isinstance(channels_spots, list) else channels_spots
+     channels_cytosol = [channels_cytosol] if not isinstance(channels_cytosol, list) else channels_cytosol
+     channels_nucleus = [channels_nucleus] if not isinstance(channels_nucleus, list) else channels_nucleus
+
+     # Convert pixel and voxel sizes to nm
+     pixel_xy_nm = int(pixel_xy_um * 1000)
+     voxel_z_nm = int(voxel_z_um * 1000)
+     list_voxels = [voxel_z_nm, pixel_xy_nm]
+     list_spot_size_px = [z_spot_size_in_px, yx_spot_size_in_px]
+
+     # Selecting the image to be analyzed
+     tested_image = list_images[selected_image]  # TZYXC
+     original_tested_image = tested_image.copy()
+     # Creating the results folder
+     results_name = 'results_' + data_folder_path.stem + '_cell_id_' + str(selected_image)
+     current_dir = pathlib.Path().absolute()
+     results_folder = current_dir.joinpath('results_live_cell', results_name)
+     results_folder.mkdir(parents=True, exist_ok=True)
+     mi.Utilities().clear_folder_except_substring(results_folder, 'mask')
+     # Plot the original image
+     plot_name_original = results_folder.joinpath('original_image.png')
+     suptitle = f'Image: {data_folder_path.stem[:16]} - {list_names[selected_image]} - Cell_ID: {selected_image}'
+     mi.Plots().plot_images(
+         image_ZYXC=tested_image[0],
+         figsize=(12, 5),
+         show_plot=show_plot,
+         use_maximum_projection=True,
+         use_gaussian_filter=True,
+         cmap='binary',
+         min_max_percentile=[0.5, 99.9],
+         show_gird=False,
+         save_plots=True,
+         plot_name=plot_name_original,
+         suptitle=suptitle
+     )
+     # Read or create masks
+     mask_file_name = 'mask_' + data_folder_path.stem + '_image_' + str(selected_image) + '.tif'
+     mask_file_path = results_folder.joinpath(mask_file_name)
+     path_mask_exist = os.path.exists(str(mask_file_path))
+     if path_mask_exist and recalculate_mask is False:
+         masks = imread(str(mask_file_path)).astype(bool)
+     else:
+         # Use watershed or Cellpose to create masks
+         if use_watershed:
+             masks_complete_cells = mi.CellSegmentationWatershed(
+                 np.max(tested_image[:, :, :, :, channels_cytosol[0]], axis=(0, 1)),
+                 footprint_size=5,
+                 threshold_method='li',
+                 markers_method='distance',
+                 separation_size=5).apply_watershed()
+         else:
+             masks_complete_cells, _, _ = mi.CellSegmentation(
+                 tested_image[0],
+                 channels_cytosol=channels_cytosol,
+                 channels_nucleus=channels_nucleus,
+                 diameter_cytosol=diameter_cytosol,
+                 diameter_nucleus=diameter_nucleus,
+                 optimization_segmentation_method=optimization_segmentation_method,
+                 remove_fragmented_cells=False,
+                 show_plot=show_plot,
+                 image_name=None,
+                 NUMBER_OF_CORES=1,
+                 selection_metric=segmentation_selection_metric,
+                 pretrained_model_cyto_segmentation=pretrained_model_cyto_segmentation
+             ).calculate_masks()
+         # Selecting the mask that is in the center of the image
+         center_y = masks_complete_cells.shape[0] // 2
+         center_x = masks_complete_cells.shape[1] // 2
+         selected_mask_id = masks_complete_cells[center_y, center_x]
+         if selected_mask_id > 0:
+             masks = masks_complete_cells == selected_mask_id
+         else:
+             # Select the largest mask that is not the background mask (0)
+             mask_labels = np.unique(masks_complete_cells)
+             mask_sizes = [(label, np.sum(masks_complete_cells == label)) for label in mask_labels if label != 0]
+             if mask_sizes:
+                 selected_mask_id = max(mask_sizes, key=lambda x: x[1])[0]
+                 masks = masks_complete_cells == selected_mask_id
+             else:
+                 masks = np.zeros_like(masks_complete_cells, dtype=bool)
+         # Save the mask
+         masks = masks.astype(np.uint8)
+         tifffile.imwrite(str(mask_file_path), masks, dtype='uint8')
202
+
203
+ if apply_photobleaching_correction:
204
+ file_path_photobleacing = results_folder.joinpath('photobleaching.png')
205
+ corrected_image = mi.Photobleaching(image_TZYXC=tested_image,mask_YX=masks, show_plot=False, mode='inside_cell',plot_name=file_path_photobleacing).apply_photobleaching_correction() #mi.PhotobleachingCorrection(tested_image).apply_correction()
206
+ else:
207
+ corrected_image = tested_image
208
+ # Calculate the threshold for spot detection
209
+ plot_name_threshold = results_folder.joinpath('threshold_spot_detection.png')
210
+ if particle_detection_threshold is None:
211
+ starting_threshold = mi.Utilities().calculate_threshold_for_spot_detection(
212
+ corrected_image, list_spot_size_px, list_voxels, channels_spots,
213
+ max_spots_for_threshold=max_spots_for_threshold,
214
+ show_plot=show_plot,plot_name=plot_name_threshold
215
+ )
216
+ else:
217
+ starting_threshold = [particle_detection_threshold]*len(channels_spots)
218
+
219
+
220
+ # Run the particle tracking
221
+ list_dataframes_trajectories, _ = mi.ParticleTracking(
222
+ image=corrected_image,
223
+ channels_spots=channels_spots,
224
+ masks=masks,
225
+ list_voxels=list_voxels,
226
+ #list_psfs=list_psfs,
227
+ channels_cytosol=channels_cytosol,
228
+ channels_nucleus=channels_nucleus,
229
+ min_length_trajectory=min_length_trajectory,
230
+ threshold_for_spot_detection=starting_threshold,
231
+ yx_spot_size_in_px=yx_spot_size_in_px,
232
+ maximum_spots_cluster=maximum_spots_cluster,
233
+ link_particles = link_particles
234
+ ).run()
235
+ #df_tracking = list_dataframes_trajectories[0]
236
+ df_tracking = list_dataframes_trajectories[0]
237
+ if len(list_dataframes_trajectories) > 1:
238
+ for i in range(1, len(list_dataframes_trajectories)):
239
+ df_tracking = pd.concat([df_tracking, list_dataframes_trajectories[i]], ignore_index=True)
240
+ df_tracking = df_tracking.reset_index(drop=True)
241
+ #df_tracking['particle'] = df_tracking.groupby('particle').ngroup()
242
+
243
+ #threshold_tracking = starting_threshold
244
+
+     # Plot histograms for the SNR
+     selected_field = 'snr'  # options are: psf_sigma, snr, spot_int
+     plot_name_snr = results_folder.joinpath('spots_' + selected_field + '.png')
+     mean_snr = mi.Plots().plot_histograms_from_df(
+         df_tracking,
+         selected_field=selected_field,
+         figsize=(8, 2),
+         plot_name=plot_name_snr,
+         bin_count=60,
+         save_plot=True,
+         remove_outliers=True
+     )
+     # Plot histograms for the spot intensity
+     selected_field = 'spot_int'
+     plot_name_int = results_folder.joinpath('spots_' + selected_field + '.png')
+     mean_int = mi.Plots().plot_histograms_from_df(
+         df_tracking,
+         selected_field=selected_field,
+         figsize=(8, 2),
+         plot_name=plot_name_int,
+         bin_count=60,
+         save_plot=True,
+         remove_outliers=True
+     )
+     # Remove tracks with low SNR in the tracking channel
+     if MINIMAL_SNR is not None:
+         array_selected_field = mi.Utilities().df_trajectories_to_array(
+             dataframe=df_tracking,
+             selected_field='snr_ch_' + str(channels_spots[0]),  # filter on the SNR field, not on the last plotted field
+             fill_value='nans'
+         )
+         mean_snr = np.nanmean(array_selected_field, axis=1)
+         indices_low_quality_tracks = np.where(mean_snr < MINIMAL_SNR)[0]
+         df_tracking = df_tracking[~df_tracking['particle'].isin(indices_low_quality_tracks)]
+         df_tracking = df_tracking.reset_index(drop=True)
+         df_tracking['particle'] = df_tracking.groupby('particle').ngroup()
+
+     # Plot the pixel intensity histogram inside the cell mask for each spot channel
+     for i in range(len(channels_spots)):
+         plot_name_histogram = results_folder.joinpath('pixel_histogram_in_cell_' + str(channels_spots[i]) + '.png')
+         masked_data = corrected_image * masks[np.newaxis, np.newaxis, :, :, np.newaxis].astype(float)
+         mi.Plots().plot_image_pixel_intensity_distribution(
+             image=np.mean(masked_data, axis=(0, 1)),
+             figsize=(8, 2),
+             bins=100,
+             remove_outliers=True,
+             remove_zeros=True,
+             save_plots=True,
+             plot_name=plot_name_histogram,
+             single_color=None,
+             tracking_channel=channels_spots[0],
+             threshold_tracking=starting_threshold[i]
+         )
+
+     # Plot original image and tracks
+     suptitle = f'Image: {data_folder_path.stem[:16]} - {list_names[selected_image]} - Cell_ID: {selected_image}'
+     plot_name_original_image_and_tracks = results_folder.joinpath('original_image_tracking.png')
+
+     mi.Plots().plot_images(
+         image_ZYXC=corrected_image[0],
+         df=df_tracking,
+         masks=masks,
+         show_trajectories=True,
+         suptitle=suptitle,
+         figsize=(12, 3),
+         show_plot=True,
+         selected_time=0,
+         use_maximum_projection=False,
+         use_gaussian_filter=True,
+         cmap='binary',
+         min_max_percentile=[0.05, 99.95],
+         show_gird=False,
+         save_plots=True,
+         plot_name=plot_name_original_image_and_tracks
+     )
+
+     # Combine the original image and the image with tracks
+     plot_name_complete_image = results_folder.joinpath('complete_image_tracking.png')
+     mi.Utilities().combine_images_vertically([plot_name_original, plot_name_original_image_and_tracks], plot_name_complete_image, delete_originals=True)
+
+     # Save the DataFrame
+     df_tracking.to_csv(results_folder.joinpath('tracking_results.csv'), index=False)
+
+     # Apply a Gaussian-Laplace filter; the filtered image is used for the optional 3D visualization below
+     filtered_image = mi.Utilities().gaussian_laplace_filter_image(corrected_image, list_spot_size_px, list_voxels)
+
+     # Plot image and detected spots
+     selected_color_channel = 0
+     selected_time_point = 0
+     # Select only the first time point from the dataframe
+     mask_expanded = masks[np.newaxis, np.newaxis, :, :, np.newaxis]
+     masked_image_TZYXC = corrected_image * mask_expanded
+     df_tracking_plot = df_tracking[df_tracking['frame'] == selected_time_point]
+     plot_name_detected_particles = results_folder.joinpath('detected_particles.png')
+
+     time_point = 0
+     zoom_size = 15
+     selected_spot = 0
+     mi.Plots().plot_cell_zoom_selected_crop(image_TZYXC=masked_image_TZYXC,
+                                             df=df_tracking,
+                                             use_gaussian_filter=True,
+                                             image_name=plot_name_detected_particles,
+                                             microns_per_pixel=pixel_xy_um,
+                                             time_point=time_point,
+                                             list_channel_order_to_plot=[0, 1, 2],
+                                             list_max_percentile=[99.9, 99.9],
+                                             min_percentile=1,
+                                             save_image=False,
+                                             show_spots_ids=False,
+                                             zoom_size=zoom_size,
+                                             selected_spot=selected_spot)
+     # Plot the napari 3D visualizer
+     if save_3d_visualization:
+         mask_expanded = masks[np.newaxis, np.newaxis, :, :, np.newaxis]
+         masked_image_TZYXC = filtered_image * mask_expanded
+         # Remove extreme values from the image
+         masked_image_TZYXC = mi.RemoveExtrema(masked_image_TZYXC, min_percentile=0.001, max_percentile=99.995).remove_outliers()
+         plot_name_3d_visualizer = str(results_folder.joinpath('image_3d.gif'))
+         mi.Plots().Napari_Visualizer(masked_image_TZYXC, df_tracking, z_correction=7, channels_spots=0, plot_name=plot_name_3d_visualizer)
+     return df_tracking, masks, original_tested_image
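
For context, the snippet below is a minimal usage sketch (not part of the package) showing how `pipeline_particle_detection` might be called, based only on the signature in the hunk above. The import path, data path, channel indices, and option values are illustrative assumptions, not confirmed defaults.

import pathlib
from microlive.pipeline import pipeline_particle_detection  # module path assumed from this diff

# Hypothetical LIF dataset and channel layout; adjust to your experiment.
data_folder_path = pathlib.Path("experiments/cell_line_A.lif")

final_df, list_df, list_masks, list_images_tested = pipeline_particle_detection(
    data_folder_path=data_folder_path,
    selected_image=None,      # None processes every image in the LIF file
    channels_spots=[1],       # channel(s) containing the spots to track
    channels_cytosol=[0],     # channel used for cytosol segmentation
    channels_nucleus=None,
    MINIMAL_SNR=0.5,
    link_particles=True,
)
print(final_df.head())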
@@ -0,0 +1,13 @@
+ """Utility modules for MicroLive."""
+
+ from .device import get_device, is_gpu_available, get_device_info, check_gpu_status
+ from .resources import get_icon_path, get_model_path
+
+ __all__ = [
+     "get_device",
+     "is_gpu_available",
+     "get_device_info",
+     "check_gpu_status",
+     "get_icon_path",
+     "get_model_path",
+ ]
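
A short illustrative snippet (not from the package) showing how these re-exports might be consumed; it only uses the names listed in `__all__` above.

from microlive.utils import get_device, get_device_info, get_icon_path, get_model_path

device = get_device()        # torch.device('cuda'), 'mps', or 'cpu'
print(get_device_info())     # dict with device/type/name/memory_gb
print(get_icon_path())       # Path to icon_micro.png, or None if not bundled
print(get_model_path())      # Path to spot_detection_cnn.pth, or None if not bundled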
@@ -0,0 +1,99 @@
+ """Centralized GPU device detection for MicroLive.
+
+ Supports:
+ - NVIDIA CUDA (Windows/Linux)
+ - Apple Silicon MPS (macOS)
+ - CPU fallback
+
+ Example:
+     from microlive.utils.device import get_device, get_device_info
+
+     device = get_device()
+     print(f"Using: {device}")
+
+     info = get_device_info()
+     print(f"GPU: {info['name']}")
+ """
+
+ import torch
+
+
+ def get_device():
+     """Get the best available compute device.
+
+     Returns:
+         torch.device: cuda if an NVIDIA GPU is available,
+             mps if an Apple Silicon GPU is available,
+             cpu otherwise.
+     """
+     if torch.cuda.is_available():
+         return torch.device('cuda')
+     if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+         return torch.device('mps')
+     return torch.device('cpu')
+
+
+ def is_gpu_available():
+     """Check if any GPU acceleration is available.
+
+     Returns:
+         bool: True if a CUDA or MPS GPU is available.
+     """
+     return torch.cuda.is_available() or (
+         hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
+     )
+
+
+ def get_device_info():
+     """Get detailed information about the compute device.
+
+     Returns:
+         dict: Dictionary with keys:
+             - device: Device string ('cuda', 'mps', or 'cpu')
+             - type: Device type
+             - name: Human-readable device name
+             - memory_gb: GPU memory in GB (CUDA only)
+     """
+     device = get_device()
+     info = {
+         "device": str(device),
+         "type": device.type,
+         "name": "CPU",
+         "memory_gb": None,
+     }
+
+     if device.type == "cuda":
+         info["name"] = torch.cuda.get_device_name(0)
+         props = torch.cuda.get_device_properties(0)
+         info["memory_gb"] = round(props.total_memory / (1024**3), 2)
+     elif device.type == "mps":
+         info["name"] = "Apple Silicon GPU (MPS)"
+
+     return info
+
+
+ def check_gpu_status():
+     """Print GPU status information.
+
+     Useful for verifying installation.
+
+     Returns:
+         str: Device type ('cuda', 'mps', or 'cpu')
+     """
+     print(f"PyTorch version: {torch.__version__}")
+
+     info = get_device_info()
+
+     if info["type"] == "cuda":
+         print(f"✅ CUDA available: {info['name']}")
+         print(f"   Memory: {info['memory_gb']} GB")
+     elif info["type"] == "mps":
+         print(f"✅ MPS available: {info['name']}")
+     else:
+         print("⚠️ No GPU detected, using CPU")
+
+     return info["type"]
+
+
+ if __name__ == "__main__":
+     check_gpu_status()
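
As an aside, a minimal sketch (not part of the package) of how `get_device` could be used to place a model and a batch on the selected device; the model and tensor shapes here are placeholders for illustration only.

import torch
from microlive.utils.device import get_device, is_gpu_available

device = get_device()
model = torch.nn.Linear(16, 2).to(device)   # placeholder model, illustration only
batch = torch.randn(8, 16, device=device)
with torch.no_grad():
    out = model(batch)
print(out.shape, "GPU available:", is_gpu_available())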
@@ -0,0 +1,60 @@
+ """Resource file utilities for MicroLive.
+
+ Handles finding bundled data files like icons and ML models,
+ whether running from source or installed as a package.
+ """
+
+ from pathlib import Path
+ import importlib.resources
+
+
+ def get_package_data_dir():
+     """Get the path to the package data directory.
+
+     Returns:
+         Path: Path to the microlive/data directory.
+     """
+     try:
+         # Python 3.9+ with importlib.resources.files
+         return Path(importlib.resources.files("microlive.data"))
+     except (TypeError, AttributeError):
+         # Fallback for older Python or editable installs
+         return Path(__file__).parent.parent / "data"
+
+
+ def get_icon_path():
+     """Get the path to the application icon.
+
+     Returns:
+         Path or None: Path to icon_micro.png, or None if not found.
+     """
+     # Try package data first
+     icon_path = get_package_data_dir() / "icons" / "icon_micro.png"
+     if icon_path.exists():
+         return icon_path
+
+     # Fallback to docs/icons (for development)
+     dev_path = Path(__file__).parent.parent.parent / "docs" / "icons" / "icon_micro.png"
+     if dev_path.exists():
+         return dev_path
+
+     return None
+
+
+ def get_model_path():
+     """Get the path to the ML spot detection model.
+
+     Returns:
+         Path or None: Path to spot_detection_cnn.pth, or None if not found.
+     """
+     # Try package data first
+     model_path = get_package_data_dir() / "models" / "spot_detection_cnn.pth"
+     if model_path.exists():
+         return model_path
+
+     # Fallback to the modeling directory (for development)
+     dev_path = Path(__file__).parent.parent.parent / "modeling" / "machine_learning" / "spot_detection_cnn.pth"
+     if dev_path.exists():
+         return dev_path
+
+     return None
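
Finally, a hedged usage sketch (not from the package) combining the resource and device helpers. It assumes the bundled .pth file can be deserialized with torch.load; the actual model class and loading convention are not shown in this diff.

import torch
from microlive.utils.device import get_device
from microlive.utils.resources import get_model_path, get_icon_path

model_path = get_model_path()
if model_path is None:
    print("spot_detection_cnn.pth not found; skipping")
else:
    # Assumption: the checkpoint is loadable with torch.load onto the selected device.
    checkpoint = torch.load(model_path, map_location=get_device())
    print(type(checkpoint))

print("Icon:", get_icon_path())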