spacr 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spacr/timelapse.py ADDED
@@ -0,0 +1,576 @@
1
+ import cv2, os, re, glob, random, btrack
2
+ import numpy as np
3
+ import pandas as pd
4
+ from collections import defaultdict
5
+ import matplotlib.pyplot as plt
6
+ from matplotlib.animation import FuncAnimation
7
+ from IPython.display import display
8
+ from IPython.display import Image as ipyimage
9
+ import trackpy as tp
10
+ from btrack import datasets as btrack_datasets
11
+ from skimage.measure import regionprops
12
+
13
+ from .logger import log_function_call
14
+
15
+ #from .plot import _visualize_and_save_timelapse_stack_with_tracks
16
+ #from .utils import _masks_to_masks_stack
17
+
18
+ def _npz_to_movie(arrays, filenames, save_path, fps=10):
19
+ """
20
+ Convert a list of numpy arrays to a movie file.
21
+
22
+ Args:
23
+ arrays (List[np.ndarray]): List of numpy arrays representing frames of the movie.
24
+ filenames (List[str]): List of filenames corresponding to each frame.
25
+ save_path (str): Path to save the movie file.
26
+ fps (int, optional): Frames per second of the movie. Defaults to 10.
27
+
28
+ Returns:
29
+ None
30
+ """
31
+ # Define the codec and create VideoWriter object
32
+ fourcc = cv2.VideoWriter_fourcc(*'XVID')
33
+ if save_path.endswith('.mp4'):
34
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
35
+
36
+ # Initialize VideoWriter with the size of the first image
37
+ height, width = arrays[0].shape[:2]
38
+ out = cv2.VideoWriter(save_path, fourcc, fps, (width, height))
39
+
40
+ for i, frame in enumerate(arrays):
41
+ # Handle float32 images by scaling or normalizing
42
+ if frame.dtype == np.float32:
43
+ frame = np.clip(frame, 0, 1)
44
+ frame = (frame * 255).astype(np.uint8)
45
+
46
+ # Convert 16-bit image to 8-bit
47
+ elif frame.dtype == np.uint16:
48
+ frame = cv2.convertScaleAbs(frame, alpha=(255.0/65535.0))
49
+
50
+ # Handling 1-channel (grayscale) or 2-channel images
51
+ if frame.ndim == 2 or (frame.ndim == 3 and frame.shape[2] in [1, 2]):
52
+ if frame.ndim == 2 or frame.shape[2] == 1:
53
+ # Convert grayscale to RGB
54
+ frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
55
+ elif frame.shape[2] == 2:
56
+ # Pack the two channels into the first two planes of a 3-channel frame (OpenCV writes BGR, so they render as blue and green); the third plane stays zero
57
+ rgb_frame = np.zeros((height, width, 3), dtype=np.uint8)
58
+ rgb_frame[..., 0] = frame[..., 0] # first channel (blue plane in BGR)
59
+ rgb_frame[..., 1] = frame[..., 1] # second channel (green plane in BGR)
60
+ frame = rgb_frame
61
+
62
+ # For 3-channel images, ensure it's in BGR format for OpenCV
63
+ elif frame.shape[2] == 3:
64
+ frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
65
+
66
+ # Add filenames as text on frames
67
+ cv2.putText(frame, filenames[i], (10, height - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
68
+
69
+ out.write(frame)
70
+
71
+ out.release()
72
+ print(f"Movie saved to {save_path}")
73
+
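A minimal usage sketch for _npz_to_movie (not part of the module; assumes the wheel is installed so the private helper is importable, and writes a throwaway AVI to the working directory):

import numpy as np
from spacr.timelapse import _npz_to_movie

frames = [np.random.rand(128, 128).astype(np.float32) for _ in range(5)]  # float32 frames are clipped to [0, 1] and rescaled to 8-bit
names = [f'frame_{i:02d}.npy' for i in range(5)]
_npz_to_movie(frames, names, 'preview.avi', fps=5)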
74
+ def _scmovie(folder_paths):
75
+ """
76
+ Generate movies from a collection of PNG images in the given folder paths.
77
+
78
+ Args:
79
+ folder_paths (list): List of folder paths containing PNG images.
80
+
81
+ Returns:
82
+ None
83
+ """
84
+ folder_paths = list(set(folder_paths))
85
+ for folder_path in folder_paths:
86
+ movie_path = os.path.join(folder_path, 'movies')
87
+ os.makedirs(movie_path, exist_ok=True)
88
+ # Regular expression to parse the filename
89
+ filename_regex = re.compile(r'(\w+)_(\w+)_(\w+)_(\d+)_(\d+).png')
90
+ # Dictionary to hold lists of images by plate, well, field, and object number
91
+ grouped_images = defaultdict(list)
92
+ # Iterate over all PNG files in the folder
93
+ for filename in os.listdir(folder_path):
94
+ if filename.endswith('.png'):
95
+ match = filename_regex.match(filename)
96
+ if match:
97
+ plate, well, field, time, object_number = match.groups()
98
+ key = (plate, well, field, object_number)
99
+ grouped_images[key].append((int(time), os.path.join(folder_path, filename)))
100
+ for key, images in grouped_images.items():
101
+ # Sort images by time using sorted and lambda function for custom sort key
102
+ images = sorted(images, key=lambda x: x[0])
103
+ _, image_paths = zip(*images)
104
+ # Determine the size to which all images should be padded
105
+ max_height = max_width = 0
106
+ for image_path in image_paths:
107
+ image = cv2.imread(image_path)
108
+ h, w, _ = image.shape
109
+ max_height, max_width = max(max_height, h), max(max_width, w)
110
+ # Initialize VideoWriter
111
+ plate, well, field, object_number = key
112
+ output_filename = f"{plate}_{well}_{field}_{object_number}.mp4"
113
+ output_path = os.path.join(movie_path, output_filename)
114
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
115
+ video = cv2.VideoWriter(output_path, fourcc, 10, (max_width, max_height))
116
+ # Process each image
117
+ for image_path in image_paths:
118
+ image = cv2.imread(image_path)
119
+ h, w, _ = image.shape
120
+ padded_image = np.zeros((max_height, max_width, 3), dtype=np.uint8)
121
+ padded_image[:h, :w, :] = image
122
+ video.write(padded_image)
123
+ video.release()
124
+
125
+
126
+ def _sort_key(file_path):
127
+ """
128
+ Returns a sort key for the given file path based on the pattern '(\d+)_([A-Z]\d+)_(\d+)_(\d+).npy'.
129
+ The sort key is a tuple containing the plate, well, field, and time values extracted from the file path.
130
+ If the file path does not match the pattern, a default sort key is returned to sort the file as "earliest" or "lowest".
131
+
132
+ Args:
133
+ file_path (str): The file path to extract the sort key from.
134
+
135
+ Returns:
136
+ tuple: The sort key tuple containing the plate, well, field, and time values.
137
+ """
138
+ match = re.search(r'(\d+)_([A-Z]\d+)_(\d+)_(\d+).npy', os.path.basename(file_path))
139
+ if match:
140
+ plate, well, field, time = match.groups()
141
+ # Assuming plate, well, and field are to be returned as is and time converted to int for sorting
142
+ return (plate, well, field, int(time))
143
+ else:
144
+ # Return a tuple that sorts this file as "earliest" or "lowest"
145
+ return ('', '', '', 0)
146
+
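A quick illustration of the sort key on hypothetical filenames; the time component is compared numerically rather than lexically:

from spacr.timelapse import _sort_key

paths = ['1_A01_1_10.npy', '1_A01_1_2.npy', '1_A01_1_1.npy']
print(sorted(paths, key=_sort_key))  # ['1_A01_1_1.npy', '1_A01_1_2.npy', '1_A01_1_10.npy']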
147
+ def _save_mask_timelapse_as_gif(masks, path, cmap, norm, filenames):
148
+ """
149
+ Save a timelapse of masks as a GIF.
150
+
151
+ Parameters:
152
+ masks (list): List of mask frames.
153
+ path (str): Path to save the GIF.
154
+ cmap: Colormap for displaying the masks.
155
+ norm: Normalization for the masks.
156
+ filenames (list): List of filenames corresponding to each mask frame.
157
+
158
+ Returns:
159
+ None
160
+ """
161
+ def _update(frame):
162
+ """
163
+ Update the plot with the given frame.
164
+
165
+ Parameters:
166
+ frame (int): The frame number to update the plot with.
167
+
168
+ Returns:
169
+ None
170
+ """
171
+ nonlocal filename_text_obj
172
+ if filename_text_obj is not None:
173
+ filename_text_obj.remove()
174
+ ax.clear()
175
+ ax.axis('off')
176
+ current_mask = masks[frame]
177
+ ax.imshow(current_mask, cmap=cmap, norm=norm)
178
+ ax.set_title(f'Frame: {frame}', fontsize=24, color='white')
179
+ filename_text = filenames[frame]
180
+ filename_text_obj = fig.text(0.5, 0.01, filename_text, ha='center', va='center', fontsize=20, color='white')
181
+ for label_value in np.unique(current_mask):
182
+ if label_value == 0: continue # Skip background
183
+ y, x = np.mean(np.where(current_mask == label_value), axis=1)
184
+ ax.text(x, y, str(label_value), color='white', fontsize=24, ha='center', va='center')
185
+
186
+ fig, ax = plt.subplots(figsize=(50, 50), facecolor='black')
187
+ ax.set_facecolor('black')
188
+ ax.axis('off')
189
+ plt.subplots_adjust(left=0, right=1, top=1, bottom=0, wspace=0, hspace=0)
190
+
191
+ filename_text_obj = None
192
+ anim = FuncAnimation(fig, _update, frames=len(masks), blit=False)
193
+ anim.save(path, writer='pillow', fps=2, dpi=80) # Adjust DPI for size/quality
194
+ plt.close(fig)
195
+ print(f'Saved timelapse to {path}')
196
+
197
+ def _masks_to_gif(masks, gif_folder, name, filenames, object_type):
198
+ """
199
+ Converts a sequence of masks into a GIF file.
200
+
201
+ Args:
202
+ masks (list): List of masks representing the sequence.
203
+ gif_folder (str): Path to the folder where the GIF file will be saved.
204
+ name (str): Name of the GIF file.
205
+ filenames (list): List of filenames corresponding to each mask in the sequence.
206
+ object_type (str): Type of object represented by the masks.
207
+
208
+ Returns:
209
+ None
210
+ """
211
+ def _display_gif(path):
212
+ with open(path, 'rb') as file:
213
+ display(ipyimage(file.read()))
214
+
215
+ highest_label = max(np.max(mask) for mask in masks)
216
+ random_colors = np.random.rand(highest_label + 1, 4)
217
+ random_colors[:, 3] = 1 # Full opacity
218
+ random_colors[0] = [0, 0, 0, 1] # Background color
219
+ cmap = plt.cm.colors.ListedColormap(random_colors)
220
+ norm = plt.cm.colors.Normalize(vmin=0, vmax=highest_label)
221
+
222
+ save_path_gif = os.path.join(gif_folder, f'timelapse_masks_{object_type}_{name}.gif')
223
+ _save_mask_timelapse_as_gif(masks, save_path_gif, cmap, norm, filenames)
224
+ #_display_gif(save_path_gif)
225
+
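A usage sketch with a synthetic label stack (shapes, names and the output folder are assumptions; the GIF is written to the current directory):

import numpy as np
from spacr.timelapse import _masks_to_gif

masks = np.zeros((4, 64, 64), dtype=np.uint16)
for t in range(4):
    masks[t, 10:20, 10 + t:20 + t] = 1  # one labelled object drifting right over time
filenames = [f'1_A01_1_{t}.npy' for t in range(4)]
_masks_to_gif(masks, gif_folder='.', name='1_A01_1', filenames=filenames, object_type='cell')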
226
+ def _timelapse_masks_to_gif(folder_path, mask_channels, object_types):
227
+ """
228
+ Converts a sequence of masks into a timelapse GIF file.
229
+
230
+ Args:
231
+ folder_path (str): The path to the folder containing the mask files.
232
+ mask_channels (list): List of channel indices to extract masks from.
233
+ object_types (list): List of object types corresponding to each mask channel.
234
+
235
+ Returns:
236
+ None
237
+ """
238
+ master_folder = os.path.dirname(folder_path)
239
+ gif_folder = os.path.join(master_folder, 'movies', 'gif')
240
+ os.makedirs(gif_folder, exist_ok=True)
241
+
242
+ paths = glob.glob(os.path.join(folder_path, '*.npy'))
243
+ paths.sort(key=_sort_key)
244
+
245
+ organized_files = {}
246
+ for file in paths:
247
+ match = re.search(r'(\d+)_([A-Z]\d+)_(\d+)_\d+.npy', os.path.basename(file))
248
+ if match:
249
+ plate, well, field = match.groups()
250
+ key = (plate, well, field)
251
+ if key not in organized_files:
252
+ organized_files[key] = []
253
+ organized_files[key].append(file)
254
+
255
+ for key, file_list in organized_files.items():
256
+ # Generate the name for the GIF based on plate, well, field
257
+ name = f'{key[0]}_{key[1]}_{key[2]}'
258
+ save_path_gif = os.path.join(gif_folder, f'timelapse_masks_{name}.gif')
259
+
260
+ for i, mask_channel in enumerate(mask_channels):
261
+ object_type = object_types[i]
262
+ # Initialize an empty list to store masks for the current object type
263
+ mask_arrays = []
264
+
265
+ for file in file_list:
266
+ # Load only the current time series array
267
+ array = np.load(file)
268
+ # Append the specific channel mask to the mask_arrays list
269
+ mask_arrays.append(array[:, :, mask_channel])
270
+
271
+ # Convert mask_arrays list to a numpy array for processing
272
+ mask_arrays_np = np.array(mask_arrays)
273
+ # Generate filenames for each frame in the time series
274
+ filenames = [os.path.basename(f) for f in file_list]
275
+ # Create the GIF for the current time series and object type
276
+ _masks_to_gif(mask_arrays_np, gif_folder, name, filenames, object_type)
277
+
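A hedged call sketch for the folder-level entry point; the path, channel indices and object names below are purely hypothetical:

from spacr.timelapse import _timelapse_masks_to_gif

# expects per-timepoint .npy stacks named like '1_A01_1_0.npy', with one mask per channel
_timelapse_masks_to_gif('/data/plate1/masks', mask_channels=[4, 5], object_types=['cell', 'nucleus'])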
278
+ def _relabel_masks_based_on_tracks(masks, tracks, mode='btrack'):
279
+ """
280
+ Relabels the masks based on the tracks DataFrame.
281
+
282
+ Args:
283
+ masks (ndarray): Input masks array with shape (num_frames, height, width).
284
+ tracks (DataFrame): DataFrame containing track information.
285
+ mode (str, optional): Mode for relabeling. Defaults to 'btrack'.
286
+
287
+ Returns:
288
+ ndarray: Relabeled masks array with the same shape and dtype as the input masks.
289
+ """
290
+ # Initialize an array to hold the relabeled masks with the same shape and dtype as the input masks
291
+ relabeled_masks = np.zeros(masks.shape, dtype=masks.dtype)
292
+
293
+ # Iterate through each frame
294
+ for frame_number in range(masks.shape[0]):
295
+ # Extract the mapping for the current frame from the tracks DataFrame
296
+ frame_tracks = tracks[tracks['frame'] == frame_number]
297
+ mapping = dict(zip(frame_tracks['original_label'], frame_tracks['track_id']))
298
+ current_mask = masks[frame_number, :, :]
299
+
300
+ # Apply the mapping to the current mask
301
+ for original_label, new_label in mapping.items():
302
+ # Where the current mask equals the original label, set it to the new label value
303
+ relabeled_masks[frame_number][current_mask == original_label] = new_label
304
+
305
+ return relabeled_masks
306
+
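A toy example of the relabelling step; the tracks table only needs the 'frame', 'original_label' and 'track_id' columns used above:

import numpy as np
import pandas as pd
from spacr.timelapse import _relabel_masks_based_on_tracks

masks = np.zeros((2, 4, 4), dtype=np.uint16)
masks[0, :2, :2] = 5   # label 5 in frame 0
masks[1, 2:, 2:] = 9   # the same cell, labelled 9 in frame 1
tracks = pd.DataFrame({'frame': [0, 1], 'original_label': [5, 9], 'track_id': [1, 1]})
relabeled = _relabel_masks_based_on_tracks(masks, tracks)
# both frames now carry track_id 1 where labels 5 and 9 used to be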
307
+ def _prepare_for_tracking(mask_array):
308
+ """
309
+ Prepare the mask array for object tracking.
310
+
311
+ Args:
312
+ mask_array (ndarray): Array of binary masks representing objects.
313
+
314
+ Returns:
315
+ DataFrame: DataFrame containing information about each object in the mask array.
316
+ The DataFrame has the following columns:
317
+ - frame: The frame number.
318
+ - y: The y-coordinate of the object's centroid.
319
+ - x: The x-coordinate of the object's centroid.
320
+ - mass: The area of the object.
321
+ - original_label: The original label of the object.
322
+
323
+ """
324
+ frames = []
325
+ for t, frame in enumerate(mask_array):
326
+ props = regionprops(frame)
327
+ for obj in props:
328
+ # Include 'label' in the dictionary to capture the original label of the object
329
+ frames.append({
330
+ 'frame': t,
331
+ 'y': obj.centroid[0],
332
+ 'x': obj.centroid[1],
333
+ 'mass': obj.area,
334
+ 'original_label': obj.label # Capture the original label
335
+ })
336
+ return pd.DataFrame(frames)
337
+
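The resulting table is what trackpy's linker expects; a minimal sketch with a synthetic labelled stack (shapes and parameter values are illustrative):

import numpy as np
import trackpy as tp
from spacr.timelapse import _prepare_for_tracking

mask_array = np.zeros((3, 32, 32), dtype=np.uint16)
for t in range(3):
    mask_array[t, 5:10, 5 + t:10 + t] = 1  # one object moving one pixel per frame
features = _prepare_for_tracking(mask_array)
linked = tp.link(features, search_range=5, memory=1)  # adds a 'particle' column with track ids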
338
+ def _find_optimal_search_range(features, initial_search_range=500, increment=10, max_attempts=49, memory=3):
339
+ """
340
+ Find the optimal search range for linking features.
341
+
342
+ Args:
343
+ features (list): List of features to be linked.
344
+ initial_search_range (int, optional): Initial search range. Defaults to 500.
345
+ increment (int, optional): Amount by which the search range is reduced after each failed linking attempt. Defaults to 10.
346
+ max_attempts (int, optional): Maximum number of attempts to find the optimal search range. Defaults to 49.
347
+ memory (int, optional): Memory parameter for linking features. Defaults to 3.
348
+
349
+ Returns:
350
+ int: The optimal search range for linking features.
351
+ """
352
+ optimal_search_range = initial_search_range
353
+ for attempt in range(max_attempts):
354
+ try:
355
+ # Attempt to link features with the current search range
356
+ tracks_df = tp.link(features, search_range=optimal_search_range, memory=memory)
357
+ print(f"Success with search_range={optimal_search_range}")
358
+ return optimal_search_range
359
+ except Exception as e:
360
+ #print(f"SubnetOversizeException with search_range={optimal_search_range}: {e}")
361
+ optimal_search_range -= increment
362
+ print(f'Retrying with displacement value: {optimal_search_range}', end='\r', flush=True)
363
+ min_range = initial_search_range-(max_attempts*increment)
364
+ if optimal_search_range <= min_range:
365
+ print(f'Could not find a workable search range; stopping at search_range={optimal_search_range}. Consider lowering timelapse_displacement or leaving it as None for automatic thresholding.')
366
+ return optimal_search_range
367
+
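A sketch of how the fallback is meant to be used when no displacement is configured (mask_array as in the previous sketch; values are illustrative):

import trackpy as tp
from spacr.timelapse import _find_optimal_search_range, _prepare_for_tracking

features = _prepare_for_tracking(mask_array)
search_range = _find_optimal_search_range(features, initial_search_range=200, increment=10, max_attempts=19, memory=3)
tracks_df = tp.link(features, search_range=search_range, memory=3)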
368
+ def _remove_objects_from_first_frame(masks, percentage=10):
369
+ """
370
+ Removes a specified percentage of objects from the first frame of a sequence of masks.
371
+
372
+ Parameters:
373
+ masks (ndarray): Sequence of masks representing the frames.
374
+ percentage (int): Percentage of objects to remove from the first frame.
375
+
376
+ Returns:
377
+ ndarray: Sequence of masks with objects removed from the first frame.
378
+ """
379
+ first_frame = masks[0]
380
+ unique_labels = np.unique(first_frame[first_frame != 0])
381
+ num_labels_to_remove = max(1, int(len(unique_labels) * (percentage / 100)))
382
+ labels_to_remove = random.sample(list(unique_labels), num_labels_to_remove)
383
+
384
+ for label in labels_to_remove:
385
+ masks[0][first_frame == label] = 0
386
+ return masks
387
+
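A quick check of the first-frame pruning on a synthetic stack (all shapes assumed); roughly 10 % of the seed objects are blanked:

import numpy as np
from spacr.timelapse import _remove_objects_from_first_frame

masks = np.zeros((2, 64, 64), dtype=np.uint16)
for label in range(1, 21):            # twenty small objects in frame 0
    r, c = divmod(label - 1, 5)
    masks[0, r * 12:r * 12 + 4, c * 12:c * 12 + 4] = label
masks = _remove_objects_from_first_frame(masks, percentage=10)
print(len(np.unique(masks[0])) - 1)   # 18 objects remain (2 of 20 removed)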
388
+ def _facilitate_tracking_with_adaptive_removal(masks, search_range=500, max_attempts=100, memory=3):
389
+ """
390
+ Facilitates object tracking with adaptive removal.
391
+
392
+ Args:
393
+ masks (numpy.ndarray): Array of binary masks representing objects in each frame.
394
+ search_range (int, optional): Maximum distance objects can move between frames. Defaults to 500.
395
+ max_attempts (int, optional): Maximum number of attempts to track objects. Defaults to 100.
396
+ memory (int, optional): Number of frames to remember when linking tracks. Defaults to 3.
397
+
398
+ Returns:
399
+ tuple: A tuple containing the updated masks, features, and tracks_df.
400
+ masks (numpy.ndarray): Updated array of binary masks.
401
+ features (pandas.DataFrame): DataFrame containing features for object tracking.
402
+ tracks_df (pandas.DataFrame): DataFrame containing the tracked object trajectories.
403
+
404
+ Note:
405
+ If tracking fails after the maximum number of attempts, a warning is printed and (None, None, None) is returned.
406
+
407
+ """
408
+ attempts = 0
409
+ first_frame = masks[0]
410
+ starting_objects = np.unique(first_frame[first_frame != 0])
411
+ while attempts < max_attempts:
412
+ try:
413
+ masks = _remove_objects_from_first_frame(masks, 10)
414
+ first_frame = masks[0]
415
+ objects = np.unique(first_frame[first_frame != 0])
416
+ print(f'{len(objects)} objects remaining in the first frame', flush=True)
417
+ features = _prepare_for_tracking(masks)
418
+ tracks_df = tp.link(features, search_range=search_range, memory=memory)
419
+ print(f"Success with {len(objects)} objects, started with {len(starting_objects)} objects")
420
+ return masks, features, tracks_df
421
+ except Exception as e: # Consider catching a more specific exception if possible
422
+ print(f"Retrying with fewer objects. Exception: {e}", flush=True)
423
+ finally:
424
+ attempts += 1
425
+ print(f"Failed to track objects after {max_attempts} attempts. Consider adjusting parameters.")
426
+ return None, None, None
427
+
428
+ def _trackpy_track_cells(src, name, batch_filenames, object_type, masks, timelapse_displacement, timelapse_memory, timelapse_remove_transient, plot, save, mode):
429
+ """
430
+ Track cells using the Trackpy library.
431
+
432
+ Args:
433
+ src (str): The source file path.
434
+ name (str): The name of the track.
435
+ batch_filenames (list): List of batch filenames.
436
+ object_type (str): The type of object to track.
437
+ masks (list): List of masks.
438
+ timelapse_displacement (int): The displacement for timelapse tracking.
439
+ timelapse_memory (int): The memory for timelapse tracking.
440
+ timelapse_remove_transient (bool): Whether to remove transient objects in timelapse tracking.
441
+ plot (bool): Whether to plot the tracks.
442
+ save (bool): Whether to save the tracks.
443
+ mode (str): The mode of tracking.
444
+
445
+ Returns:
446
+ list: The mask stack.
447
+
448
+ """
449
+
450
+ from .plot import _visualize_and_save_timelapse_stack_with_tracks
451
+ from .utils import _masks_to_masks_stack
452
+
453
+ if timelapse_displacement is None:
454
+ features = _prepare_for_tracking(masks)
455
+ timelapse_displacement = _find_optimal_search_range(features, initial_search_range=500, increment=10, max_attempts=49, memory=timelapse_memory)
456
+ if timelapse_displacement is None:
457
+ timelapse_displacement = 50
458
+
459
+ masks, features, tracks_df = _facilitate_tracking_with_adaptive_removal(masks, search_range=timelapse_displacement, max_attempts=100, memory=timelapse_memory)
460
+
461
+ tracks_df['particle'] += 1
462
+
463
+ if timelapse_remove_transient:
464
+ tracks_df_filter = tp.filter_stubs(tracks_df, len(masks))
465
+ else:
466
+ tracks_df_filter = tracks_df.copy()
467
+
468
+ tracks_df_filter = tracks_df_filter.rename(columns={'particle': 'track_id'})
469
+ print(f'Removed {len(tracks_df)-len(tracks_df_filter)} tracked positions belonging to objects that were not present in all frames')
470
+ masks = _relabel_masks_based_on_tracks(masks, tracks_df_filter)
471
+ tracks_path = os.path.join(os.path.dirname(src), 'tracks')
472
+ os.makedirs(tracks_path, exist_ok=True)
473
+ tracks_df_filter.to_csv(os.path.join(tracks_path, f'trackpy_tracks_{object_type}_{name}.csv'), index=False)
474
+ if plot or save:
475
+ _visualize_and_save_timelapse_stack_with_tracks(masks, tracks_df_filter, save, src, name, plot, batch_filenames, object_type, mode)
476
+
477
+ mask_stack = _masks_to_masks_stack(masks)
478
+ return mask_stack
479
+
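A hedged end-to-end sketch for the trackpy entry point; every path, name and parameter value below is hypothetical, and it assumes the spacr wheel (including its plot and utils modules) imports cleanly:

import os, tempfile
import numpy as np
from spacr.timelapse import _trackpy_track_cells

masks = np.zeros((3, 64, 64), dtype=np.uint16)
for t in range(3):
    for label in range(1, 11):
        r, c = divmod(label - 1, 5)
        masks[t, r * 12 + t:r * 12 + t + 4, c * 12:c * 12 + 4] = label  # ten objects drifting down one pixel per frame
src = os.path.join(tempfile.mkdtemp(), 'masks.npz')  # only the parent directory is used, for the tracks CSV
stack = _trackpy_track_cells(src=src, name='1_A01_1', batch_filenames=[f'1_A01_1_{t}.npy' for t in range(3)],
                             object_type='cell', masks=masks, timelapse_displacement=10, timelapse_memory=1,
                             timelapse_remove_transient=True, plot=False, save=False, mode='trackpy')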
480
+ def _filter_short_tracks(df, min_length=5):
481
+ """Filter out tracks that are shorter than min_length.
482
+
483
+ Args:
484
+ df (pandas.DataFrame): The input DataFrame containing track information.
485
+ min_length (int, optional): The minimum length of tracks to keep. Defaults to 5.
486
+
487
+ Returns:
488
+ pandas.DataFrame: The filtered DataFrame containing only tracks with at least min_length points.
489
+ """
490
+ track_lengths = df.groupby('track_id').size()
491
+ long_tracks = track_lengths[track_lengths >= min_length].index
492
+ return df[df['track_id'].isin(long_tracks)]
493
+
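A two-line check of the stub filter (hypothetical track ids):

import pandas as pd
from spacr.timelapse import _filter_short_tracks

df = pd.DataFrame({'track_id': [1, 1, 1, 2], 'frame': [0, 1, 2, 0]})
print(_filter_short_tracks(df, min_length=3))  # only track 1 survives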
494
+ def _btrack_track_cells(src, name, batch_filenames, object_type, plot, save, masks_3D, mode, timelapse_remove_transient, radius=100, workers=10):
495
+ """
496
+ Track cells using the btrack library.
497
+
498
+ Args:
499
+ src (str): The source file path.
500
+ name (str): The name of the track.
501
+ batch_filenames (list): List of batch filenames.
502
+ object_type (str): The type of object to track.
503
+ plot (bool): Whether to plot the tracks.
504
+ save (bool): Whether to save the tracks.
505
+ masks_3D (ndarray): 3D array of masks.
506
+ mode (str): The tracking mode.
507
+ timelapse_remove_transient (bool): Whether to remove transient tracks.
508
+ radius (int, optional): The maximum search radius. Defaults to 100.
509
+ workers (int, optional): The number of workers. Defaults to 10.
510
+
511
+ Returns:
512
+ ndarray: The mask stack.
513
+
514
+ """
515
+
516
+ from .plot import _visualize_and_save_timelapse_stack_with_tracks
517
+ from .utils import _masks_to_masks_stack
518
+
519
+ CONFIG_FILE = btrack_datasets.cell_config()
520
+ n_frames, height, width = masks_3D.shape
521
+
522
+ FEATURES = ["area", "major_axis_length", "minor_axis_length", "orientation", "solidity"]
523
+ objects = btrack.utils.segmentation_to_objects(masks_3D, properties=tuple(FEATURES), num_workers=workers)
524
+
525
+ # initialise a tracker session using a context manager
526
+ with btrack.BayesianTracker() as tracker:
527
+ tracker.configure(CONFIG_FILE) # configure the tracker using a config file
528
+ tracker.max_search_radius = radius
529
+ tracker.tracking_updates = ["MOTION", "VISUAL"]
530
+ #tracker.tracking_updates = ["MOTION"]
531
+ tracker.features = FEATURES
532
+ tracker.append(objects) # append the objects to be tracked
533
+ tracker.volume=((0, width), (0, height)) # set the (x, y) tracking volume
534
+ tracker.track(step_size=100) # track them (in interactive mode)
535
+ tracker.optimize() # generate hypotheses and run the global optimizer
536
+ #data, properties, graph = tracker.to_napari() # get the tracks in a format for napari visualization
537
+ tracks = tracker.tracks # store the tracks
538
+ #cfg = tracker.configuration # store the configuration
539
+
540
+ # Process the track data to create a DataFrame
541
+ track_data = []
542
+ for track in tracks:
543
+ for t, x, y, z in zip(track.t, track.x, track.y, track.z):
544
+ track_data.append({
545
+ 'track_id': track.ID,
546
+ 'frame': t,
547
+ 'x': x,
548
+ 'y': y,
549
+ 'z': z
550
+ })
551
+ # Convert track data to a DataFrame
552
+ tracks_df = pd.DataFrame(track_data)
553
+ if timelapse_remove_transient:
554
+ tracks_df = _filter_short_tracks(tracks_df, min_length=len(masks_3D))
555
+
556
+ objects_df = _prepare_for_tracking(masks_3D)
557
+
558
+ # Optional: If necessary, round 'x' and 'y' to ensure matching precision
559
+ tracks_df['x'] = tracks_df['x'].round(decimals=2)
560
+ tracks_df['y'] = tracks_df['y'].round(decimals=2)
561
+ objects_df['x'] = objects_df['x'].round(decimals=2)
562
+ objects_df['y'] = objects_df['y'].round(decimals=2)
563
+
564
+ # Merge the DataFrames on 'frame', 'x', and 'y'
565
+ merged_df = pd.merge(tracks_df, objects_df, on=['frame', 'x', 'y'], how='inner')
566
+ final_df = merged_df[['track_id', 'frame', 'x', 'y', 'original_label']]
567
+
568
+ masks = _relabel_masks_based_on_tracks(masks_3D, final_df)
569
+ tracks_path = os.path.join(os.path.dirname(src), 'tracks')
570
+ os.makedirs(tracks_path, exist_ok=True)
571
+ final_df.to_csv(os.path.join(tracks_path, f'btrack_tracks_{object_type}_{name}.csv'), index=False)
572
+ if plot or save:
573
+ _visualize_and_save_timelapse_stack_with_tracks(masks, final_df, save, src, name, plot, batch_filenames, object_type, mode)
574
+
575
+ mask_stack = _masks_to_masks_stack(masks)
576
+ return mask_stack
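A hedged end-to-end sketch for the btrack entry point; paths, names and parameters are hypothetical, and it assumes btrack is installed and can fetch its default cell configuration on first use:

import os, tempfile
import numpy as np
from spacr.timelapse import _btrack_track_cells

masks_3D = np.zeros((3, 64, 64), dtype=np.uint16)
for t in range(3):
    masks_3D[t, 10:20, 10 + t:20 + t] = 1  # a single labelled cell drifting right
src = os.path.join(tempfile.mkdtemp(), 'masks.npz')  # only the parent directory is used, for the tracks CSV
stack = _btrack_track_cells(src=src, name='1_A01_1', batch_filenames=['1_A01_1_0.npy'], object_type='cell',
                            plot=False, save=False, masks_3D=masks_3D, mode='btrack',
                            timelapse_remove_transient=False, radius=50, workers=1)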