fucciphase-0.0.3-py3-none-any.whl → fucciphase-0.0.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fucciphase/fucci_phase.py CHANGED
@@ -1,3 +1,4 @@
+import logging
 from pathlib import Path
 
 import pandas as pd
@@ -7,6 +8,8 @@ from .phase import generate_cycle_phases
 from .sensor import FUCCISensor
 from .utils import normalize_channels, split_trackmate_tracks
 
+logger = logging.getLogger(__name__)
+
 
 def process_dataframe(
     df: pd.DataFrame,
@@ -91,6 +94,25 @@ def process_dataframe(
     if len(channels) != sensor.fluorophores:
         raise ValueError(f"Need to provide {sensor.fluorophores} channel names.")
 
+    # validate DataFrame is not empty
+    if df.empty:
+        raise ValueError("Input DataFrame is empty.")
+
+    # validate that required channel columns exist
+    missing_channels = [ch for ch in channels if ch not in df.columns]
+    if missing_channels:
+        raise ValueError(
+            f"Missing channel columns in DataFrame: {missing_channels}. "
+            f"Available columns: {list(df.columns)}"
+        )
+
+    # validate that FRAME column exists (required for processing)
+    if "FRAME" not in df.columns:
+        raise ValueError(
+            "Missing required 'FRAME' column in DataFrame. "
+            f"Available columns: {list(df.columns)}"
+        )
+
     # optionally split TrackMate subtracks and re-label them as unique tracks
     if generate_unique_tracks:
         if "TRACK_ID" in df.columns:
@@ -98,8 +120,10 @@ def process_dataframe(
             # perform all operation on unique tracks
             track_id_name = "UNIQUE_TRACK_ID"
         else:
-            print("Warning: unique tracks can only be prepared for TrackMate files.")
-            print("The tracks have not been updated.")
+            logger.warning(
+                "Unique tracks can only be prepared for TrackMate files. "
+                "The tracks have not been updated."
+            )
 
     # normalize the channels
     normalize_channels(
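The new up-front checks turn late KeyErrors deep in the pipeline into immediate, descriptive ValueErrors. A standalone sketch of the same fail-fast pattern (the DataFrame and channel names here are hypothetical, not from the package):

    import pandas as pd

    df = pd.DataFrame({"MEAN_INTENSITY_CH1": [0.1, 0.2]})  # no FRAME column
    channels = ["MEAN_INTENSITY_CH1", "MEAN_INTENSITY_CH2"]

    missing = [ch for ch in channels if ch not in df.columns]
    if missing:
        print(f"missing channel columns: {missing}")  # caught before any processing
    if "FRAME" not in df.columns:
        print("missing required 'FRAME' column")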
fucciphase/io.py CHANGED
@@ -27,7 +27,7 @@ def read_trackmate_xml(xml_path: Path | str) -> tuple[pd.DataFrame, TrackMateXML
     # convert the spots to a dataframe
     df = trackmate.to_pandas()
     # sort by frame number to have increasing time
-    df.sort_values(by="FRAME")
+    df = df.sort_values(by="FRAME")
 
     return df, trackmate
 
@@ -61,10 +61,7 @@ def read_trackmate_csv(csv_path: Path | str) -> pd.DataFrame:
     df = pd.read_csv(csv_path, encoding="unicode_escape", skiprows=[1, 2, 3])
 
     # sanity check: trackmate must have at least two channels
-    if (
-        "MEAN_INTENSITY_CH1" not in df.columns
-        and "MEAN_INTENSITY_CH2" not in df.columns
-    ):
+    if "MEAN_INTENSITY_CH1" not in df.columns or "MEAN_INTENSITY_CH2" not in df.columns:
        raise ValueError("Trackmate must have at least two channels.")
 
     # return dataframe with converted types (object -> string)
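Two genuine bug fixes here: `DataFrame.sort_values` returns a sorted copy unless `inplace=True`, so the 0.0.3 call had no effect, and the channel sanity check previously used `and`, which only failed when both intensity columns were absent. A quick demonstration of the first fix:

    import pandas as pd

    df = pd.DataFrame({"FRAME": [2, 0, 1]})
    df.sort_values(by="FRAME")       # 0.0.3: result discarded, df unchanged
    df = df.sort_values(by="FRAME")  # 0.0.4: rebind to keep the sorted order
    print(df["FRAME"].tolist())      # [0, 1, 2]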
fucciphase/main_cli.py CHANGED
@@ -1,5 +1,6 @@
 import argparse
 import json
+import logging
 from pathlib import Path
 
 import pandas as pd
@@ -9,12 +10,10 @@ from fucciphase.napari import add_trackmate_data_to_viewer
 from fucciphase.phase import estimate_percentage_by_subsequence_alignment
 from fucciphase.sensor import FUCCISASensor, get_fuccisa_default_sensor
 
-try:
-    import napari
-except ImportError as err:
-    raise ImportError("Install napari.") from err
+logger = logging.getLogger(__name__)
 
 
+# ruff: noqa: C901
 def main_cli() -> None:
     """Fucciphase CLI: Command-line entry point for FUCCIphase.
 
@@ -40,6 +39,11 @@ def main_cli() -> None:
     not return a value. It will raise a ``ValueError`` if the tracking
     file does not have an XML or CSV extension.
     """
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(levelname)s - %(name)s - %(message)s",
+    )
+
     parser = argparse.ArgumentParser(
         prog="fucciphase",
         description="FUCCIphase tool to estimate cell cycle phases and percentages.",
@@ -81,9 +85,8 @@ def main_cli() -> None:
     )
     parser.add_argument(
         "--generate_unique_tracks",
-        type=bool,
+        action="store_true",
         help="Split subtracks (TrackMate specific)",
-        default=False,
     )
 
     args = parser.parse_args()
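The switch from `type=bool` to `action="store_true"` fixes a classic argparse pitfall: the converter receives a string, and any non-empty string is truthy, so `--generate_unique_tracks False` evaluated to `True`. A minimal demonstration:

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument("--broken", type=bool, default=False)
    p.add_argument("--fixed", action="store_true")

    print(p.parse_args(["--broken", "False"]).broken)  # True (!): bool("False") is True
    print(p.parse_args(["--fixed"]).fixed)             # True when the flag is present
    print(p.parse_args([]).fixed)                      # False when absent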
@@ -92,17 +95,39 @@ def main_cli() -> None:
     output_dir.mkdir(exist_ok=True)
 
     # ---------------- 2. Load and adapt the reference cell-cycle trace ----------------
-    reference_df = pd.read_csv(args.reference_file)
+    try:
+        reference_df = pd.read_csv(args.reference_file)
+    except FileNotFoundError:
+        raise FileNotFoundError(
+            f"Reference file not found: {args.reference_file}"
+        ) from None
+    except pd.errors.EmptyDataError:
+        raise ValueError(f"Reference file is empty: {args.reference_file}") from None
+    except pd.errors.ParserError as e:
+        raise ValueError(
+            f"Failed to parse reference file {args.reference_file}: {e}"
+        ) from None
+
     # The reference file is expected to contain 'cyan' and 'magenta' columns;
     # they are renamed here to match the actual channel names in the data.
     reference_df.rename(
         columns={"cyan": args.cyan_channel, "magenta": args.magenta_channel},
         inplace=True,
     )
+
     # ---------------- 3. Build the sensor model ----------------
     if args.sensor_file is not None:
-        with open(args.sensor_file) as fp:
-            sensor_properties = json.load(fp)
+        try:
+            with open(args.sensor_file) as fp:
+                sensor_properties = json.load(fp)
+        except FileNotFoundError:
+            raise FileNotFoundError(
+                f"Sensor file not found: {args.sensor_file}"
+            ) from None
+        except json.JSONDecodeError as e:
+            raise ValueError(
+                f"Invalid JSON in sensor file {args.sensor_file}: {e}"
+            ) from None
        sensor = FUCCISASensor(**sensor_properties)
     else:
         sensor = get_fuccisa_default_sensor()
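The `raise ... from None` idiom used throughout these handlers suppresses the chained "During handling of the above exception..." traceback, so users see only the friendly message rather than pandas/JSON internals. A minimal sketch (the path is hypothetical):

    import pandas as pd

    try:
        try:
            pd.read_csv("missing_reference.csv")
        except FileNotFoundError:
            # from None suppresses the chained pandas traceback on propagation
            raise FileNotFoundError(
                "Reference file not found: missing_reference.csv"
            ) from None
    except FileNotFoundError as e:
        print(e)  # only the friendly message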
@@ -120,7 +145,18 @@ def main_cli() -> None:
     )
     elif args.tracking_file.endswith(".csv"):
         # CSV: read the table and then run the processing pipeline on it
-        df = pd.read_csv(args.tracking_file)
+        try:
+            df = pd.read_csv(args.tracking_file)
+        except FileNotFoundError:
+            raise FileNotFoundError(
+                f"Tracking file not found: {args.tracking_file}"
+            ) from None
+        except pd.errors.EmptyDataError:
+            raise ValueError(f"Tracking file is empty: {args.tracking_file}") from None
+        except pd.errors.ParserError as e:
+            raise ValueError(
+                f"Failed to parse tracking file {args.tracking_file}: {e}"
+            ) from None
         process_dataframe(
             df,
             channels=[args.cyan_channel, args.magenta_channel],
@@ -166,6 +202,16 @@ def main_visualization() -> None:
     console script and does not return a value.
 
     """
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(levelname)s - %(name)s - %(message)s",
+    )
+
+    try:
+        import napari
+    except ImportError as err:
+        raise ImportError("Install napari.") from err
+
     parser = argparse.ArgumentParser(
         prog="fucciphase-napari",
         description="FUCCIphase napari script to launch visualization.",
@@ -211,24 +257,24 @@ def main_visualization() -> None:
 
     # Try to read the video using AICSImage; fall back to bioio if needed
     AICSIMAGE = False
-    BIOIMAGE = False
     try:
         from aicsimageio import AICSImage
 
         AICSIMAGE = True
-    except ImportError as err:
-        from bioio import BioImage
-
-        BIOIMAGE = True
-        import bioio_ome_tiff
-
-    if not BIOIMAGE:
+    except ImportError:
+        try:
+            import bioio_ome_tiff
+            from bioio import BioImage
+        except ImportError as err:
             raise ImportError(
-                "Please install AICSImage or bioio to read videos"
+                "Please install aicsimageio or bioio to read videos. "
+                "Install with: pip install aicsimageio "
+                "or pip install bioio bioio-ome-tiff"
             ) from err
+
     if AICSIMAGE:
         image = AICSImage(args.video)
-    elif BIOIMAGE:
+    else:
         image = BioImage(args.video, reader=bioio_ome_tiff.Reader)
 
     # Determine spatial scale; fall back to unit scale or user-provided pixel size
@@ -237,12 +283,24 @@ def main_visualization() -> None:
     if args.pixel_size is not None:
         scale = (args.pixel_size, args.pixel_size)
     else:
-        print("WARNING: No pixel sizes found, using unit scale")
+        logger.warning("No pixel sizes found in image metadata, using unit scale")
         scale = (1.0, 1.0)
     cyan = image.get_image_dask_data("TYX", C=args.cyan_channel)
     magenta = image.get_image_dask_data("TYX", C=args.magenta_channel)
     masks = image.get_image_dask_data("TYX", C=args.segmask_channel)
-    track_df = pd.read_csv(args.fucciphase_file)
+
+    try:
+        track_df = pd.read_csv(args.fucciphase_file)
+    except FileNotFoundError:
+        raise FileNotFoundError(
+            f"FUCCIphase file not found: {args.fucciphase_file}"
+        ) from None
+    except pd.errors.EmptyDataError:
+        raise ValueError(f"FUCCIphase file is empty: {args.fucciphase_file}") from None
+    except pd.errors.ParserError as e:
+        raise ValueError(
+            f"Failed to parse FUCCIphase file {args.fucciphase_file}: {e}"
+        ) from None
 
     viewer = napari.Viewer()
 
@@ -257,7 +315,3 @@ def main_visualization() -> None:
         textkwargs={"size": 14},
     )
     napari.run()
-
-
-if __name__ == "__main__":
-    main_cli()
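Note the import restructuring: `import napari` moved from module level into `main_visualization()`, so the plain `fucciphase` entry point now works without napari installed; the dependency is only checked when visualization actually runs. The general lazy-import pattern, sketched:

    def main_visualization() -> None:
        try:
            import napari  # deferred: only this entry point needs it
        except ImportError as err:
            raise ImportError("Install napari.") from err
        ...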
fucciphase/napari/tracks_to_napari.py CHANGED
@@ -104,12 +104,13 @@ def pandas_df_to_napari_tracks(
     track_data = track_df[
         [unique_track_id_name, frame_id_name, position_y_name, position_x_name]
     ].to_numpy()
-    if track_df[feature_name].min() < 0 or track_df[feature_name].max() > 100.0:
-        raise ValueError(
-            "Make sure that the features are between 0 and 1, "
-            "otherwise the colormapping does not work well"
-        )
+
     features = None
     if feature_name is not None:
+        if track_df[feature_name].min() < 0 or track_df[feature_name].max() > 100.0:
+            raise ValueError(
+                "Make sure that the features are between 0 and 1, "
+                "otherwise the colormapping does not work well"
+            )
         features = {feature_name: track_df[feature_name].to_numpy()}
     viewer.add_tracks(track_data, features=features, colormaps_dict=colormaps_dict)
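In 0.0.3 the range check ran before the `None` guard, so calling the function without a feature indexed `track_df[feature_name]` with `feature_name=None` and raised `KeyError: None`; 0.0.4 only validates when a feature is actually requested. Sketch (column name hypothetical):

    import pandas as pd

    track_df = pd.DataFrame({"CELL_CYCLE_PERC": [5.0, 50.0]})
    feature_name = None

    features = None
    if feature_name is not None:  # guard added in 0.0.4
        features = {feature_name: track_df[feature_name].to_numpy()}
    print(features)  # None: no feature colouring requested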
fucciphase/phase.py CHANGED
@@ -1,11 +1,13 @@
+import logging
 from enum import Enum
+from typing import Literal
 
 import dtaidistance.preprocessing
 import numpy as np
 import pandas as pd
 from dtaidistance.dtw import warping_amount
 from dtaidistance.subsequence.dtw import subsequence_alignment
-from scipy import interpolate, stats
+from scipy import interpolate, signal, stats
 
 from .sensor import FUCCISensor
 from .utils import (
@@ -15,6 +17,11 @@ from .utils import (
     get_time_distortion_coefficient,
 )
 
+logger = logging.getLogger(__name__)
+
+# Type alias for signal processing mode
+SignalMode = Literal["signal", "derivative", "both"]
+
 
 class NewColumns(str, Enum):
     """Columns generated by the analysis.
@@ -336,6 +343,144 @@ def estimate_cell_phase_from_background(
     df[NewColumns.discrete_phase_bg()] = pd.Series(phase_names, dtype=str)  # add as str
 
 
+def _process_channel(
+    series: np.ndarray,
+    signal_mode: SignalMode,
+    smooth: float,
+    channel_name: str = "",
+    signal_smooth: int = 0,
+) -> list[np.ndarray]:
+    """Process a single channel according to the signal mode.
+
+    Parameters
+    ----------
+    series : np.ndarray
+        The input signal array.
+    signal_mode : SignalMode
+        Processing mode: "signal", "derivative", or "both".
+    smooth : float
+        Smoothing factor for differencing (removes high frequencies).
+    channel_name : str, optional
+        Channel name for warning messages.
+    signal_smooth : int, optional
+        Window size for signal smoothing (Savitzky-Golay filter with polyorder=3).
+        0 means no smoothing. Must be > 3 if used.
+        Only applies when signal_mode is "signal" or "both".
+
+    Returns
+    -------
+    list[np.ndarray]
+        List of processed arrays. Length 1 for "signal" or "derivative",
+        length 2 for "both" (signal first, then derivative).
+    """
+    results = []
+
+    if signal_mode in ("signal", "both"):
+        smoothed_signal = series.copy()
+        if signal_smooth > 3:
+            smoothed_signal = signal.savgol_filter(
+                series, window_length=signal_smooth, polyorder=3, mode="nearest"
+            )
+        elif signal_smooth > 0:
+            logger.warning(
+                "signal_smooth=%d is too small (must be > 3), skipping smoothing",
+                signal_smooth,
+            )
+        results.append(smoothed_signal)
+
+    if signal_mode in ("derivative", "both"):
+        try:
+            diff = dtaidistance.preprocessing.differencing(series, smooth=smooth)
+        except ValueError:
+            if channel_name:
+                logger.warning(
+                    "Smoothing failed for channel %s, continuing without smoothing",
+                    channel_name,
+                )
+            diff = dtaidistance.preprocessing.differencing(series)
+        results.append(diff)
+
+    return results
+
+
+def _compute_both_mode_scale_factor(processed_series: list[np.ndarray]) -> float:
+    """Compute scale factor to balance signal and derivative contributions.
+
+    In "both" mode, signals and derivatives may have different magnitudes.
+    This function computes a scale factor to apply to signals so they
+    contribute equally to the DTW distance.
+
+    Parameters
+    ----------
+    processed_series : list[np.ndarray]
+        List of processed arrays in order:
+        [signal_ch1, deriv_ch1, signal_ch2, deriv_ch2, ...]
+
+    Returns
+    -------
+    float
+        Scale factor to multiply signals by. Returns 1.0 if derivatives have zero std.
+    """
+    # In "both" mode, signals are at even indices, derivatives at odd indices
+    signals = [processed_series[i] for i in range(0, len(processed_series), 2)]
+    derivatives = [processed_series[i] for i in range(1, len(processed_series), 2)]
+
+    signal_std = np.mean([np.std(s) for s in signals])
+    deriv_std = np.mean([np.std(d) for d in derivatives])
+
+    if signal_std == 0:
+        return 1.0
+    return deriv_std / signal_std  # type: ignore[no-any-return]
+
+
+def _apply_both_mode_scaling(
+    processed_series: list[np.ndarray], scale_factor: float
+) -> list[np.ndarray]:
+    """Apply scale factor to signal features in "both" mode.
+
+    Parameters
+    ----------
+    processed_series : list[np.ndarray]
+        List of processed arrays in order:
+        [signal_ch1, deriv_ch1, signal_ch2, deriv_ch2, ...]
+    scale_factor : float
+        Scale factor to multiply signals by.
+
+    Returns
+    -------
+    list[np.ndarray]
+        Scaled processed series with signals multiplied by scale_factor.
+    """
+    scaled = []
+    for i, arr in enumerate(processed_series):
+        if i % 2 == 0:  # Signal (even index)
+            scaled.append(arr * scale_factor)
+        else:  # Derivative (odd index)
+            scaled.append(arr)
+    return scaled
+
+
+def _compute_output_length_offset(signal_mode: SignalMode) -> int:
+    """Return the offset to add to query length for output array size.
+
+    When using derivatives, the output is 1 element shorter, so we need
+    to add 1 to get back to the original track length.
+
+    Parameters
+    ----------
+    signal_mode : SignalMode
+        The signal processing mode.
+
+    Returns
+    -------
+    int
+        Offset to add: 1 if derivative is used, 0 otherwise.
+    """
+    if signal_mode in ("derivative", "both"):
+        return 1
+    return 0
+
+
 # flake8: noqa: C901
 def estimate_percentage_by_subsequence_alignment(
     df: pd.DataFrame,
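`_compute_both_mode_scale_factor` assumes the strict `[signal_ch1, deriv_ch1, signal_ch2, deriv_ch2, ...]` layout produced by `_process_channel` in "both" mode. A hypothetical two-channel illustration of the factor it produces (data invented for scale, not from the package):

    import numpy as np

    rng = np.random.default_rng(0)
    sig1 = 10.0 * rng.standard_normal(50)  # large-magnitude signal
    der1 = np.diff(sig1) / 10.0            # much smaller derivative
    sig2 = 10.0 * rng.standard_normal(50)
    der2 = np.diff(sig2) / 10.0

    signal_std = np.mean([sig1.std(), sig2.std()])
    deriv_std = np.mean([der1.std(), der2.std()])
    scale = deriv_std / signal_std  # roughly 0.14 here
    # multiplying the signals by `scale` makes both feature types
    # contribute comparably to the DTW distance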
@@ -347,7 +492,10 @@ def estimate_percentage_by_subsequence_alignment(
     track_id_name: str = "TRACK_ID",
     minimum_track_length: int = 10,
     use_zscore_norm: bool = True,
-    use_derivative: bool = True,
+    signal_mode: SignalMode = "derivative",
+    signal_weight: float = 1.0,
+    signal_smooth: int = 0,
+    use_derivative: bool | None = None,
 ) -> None:
     """Use subsequence alignment to estimate percentage.
 
@@ -362,7 +510,7 @@ def estimate_percentage_by_subsequence_alignment(
     reference_data: pd.DataFrame
         Containing reference intensities over time
     smooth: float
-        Smoothing factor, see dtaidistance documentation
+        Smoothing factor for derivative (removes high frequencies, 0-0.5)
     penalty: float
         Penalty for DTW algorithm, enforces diagonal warping path
     track_id_name: str
@@ -373,10 +521,34 @@ def estimate_percentage_by_subsequence_alignment(
         Use z-score normalization before differencing curves
         Probably not needed if intensities of reference and measured
        curve are similar
-    use_derivative: bool
-        Take derivative to perform alignment independent of intensity
-        baseline (in default mode also after normalization)
+    signal_mode: SignalMode
+        Signal processing mode:
+        - "signal": use raw signal only
+        - "derivative": use derivative only (default, for baseline independence)
+        - "both": use both signal and derivative as features
+    signal_weight: float
+        Weight for signal relative to derivative in "both" mode.
+        Default 1.0 means equal contribution. Values > 1.0 weight signal
+        higher, values < 1.0 weight derivative higher. Ignored for other modes.
+    signal_smooth: int
+        Window size for signal smoothing (Savitzky-Golay filter, polyorder=3).
+        0 means no smoothing. Must be > 3 if used.
+        Only applies in "signal" or "both" modes.
+    use_derivative: bool | None
+        Deprecated. Use signal_mode instead. If provided, overrides signal_mode
+        for backward compatibility (True -> "derivative", False -> "signal").
     """
+    # Handle backward compatibility with use_derivative parameter
+    if use_derivative is not None:
+        import warnings
+
+        warnings.warn(
+            "use_derivative is deprecated, use signal_mode instead",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        signal_mode = "derivative" if use_derivative else "signal"
+
     if "time" not in reference_data:
         raise ValueError("Need to provide time column in reference_data.")
     if "percentage" not in reference_data:
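The shim keeps old call sites working while steering users toward `signal_mode`. Its mapping in isolation (`legacy_mode` is a stand-in name for illustration, not part of the package):

    import warnings

    def legacy_mode(signal_mode: str = "derivative",
                    use_derivative: bool | None = None) -> str:
        # same precedence as the new keyword handling: the legacy flag wins
        if use_derivative is not None:
            warnings.warn(
                "use_derivative is deprecated, use signal_mode instead",
                DeprecationWarning,
                stacklevel=2,
            )
            signal_mode = "derivative" if use_derivative else "signal"
        return signal_mode

    print(legacy_mode(use_derivative=False))  # "signal" (plus a DeprecationWarning)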
@@ -398,33 +570,48 @@ def estimate_percentage_by_subsequence_alignment(
 
     num_time = int(time_scale[-1] / dt)
     new_time_scale = np.linspace(0, dt * num_time, num=num_time + 1)
-    assert np.isclose(dt, new_time_scale[1] - new_time_scale[0])
+    actual_dt = new_time_scale[1] - new_time_scale[0]
+    if not np.isclose(dt, actual_dt):
+        raise ValueError(
+            f"Time scale mismatch: requested dt={dt}, but computed dt={actual_dt}. "
+            "Check that the reference data time scale is compatible with "
+            "the requested timestep."
+        )
 
     # reference curve in time scale of provided track
     percentage_ref = f_percentage(new_time_scale)
 
-    series_diff = []
+    processed_series = []
     for channel in channels:
         series = interpolation_functions[channel](new_time_scale)
         if use_zscore_norm:
             series = stats.zscore(series)
-            # if all values are the same, we zero to not numerical issues
+            # if all values are the same, we zero to avoid numerical issues
             if np.all(np.isnan(series)):
-                series = 0.0
-
-        if use_derivative:
-            try:
-                diff_ch = dtaidistance.preprocessing.differencing(series, smooth=smooth)
-            except ValueError:
-                print(
-                    "WARNING: The smoothing failed, continue without smoothing"
-                    f" for channel {channel}"
-                )
-                diff_ch = dtaidistance.preprocessing.differencing(series)
-        else:
-            diff_ch = series
-        series_diff.append(diff_ch)
-    series = np.array(series_diff)
+                series = np.zeros_like(series)
+
+        channel_features = _process_channel(
+            series, signal_mode, smooth, channel, signal_smooth
+        )
+        processed_series.extend(channel_features)
+
+    # For "both" mode, trim signal features to match derivative length and scale
+    both_mode_scale_factor = 1.0
+    if signal_mode == "both":
+        min_len = min(len(s) for s in processed_series)
+        processed_series = [s[-min_len:] for s in processed_series]
+        # Also trim the percentage reference to match
+        percentage_ref = percentage_ref[-min_len:]
+        # Compute and apply scaling to balance signal and derivative contributions
+        # signal_weight > 1.0 weights signal higher relative to derivative
+        both_mode_scale_factor = (
+            _compute_both_mode_scale_factor(processed_series) * signal_weight
+        )
+        processed_series = _apply_both_mode_scaling(
+            processed_series, both_mode_scale_factor
+        )
+
+    series = np.array(processed_series)
     series = np.swapaxes(series, 0, 1)
 
     df.loc[:, NewColumns.cell_cycle_dtw()] = np.nan
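Differencing shortens each feature by one sample, so in "both" mode every feature (and the percentage reference) is right-aligned to the shortest length before stacking into one array. A toy illustration with invented lengths:

    import numpy as np

    # hypothetical features: two signals (10 samples) and two derivatives (9)
    features = [np.arange(10.0), np.arange(9.0), np.arange(10.0), np.arange(9.0)]
    min_len = min(len(f) for f in features)
    features = [f[-min_len:] for f in features]
    print({len(f) for f in features})  # {9}: all features now stack cleanly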
@@ -444,37 +631,48 @@ def estimate_percentage_by_subsequence_alignment(
         # find percentages if track is long enough
         queries = track_df[channels].to_numpy()
 
-        queries_diff = []
+        processed_queries = []
         for idx in range(len(channels)):
+            query_series = queries[:, idx].copy()
             if use_zscore_norm:
-                queries[:, idx] = stats.zscore(queries[:, idx])
-                # if all values are the same, we zero to not numerical issues
-                if np.all(np.isnan(queries[:, idx])):
-                    queries[:, idx] = 0.0
-            if use_derivative:
-                diff_ch = dtaidistance.preprocessing.differencing(
-                    queries[:, idx], smooth=smooth
-                )
-            else:
-                diff_ch = queries[:, idx]
-            queries_diff.append(diff_ch)
+                query_series = stats.zscore(query_series)
+                # if all values are the same, we zero to avoid numerical issues
+                if np.all(np.isnan(query_series)):
+                    query_series = np.zeros_like(query_series)
 
-        query = np.array(queries_diff)
+            channel_features = _process_channel(
+                query_series, signal_mode, smooth, signal_smooth=signal_smooth
+            )
+            processed_queries.extend(channel_features)
+
+        # For "both" mode, trim signal features to match derivative length and scale
+        if signal_mode == "both":
+            min_len = min(len(q) for q in processed_queries)
+            processed_queries = [q[-min_len:] for q in processed_queries]
+            # Apply same scale factor as reference to ensure consistent weighting
+            processed_queries = _apply_both_mode_scaling(
+                processed_queries, both_mode_scale_factor
+            )
+
+        query = np.array(processed_queries)
         query = np.swapaxes(query, 0, 1)
 
         sa = subsequence_alignment(query, series, penalty=penalty)
         best_match = sa.best_match()
-        if use_derivative:
-            new_percentage = np.zeros(query.shape[0] + 1)
-        else:
-            new_percentage = np.zeros(query.shape[0])
-        for p in best_match.path:
-            new_percentage[p[0]] = percentage_ref[p[1]]
-            if p[1] + 1 < len(percentage_ref):
-                last_percentage = p[1] + 1
+        length_offset = _compute_output_length_offset(signal_mode)
+        new_percentage = np.zeros(query.shape[0] + length_offset)
+
+        # Handle empty path case
+        if len(best_match.path) == 0:
+            new_percentage[:] = np.nan
         else:
-                last_percentage = p[1]
-        new_percentage[-1] = percentage_ref[last_percentage]
+            for p in best_match.path:
+                new_percentage[p[0]] = percentage_ref[p[1]]
+                if p[1] + 1 < len(percentage_ref):
+                    last_percentage = p[1] + 1
+                else:
+                    last_percentage = p[1]
+            new_percentage[-1] = percentage_ref[last_percentage]
         # save estimated cell cycle percentages
         df.loc[df[track_id_name] == track_id, NewColumns.cell_cycle_dtw()] = (
             new_percentage[:]
@@ -484,21 +682,33 @@ def estimate_percentage_by_subsequence_alignment(
             best_match.value
         )
 
-        _, distortion_score, _, _ = get_time_distortion_coefficient(best_match.path)
-        # save DTW distortion
-        df.loc[df[track_id_name] == track_id, NewColumns.dtw_distortion()] = (
-            distortion_score
-        )
-        df.loc[df[track_id_name] == track_id, NewColumns.dtw_distortion_norm()] = (
-            distortion_score / len(track_df)
-        )
+        # Handle empty path case for DTW metrics
+        if len(best_match.path) == 0:
+            df.loc[df[track_id_name] == track_id, NewColumns.dtw_distortion()] = np.nan
+            df.loc[df[track_id_name] == track_id, NewColumns.dtw_distortion_norm()] = (
+                np.nan
+            )
+            df.loc[df[track_id_name] == track_id, NewColumns.dtw_warping_amount()] = (
+                np.nan
+            )
+            df.loc[
+                df[track_id_name] == track_id, NewColumns.rel_dtw_warping_amount()
+            ] = np.nan
+        else:
+            _, distortion_score, _, _ = get_time_distortion_coefficient(best_match.path)
+            # save DTW distortion
+            df.loc[df[track_id_name] == track_id, NewColumns.dtw_distortion()] = (
+                distortion_score
+            )
+            df.loc[df[track_id_name] == track_id, NewColumns.dtw_distortion_norm()] = (
+                distortion_score / len(track_df)
+            )
 
-        # save DTW warping amount
-        df.loc[df[track_id_name] == track_id, NewColumns.dtw_warping_amount()] = (
-            warping_amount(best_match.path)
-        )
+            # save DTW warping amount
+            df.loc[df[track_id_name] == track_id, NewColumns.dtw_warping_amount()] = (
+                warping_amount(best_match.path)
+            )
 
-        # save DTW warping amount
-        df.loc[df[track_id_name] == track_id, NewColumns.rel_dtw_warping_amount()] = (
-            warping_amount(best_match.path) / len(track_df)
-        )
+            df.loc[
+                df[track_id_name] == track_id, NewColumns.rel_dtw_warping_amount()
+            ] = warping_amount(best_match.path) / len(track_df)
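The new `signal_mode` parameter threads through all of the above. Since `SignalMode` is a `typing.Literal` alias, static type checkers constrain callers to the three supported strings, while at runtime it remains an ordinary `str`:

    from typing import Literal

    SignalMode = Literal["signal", "derivative", "both"]

    def process(mode: SignalMode) -> str:
        return mode

    process("derivative")  # accepted
    process("deriv")       # runs, but mypy/pyright flag the invalid literal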
fucciphase/plot.py CHANGED
@@ -1,3 +1,4 @@
+import logging
 from itertools import cycle
 
 import numpy as np
@@ -11,6 +12,8 @@ from scipy import interpolate
 from .phase import NewColumns
 from .utils import get_norm_channel_name
 
+logger = logging.getLogger(__name__)
+
 
 def set_phase_colors(
     df: pd.DataFrame, colordict: dict, phase_column: str = "DISCRETE_PHASE_MAX"
@@ -569,7 +572,7 @@ def get_percentage_color(percentage: float) -> tuple:
     cmap_name = "cool"
     cmap = colormaps.get(cmap_name)
     if np.isnan(percentage):
-        print("WARNING: NaN value detected, plot will be transparent")
+        logger.warning("NaN percentage value detected, plot will be transparent")
         rgba_value = (0, 0, 0, 0)
     else:
         rgba_value = cmap(percentage / 100.0)
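Every module in this release follows the standard-library pattern: a module-level `logging.getLogger(__name__)` that stays silent until an application configures handlers, as `main_cli.py` now does with `logging.basicConfig`. Minimal sketch:

    import logging

    logger = logging.getLogger(__name__)  # e.g. "fucciphase.plot" when imported

    logging.basicConfig(
        level=logging.INFO,
        format="%(levelname)s - %(name)s - %(message)s",
    )
    logger.warning("NaN percentage value detected, plot will be transparent")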
fucciphase/sensor.py CHANGED
@@ -1,3 +1,4 @@
+import logging
 from abc import ABC, abstractmethod
 from typing import Union
 
@@ -5,6 +6,8 @@ import numpy as np
 import pandas as pd
 from scipy import optimize
 
+logger = logging.getLogger(__name__)
+
 
 def logistic(
     x: float | np.ndarray, center: float, sigma: float, sign: float = 1.0
@@ -273,8 +276,8 @@ class FUCCISASensor(FUCCISensor):
                 )
             )
         except ValueError:
-            print(
-                "WARNING: could not infer percentage in SG2M phase, using average phase"
+            logger.warning(
+                "Could not infer percentage in SG2M phase, using average phase value"
             )
             return g1s_perc + 0.5 * (100.0 - g1s_perc - g1_perc)
 
@@ -289,9 +292,20 @@ class FUCCISASensor(FUCCISensor):
            Name of phase
         intensities: List[float]
            List of channel intensities for all fluorophores
+
+        Raises
+        ------
+        ValueError
+            If the phase is not defined for this sensor or if the intensities
+            list does not have the expected number of elements.
         """
         if phase not in self.phases:
             raise ValueError(f"Phase {phase} is not defined for this sensor.")
+        if len(intensities) < self.fluorophores:
+            raise ValueError(
+                f"Expected {self.fluorophores} intensity values, "
+                f"but got {len(intensities)}."
+            )
         if phase == "G1":
             return self._find_g1_percentage(intensities[0])
         if phase == "G1/S":
fucciphase/tracking_utilities.py CHANGED
@@ -29,7 +29,13 @@ def get_feature_value_at_frame(
         If zero or multiple rows match the requested label.
     """
     value = labels[labels[label_name] == label, feature].to_numpy()
-    assert len(value) == 1
+    if len(value) == 0:
+        raise ValueError(f"No rows match label '{label}' in column '{label_name}'.")
+    if len(value) > 1:
+        raise ValueError(
+            f"Multiple rows ({len(value)}) match label '{label}' "
+            f"in column '{label_name}'. Expected exactly one match."
+        )
     return float(value[0])
 
 
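`assert` statements are stripped when Python runs with `-O`, so library invariants should be real exceptions; the new checks also distinguish the two failure modes. The same pattern standalone (`first_and_only` is a hypothetical helper, not in the package):

    def first_and_only(values: list) -> float:
        # unlike `assert len(values) == 1`, these checks survive `python -O`
        if len(values) == 0:
            raise ValueError("No rows match the requested label.")
        if len(values) > 1:
            raise ValueError(f"Multiple rows ({len(values)}) match; expected one.")
        return float(values[0])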
fucciphase/utils/normalize.py CHANGED
@@ -35,21 +35,44 @@ def get_avg_channel_name(channel: str) -> str:
     return f"{channel}_AVG"
 
 
-def norm(vector: pd.Series | np.ndarray) -> pd.Series | np.ndarray:
+def norm(
+    vector: pd.Series | np.ndarray,
+    max_ch: float | None = None,
+    min_ch: float | None = None,
+) -> pd.Series | np.ndarray:
     """Normalize a vector by subtracting the min and dividing by (max - min).
 
     Parameters
     ----------
     vector : Union[pd.Series, np.ndarray]
         Vector to normalize.
+    max_ch: Optional[float]
+        Optional value for the maximum used in normalization
+    min_ch: Optional[float]
+        Optional value for the minimum used in normalization
 
     Returns
     -------
     Union[pd.Series, np.ndarray]
         Normalized vector.
+
+    Raises
+    ------
+    ValueError
+        If max_ch equals min_ch (constant signal), which would cause division by zero.
     """
-    max_ch = vector.max()
-    min_ch = vector.min()
+    if max_ch is None:
+        max_ch = vector.max()
+    if min_ch is None:
+        min_ch = vector.min()
+
+    # Check for division by zero (constant signal)
+    if np.isclose(max_ch, min_ch):
+        raise ValueError(
+            f"Cannot normalize: max ({max_ch}) equals min ({min_ch}). "
+            "The signal appears to be constant."
+        )
+
     norm_ch = np.round(
         (vector - min_ch) / (max_ch - min_ch),
         2,  # number of decimals
@@ -148,14 +171,22 @@ def normalize_channels(
             df.loc[index, avg_channel] = ma
 
     # normalize channels
-    for channel in channels:
+    for idx, channel in enumerate(channels):
         # moving average creates a new column with an own name
         if use_moving_average:
             avg_channel = get_avg_channel_name(channel)
         else:
             avg_channel = channel
         # normalize channel
-        norm_ch = norm(df[avg_channel])
+        # default: compute max and min per channel
+        max_ch = None
+        min_ch = None
+        # if manually specified limits, overwrite
+        if manual_max is not None:
+            max_ch = manual_max[idx]
+        if manual_min is not None:
+            min_ch = manual_min[idx]
+        norm_ch = norm(df[avg_channel], max_ch=max_ch, min_ch=min_ch)
 
         # add the new column
         new_column = get_norm_channel_name(channel)
@@ -186,10 +217,15 @@ def smooth_track(
         Name of column with track IDs
     moving_average_window : int
         Size of the window used for the moving average, default 7.
+        Must be greater than 3.
     """
     # get the track
     track: pd.DataFrame = df[df[track_id_name] == track_ID]
 
+    # hard-coded polyorder is 3, window length must be longer
+    if moving_average_window <= 3:
+        raise ValueError("Use moving_average_window of at least 4.")
+
     # compute the moving average
     ma = signal.savgol_filter(
         track[channel],
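The new `max_ch`/`min_ch` hooks let `normalize_channels` apply one global intensity range across a whole experiment instead of per-column min/max. Sketched with plain pandas (the values and the global limits are hypothetical):

    import pandas as pd

    intensity = pd.Series([10.0, 55.0, 100.0])

    # default: limits taken from the data itself
    auto = (intensity - intensity.min()) / (intensity.max() - intensity.min())
    # manual: shared limits, e.g. the global min/max of the experiment
    manual = (intensity - 0.0) / (200.0 - 0.0)
    print(auto.tolist(), manual.tolist())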
fucciphase/utils/phase_fit.py CHANGED
@@ -1,7 +1,11 @@
+import logging
+
 import numpy as np
 import pandas as pd
 from monotonic_derivative import ensure_monotonic_derivative
 
+logger = logging.getLogger(__name__)
+
 
 def fit_percentages(frames: np.ndarray, percentages: np.ndarray) -> np.ndarray:
     """Fit estimated percentages to function with non-negative derivative."""
@@ -12,7 +16,7 @@ def fit_percentages(frames: np.ndarray, percentages: np.ndarray) -> np.ndarray:
         force_negative_derivative=False,
     )
     # clip to range (0, 100)
-    return np.clip(best_fit, 0.0, 100.0)
+    return np.clip(best_fit, 0.0, 100.0)  # type: ignore[no-any-return]
 
 
 def postprocess_estimated_percentages(
@@ -31,17 +35,17 @@ def postprocess_estimated_percentages(
     frames = track["FRAME"]
     percentages = track[percentage_column]
     if np.all(np.isnan(percentages)):
-        print("WARNING: No percentages to postprocess")
+        logger.warning("No percentages to postprocess")
         return
     try:
         restored_percentages = fit_percentages(frames, percentages)
     except ValueError:
-        print(f"Error in track {index}")
-        print(
-            "Make sure that the spots belong to a unique track,"
-            " i.e., not more than one spot per frame per track."
+        logger.error(
+            "Error in track %s. Make sure that the spots belong to a unique track, "
+            "i.e., not more than one spot per frame per track.\n%s",
+            index,
+            track,
         )
-        print(track)
     df.loc[df[track_id_name] == index, postprocessed_percentage_column] = (
         restored_percentages
     )
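The `logger.error("... %s", index, track)` form defers string interpolation to the logging framework, so the arguments are only formatted if the record is actually emitted:

    import logging

    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.ERROR)
    logger.error("Error in track %s", 42)  # "%s" filled in lazily by logging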
fucciphase/utils/track_postprocessing.py CHANGED
@@ -1,3 +1,5 @@
+import logging
+
 import matplotlib.pyplot as plt
 import numpy as np
 import pandas as pd
@@ -5,6 +7,8 @@ from LineageTree import lineageTree
 from matplotlib import colormaps
 from scipy import signal
 
+logger = logging.getLogger(__name__)
+
 
 def split_track(
     track: pd.DataFrame,
@@ -91,10 +95,13 @@ def split_all_tracks(
     """
     if track_id_name not in track_df.columns:
         raise ValueError(f"{track_id_name} column is missing.")
-    highest_track_idx = track_df[track_id_name].max()
-    highest_track_idx_counter = highest_track_idx
+
+    # Use unique() to handle non-contiguous track IDs correctly
+    track_ids = track_df[track_id_name].unique()
+    highest_track_idx_counter = track_df[track_id_name].max()
+
     # go through all tracks and split if needed
-    for track_idx in range(highest_track_idx):
+    for track_idx in track_ids:
         track = track_df.loc[track_df[track_id_name] == track_idx]
         if len(track) < minimum_track_length:
             continue
@@ -382,7 +389,7 @@ def export_lineage_tree_to_svg(
     This function currently only supports
     the standard FUCCISA sensor.
     """
-    print("Warning: make sure that you updated the spot names using TrackMate actions!")
+    logger.warning("Make sure that you updated the spot names using TrackMate actions!")
     # initialise lineage tree
     lt = lineageTree(trackmate_file, file_type="TrackMate")
     cmap_name = "cool"
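Iterating `range(max_id)` assumed track IDs are contiguous and zero-based: it skipped the highest ID and visited IDs that do not exist. `unique()` visits exactly the tracks present (IDs here are invented):

    import pandas as pd

    df = pd.DataFrame({"TRACK_ID": [7, 7, 42, 42]})
    old_ids = list(range(df["TRACK_ID"].max()))  # 0..41: misses track 42
    new_ids = df["TRACK_ID"].unique().tolist()   # [7, 42]: exactly the real tracks
    print(len(old_ids), new_ids)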
fucciphase/utils/trackmate.py CHANGED
@@ -55,9 +55,26 @@ class TrackMateXML:
         ----------
         xml_path : Union[str, Path]
             Path to the xml file.
+
+        Raises
+        ------
+        FileNotFoundError
+            If the XML file does not exist.
+        ValueError
+            If the XML file is malformed or not a valid TrackMate file.
         """
         # parse tree
-        self._tree: et.ElementTree[et.Element[str]] = et.parse(xml_path)
+        xml_path = Path(xml_path)
+        if not xml_path.exists():
+            raise FileNotFoundError(f"TrackMate XML file not found: {xml_path}")
+
+        try:
+            self._tree: et.ElementTree[et.Element[str]] = et.parse(xml_path)
+        except et.ParseError as e:
+            raise ValueError(
+                f"Failed to parse TrackMate XML file {xml_path}: {e}"
+            ) from None
+
         self._root: et.Element | Any = self._tree.getroot()
 
         # placeholders
fucciphase-0.0.3.dist-info/METADATA → fucciphase-0.0.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fucciphase
-Version: 0.0.3
+Version: 0.0.4
 Summary: Cell cycle analysis plugin.
 Project-URL: homepage, https://github.com/nobias-ht/fucciphase
 Project-URL: repository, https://github.com/nobias-ht/fucciphase
@@ -15,7 +15,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.14
-Requires-Python: >=3.8
+Requires-Python: >=3.10
 Requires-Dist: dtaidistance
 Requires-Dist: lineagetree<1.5.0
 Requires-Dist: matplotlib
fucciphase-0.0.4.dist-info/RECORD ADDED
@@ -0,0 +1,25 @@
+fucciphase/__init__.py,sha256=HkyHcSzLlTlEIz7E3bvuINGBqfnIL0s6oPU0EE_pQCs,545
+fucciphase/__main__.py,sha256=lXUDkaeNRAZep5O7dNLj9RY4EclpPolruAll0FFC_jo,298
+fucciphase/fucci_phase.py,sha256=34EmEaG_jEGvRMBocPRYfBYXSJpko3_nYUgP013Bkjo,8990
+fucciphase/io.py,sha256=5a8Qre5KCT1haoqsB8FELyPKM7hzPM9kcEBGclokSPA,2018
+fucciphase/main_cli.py,sha256=870EiTIxck_vHczeZ0Go9Ia42d9UTgWtHoNfm0Q9BJ0,11036
+fucciphase/phase.py,sha256=wxBJoglCtnLTpZwJ2j1kUeKZMb3y3pqeKakjMSIn1bw,25450
+fucciphase/plot.py,sha256=0bLCJBdfR-I7_qNDurY4iJyeV8D0GqBTvHZO4NSNTlk,24212
+fucciphase/py.typed,sha256=esB4cHc6c07uVkGtqf8at7ttEnprwRxwk8obY8Qumq4,187
+fucciphase/sensor.py,sha256=XVshjhe6ix8FO5xmICLF7ovBCf_JcEkD328tU40-dj0,15172
+fucciphase/tracking_utilities.py,sha256=dJ0q903_aF7bJyARU7wPeRjX6r7b-sHWJVOyXucpMcI,4825
+fucciphase/napari/__init__.py,sha256=At9Shk6HfDf6obtQaM0yKG4NOZVO6YxD2-J1M2ZGm7w,198
+fucciphase/napari/tracks_to_napari.py,sha256=ITqm_aw1uRG-RIfuxoC-zJLCDmrxjduWbiWC69xGzMM,4060
+fucciphase/utils/__init__.py,sha256=YwgK2COtG44QJaXHVPYKu2-Ifm6GIG1ahykvJ4pr-MM,1408
+fucciphase/utils/checks.py,sha256=o4mMGJMIE5wlaf0jtblnTD4JGTfjc5W7ZS4tYDIPlLo,625
+fucciphase/utils/dtw.py,sha256=6RJ5wZ8jDFKSVmcYajsdTJ4tfe3E-wKROEA2rWyGRN4,1638
+fucciphase/utils/normalize.py,sha256=EyVDeMJrDu02XfSwt0jIUecAlTN9v9BAS2KdU5wKk_c,7173
+fucciphase/utils/phase_fit.py,sha256=tiZ7rVkPvFokf7ghhGfkcdr1udjptUU4HmKu53EL3LQ,1835
+fucciphase/utils/simulator.py,sha256=fV-Pj6APms3QbYmiD9TjEYYkTqkIWk0uKDLCpz36HmA,2323
+fucciphase/utils/track_postprocessing.py,sha256=AOm4N4WACzKymX7vz74HWeAIQx556OQ5IS6uHZHgvHw,14500
+fucciphase/utils/trackmate.py,sha256=NeP8vCQEAGiKwz2O0kDmmvJtkxHN6LBtDwkAHsZJrvg,11131
+fucciphase-0.0.4.dist-info/METADATA,sha256=JNqZF53wGiKLnWceUHEYXj1sim9p8TgftwNoRrnWHrk,9019
+fucciphase-0.0.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+fucciphase-0.0.4.dist-info/entry_points.txt,sha256=B77Cm5QnxeQz6DEfqD6n7zDw48-HrlepWPwLbdVITMY,119
+fucciphase-0.0.4.dist-info/licenses/LICENSE,sha256=pQGrOGpOTwikEzkZ8Zc9XLQwbaZ85TMJP-GaWCNZciw,1554
+fucciphase-0.0.4.dist-info/RECORD,,
fucciphase-0.0.3.dist-info/RECORD DELETED
@@ -1,25 +0,0 @@
-fucciphase/__init__.py,sha256=HkyHcSzLlTlEIz7E3bvuINGBqfnIL0s6oPU0EE_pQCs,545
-fucciphase/__main__.py,sha256=lXUDkaeNRAZep5O7dNLj9RY4EclpPolruAll0FFC_jo,298
-fucciphase/fucci_phase.py,sha256=aR50chW5JAs-56R2vezINnMX_QkNT1nwGgqYLVreDVQ,8237
-fucciphase/io.py,sha256=oleF2KmW5uE_cV2PQx5bTka1Aw69kLIO-200ZQpuiAQ,2038
-fucciphase/main_cli.py,sha256=3wh32o91BV8J4309CxhN5tW--MOYgFId8eQWkCjdXg8,8958
-fucciphase/phase.py,sha256=uZNMK7Odd07kNh8SbmsrZpTolEGdRBGE0o2sGbocZhw,17684
-fucciphase/plot.py,sha256=E04VF7ejAUb8uC1pVRGxw7sS9A9_uaO51h6bVvu1aWc,24148
-fucciphase/py.typed,sha256=esB4cHc6c07uVkGtqf8at7ttEnprwRxwk8obY8Qumq4,187
-fucciphase/sensor.py,sha256=LSIVC0MfJncMjovUriAiFRTaEmEnClbjnsxqIgId6FQ,14714
-fucciphase/tracking_utilities.py,sha256=vnjCFGS2vArnCuxBlw7852eN2Xhd-FIo63bVWkHV0Lo,4548
-fucciphase/napari/__init__.py,sha256=At9Shk6HfDf6obtQaM0yKG4NOZVO6YxD2-J1M2ZGm7w,198
-fucciphase/napari/tracks_to_napari.py,sha256=_Dknd1wwXj9VJzo3y8Ry6lgJni9VokfZWVq_z98RlfE,4039
-fucciphase/utils/__init__.py,sha256=YwgK2COtG44QJaXHVPYKu2-Ifm6GIG1ahykvJ4pr-MM,1408
-fucciphase/utils/checks.py,sha256=o4mMGJMIE5wlaf0jtblnTD4JGTfjc5W7ZS4tYDIPlLo,625
-fucciphase/utils/dtw.py,sha256=6RJ5wZ8jDFKSVmcYajsdTJ4tfe3E-wKROEA2rWyGRN4,1638
-fucciphase/utils/normalize.py,sha256=xaZ_1Mgw6fd3RouuYkfwRGKr4DimSwmVq-ghRcTxFpc,5964
-fucciphase/utils/phase_fit.py,sha256=Ht_dEyuLYonv6is9qQ-Xd95pQR7IR-8C8mv0ckDcp4E,1743
-fucciphase/utils/simulator.py,sha256=fV-Pj6APms3QbYmiD9TjEYYkTqkIWk0uKDLCpz36HmA,2323
-fucciphase/utils/track_postprocessing.py,sha256=lv7TLBaGF4lOqlJUsl80rt1Igue2lM7WfmxIZXKwk3E,14388
-fucciphase/utils/trackmate.py,sha256=rbgARawfefSmvaRjfzmYF_XWqUPDZnBQCgU1s9Ev_00,10612
-fucciphase-0.0.3.dist-info/METADATA,sha256=YeQuzOiDo64ltvCliaWgk6H_OJJ1aCQnYufl4U1cSvQ,9018
-fucciphase-0.0.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-fucciphase-0.0.3.dist-info/entry_points.txt,sha256=B77Cm5QnxeQz6DEfqD6n7zDw48-HrlepWPwLbdVITMY,119
-fucciphase-0.0.3.dist-info/licenses/LICENSE,sha256=pQGrOGpOTwikEzkZ8Zc9XLQwbaZ85TMJP-GaWCNZciw,1554
-fucciphase-0.0.3.dist-info/RECORD,,