fibphot 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1163 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Sequence
4
+ from dataclasses import dataclass, replace
5
+ from typing import Any, Literal
6
+
7
+ import numpy as np
8
+
9
+ from ..stages.smooth import (
10
+ KalmanModel,
11
+ PadMode,
12
+ WindowType,
13
+ kalman_smooth_1d,
14
+ savgol_smooth_1d,
15
+ smooth_1d,
16
+ )
17
+ from ..state import PhotometryState
18
+ from ..types import FloatArray
19
+ from .report import AnalysisResult, AnalysisWindow
20
+
21
# String-literal option sets used throughout this module.
PeakKind = Literal["peak", "valley"]            # polarity of one detected event
SelectKind = Literal["peak", "valley", "both"]  # what PeakAnalysis should look for
AreaRegion = Literal["bases", "fwhm"]           # integration region for event area
BaselineMode = Literal["line", "flat"]          # local baseline shape under an event

FitModelName = Literal["gaussian", "lorentzian", "alpha"]  # per-peak parametric models
SmoothMethod = Literal["moving", "savgol", "kalman"]       # smoothing used for detection
EdgeMethod = Literal["prominence", "fraction", "sigma"]    # how event edges are chosen
29
+
30
+
31
+ def _as_float_1d(x: FloatArray) -> np.ndarray:
32
+ x = np.asarray(x, dtype=float)
33
+ if x.ndim != 1:
34
+ raise ValueError("Expected a 1D array.")
35
+ return x
36
+
37
+
38
+ def _mad_sigma(x: FloatArray) -> float:
39
+ x = np.asarray(x, dtype=float)
40
+ x = x[np.isfinite(x)]
41
+ if x.size == 0:
42
+ return float("nan")
43
+ med = float(np.median(x))
44
+ mad = float(np.median(np.abs(x - med)))
45
+ return 1.4826 * mad
46
+
47
+
48
+ def _window_to_slice(
49
+ state: PhotometryState, window: AnalysisWindow | None
50
+ ) -> slice:
51
+ if window is None:
52
+ return slice(0, state.n_samples)
53
+
54
+ if window.ref == "samples":
55
+ a = int(window.start)
56
+ b = int(window.end)
57
+ if a < 0 or b < 0:
58
+ raise ValueError("Sample windows must be non-negative.")
59
+ if b <= a:
60
+ raise ValueError("Sample window must satisfy end > start.")
61
+ return slice(max(0, a), min(state.n_samples, b))
62
+
63
+ # seconds
64
+ t0 = float(window.start)
65
+ t1 = float(window.end)
66
+ if t1 <= t0:
67
+ raise ValueError("Seconds window must satisfy end > start.")
68
+ t = state.time_seconds
69
+ m = np.isfinite(t) & (t >= t0) & (t <= t1)
70
+ if not np.any(m):
71
+ return slice(0, 0)
72
+ idx = np.where(m)[0]
73
+ return slice(int(idx[0]), int(idx[-1]) + 1)
74
+
75
+
76
+ def _fs_from_time(t: np.ndarray) -> float:
77
+ t = np.asarray(t, dtype=float)
78
+ m = np.isfinite(t)
79
+ if np.sum(m) < 3:
80
+ return float("nan")
81
+ dt = np.diff(t[m])
82
+ if dt.size == 0:
83
+ return float("nan")
84
+ med = float(np.median(dt))
85
+ if med <= 0:
86
+ return float("nan")
87
+ return 1.0 / med
88
+
89
+
90
+ def _interp_x_at_positions(x: np.ndarray, positions: np.ndarray) -> np.ndarray:
91
+ idx = np.arange(x.shape[0], dtype=float)
92
+ return np.interp(positions, idx, x)
93
+
94
+
95
+ def _baseline_flat(y: np.ndarray, left_i: int, right_i: int) -> float:
96
+ return float(0.5 * (y[left_i] + y[right_i]))
97
+
98
+
99
+ def _baseline_line(
100
+ x: np.ndarray, y: np.ndarray, left_i: int, right_i: int
101
+ ) -> np.ndarray:
102
+ x0 = float(x[left_i])
103
+ x1 = float(x[right_i])
104
+ if np.isclose(x1, x0):
105
+ return np.full_like(x, fill_value=float(y[left_i]), dtype=float)
106
+ m = (float(y[right_i]) - float(y[left_i])) / (x1 - x0)
107
+ c = float(y[left_i]) - m * x0
108
+ return m * x + c
109
+
110
+
111
+ def _trapz(y: np.ndarray, x: np.ndarray) -> float:
112
+ if hasattr(np, "trapezoid"):
113
+ return float(np.trapezoid(y, x)) # type: ignore[attr-defined]
114
+ return float(np.trapz(y, x)) # type: ignore[attr-defined]
115
+
116
+
117
+ def _r2(y: np.ndarray, yhat: np.ndarray) -> float:
118
+ y = np.asarray(y, dtype=float)
119
+ yhat = np.asarray(yhat, dtype=float)
120
+ ss_res = float(np.sum((y - yhat) ** 2))
121
+ ss_tot = float(np.sum((y - float(np.mean(y))) ** 2))
122
+ if ss_tot < 1e-20:
123
+ return float("nan")
124
+ return 1.0 - ss_res / ss_tot
125
+
126
+
127
+ def _rmse(y: np.ndarray, yhat: np.ndarray) -> float:
128
+ y = np.asarray(y, dtype=float)
129
+ yhat = np.asarray(yhat, dtype=float)
130
+ return float(np.sqrt(np.mean((y - yhat) ** 2)))
131
+
132
+
133
def gaussian(
    x: np.ndarray, amp: float, mu: float, sigma: float, offset: float
) -> np.ndarray:
    """Gaussian bump on a constant offset; sigma is floored at 1e-12."""
    s = max(float(sigma), 1e-12)
    z = (x - mu) / s
    return offset + amp * np.exp(-0.5 * z**2)
138
+
139
+
140
def lorentzian(
    x: np.ndarray, amp: float, x0: float, gamma: float, offset: float
) -> np.ndarray:
    """Lorentzian bump on a constant offset; gamma is floored at 1e-12."""
    g = max(float(gamma), 1e-12)
    g2 = g * g
    return offset + amp * g2 / ((x - x0) ** 2 + g2)
145
+
146
+
147
def alpha_transient(
    x: np.ndarray, amp: float, t0: float, tau: float, offset: float
) -> np.ndarray:
    """Alpha-function transient: flat at *offset* before t0, then
    ``offset + amp * dt * exp(1 - dt)`` with dt = (x - t0) / tau.

    The extremum (offset + amp) occurs one tau after the onset t0.
    tau is floored at 1e-12.
    """
    tau = max(float(tau), 1e-12)
    result = np.full_like(x, fill_value=offset, dtype=float)
    after = x >= t0
    dt = (x[after] - t0) / tau
    result[after] = offset + amp * dt * np.exp(1.0 - dt)
    return result
156
+
157
+
158
# Dispatch table from a FitModelName to its model function.  All three
# callables share the signature (x, amp, center, width-like, offset), which is
# what lets _fit_model_to_peak treat them uniformly.
_MODEL_FUNCS: dict[FitModelName, Any] = {
    "gaussian": gaussian,
    "lorentzian": lorentzian,
    "alpha": alpha_transient,
}
163
+
164
+
165
@dataclass(frozen=True, slots=True)
class PeakFit:
    """Outcome of fitting one parametric model to a single detected event."""

    model: FitModelName        # which model function was fitted
    params: tuple[float, ...]  # optimal parameters; empty tuple when the fit failed
    r2: float                  # coefficient of determination (NaN on failure)
    rmse: float                # root-mean-square error (NaN on failure)
    success: bool              # True when curve_fit converged
    message: str | None = None  # failure reason, if any
173
+
174
+
175
@dataclass(frozen=True, slots=True)
class PeakEvent:
    """One detected peak or valley, with measurements taken on the raw trace."""

    kind: PeakKind  # "peak" or "valley"
    index: int      # sample index within the analysis window
    x: float        # x-axis value (time) at the extremum
    y: float        # raw signal value at the extremum

    prominence: float | None = None     # scipy prominence (detection space)
    left_base_index: int | None = None  # chosen left event edge (sample index)
    right_base_index: int | None = None  # chosen right event edge (sample index)

    height: float | None = None   # baseline-corrected amplitude at the extremum
    fwhm: float | None = None     # width at rel_height, in x-units
    left_ip: float | None = None  # left width-interpolation position, x-units
    right_ip: float | None = None  # right width-interpolation position, x-units

    # asymmetry helpers (derived from half-height crossings)
    rise_s: float | None = None
    decay_s: float | None = None

    area: float | None = None   # baseline-corrected area over the event region
    fit: PeakFit | None = None  # optional parametric fit result
197
+
198
+
199
def _fit_model_to_peak(
    x: np.ndarray,
    y: np.ndarray,
    peak: PeakEvent,
    *,
    model: FitModelName,
    window_s: float | None,
    window_samples: int | None,
    maxfev: int,
) -> PeakFit:
    """Fit one parametric model to the data around a detected event.

    Parameters
    ----------
    x, y:
        Full x-axis and raw signal arrays (same length).
    peak:
        The detected event; ``peak.index``/``peak.x``/``peak.y`` seed the fit.
    model:
        Which entry of ``_MODEL_FUNCS`` to fit.
    window_s, window_samples:
        Fit-window size; explicit samples win over seconds, and with neither a
        default of +/-50 samples around the peak is used.
    maxfev:
        Passed through to ``scipy.optimize.curve_fit``.

    Returns
    -------
    PeakFit
        Always returns (never raises): failures are reported via
        ``success=False`` and ``message``.
    """
    import scipy.optimize

    n = x.shape[0]
    i0 = peak.index

    # Select the fit window: explicit samples > explicit seconds > +/-50 samples.
    if window_samples is not None:
        half = max(int(window_samples // 2), 1)
        lo = max(0, i0 - half)
        hi = min(n, i0 + half + 1)
    elif window_s is not None:
        half_x = float(window_s) / 2.0
        lo = int(np.searchsorted(x, peak.x - half_x, side="left"))
        hi = int(np.searchsorted(x, peak.x + half_x, side="right"))
        lo, hi = max(0, lo), min(n, hi)
    else:
        lo = max(0, i0 - 50)
        hi = min(n, i0 + 51)

    xf = x[lo:hi]
    yf = y[lo:hi]
    if xf.size < 6:
        return PeakFit(
            model=model,
            params=(),
            r2=float("nan"),
            rmse=float("nan"),
            success=False,
            message="Not enough points.",
        )

    func = _MODEL_FUNCS[model]

    # Initial guesses: offset from the window median, amplitude from the peak
    # sample, center/onset at the peak x, width-like parameter ~3 samples.
    offset0 = float(np.median(yf))
    amp0 = float(peak.y - offset0)
    mu0 = float(peak.x)
    dx = float(np.median(np.diff(xf))) if xf.size > 2 else 1.0
    width0 = 3.0 * dx  # sigma (gaussian) / gamma (lorentzian) / tau (alpha)

    # All three models share the parameter layout
    # (amp, center-or-onset, width-like, offset), so p0 and bounds are the
    # same for every model — the previous per-model branches were identical.
    p0 = (amp0, mu0, width0, offset0)
    bounds = (
        (-np.inf, float(xf.min()), 1e-12, -np.inf),
        (np.inf, float(xf.max()), np.inf, np.inf),
    )

    try:
        popt, _pcov = scipy.optimize.curve_fit(
            f=func,
            xdata=xf,
            ydata=yf,
            p0=p0,
            bounds=bounds,
            maxfev=maxfev,
        )
        yhat = func(xf, *popt)
        return PeakFit(
            model=model,
            params=tuple(float(v) for v in popt),
            r2=_r2(yf, yhat),
            rmse=_rmse(yf, yhat),
            success=True,
        )
    except Exception as exc:  # noqa: BLE001  -- any fit failure becomes a report
        return PeakFit(
            model=model,
            params=(),
            r2=float("nan"),
            rmse=float("nan"),
            success=False,
            message=str(exc),
        )
297
+
298
+
299
+ def _half_height_times(
300
+ x: np.ndarray,
301
+ y_corr: np.ndarray,
302
+ peak_i: int,
303
+ *,
304
+ sign: float,
305
+ ) -> tuple[float | None, float | None]:
306
+ """
307
+ Estimate rise/decay durations from half-height crossings.
308
+
309
+ y_corr: baseline-corrected local segment (baseline ~ 0).
310
+ sign: +1 for peaks, -1 for valleys (operate in "upward" space).
311
+ """
312
+ z = sign * y_corr
313
+ if not np.isfinite(z[peak_i]):
314
+ return None, None
315
+ h = float(z[peak_i])
316
+ if h <= 0:
317
+ return None, None
318
+ half = 0.5 * h
319
+
320
+ left = None
321
+ for i in range(peak_i, 0, -1):
322
+ if np.isfinite(z[i]) and z[i] <= half:
323
+ left = i
324
+ break
325
+
326
+ right = None
327
+ for i in range(peak_i, z.size):
328
+ if np.isfinite(z[i]) and z[i] <= half:
329
+ right = i
330
+ break
331
+
332
+ if left is None or right is None:
333
+ return None, None
334
+
335
+ rise = float(x[peak_i] - x[left])
336
+ decay = float(x[right] - x[peak_i])
337
+ return rise, decay
338
+
339
+
340
+ def _edge_indices_from_threshold(
341
+ z: np.ndarray,
342
+ peak_i: int,
343
+ thr: float,
344
+ ) -> tuple[int | None, int | None]:
345
+ """
346
+ Find left/right indices where z drops to <= thr.
347
+ z should be in "upward" space (peaks positive).
348
+ """
349
+ if not np.isfinite(thr):
350
+ return None, None
351
+
352
+ # guard: if thr is above peak height, it collapses to the peak itself
353
+ h = float(z[peak_i]) if np.isfinite(z[peak_i]) else float("nan")
354
+ if not np.isfinite(h) or thr >= h:
355
+ return None, None
356
+
357
+ li = None
358
+ for i in range(peak_i, -1, -1):
359
+ if np.isfinite(z[i]) and z[i] <= thr:
360
+ li = i
361
+ break
362
+
363
+ ri = None
364
+ for i in range(peak_i, z.size):
365
+ if np.isfinite(z[i]) and z[i] <= thr:
366
+ ri = i
367
+ break
368
+
369
+ return li, ri
370
+
371
+
372
@dataclass(frozen=True, slots=True)
class PeakAnalysis:
    """
    Peak/valley detection + measurements, packaged as an analysis object.

    - Detection can be done on a smoothed copy (recommended).
    - Measurements (area/height) are taken on the original unsmoothed trace.
    - Returns an AnalysisResult suitable for PhotometryReport.
    """

    signal: str  # channel name looked up via state.idx()
    kind: SelectKind = "peak"
    window: AnalysisWindow | None = None

    # Robustify detection:
    smooth_for_detection: bool = True
    smooth_method: SmoothMethod = "moving"

    # Moving-window smoothing options
    smooth_window_len: int = 25
    smooth_window: WindowType = "flat"
    smooth_pad_mode: PadMode = "reflect"
    smooth_match_edges: bool = True

    # SavGol options
    savgol_polyorder: int = 3
    savgol_mode: Literal["interp", "mirror", "nearest", "constant", "wrap"] = (
        "interp"
    )

    # Kalman options
    kalman_model: KalmanModel = "local_level"
    kalman_r: float | Literal["auto"] = "auto"
    kalman_q: float | None = None
    kalman_q_scale: float = 1e-3

    # scipy.signal.find_peaks parameters (in *signal units* / *samples*)
    height: float | tuple[float, float] | None = None
    prominence: float | tuple[float, float] | None = None
    threshold: float | tuple[float, float] | None = None
    wlen: int | None = None
    plateau_size: int | tuple[int, int] | None = None

    # convenience parameters in seconds (converted using fs)
    distance_s: float | None = 0.25
    width_s: float | None = 0.8

    # if explicit sample-based distance/width are set, they override *_s
    distance: int | None = None
    width: float | tuple[float, float] | None = None

    # auto thresholds based on MAD(y_det) if height/prominence are None
    auto_height_sigmas: float = 1.0
    auto_prominence_sigmas: float = 2.0

    # measurement choices
    rel_height: float = 0.5
    baseline_mode: BaselineMode = "line"
    area_region: AreaRegion = "bases"

    # edge selection (helps long decay peaks)
    edge_method: EdgeMethod = "prominence"
    edge_fraction: float = 0.10  # used when edge_method="fraction"
    edge_sigmas: float = 1.0  # used when edge_method="sigma"

    # optional per-peak parametric fit (helpful for asymmetric peaks)
    fit_model: FitModelName | None = "alpha"
    fit_window_s: float | None = 2.0
    fit_window_samples: int | None = None
    fit_maxfev: int = 5000

    def __call__(self, state: PhotometryState) -> AnalysisResult:
        """Run detection + measurement on *state* and return an AnalysisResult.

        Detection runs on an (optionally smoothed) copy of the windowed trace;
        all reported measurements are taken on the raw windowed trace.
        """
        import scipy.signal

        i_sig = state.idx(self.signal)
        t_full = _as_float_1d(state.time_seconds)
        y_raw_full = _as_float_1d(state.signals[i_sig])

        # Restrict to the requested analysis window.
        sl = _window_to_slice(state, self.window)
        t = t_full[sl]
        y_raw = y_raw_full[sl]

        if t.size < 4:
            # Too little data to detect anything; return an empty result.
            return AnalysisResult(
                name="peaks",
                channel=self.signal,
                window=self.window,
                params=self._params(),
                metrics={"n_events": 0.0},
                arrays={},
                notes="Empty/too-small window.",
            )

        # Sampling rate from the time vector, falling back to the state's own.
        fs = _fs_from_time(t)
        if not np.isfinite(fs) or fs <= 0:
            fs = float(state.sampling_rate)

        # detection signal (optionally smoothed)
        y_det = y_raw.copy()
        if self.smooth_for_detection:
            if self.smooth_method == "moving":
                y_det = smooth_1d(
                    y_det,
                    window_len=self.smooth_window_len,
                    window=self.smooth_window,
                    pad_mode=self.smooth_pad_mode,
                    match_edges=self.smooth_match_edges,
                )
            elif self.smooth_method == "savgol":
                y_det = savgol_smooth_1d(
                    y_det,
                    window_len=self.smooth_window_len,
                    polyorder=self.savgol_polyorder,
                    mode=self.savgol_mode,
                )
            elif self.smooth_method == "kalman":
                y_det = kalman_smooth_1d(
                    y_det,
                    model=self.kalman_model,
                    r=self.kalman_r,
                    q=self.kalman_q,
                    q_scale=self.kalman_q_scale,
                )
            else:
                raise ValueError(
                    f"Unknown smooth_method: {self.smooth_method!r}"
                )

        # robust sigma estimate for auto thresholds and optional edge_method="sigma"
        sigma = _mad_sigma(y_det - np.nanmedian(y_det))

        # auto thresholds on detection signal
        height = self.height
        prominence = self.prominence
        if height is None and np.isfinite(sigma):
            height = float(self.auto_height_sigmas) * float(sigma)
        if prominence is None and np.isfinite(sigma):
            prominence = float(self.auto_prominence_sigmas) * float(sigma)

        # convert seconds -> samples for find_peaks
        distance = self.distance
        width = self.width

        if distance is None and self.distance_s is not None:
            distance = int(max(1, round(float(self.distance_s) * fs)))

        if width is None and self.width_s is not None:
            width = float(max(1.0, round(float(self.width_s) * fs)))

        base_kwargs: dict[str, Any] = {
            "height": height,
            "prominence": prominence,
            "threshold": self.threshold,
            "distance": distance,
            "width": width,
            "wlen": self.wlen,
            "plateau_size": self.plateau_size,
        }
        # find_peaks treats missing kwargs as "no constraint"; drop the Nones.
        base_kwargs = {k: v for k, v in base_kwargs.items() if v is not None}

        events: list[PeakEvent] = []

        def detect_one(kind: PeakKind) -> None:
            # Detect one polarity; valleys are found by negating the trace.
            if kind == "peak":
                y_work = y_det
                sign = 1.0
            else:
                y_work = -y_det
                sign = -1.0

            finite = np.isfinite(y_work)
            if np.sum(finite) < 5:
                return

            # interpolate NaNs for scipy
            yw = y_work.copy()
            if not np.all(finite):
                xi = np.arange(yw.size, dtype=float)
                yw[~finite] = np.interp(xi[~finite], xi[finite], yw[finite])

            idx, _props = scipy.signal.find_peaks(yw, **base_kwargs)
            if idx.size == 0:
                return

            # prominence bases (in detection space)
            prom, left_bases0, right_bases0 = scipy.signal.peak_prominences(
                yw, idx, wlen=self.wlen
            )

            # widths at rel_height (FWHM if rel_height=0.5)
            widths, width_heights, left_ips, right_ips = (
                scipy.signal.peak_widths(
                    yw, idx, rel_height=self.rel_height, wlen=self.wlen
                )
            )

            # convert fractional sample positions to x-units
            left_ip_x = _interp_x_at_positions(t, left_ips.astype(float))
            right_ip_x = _interp_x_at_positions(t, right_ips.astype(float))

            for j, i0 in enumerate(idx.tolist()):
                lb0 = int(left_bases0[j])
                rb0 = int(right_bases0[j])

                # First-pass baseline from prominence bases
                if self.baseline_mode == "flat":
                    b0 = _baseline_flat(y_raw, lb0, rb0)
                    base_line0 = None
                    ycorr0 = y_raw - b0
                else:
                    base_line0 = _baseline_line(t, y_raw, lb0, rb0)
                    b0 = float(base_line0[i0])
                    ycorr0 = y_raw - base_line0

                height0 = float(y_raw[i0] - b0)

                # Choose final "bases" (event edges)
                lb = lb0
                rb = rb0

                if self.edge_method != "prominence":
                    # operate in upward space
                    z = sign * ycorr0
                    zheight = float(sign * height0)

                    if self.edge_method == "fraction":
                        frac = float(self.edge_fraction)
                        if not (0.0 < frac < 1.0):
                            raise ValueError("edge_fraction must be in (0, 1).")
                        thr = frac * zheight
                    else:  # sigma
                        thr = (
                            float(self.edge_sigmas) * float(sigma)
                            if np.isfinite(sigma)
                            else float("nan")
                        )

                    li, ri = _edge_indices_from_threshold(z, i0, thr)
                    if li is not None:
                        lb = int(li)
                    if ri is not None:
                        rb = int(ri)

                # Second-pass baseline using chosen lb/rb
                b: float | None = None
                if self.baseline_mode == "flat":
                    b = _baseline_flat(y_raw, lb, rb)
                    base_line = None
                    base_at_peak = float(b)
                else:
                    base_line = _baseline_line(t, y_raw, lb, rb)
                    base_at_peak = float(base_line[i0])

                height_raw = float(y_raw[i0] - base_at_peak)

                # region for area
                if self.area_region == "fwhm":
                    lo_x = float(left_ip_x[j])
                    hi_x = float(right_ip_x[j])
                    lo = int(np.searchsorted(t, lo_x, side="left"))
                    hi = int(np.searchsorted(t, hi_x, side="right"))
                    lo, hi = max(0, lo), min(t.size, hi)
                else:
                    lo = min(lb, rb)
                    hi = max(lb, rb) + 1

                # baseline-corrected area
                if hi - lo >= 2:
                    xs = t[lo:hi]
                    ys = y_raw[lo:hi]
                    if self.baseline_mode == "flat":
                        assert b is not None
                        ycorr = ys - b
                    else:
                        assert base_line is not None
                        ycorr = ys - base_line[lo:hi]
                    area = _trapz(ycorr, xs)
                else:
                    area = float("nan")

                # FWHM in x-units (from detection)
                fwhm = float(right_ip_x[j] - left_ip_x[j])

                # asymmetry: compute on local baseline-corrected segment (use lb/rb)
                lo2 = min(lb, rb)
                hi2 = max(lb, rb) + 1
                rise_s = decay_s = None
                if hi2 - lo2 >= 5:
                    xs2 = t[lo2:hi2]
                    ys2 = y_raw[lo2:hi2]
                    if self.baseline_mode == "flat":
                        assert b is not None
                        ycorr2 = ys2 - b
                    else:
                        assert base_line is not None
                        ycorr2 = ys2 - base_line[lo2:hi2]
                    peak_i_local = int(i0 - lo2)
                    r, d = _half_height_times(
                        xs2,
                        ycorr2,
                        peak_i_local,
                        sign=1.0 if kind == "peak" else -1.0,
                    )
                    rise_s, decay_s = r, d

                ev = PeakEvent(
                    kind=kind,
                    index=int(i0),
                    x=float(t[i0]),
                    y=float(y_raw[i0]),
                    prominence=float(prom[j]),
                    left_base_index=int(lb),
                    right_base_index=int(rb),
                    height=float(height_raw),
                    fwhm=float(fwhm),
                    left_ip=float(left_ip_x[j]),
                    right_ip=float(right_ip_x[j]),
                    rise_s=rise_s,
                    decay_s=decay_s,
                    area=float(area),
                    fit=None,
                )

                if self.fit_model is not None:
                    fit = _fit_model_to_peak(
                        t,
                        y_raw,
                        ev,
                        model=self.fit_model,
                        window_s=self.fit_window_s,
                        window_samples=self.fit_window_samples,
                        maxfev=self.fit_maxfev,
                    )
                    ev = replace(ev, fit=fit)

                events.append(ev)

        if self.kind in ("peak", "both"):
            detect_one("peak")
        if self.kind in ("valley", "both"):
            detect_one("valley")

        # Chronological order regardless of detection order (peaks then valleys).
        events.sort(key=lambda e: e.index)

        # Report indices relative to the full trace, not the window.
        arrays = _events_to_arrays(events, offset=int(sl.start))
        metrics = _events_to_metrics(events)

        notes = (
            f"detection: {'smoothed' if self.smooth_for_detection else 'raw'} "
            f"({self.smooth_method}) ; measurements: raw ; "
            f"edges: {self.edge_method}"
        )

        return AnalysisResult(
            name="peaks",
            channel=self.signal,
            window=self.window,
            params=self._params(),
            metrics=metrics,
            arrays=arrays,
            notes=notes,
        )

    def _params(self) -> dict[str, Any]:
        """Serialize the configuration for AnalysisResult.params (plain dict)."""
        return {
            "signal": self.signal,
            "kind": self.kind,
            "window": None
            if self.window is None
            else {
                "start": self.window.start,
                "end": self.window.end,
                "ref": self.window.ref,
                "label": self.window.label,
            },
            "smooth_for_detection": self.smooth_for_detection,
            "smooth_method": self.smooth_method,
            "smooth_window_len": self.smooth_window_len,
            "smooth_window": self.smooth_window,
            "smooth_pad_mode": self.smooth_pad_mode,
            "smooth_match_edges": self.smooth_match_edges,
            "savgol_polyorder": self.savgol_polyorder,
            "savgol_mode": self.savgol_mode,
            "kalman_model": self.kalman_model,
            "kalman_r": self.kalman_r,
            "kalman_q": self.kalman_q,
            "kalman_q_scale": self.kalman_q_scale,
            "distance_s": self.distance_s,
            "width_s": self.width_s,
            "rel_height": self.rel_height,
            "baseline_mode": self.baseline_mode,
            "area_region": self.area_region,
            "auto_height_sigmas": self.auto_height_sigmas,
            "auto_prominence_sigmas": self.auto_prominence_sigmas,
            "edge_method": self.edge_method,
            "edge_fraction": self.edge_fraction,
            "edge_sigmas": self.edge_sigmas,
            "fit_model": self.fit_model,
            "fit_window_s": self.fit_window_s,
            "fit_window_samples": self.fit_window_samples,
        }
773
+
774
+
775
+ def _events_to_arrays(
776
+ events: Sequence[PeakEvent], offset: int = 0
777
+ ) -> dict[str, np.ndarray]:
778
+ if len(events) == 0:
779
+ return {}
780
+
781
+ kind = np.array([e.kind for e in events], dtype="U6")
782
+ index = np.array([e.index + offset for e in events], dtype=int)
783
+ x = np.array([e.x for e in events], dtype=float)
784
+ y = np.array([e.y for e in events], dtype=float)
785
+
786
+ prominence = np.array(
787
+ [
788
+ np.nan if e.prominence is None else float(e.prominence)
789
+ for e in events
790
+ ],
791
+ dtype=float,
792
+ )
793
+ left_base = np.array(
794
+ [
795
+ (
796
+ -1
797
+ if e.left_base_index is None
798
+ else int(e.left_base_index) + offset
799
+ )
800
+ for e in events
801
+ ],
802
+ dtype=int,
803
+ )
804
+ right_base = np.array(
805
+ [
806
+ (
807
+ -1
808
+ if e.right_base_index is None
809
+ else int(e.right_base_index) + offset
810
+ )
811
+ for e in events
812
+ ],
813
+ dtype=int,
814
+ )
815
+
816
+ height = np.array(
817
+ [np.nan if e.height is None else float(e.height) for e in events],
818
+ dtype=float,
819
+ )
820
+ fwhm = np.array(
821
+ [np.nan if e.fwhm is None else float(e.fwhm) for e in events],
822
+ dtype=float,
823
+ )
824
+ left_ip = np.array(
825
+ [np.nan if e.left_ip is None else float(e.left_ip) for e in events],
826
+ dtype=float,
827
+ )
828
+ right_ip = np.array(
829
+ [np.nan if e.right_ip is None else float(e.right_ip) for e in events],
830
+ dtype=float,
831
+ )
832
+ rise_s = np.array(
833
+ [np.nan if e.rise_s is None else float(e.rise_s) for e in events],
834
+ dtype=float,
835
+ )
836
+ decay_s = np.array(
837
+ [np.nan if e.decay_s is None else float(e.decay_s) for e in events],
838
+ dtype=float,
839
+ )
840
+ area = np.array(
841
+ [np.nan if e.area is None else float(e.area) for e in events],
842
+ dtype=float,
843
+ )
844
+
845
+ fit_success = np.array(
846
+ [False if e.fit is None else bool(e.fit.success) for e in events],
847
+ dtype=bool,
848
+ )
849
+ fit_r2 = np.array(
850
+ [np.nan if e.fit is None else float(e.fit.r2) for e in events],
851
+ dtype=float,
852
+ )
853
+ fit_rmse = np.array(
854
+ [np.nan if e.fit is None else float(e.fit.rmse) for e in events],
855
+ dtype=float,
856
+ )
857
+ fit_model = np.array(
858
+ ["" if e.fit is None else str(e.fit.model) for e in events], dtype="U12"
859
+ )
860
+
861
+ # force 1D object array for params (avoids pandas “ndim > 1” error)
862
+ fit_params = np.empty(len(events), dtype=object)
863
+ for i, e in enumerate(events):
864
+ fit_params[i] = () if e.fit is None else tuple(e.fit.params)
865
+
866
+ return {
867
+ "kind": kind,
868
+ "index": index,
869
+ "x": x,
870
+ "y": y,
871
+ "prominence": prominence,
872
+ "left_base_index": left_base,
873
+ "right_base_index": right_base,
874
+ "height": height,
875
+ "fwhm": fwhm,
876
+ "left_ip": left_ip,
877
+ "right_ip": right_ip,
878
+ "rise_s": rise_s,
879
+ "decay_s": decay_s,
880
+ "area": area,
881
+ "fit_success": fit_success,
882
+ "fit_r2": fit_r2,
883
+ "fit_rmse": fit_rmse,
884
+ "fit_model": fit_model,
885
+ "fit_params": fit_params,
886
+ }
887
+
888
+
889
+ def _events_to_metrics(events: Sequence[PeakEvent]) -> dict[str, float]:
890
+ if len(events) == 0:
891
+ return {"n_events": 0.0}
892
+
893
+ heights = np.array(
894
+ [np.nan if e.height is None else float(e.height) for e in events],
895
+ dtype=float,
896
+ )
897
+ areas = np.array(
898
+ [np.nan if e.area is None else float(e.area) for e in events],
899
+ dtype=float,
900
+ )
901
+ fwhm = np.array(
902
+ [np.nan if e.fwhm is None else float(e.fwhm) for e in events],
903
+ dtype=float,
904
+ )
905
+
906
+ return {
907
+ "n_events": float(len(events)),
908
+ "mean_height": float(np.nanmean(heights))
909
+ if np.any(np.isfinite(heights))
910
+ else float("nan"),
911
+ "mean_area": float(np.nanmean(areas))
912
+ if np.any(np.isfinite(areas))
913
+ else float("nan"),
914
+ "mean_fwhm": float(np.nanmean(fwhm))
915
+ if np.any(np.isfinite(fwhm))
916
+ else float("nan"),
917
+ }
918
+
919
+
920
def peak_result_to_dataframe(res: AnalysisResult):
    """
    Convert AnalysisResult.arrays to a pandas DataFrame.

    Robust to:
    - object columns (e.g. fit_params tuples)
    - accidental multi-dimensional arrays (converted row-wise to tuples/objects)
    """
    import pandas as pd

    arrays = res.arrays
    if not arrays:
        return pd.DataFrame()

    columns: dict[str, Any] = {}
    expected_len: int | None = None

    for name, raw in arrays.items():
        col = raw
        if isinstance(col, np.ndarray):
            if col.ndim == 0:
                # promote a scalar to a length-1 column
                col = np.array([col.item()])
            elif col.ndim == 2:
                # each row becomes one tuple -> 1D object column
                col = [tuple(row.tolist()) for row in col]
            elif col.ndim > 2:
                # higher dims: keep each leading-axis entry as an object
                col = [col[i] for i in range(col.shape[0])]

        if expected_len is None:
            expected_len = len(col)
        elif len(col) != expected_len:
            raise ValueError(
                f"Column {name!r} has length {len(col)} but expected {expected_len}."
            )

        columns[name] = col

    return pd.DataFrame(columns)
962
+
963
+
964
def plot_peak_result(
    state: PhotometryState,
    res: AnalysisResult,
    *,
    label: str | None = None,
    show_window: bool = True,
    show_bases: bool = False,
    show_fwhm: bool = True,
    annotate: bool = False,
    show_area: bool = False,
    area_event: int | None = None,  # which event index to highlight
    area_region_override: AreaRegion | None = None,  # "bases" or "fwhm"
    area_alpha: float = 0.25,
    ax=None,
):
    """Plot the analyzed trace with peak/valley markers and optional overlays.

    Overlays (all opt-in except FWHM guides): the analysis window span, event
    base markers, FWHM guide lines, and — for one chosen event — the shaded
    integration region used for its "area" measurement.  Returns (fig, ax);
    a new figure is created when *ax* is None.
    """
    import matplotlib.pyplot as plt

    sig = res.channel
    t = np.asarray(state.time_seconds, dtype=float)
    y = np.asarray(state.channel(sig), dtype=float)

    if ax is None:
        fig, ax = plt.subplots(figsize=(6, 3), dpi=150)
    else:
        fig = ax.figure

    ax.plot(t, y, linewidth=1.2, alpha=0.9, label=label or sig)

    # Shade the analysis window (only meaningful for "seconds" windows).
    if show_window and res.window is not None and res.window.ref == "seconds":
        ax.axvspan(
            float(res.window.start),
            float(res.window.end),
            alpha=0.06,
            color="gray",
        )

    a = res.arrays
    if not a:
        # No detected events to draw.
        ax.legend(frameon=False, fontsize=8)
        return fig, ax

    kinds = a.get("kind")
    xs = a.get("x")
    ys = a.get("y")
    if kinds is None or xs is None or ys is None:
        ax.legend(frameon=False, fontsize=8)
        return fig, ax

    # markers
    m_peak = kinds == "peak"
    m_valley = kinds == "valley"

    if np.any(m_peak):
        ax.scatter(xs[m_peak], ys[m_peak], s=18, linewidth=0.0, label="peaks")
    if np.any(m_valley):
        ax.scatter(
            xs[m_valley], ys[m_valley], s=18, linewidth=0.0, label="valleys"
        )

    # bases markers (negative index == missing sentinel, skipped)
    if show_bases:
        lb = a.get("left_base_index")
        rb = a.get("right_base_index")
        if lb is not None and rb is not None:
            for i in range(len(xs)):
                lbi = int(lb[i])
                rbi = int(rb[i])
                if lbi >= 0 and rbi >= 0:
                    ax.scatter(
                        [t[lbi], t[rbi]], [y[lbi], y[rbi]], marker="x", s=30
                    )

    # FWHM guides
    if show_fwhm:
        li = a.get("left_ip")
        ri = a.get("right_ip")
        if li is not None and ri is not None:
            for i in range(len(xs)):
                if np.isfinite(li[i]) and np.isfinite(ri[i]):
                    ax.vlines(
                        [float(li[i]), float(ri[i])],
                        ymin=np.nanmin(y),
                        ymax=np.nanmax(y),
                        linestyles="dashed",
                        alpha=0.25,
                    )

    # draw the integration region used for "area" for one chosen event
    if show_area:
        lb = a.get("left_base_index")
        rb = a.get("right_base_index")
        li = a.get("left_ip")
        ri = a.get("right_ip")
        area = a.get("area")

        if lb is not None and rb is not None and len(xs) > 0:
            # choose which event to highlight
            if area_event is None:
                # default: largest |area| if available, else first event
                if area is not None and np.any(np.isfinite(area)):
                    area_event = int(np.nanargmax(np.abs(area)))
                else:
                    area_event = 0

            i = int(area_event)
            if 0 <= i < len(xs):
                lbi = int(lb[i]) if lb is not None else -1
                rbi = int(rb[i]) if rb is not None else -1

                if lbi >= 0 and rbi >= 0:
                    # which region do we shade?  Mirror the analysis settings
                    # recorded in res.params unless explicitly overridden.
                    params = res.params or {}
                    area_region = area_region_override or params.get(
                        "area_region", "bases"
                    )
                    baseline_mode = params.get("baseline_mode", "line")

                    if (
                        area_region == "fwhm"
                        and li is not None
                        and ri is not None
                        and np.isfinite(li[i])
                        and np.isfinite(ri[i])
                    ):
                        lo = int(np.searchsorted(t, float(li[i]), side="left"))
                        hi = int(np.searchsorted(t, float(ri[i]), side="right"))
                    else:
                        lo = min(lbi, rbi)
                        hi = max(lbi, rbi) + 1

                    lo = max(0, lo)
                    hi = min(t.size, hi)

                    if hi - lo >= 2:
                        xs_area = t[lo:hi]
                        ys_area = y[lo:hi]

                        # baseline evaluated over the same xs_area
                        if baseline_mode == "flat":
                            b0 = _baseline_flat(y, lbi, rbi)
                            b = np.full_like(ys_area, b0, dtype=float)
                        else:
                            b_line = _baseline_line(t, y, lbi, rbi)
                            b = b_line[lo:hi]

                        m = (
                            np.isfinite(xs_area)
                            & np.isfinite(ys_area)
                            & np.isfinite(b)
                        )
                        if np.any(m):
                            # fill between baseline and trace over the integration region
                            ax.fill_between(
                                xs_area[m],
                                b[m],
                                ys_area[m],
                                alpha=area_alpha,
                                label="area region"
                                if "area region"
                                not in [
                                    h.get_label()
                                    for h in ax.get_legend_handles_labels()[0]
                                ]
                                else None,
                            )

                            # show bounds + baseline segment (helps debugging "why did it stop early?")
                            ax.vlines(
                                [xs_area[m][0], xs_area[m][-1]],
                                ymin=np.nanmin(y),
                                ymax=np.nanmax(y),
                                linestyles=":",
                                alpha=0.35,
                            )
                            ax.plot(xs_area[m], b[m], linewidth=1.0, alpha=0.6)

                        # annotate area value (if present)
                        if area is not None and np.isfinite(area[i]):
                            ax.annotate(
                                f"area={float(area[i]):.3g}",
                                (float(xs[i]), float(ys[i])),
                                textcoords="offset points",
                                xytext=(6, -10),
                                fontsize=8,
                            )

    if annotate:
        for i in range(len(xs)):
            ax.annotate(
                f"{kinds[i]}@{xs[i]:.2f}",
                (xs[i], ys[i]),
                textcoords="offset points",
                xytext=(4, 4),
                fontsize=8,
            )

    ax.set_xlabel("time (s)")
    ax.set_ylabel(sig)
    ax.legend(frameon=False, fontsize=8)
    return fig, ax