firepype-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
firepype/detection.py ADDED
@@ -0,0 +1,517 @@
+ # firepype/detection.py
+ from __future__ import annotations
+
+ from typing import Tuple
+
+ import numpy as np
+ from scipy.ndimage import gaussian_filter1d
+ from scipy.signal import find_peaks
+
+ from .utils import skyline_mask_from_1d
+
+
+ def detect_slit_edges(
+     data: np.ndarray,
+     x_hint: Tuple[int, int] | None = None,
+     hint_expand: int = 150,
+     row_frac: Tuple[float, float] = (0.35, 0.85),
+     debug: bool = False,
+ ):
+     """
+     Purpose:
+         Detect the left and right slit edges in a 2D frame by forming a median
+         spatial profile over a row band, high-pass filtering it, and locating
+         opposing gradient peaks indicative of slit boundaries. An optional
+         x-range hint and adaptive recentring focus the search.
+     Inputs:
+         data: 2D image array (rows x cols)
+         x_hint: Optional tuple (xmin, xmax) of expected slit horizontal span (columns)
+         hint_expand: Columns to expand around x_hint for the search window (default 150)
+         row_frac: Fractional row limits (lo, hi) used to build the profile (default (0.35, 0.85))
+         debug: If True, print diagnostic information
+     Returns:
+         tuple:
+             - left_edge (int): Detected left slit edge column index
+             - right_edge (int): Detected right slit edge column index
+             - sm (np.ndarray): Smoothed, high-pass profile used for detection (per column)
+             - g (np.ndarray): Smoothed gradient of sm used to find edge peaks
+             - (lo, hi) (tuple[int, int]): Final column search window used
+             - (r0, r1) (tuple[int, int]): Row band used to form the median profile
+     """
+     nrows, ncols = data.shape
+     r0 = int(max(0, min(row_frac[0], row_frac[1])) * nrows)
+     r1 = int(min(1.0, max(row_frac[0], row_frac[1])) * nrows)
+     band_rows = data[r0:r1, :]
+
+     lo_global = int(0.03 * ncols)
+     hi_global = int(0.97 * ncols)
+
+     if x_hint is not None and len(x_hint) == 2:
+         xmin = max(lo_global, int(min(x_hint)))
+         xmax = min(hi_global, int(max(x_hint)))
+         lo = max(lo_global, xmin - hint_expand)
+         hi = min(hi_global, xmax + hint_expand)
+     else:
+         lo, hi = lo_global, hi_global
+
+     prof_raw = np.median(band_rows, axis=0).astype(float)
+
+     def band_recenter(profile, target_width_px):
+         base = gaussian_filter1d(profile, 150, mode="nearest")
+         hp = profile - base
+         hp = gaussian_filter1d(hp, 3, mode="nearest")
+         w = max(40, int(target_width_px))
+         score = np.convolve(np.abs(hp), np.ones(w, float), mode="same")
+         c = int(np.argmax(score))
+         lo2 = max(lo_global, c - w // 2)
+         hi2 = min(hi_global, c + w // 2)
+
+         if hi2 - lo2 < 30:
+             lo2 = max(lo_global, c - 30)
+             hi2 = min(hi_global, c + 30)
+
+         return lo2, hi2
+
+     SLIT_TARGET_WIDTH_FRAC = 0.28
+
+     if (hi - lo) > 0.5 * (hi_global - lo_global):
+         lo, hi = band_recenter(prof_raw, SLIT_TARGET_WIDTH_FRAC * ncols)
+
+     best = None
+
+     for base_sig, sm_sig, grad_sig in [(120, 3.0, 2.2), (80, 2.2, 1.8)]:
+         base_lo = gaussian_filter1d(prof_raw, base_sig, mode="nearest")
+         hp = prof_raw - base_lo
+         sm = gaussian_filter1d(hp, sm_sig, mode="nearest")
+         g = gaussian_filter1d(np.gradient(sm), grad_sig, mode="nearest")
+
+         def find_pairs(lo_i, hi_i, prom_pct=70, dist=12):
+             if hi_i <= lo_i + 10:
+                 return []
+
+             prom = np.percentile(np.abs(g[lo_i:hi_i]), prom_pct)
+             L_idx, _ = find_peaks(
+                 g[lo_i:hi_i], prominence=float(max(prom, 1e-6)), distance=dist
+             )
+             R_idx, _ = find_peaks(
+                 -g[lo_i:hi_i], prominence=float(max(prom, 1e-6)), distance=dist
+             )
+
+             L = lo_i + L_idx
+             R = lo_i + R_idx
+
+             def score_pair(l, r):
+                 if r - l < 12:
+                     return -np.inf
+
+                 interior = np.median(sm[l:r])
+                 left_bg = np.median(sm[max(lo_i, l - 60) : l])
+                 right_bg = np.median(sm[r : min(hi_i, r + 60)])
+                 bg = 0.5 * (left_bg + right_bg)
+                 contrast = abs(interior - bg)
+                 var_interior = np.median(np.abs(sm[l:r] - interior))
+                 steep_left = abs(g[l]) if 0 <= l < ncols else 0.0
+                 steep_right = abs(g[r]) if 0 <= r < ncols else 0.0
+
+                 return contrast + 0.5 * var_interior + 0.2 * (steep_left + steep_right)
+
+             cand = []
+
+             for l in L:
+                 for r in R[R > l + 8]:
+                     cand.append((score_pair(int(l), int(r)), int(l), int(r)))
+
+             cand.sort(reverse=True)
+
+             return cand
+
+         cand = find_pairs(lo, hi, prom_pct=70, dist=12)
+
+         if not cand:
+             cand = find_pairs(lo, hi, prom_pct=60, dist=8)
+
+         if cand:
+             chosen = cand[0]
+             best = (chosen[1], chosen[2], sm, g, (lo, hi), (r0, r1))
+             break
+
+     if best is None:
+         lo2, hi2 = band_recenter(prof_raw, SLIT_TARGET_WIDTH_FRAC * ncols)
+         base_lo = gaussian_filter1d(prof_raw, 120, mode="nearest")
+         hp = prof_raw - base_lo
+         sm = gaussian_filter1d(hp, 3, mode="nearest")
+         g = gaussian_filter1d(np.gradient(sm), 2.2, mode="nearest")
+         window = 40
+         Lcand = np.argmax(
+             np.abs(g[max(lo_global, lo2 - window) : min(hi_global, lo2 + window)])
+         )
+         Rcand = np.argmax(
+             np.abs(g[max(lo_global, hi2 - window) : min(hi_global, hi2 + window)])
+         )
+
+         left_edge = max(lo_global, lo2 - window) + int(Lcand)
+         right_edge = max(lo_global, hi2 - window) + int(Rcand)
+
+         if left_edge >= right_edge:
+             left_edge, right_edge = lo2, hi2
+
+         best = (left_edge, right_edge, sm, g, (lo2, hi2), (r0, r1))
+
+     left_edge, right_edge, sm, g, (lo, hi), (r0, r1) = best
+
+     if debug:
+         print(
+             f"[detect_slit_edges] rows={r0}:{r1}, final {left_edge}:{right_edge} "
+             f"(W={right_edge-left_edge})"
+         )
+
+     return left_edge, right_edge, sm, g, (lo, hi), (r0, r1)
+
+
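A minimal usage sketch for detect_slit_edges; the synthetic frame, its flat level, and the slit span below are assumptions for illustration, not package fixtures:

    import numpy as np
    from firepype.detection import detect_slit_edges

    # Synthetic frame: flat background near 100 with a brighter slit at cols 400-700
    rng = np.random.default_rng(0)
    frame = 100.0 + rng.normal(0.0, 1.0, size=(1024, 1024))
    frame[:, 400:700] += 25.0

    # A tight hint keeps the search window narrow enough to skip adaptive recentring
    left, right, sm, g, (lo, hi), (r0, r1) = detect_slit_edges(frame, x_hint=(400, 700), hint_expand=40)
    # left and right should land near columns 400 and 700
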
+ def detect_objects_in_slit(
+     data: np.ndarray,
+     left_edge: int,
+     right_edge: int,
+     row_frac: Tuple[float, float] = (0.40, 0.80),
+     min_sep_frac: float = 0.06,
+     edge_pad_frac: float = 0.04,
+     debug: bool = False,
+ ):
+     """
+     Purpose:
+         Find approximate column positions of the positive and negative object
+         traces within the slit by analyzing a smoothed median spatial profile,
+         enforcing a minimum separation and padding from the slit edges.
+     Inputs:
+         data: 2D image array (rows x cols)
+         left_edge: Left slit edge column index
+         right_edge: Right slit edge column index
+         row_frac: Fractional row band used to compute the median profile (default (0.40, 0.80))
+         min_sep_frac: Minimum pos/neg separation in units of slit width (default 0.06)
+         edge_pad_frac: Padding from the edges as a fraction of slit width (default 0.04)
+         debug: If True, print diagnostics
+     Returns:
+         tuple:
+             - obj_pos_abs (int): Column of the positive object peak
+             - obj_neg_abs (int): Column of the negative object trough
+             - prof (np.ndarray): Smoothed median profile used for detection
+             - (r0, r1) (tuple[int, int]): Row band used to build the profile
+     """
+     nrows, _ = data.shape
+     r0 = int(max(0, min(row_frac[0], row_frac[1])) * nrows)
+     r1 = int(min(1.0, max(row_frac[0], row_frac[1])) * nrows)
+
+     mid_lo = left_edge + int(0.20 * (right_edge - left_edge))
+     mid_hi = right_edge - int(0.20 * (right_edge - left_edge))
+     mid_lo = max(left_edge, mid_lo)
+     mid_hi = min(right_edge, mid_hi)
+
+     band = data[r0:r1, mid_lo : mid_hi + 1]
+     prof = np.median(band, axis=0).astype(float)
+     prof = gaussian_filter1d(prof, sigma=4.0)
+
+     pos_rel = int(np.argmax(prof))
+     neg_rel = int(np.argmin(prof))
+     obj_pos_abs = mid_lo + pos_rel
+     obj_neg_abs = mid_lo + neg_rel
+
+     pad = max(2, int(edge_pad_frac * (right_edge - left_edge)))
+     min_sep = max(3, int(min_sep_frac * (right_edge - left_edge)))
+
+     def clamp_to_mid(x):
+         return min(max(x, mid_lo + pad), mid_hi - pad)
+
+     obj_pos_abs = clamp_to_mid(obj_pos_abs)
+     obj_neg_abs = clamp_to_mid(obj_neg_abs)
+
+     if abs(obj_pos_abs - obj_neg_abs) < min_sep:
+         exc_lo = max(mid_lo, min(obj_pos_abs, obj_neg_abs) - min_sep // 2)
+         exc_hi = min(mid_hi, max(obj_pos_abs, obj_neg_abs) + min_sep // 2)
+         mask = np.ones_like(prof, dtype=bool)
+         mask[(exc_lo - mid_lo) : (exc_hi - mid_lo + 1)] = False
+
+         if prof[pos_rel] >= -prof[neg_rel]:
+             cand = np.where(mask, prof, np.inf)
+             neg_rel2 = int(np.argmin(cand))
+             obj_neg_abs = clamp_to_mid(mid_lo + neg_rel2)
+         else:
+             cand = np.where(mask, -prof, np.inf)
+             pos_rel2 = int(np.argmin(cand))
+             obj_pos_abs = clamp_to_mid(mid_lo + pos_rel2)
+
+     if obj_pos_abs == obj_neg_abs:
+         order = np.argsort(prof)
+         order_pos = np.argsort(-prof)
+
+         for j in order:
+             cand = mid_lo + int(j)
+
+             if (
+                 abs(cand - obj_pos_abs) >= min_sep
+                 and (mid_lo + pad) <= cand <= (mid_hi - pad)
+             ):
+                 obj_neg_abs = cand
+                 break
+
+         for j in order_pos:
+             cand = mid_lo + int(j)
+             if (
+                 abs(cand - obj_neg_abs) >= min_sep
+                 and (mid_lo + pad) <= cand <= (mid_hi - pad)
+             ):
+                 obj_pos_abs = cand
+                 break
+
+     if debug:
+         print(
+             f"[detect_objects_in_slit] rows={r0}:{r1} POS={obj_pos_abs} "
+             f"NEG={obj_neg_abs} sep={abs(obj_pos_abs - obj_neg_abs)}"
+         )
+
+     return obj_pos_abs, obj_neg_abs, prof, (r0, r1)
+
+
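Continuing the sketch above, an A-B pair injected inside the slit (trace positions are again synthetic) is then located from the smoothed profile:

    # Positive (A) and negative (B) traces inside the slit's central band
    frame[:, 518:523] += 60.0
    frame[:, 608:613] -= 60.0

    pos, neg, prof, rows = detect_objects_in_slit(frame, left, right)
    # pos should fall near column 520 and neg near column 610; their separation
    # comfortably exceeds min_sep_frac * slit width, so no re-selection is needed
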
+ def find_arc_trace_col_strong(
+     arc_img: np.ndarray,
+     approx_col: int | None = None,
+     *,
+     search_half: int = 240,
+     min_sep: int = 12,
+     x_hint: Tuple[int, int] | None = None,
+     row_frac: Tuple[float, float] = (0.35, 0.85),
+     debug_print: bool = True,
+ ) -> int:
+     """
+     Purpose:
+         Select a strong arc column for tracing by analyzing a row-banded
+         median spatial profile of the arc image, high-pass filtering it, and
+         scoring peak candidates near an optional approximate column.
+     Inputs:
+         arc_img: 2D arc image array (rows x cols)
+         approx_col: Optional approximate column index to bias selection towards
+         search_half: Half-width of the search window around approx_col (default 240)
+         min_sep: Minimum spacing between candidate peaks in pixels (default 12)
+         x_hint: Optional (xmin, xmax) column limits for the analysis
+         row_frac: Fractional row band used to compute the profile (default (0.35, 0.85))
+         debug_print: If True, print the chosen candidate details
+     Returns:
+         int:
+             Selected column index best suited for arc tracing
+     """
+     img = np.asarray(arc_img, float)
+     nrows, ncols = img.shape
+
+     lo_x = int(max(0, (min(x_hint) if x_hint else 0)))
+     hi_x = int(min(ncols, (max(x_hint) if x_hint else ncols)))
+
+     r0 = int(min(row_frac) * nrows)
+     r1 = int(max(row_frac) * nrows)
+     band = img[r0:r1, lo_x:hi_x]
+     prof = np.median(band, axis=0).astype(float)
+
+     base = gaussian_filter1d(prof, 65, mode="nearest")
+     hp = prof - base
+     sm = gaussian_filter1d(hp, 3, mode="nearest")
+
+     prom = max(np.nanpercentile(np.abs(sm), 98) * 0.6, 10.0)
+     cand_idx, _ = find_peaks(
+         sm, prominence=float(prom), distance=int(max(min_sep, 8))
+     )
+
+     if cand_idx.size == 0:
+         prom2 = max(np.nanpercentile(np.abs(sm), 95) * 0.4, 5.0)
+         cand_idx, _ = find_peaks(
+             sm, prominence=float(prom2), distance=int(max(min_sep, 6))
+         )
+
+     cand_cols = (lo_x + cand_idx).astype(int)
+
+     if approx_col is not None and cand_cols.size:
+         cand_cols = cand_cols[
+             np.abs(cand_cols - int(approx_col)) <= int(search_half)
+         ]
+
+     if cand_cols.size == 0:
+         if approx_col is not None:
+             lo = max(0, int(approx_col) - int(search_half))
+             hi = min(ncols, int(approx_col) + int(search_half) + 1)
+         else:
+             lo, hi = lo_x, hi_x
+         j = int(np.argmax(sm[max(0, lo - lo_x) : (hi - lo_x)]))  # clamp the slice into the analysed window
+         best_col = max(lo, lo_x) + j
+
+         if debug_print:
+             print(f"[arc-col] fallback peak at col {best_col}")
+
+         return int(best_col)
+
+     win = 5
+     scores = []
+
+     for c in cand_cols:
+         j = int(np.clip(c - lo_x, 0, band.shape[1] - 1))
+         j0 = max(0, j - win)
+         j1 = min(band.shape[1], j + win + 1)
+         scores.append(float(np.nanmedian(band[:, j0:j1], axis=0).max()))
+
+     best_col = int(cand_cols[int(np.argmax(scores))])
+
+     if debug_print:
+         print(f"[arc-col] candidates={cand_cols.tolist()} -> chosen {best_col}")
+
+     return best_col
+
+
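A sketch for the arc-column selector on a synthetic arc frame (the line positions and strengths are assumptions):

    import numpy as np
    from firepype.detection import find_arc_trace_col_strong

    rng = np.random.default_rng(1)
    arc = rng.normal(0.0, 1.0, size=(1024, 1024))
    arc[:, 298:303] += 200.0  # strong arc trace
    arc[:, 598:603] += 80.0   # weaker trace, outside the search window below

    col = find_arc_trace_col_strong(arc, approx_col=310, search_half=50)
    # col should come out near 300; the column near 600 is filtered by search_half
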
+ def estimate_parity(
+     img: np.ndarray, pos_col: int, neg_col: int, row_bands=((0.40, 0.60), (0.60, 0.80))
+ ) -> int:
+     """
+     Purpose:
+         Estimate the AB parity from two row bands by comparing the median
+         brightness around the positive and negative column positions.
+     Inputs:
+         img: 2D image array (rows x cols)
+         pos_col: Column index of the positive trace
+         neg_col: Column index of the negative trace
+         row_bands: Iterable of (lo, hi) fractional row bands to evaluate
+     Returns:
+         int:
+             +1 if the positive trace is brighter in aggregate; otherwise -1
+     """
+     def one_band(rf):
+         r0 = int(min(rf) * img.shape[0])
+         r1 = int(max(rf) * img.shape[0])
+         cpos0 = max(0, pos_col - 2)
+         cpos1 = min(img.shape[1], pos_col + 3)
+         cneg0 = max(0, neg_col - 2)
+         cneg1 = min(img.shape[1], neg_col + 3)
+         v_pos = float(np.nanmedian(img[r0:r1, cpos0:cpos1]))
+         v_neg = float(np.nanmedian(img[r0:r1, cneg0:cneg1]))
+
+         return +1 if v_pos >= v_neg else -1
+
+     s = sum(one_band(b) for b in row_bands)
+
+     return +1 if s >= 0 else -1
+
+
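estimate_parity is a majority vote over the row bands; a small check with synthetic traces wide enough to fill the 5-pixel sampling window:

    import numpy as np
    from firepype.detection import estimate_parity

    img = np.zeros((100, 50))
    img[:, 8:13] = 5.0    # 5 px wide bright trace centred on column 10
    img[:, 28:33] = -5.0  # 5 px wide faint trace centred on column 30

    assert estimate_parity(img, pos_col=10, neg_col=30) == +1
    assert estimate_parity(img, pos_col=30, neg_col=10) == -1  # roles swapped
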
+ def estimate_negative_scale_robust(
+     img: np.ndarray,
+     pos_col: int,
+     neg_col: int,
+     ap: int = 5,
+     row_exclude_frac: Tuple[float, float] = (0.40, 0.80),
+     g_limits: Tuple[float, float] = (0.1, 10.0),
+ ) -> float:
+     """
+     Purpose:
+         Robustly estimate the negative-beam scale g such that pos ≈ g * neg,
+         using only sky rows (excluding a central band) and masking rows with
+         strong skylines detected in either beam.
+     Inputs:
+         img: 2D image array (rows, cols)
+         pos_col: Column index of the positive trace
+         neg_col: Column index of the negative trace
+         ap: Half-width of the per-column extraction aperture (default 5)
+         row_exclude_frac: Fractional row band excluded as containing object light (default (0.40, 0.80))
+         g_limits: (min, max) bounds used to clamp the final g (default (0.1, 10.0))
+     Returns:
+         float:
+             Robust estimate of g, clamped to g_limits
+     """
+     nrows = img.shape[0]
+     pos = np.median(img[:, max(0, pos_col - ap) : pos_col + ap + 1], axis=1).astype(
+         float
+     )
+     neg = np.median(img[:, max(0, neg_col - ap) : neg_col + ap + 1], axis=1).astype(
+         float
+     )
+
+     r0e = int(min(row_exclude_frac) * nrows)
+     r1e = int(max(row_exclude_frac) * nrows)
+     sky_rows = np.r_[0:r0e, r1e:nrows]
+
+     sky_bad = skyline_mask_from_1d(pos, sigma_hi=3.5) | skyline_mask_from_1d(
+         neg, sigma_hi=3.5
+     )
+
+     m = np.zeros(nrows, dtype=bool)  # start empty: select sky rows only (np.ones would also keep the object band)
+     m[sky_rows] = True
+     m &= ~sky_bad
+     m &= np.isfinite(pos) & np.isfinite(neg) & (np.abs(neg) > 1e-9)
+
+     if np.count_nonzero(m) < 40:
+         m = np.isfinite(pos) & np.isfinite(neg) & (np.abs(neg) > 1e-9)
+         center = np.zeros(nrows, dtype=bool)
+         center[r0e:r1e] = True
+         m &= ~center
+
+     if np.count_nonzero(m) < 20:
+         m = np.isfinite(pos) & np.isfinite(neg) & (np.abs(neg) > 1e-9)
+
+     ratios = pos[m] / neg[m]
+     med = np.nanmedian(ratios)
+     mad = np.nanmedian(np.abs(ratios - med)) + 1e-12
+     keep = np.abs(ratios - med) <= 3.5 * 1.4826 * mad
+     ratios_t = ratios[keep] if np.any(keep) else ratios
+     g = float(np.nanmedian(ratios_t))
+     g = float(np.clip(g, g_limits[0], g_limits[1]))
+
+     return g
+
+
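A sketch of the intended behaviour, assuming the unshown firepype.utils.skyline_mask_from_1d flags only a minority of rows here; for beams scaling as pos ≈ 2 * neg the robust ratio should recover g ≈ 2:

    import numpy as np
    from firepype.detection import estimate_negative_scale_robust

    rng = np.random.default_rng(2)
    rows = np.arange(400, dtype=float)
    sky = 1.0 + 0.5 * np.sin(rows / 7.0)   # row-dependent sky shape
    img = rng.normal(0.0, 0.05, size=(400, 200))
    img[:, 45:56] += 4.0 * sky[:, None]    # positive beam around col 50
    img[:, 115:126] += 2.0 * sky[:, None]  # negative beam around col 120

    g = estimate_negative_scale_robust(img, pos_col=50, neg_col=120)
    # g should come out close to 4.0 / 2.0 = 2.0
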
+ def refine_neg_column_local(
+     img: np.ndarray, pos_col: int, neg_col_init: int, *, search_half: int = 10, ap: int = 5
+ ) -> int:
+     """
+     Purpose:
+         Refine the negative-column position by a local search within
+         +/- search_half columns, minimising the median absolute deviation
+         (MAD) of the residual pos - g * neg, where g is robustly
+         re-estimated at each candidate column.
+     Inputs:
+         img: 2D image array (rows, cols)
+         pos_col: Column index of the positive trace
+         neg_col_init: Initial column index of the negative trace
+         search_half: Half-width of the integer search window (default 10)
+         ap: Half-width of the per-column extraction aperture (default 5)
+     Returns:
+         int:
+             Best-fit negative column index that minimises the residual MAD
+     """
+     ncols = img.shape[1]
+     candidates = range(
+         max(0, neg_col_init - search_half), min(ncols, neg_col_init + search_half + 1)
+     )
+
+     best_col, best_score = neg_col_init, -np.inf
+
+     for cneg in candidates:
+         g = estimate_negative_scale_robust(img, pos_col, cneg, ap=ap)
+         pos = np.median(
+             img[:, max(0, pos_col - ap) : pos_col + ap + 1], axis=1
+         )
+         neg = np.median(
+             img[:, max(0, cneg - ap) : cneg + ap + 1], axis=1
+         )
+
+         resid = pos - g * neg
+         mad = np.nanmedian(np.abs(resid - np.nanmedian(resid))) + 1e-12
+         score = -mad
+
+         if score > best_score:
+             best_score, best_col = score, cneg
+
+     return int(best_col)
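Continuing the previous sketch, a deliberately offset starting guess should be pulled back towards the true negative column, to within the flat plateau that the median aperture creates around it:

    from firepype.detection import refine_neg_column_local

    best = refine_neg_column_local(img, pos_col=50, neg_col_init=126, search_half=10)
    # best should land within a few columns of 120, where the MAD of
    # pos - g*neg is smallest
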
firepype/extraction.py ADDED
@@ -0,0 +1,198 @@
+ # firepype/extraction.py
+ from __future__ import annotations
+
+ from typing import Tuple
+
+ import numpy as np
+
+
+ def extract_with_local_bg(
+     img: np.ndarray,
+     center_col: int,
+     *,
+     ap: int = 5,
+     bg_in: int = 8,
+     bg_out: int = 18,
+ ) -> np.ndarray:
+     """
+     Purpose:
+         Extract a 1D spectrum by median-collapsing a small aperture around
+         center_col and subtracting a local background estimated from side bands.
+     Inputs:
+         img: 2D image array with shape (rows, cols)
+         center_col: Central column index of the object trace
+         ap: Half-width (in columns) of the extraction aperture (default 5)
+         bg_in: Inner offset (in columns) from center to the background band (default 8)
+         bg_out: Outer offset (in columns) from center to the background band (default 18)
+     Returns:
+         np.ndarray:
+             Extracted 1D spectrum (length = number of rows), as float
+     """
+     nrows, ncols = img.shape
+     lo = max(0, center_col - ap)
+     hi = min(ncols, center_col + ap + 1)
+     bg_left = img[:, max(0, center_col - bg_out) : max(0, center_col - bg_in)]
+     bg_right = img[:, min(ncols, center_col + bg_in) : min(ncols, center_col + bg_out)]
+
+     if bg_left.size == 0 and bg_right.size == 0:
+         bg = np.zeros(nrows, dtype=img.dtype)
+     else:
+         if bg_left.size and bg_right.size:
+             bg_all = np.concatenate([bg_left, bg_right], axis=1)
+         else:
+             bg_all = bg_left if bg_left.size else bg_right
+         bg = np.median(bg_all, axis=1)
+     spec = np.median(img[:, lo:hi], axis=1) - bg
+
+     return spec.astype(float)
+
+
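A quick numeric check of the local background subtraction (values are synthetic):

    import numpy as np
    from firepype.extraction import extract_with_local_bg

    img = np.full((6, 40), 10.0)  # flat sky level of 10
    img[:, 18:23] += 3.0          # trace of +3 centred on column 20

    spec = extract_with_local_bg(img, center_col=20, ap=2, bg_in=6, bg_out=12)
    # The aperture (cols 18-22) sits on the trace and both side bands see pure
    # sky, so every element is 13 - 10 = 3
    print(spec)  # [3. 3. 3. 3. 3. 3.]
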
+ def extract_cols_median(
+     img: np.ndarray,
+     center_col: int,
+     *,
+     half: int = 1,
+     ap: int = 5,
+     bg_in: int = 8,
+     bg_out: int = 18,
+ ) -> np.ndarray:
+     """
+     Purpose:
+         Extract spectra from adjacent columns around center_col using
+         extract_with_local_bg, then median-combine the spectra.
+     Inputs:
+         img: 2D image array (rows, cols)
+         center_col: Central column index
+         half: Include columns in [center_col - half, center_col + half] (default 1)
+         ap: Half-width of the extraction aperture in columns (default 5)
+         bg_in: Inner offset of the background window (default 8)
+         bg_out: Outer offset of the background window (default 18)
+     Returns:
+         np.ndarray:
+             Median of the stacked extractions across the selected columns (1D array)
+     """
+     ncols = img.shape[1]
+     cols = [center_col + dc for dc in range(-half, half + 1)]
+     cols = [c for c in cols if 0 <= c < ncols]
+     stacks = [
+         extract_with_local_bg(img, c, ap=ap, bg_in=bg_in, bg_out=bg_out) for c in cols
+     ]
+
+     return np.median(np.vstack(stacks), axis=0).astype(float)
+
+
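With the same synthetic image, the multi-column variant medians three single-column extractions:

    from firepype.extraction import extract_cols_median

    spec3 = extract_cols_median(img, 20, half=1, ap=2, bg_in=6, bg_out=12)
    # Extractions at columns 19, 20 and 21 each recover 3, so the median is 3
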
+ def extract_cols_median_with_err(
+     img: np.ndarray,
+     center_col: int,
+     *,
+     half: int = 1,
+     ap: int = 5,
+     bg_in: int = 8,
+     bg_out: int = 18,
+ ) -> tuple[np.ndarray, np.ndarray]:
+     """
+     Purpose:
+         Extract a median-combined 1D spectrum across a footprint centred on
+         center_col with local background subtraction, and estimate per-row
+         1-sigma errors from the background MAD propagated to the median.
+     Inputs:
+         img: 2D image array (rows, cols)
+         center_col: Central column index
+         half: Include columns in [center_col - half, center_col + half] (default 1)
+         ap: Half-width of the extraction aperture in columns (default 5)
+         bg_in: Inner offset of the background window (default 8)
+         bg_out: Outer offset of the background window (default 18)
+     Returns:
+         tuple:
+             - flux_1d (np.ndarray): Extracted 1D flux versus row
+             - sigma_1d (np.ndarray): Estimated per-row 1-sigma uncertainties
+     """
+     nrows, ncols = img.shape
+     cols = [center_col + dc for dc in range(-half, half + 1)]
+     cols = [c for c in cols if 0 <= c < ncols]
+
+     specs = []
+     sigmas = []
+
+     for c in cols:
+         lo = max(0, c - ap)
+         hi = min(ncols, c + ap + 1)
+
+         bg_left = img[:, max(0, c - bg_out) : max(0, c - bg_in)]
+         bg_right = img[:, min(ncols, c + bg_in) : min(ncols, c + bg_out)]
+
+         if bg_left.size == 0 and bg_right.size == 0:
+             bg_med = np.zeros(nrows, dtype=float)
+             bg_std = np.zeros(nrows, dtype=float)
+         else:
+             if bg_left.size and bg_right.size:
+                 bg_all = np.concatenate([bg_left, bg_right], axis=1)
+             else:
+                 bg_all = bg_left if bg_left.size else bg_right
+             bg_med = np.median(bg_all, axis=1).astype(float)
+             mad = np.median(np.abs(bg_all - bg_med[:, None]), axis=1) + 1e-12
+             bg_std = 1.4826 * mad
+
+         sub = img[:, lo:hi] - bg_med[:, None]
+         spec = np.median(sub, axis=1).astype(float)
+         specs.append(spec)
+
+         n_eff = max(1, hi - lo)
+         sigma_row = np.sqrt(np.pi / 2.0) * bg_std / np.sqrt(n_eff)
+         sigmas.append(sigma_row)
+
+     spec_stack = np.vstack(specs)
+     sigma_stack = np.vstack(sigmas)
+     flux_1d = np.median(spec_stack, axis=0)
+     M = max(1, len(cols))
+     # The median of M Gaussian estimates has variance ~ (pi/2) * sigma^2 / M,
+     # so scale a typical per-column sigma by sqrt(pi/2) / sqrt(M)
+     sigma_1d = np.sqrt(np.pi / 2.0) * np.median(sigma_stack**2, axis=0) ** 0.5 / np.sqrt(
+         M
+     )
+
+     return flux_1d.astype(float), sigma_1d.astype(float)
+
+
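The error path can be sanity-checked with noisy sky (the 0.1 noise level is an assumption); the returned sigma tracks the background MAD rather than the trace itself:

    import numpy as np
    from firepype.extraction import extract_cols_median_with_err

    rng = np.random.default_rng(3)
    noisy = 10.0 + rng.normal(0.0, 0.1, size=(500, 40))
    noisy[:, 18:23] += 3.0

    flux, sigma = extract_cols_median_with_err(noisy, 20, half=1, ap=2, bg_in=6, bg_out=12)
    # flux.mean() should be ~3.0; sigma is of order 0.1 scaled down by the
    # aperture and column-stack medians (a few hundredths here)
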
+ def estimate_negative_scale(
+     img: np.ndarray,
+     pos_col: int,
+     neg_col: int,
+     *,
+     ap: int = 5,
+     row_bg_frac: Tuple[float, float] = (0.0, 0.2),
+     row_bg_frac_hi: Tuple[float, float] = (0.8, 1.0),
+ ) -> float:
+     """
+     Purpose:
+         Provide a simple least-squares estimator for the negative-beam scale g
+         using the outer row bands as sky. Primarily for reference and tests;
+         the main pipeline should prefer detection.estimate_negative_scale_robust.
+     Inputs:
+         img: 2D image array (rows, cols)
+         pos_col: Column index of the positive object trace
+         neg_col: Column index of the negative object trace
+         ap: Half-width of the extraction aperture in columns (default 5)
+         row_bg_frac: Fractional row range of the lower background band (default (0.0, 0.2))
+         row_bg_frac_hi: Fractional row range of the upper background band (default (0.8, 1.0))
+     Returns:
+         float:
+             Estimated negative-beam scale g
+     """
+     nrows = img.shape[0]
+     pos = np.median(img[:, max(0, pos_col - ap) : pos_col + ap + 1], axis=1)
+     neg = np.median(img[:, max(0, neg_col - ap) : neg_col + ap + 1], axis=1)
+
+     r0a = int(min(row_bg_frac) * nrows)
+     r1a = int(max(row_bg_frac) * nrows)
+     r0b = int(min(row_bg_frac_hi) * nrows)
+     r1b = int(max(row_bg_frac_hi) * nrows)
+     rows_bg = np.r_[r0a:r1a, r0b:r1b]
+
+     num = float(np.sum(pos[rows_bg] * neg[rows_bg]))
+     den = float(np.sum(neg[rows_bg] ** 2) + 1e-12)
+
+     return num / den
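Since g = sum(pos * neg) / sum(neg ** 2) is the least-squares solution of pos ≈ g * neg over the outer rows, an exactly proportional pair recovers g exactly:

    import numpy as np
    from firepype.extraction import estimate_negative_scale

    rows = np.arange(300, dtype=float)
    shape = 1.0 + 0.1 * np.sin(rows / 5.0)
    img = np.zeros((300, 60))
    img[:, 13:24] = 3.0 * shape[:, None]  # positive beam, cols 13-23
    img[:, 38:49] = 1.5 * shape[:, None]  # negative beam at half the scale

    print(estimate_negative_scale(img, 18, 43))  # ~2.0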