pyreduce_astro-0.7a4-cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +322 -0
  3. pyreduce/cli.py +342 -0
  4. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.exp +0 -0
  5. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.lib +0 -0
  6. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.exp +0 -0
  7. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.lib +0 -0
  8. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.exp +0 -0
  9. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.lib +0 -0
  10. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.exp +0 -0
  11. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.lib +0 -0
  12. pyreduce/clib/Release/_slitfunc_2d.obj +0 -0
  13. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.exp +0 -0
  14. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.lib +0 -0
  15. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.exp +0 -0
  16. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.lib +0 -0
  17. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.exp +0 -0
  18. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.lib +0 -0
  19. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.exp +0 -0
  20. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.lib +0 -0
  21. pyreduce/clib/Release/_slitfunc_bd.obj +0 -0
  22. pyreduce/clib/__init__.py +0 -0
  23. pyreduce/clib/_slitfunc_2d.cp311-win_amd64.pyd +0 -0
  24. pyreduce/clib/_slitfunc_2d.cp312-win_amd64.pyd +0 -0
  25. pyreduce/clib/_slitfunc_2d.cp313-win_amd64.pyd +0 -0
  26. pyreduce/clib/_slitfunc_2d.cp314-win_amd64.pyd +0 -0
  27. pyreduce/clib/_slitfunc_bd.cp311-win_amd64.pyd +0 -0
  28. pyreduce/clib/_slitfunc_bd.cp312-win_amd64.pyd +0 -0
  29. pyreduce/clib/_slitfunc_bd.cp313-win_amd64.pyd +0 -0
  30. pyreduce/clib/_slitfunc_bd.cp314-win_amd64.pyd +0 -0
  31. pyreduce/clib/build_extract.py +75 -0
  32. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  33. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  34. pyreduce/clib/slit_func_bd.c +362 -0
  35. pyreduce/clib/slit_func_bd.h +17 -0
  36. pyreduce/clipnflip.py +147 -0
  37. pyreduce/combine_frames.py +861 -0
  38. pyreduce/configuration.py +191 -0
  39. pyreduce/continuum_normalization.py +329 -0
  40. pyreduce/cwrappers.py +404 -0
  41. pyreduce/datasets.py +238 -0
  42. pyreduce/echelle.py +413 -0
  43. pyreduce/estimate_background_scatter.py +130 -0
  44. pyreduce/extract.py +1362 -0
  45. pyreduce/extraction_width.py +77 -0
  46. pyreduce/instruments/__init__.py +0 -0
  47. pyreduce/instruments/aj.py +9 -0
  48. pyreduce/instruments/aj.yaml +51 -0
  49. pyreduce/instruments/andes.py +102 -0
  50. pyreduce/instruments/andes.yaml +72 -0
  51. pyreduce/instruments/common.py +711 -0
  52. pyreduce/instruments/common.yaml +57 -0
  53. pyreduce/instruments/crires_plus.py +103 -0
  54. pyreduce/instruments/crires_plus.yaml +101 -0
  55. pyreduce/instruments/filters.py +195 -0
  56. pyreduce/instruments/harpn.py +203 -0
  57. pyreduce/instruments/harpn.yaml +140 -0
  58. pyreduce/instruments/harps.py +312 -0
  59. pyreduce/instruments/harps.yaml +144 -0
  60. pyreduce/instruments/instrument_info.py +140 -0
  61. pyreduce/instruments/jwst_miri.py +29 -0
  62. pyreduce/instruments/jwst_miri.yaml +53 -0
  63. pyreduce/instruments/jwst_niriss.py +98 -0
  64. pyreduce/instruments/jwst_niriss.yaml +60 -0
  65. pyreduce/instruments/lick_apf.py +35 -0
  66. pyreduce/instruments/lick_apf.yaml +60 -0
  67. pyreduce/instruments/mcdonald.py +123 -0
  68. pyreduce/instruments/mcdonald.yaml +56 -0
  69. pyreduce/instruments/metis_ifu.py +45 -0
  70. pyreduce/instruments/metis_ifu.yaml +62 -0
  71. pyreduce/instruments/metis_lss.py +45 -0
  72. pyreduce/instruments/metis_lss.yaml +62 -0
  73. pyreduce/instruments/micado.py +45 -0
  74. pyreduce/instruments/micado.yaml +62 -0
  75. pyreduce/instruments/models.py +257 -0
  76. pyreduce/instruments/neid.py +156 -0
  77. pyreduce/instruments/neid.yaml +61 -0
  78. pyreduce/instruments/nirspec.py +215 -0
  79. pyreduce/instruments/nirspec.yaml +63 -0
  80. pyreduce/instruments/nte.py +42 -0
  81. pyreduce/instruments/nte.yaml +55 -0
  82. pyreduce/instruments/uves.py +46 -0
  83. pyreduce/instruments/uves.yaml +65 -0
  84. pyreduce/instruments/xshooter.py +39 -0
  85. pyreduce/instruments/xshooter.yaml +63 -0
  86. pyreduce/make_shear.py +607 -0
  87. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  88. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  89. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  90. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  91. pyreduce/masks/mask_elodie.fits.gz +0 -0
  92. pyreduce/masks/mask_feros3.fits.gz +0 -0
  93. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  94. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  95. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  96. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  97. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  98. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  99. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  100. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  101. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  102. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  103. pyreduce/masks/mask_nes.fits.gz +0 -0
  104. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  105. pyreduce/masks/mask_sarg.fits.gz +0 -0
  106. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  107. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  108. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  109. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  110. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  111. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  112. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  113. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  114. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  115. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  116. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  117. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  118. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  119. pyreduce/pipeline.py +619 -0
  120. pyreduce/rectify.py +138 -0
  121. pyreduce/reduce.py +2065 -0
  122. pyreduce/settings/settings_AJ.json +19 -0
  123. pyreduce/settings/settings_ANDES.json +89 -0
  124. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  125. pyreduce/settings/settings_HARPN.json +73 -0
  126. pyreduce/settings/settings_HARPS.json +69 -0
  127. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  128. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  129. pyreduce/settings/settings_LICK_APF.json +62 -0
  130. pyreduce/settings/settings_MCDONALD.json +58 -0
  131. pyreduce/settings/settings_METIS_IFU.json +77 -0
  132. pyreduce/settings/settings_METIS_LSS.json +77 -0
  133. pyreduce/settings/settings_MICADO.json +78 -0
  134. pyreduce/settings/settings_NEID.json +73 -0
  135. pyreduce/settings/settings_NIRSPEC.json +58 -0
  136. pyreduce/settings/settings_NTE.json +60 -0
  137. pyreduce/settings/settings_UVES.json +54 -0
  138. pyreduce/settings/settings_XSHOOTER.json +78 -0
  139. pyreduce/settings/settings_pyreduce.json +184 -0
  140. pyreduce/settings/settings_schema.json +850 -0
  141. pyreduce/tools/__init__.py +0 -0
  142. pyreduce/tools/combine.py +117 -0
  143. pyreduce/trace.py +979 -0
  144. pyreduce/util.py +1366 -0
  145. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  146. pyreduce/wavecal/atlas/thar.fits +4946 -13
  147. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  148. pyreduce/wavecal/atlas/une.fits +0 -0
  149. pyreduce/wavecal/convert.py +38 -0
  150. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  151. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  152. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  153. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  154. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  155. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  156. pyreduce/wavecal/harps_red_2D.npz +0 -0
  157. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  158. pyreduce/wavecal/mcdonald.npz +0 -0
  159. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  160. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  161. pyreduce/wavecal/nirspec_K2.npz +0 -0
  162. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  163. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  164. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  165. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  166. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  167. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  168. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  169. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  170. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  171. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  172. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  173. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  174. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  175. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  176. pyreduce/wavecal/xshooter_nir.npz +0 -0
  177. pyreduce/wavelength_calibration.py +1871 -0
  178. pyreduce_astro-0.7a4.dist-info/METADATA +106 -0
  179. pyreduce_astro-0.7a4.dist-info/RECORD +182 -0
  180. pyreduce_astro-0.7a4.dist-info/WHEEL +4 -0
  181. pyreduce_astro-0.7a4.dist-info/entry_points.txt +2 -0
  182. pyreduce_astro-0.7a4.dist-info/licenses/LICENSE +674 -0
pyreduce/trace.py ADDED
@@ -0,0 +1,979 @@
+"""
+Find clusters of pixels with signal and fit polynomial traces.
+
+Note on terminology:
+- "trace": A single polynomial fit to a cluster of pixels (e.g., one fiber)
+- "spectral order": A group of traces at similar wavelengths (e.g., all fibers in one echelle order)
+
+The main function `trace` detects and fits individual traces.
+Use `merge_traces` and `group_and_refit` to organize traces into spectral orders.
+"""
+
+import logging
+from functools import cmp_to_key
+from itertools import combinations
+
+import matplotlib.pyplot as plt
+import numpy as np
+from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
+from numpy.polynomial.polynomial import Polynomial
+from scipy.ndimage import (
+    binary_closing,
+    binary_opening,
+    gaussian_filter1d,
+    label,
+    median_filter,
+    uniform_filter1d,
+)
+from scipy.signal import find_peaks, peak_widths
+from scipy.sparse import diags
+from scipy.sparse.linalg import spsolve
+
+from . import util
+
+logger = logging.getLogger(__name__)
+
+
+def whittaker_smooth(y, lam, axis=0):
+    """Whittaker smoother (optimal filter).
+
+    Solves: min sum((y - z)^2) + lam * sum((z[i] - z[i-1])^2)
+
+    Parameters
+    ----------
+    y : array
+        Input data (1D or 2D)
+    lam : float
+        Smoothing parameter (higher = smoother)
+    axis : int
+        Axis along which to smooth (for 2D arrays)
+
+    Returns
+    -------
+    z : array
+        Smoothed data
+    """
+    if y.ndim == 1:
+        n = len(y)
+        # Construct tridiagonal matrix: W + lam * D'D
+        # where D is the first-difference matrix
+        diag_main = np.ones(n) + 2 * lam
+        diag_main[0] = 1 + lam
+        diag_main[-1] = 1 + lam
+        diag_off = -lam * np.ones(n - 1)
+        A = diags([diag_off, diag_main, diag_off], [-1, 0, 1], format="csc")
+        return spsolve(A, y)
+    else:
+        # Apply along the specified axis
+        return np.apply_along_axis(lambda row: whittaker_smooth(row, lam), axis, y)
+
+
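The objective above is an identity-weighted penalized least squares problem, so the solution needs only one sparse tridiagonal solve. A minimal sketch of how the smoother behaves on synthetic data, assuming the wheel is installed (the signal and the `lam` choices are invented for illustration):

    import numpy as np
    from pyreduce.trace import whittaker_smooth

    rng = np.random.default_rng(0)
    xs = np.linspace(0, 4 * np.pi, 500)
    noisy = np.sin(xs) + rng.normal(0, 0.3, xs.size)

    lightly = whittaker_smooth(noisy, lam=10)      # follows the data closely
    heavily = whittaker_smooth(noisy, lam=10_000)  # much smoother curve

Larger `lam` weights the first-difference penalty more heavily, trading fidelity for smoothness.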
+def fit(x, y, deg, regularization=0):
+    # order = polyfit1d(y, x, deg, regularization)
+    if deg == "best":
+        order = best_fit(x, y)
+    else:
+        order = Polynomial.fit(y, x, deg=deg, domain=[]).coef[::-1]
+    return order
+
+
+def best_fit(x, y):
+    aic = np.inf
+    for k in range(5):
+        coeff_new = fit(x, y, k)
+        chisq = np.sum((np.polyval(coeff_new, y) - x) ** 2)
+        aic_new = 2 * k + chisq
+        if aic_new > aic:
+            break
+        else:
+            coeff = coeff_new
+            aic = aic_new
+    return coeff
+
+
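Note that `fit` swaps its arguments into `Polynomial.fit(y, x, ...)` and returns the coefficients reversed, so they evaluate with `np.polyval`. A small sketch (values are illustrative):

    import numpy as np
    from pyreduce.trace import fit

    y = np.arange(100.0)             # column positions
    x = 2.0 + 0.5 * y + 0.01 * y**2  # row positions along a trace
    coef = fit(x, y, deg=2)          # highest power first
    assert np.allclose(np.polyval(coef, y), x)

Passing `deg="best"` instead lets `best_fit` pick a degree between 0 and 4, stopping at the first degree whose AIC-style score (2k plus the chi-square) increases.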
+def determine_overlap_rating(xi, yi, xj, yj, mean_cluster_thickness, nrow, ncol, deg=2):
+    # i and j are the indices of the 2 clusters
+    i_left, i_right = yi.min(), yi.max()
+    j_left, j_right = yj.min(), yj.max()
+
+    # The number of pixels in the smaller cluster
+    # this limits the accuracy of the fit
+    n_min = min(i_right - i_left, j_right - j_left)
+
+    # Fit a polynomial to each cluster
+    order_i = fit(xi, yi, deg)
+    order_j = fit(xj, yj, deg)
+
+    # Get polynomial points inside cluster limits for each cluster and polynomial
+    y_ii = np.polyval(order_i, np.arange(i_left, i_right))
+    y_ij = np.polyval(order_i, np.arange(j_left, j_right))
+    y_jj = np.polyval(order_j, np.arange(j_left, j_right))
+    y_ji = np.polyval(order_j, np.arange(i_left, i_right))
+
+    # difference of polynomials within each cluster limit
+    diff_i = np.abs(y_ii - y_ji)
+    diff_j = np.abs(y_ij - y_jj)
+
+    ind_i = np.where((diff_i < mean_cluster_thickness) & (y_ji >= 0) & (y_ji < nrow))
+    ind_j = np.where((diff_j < mean_cluster_thickness) & (y_ij >= 0) & (y_ij < nrow))
+
+    # TODO: There should probably be some kind of normalization that scales with the
+    # size of the cluster, or possibly only use the closest pixels to determine overlap,
+    # since the polynomial is badly constrained outside of the bounds.
+    overlap = min(n_min, len(ind_i[0])) + min(n_min, len(ind_j[0]))
+    # overlap = overlap / ((i_right - i_left) + (j_right - j_left))
+    overlap /= 2 * n_min
+    # Penalize the rating by the size of the column gap between the clusters
+    if i_right < j_left:
+        overlap *= 1 - (j_left - i_right) / ncol
+    elif j_right < i_left:
+        overlap *= 1 - (i_left - j_right) / ncol
+
+    overlap_region = [-1, -1]
+    if len(ind_i[0]) > 0:
+        overlap_region[0] = np.min(ind_i[0]) + i_left
+    if len(ind_j[0]) > 0:
+        overlap_region[1] = np.max(ind_j[0]) + j_left
+
+    return overlap, overlap_region
+
+
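A sketch of the rating on two synthetic clusters that lie along the same curve (all shapes and values here are made up for illustration):

    import numpy as np
    from pyreduce.trace import determine_overlap_rating

    nrow, ncol = 200, 400
    yi = np.arange(50, 150)   # columns of cluster i
    yj = np.arange(180, 300)  # columns of cluster j
    xi = (100 + 0.001 * (yi - 200) ** 2).astype(int)  # rows, same parabola
    xj = (100 + 0.001 * (yj - 200) ** 2).astype(int)

    overlap, region = determine_overlap_rating(
        xi, yi, xj, yj, mean_cluster_thickness=5, nrow=nrow, ncol=ncol
    )
    # overlap near 1 means the two fits coincide where the clusters lie,
    # so they are good candidates for merging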
+def create_merge_array(x, y, mean_cluster_thickness, nrow, ncol, deg, threshold):
+    n_clusters = list(x.keys())
+    # upper bound on the number of pairs; unused all-zero rows are filtered below
+    nmax = len(n_clusters) ** 2
+    merge = np.zeros((nmax, 5))
+    for k, (i, j) in enumerate(combinations(n_clusters, 2)):
+        overlap, region = determine_overlap_rating(
+            x[i], y[i], x[j], y[j], mean_cluster_thickness, nrow, ncol, deg=deg
+        )
+        merge[k] = [i, j, overlap, *region]
+    merge = merge[merge[:, 2] > threshold]
+    merge = merge[np.argsort(merge[:, 2])[::-1]]
+    return merge
+
+
+def update_merge_array(
+    merge, x, y, j, mean_cluster_thickness, nrow, ncol, deg, threshold
+):
+    j = int(j)
+    n_clusters = np.array(list(x.keys()))
+    update = []
+    for i in n_clusters[n_clusters != j]:
+        overlap, region = determine_overlap_rating(
+            x[i], y[i], x[j], y[j], mean_cluster_thickness, nrow, ncol, deg=deg
+        )
+        if overlap <= threshold:
+            # no or little overlap
+            continue
+        update += [[i, j, overlap, *region]]
+    if len(update) == 0:
+        return merge
+    update = np.array(update)
+    merge = np.concatenate((merge, update))
+    merge = merge[np.argsort(merge[:, 2])[::-1]]
+    return merge
+
+
+def calculate_mean_cluster_thickness(x, y):
+    mean_cluster_thickness = 10  # Default thickness if no clusters found
+    cluster_thicknesses = []
+
+    for cluster in x.keys():
+        # Get all y-coordinates and corresponding x-coordinates for this cluster
+        y_coords = y[cluster]
+        x_coords = x[cluster]
+
+        # Find unique columns and precompute the x-coordinates for each column
+        unique_columns = np.unique(y_coords)
+        column_thicknesses = []
+
+        for col in unique_columns:
+            # Select x-coordinates that correspond to the current column
+            col_indices = y_coords == col
+            if np.any(col_indices):
+                x_in_col = x_coords[col_indices]
+                thickness = x_in_col.max() - x_in_col.min()
+                column_thicknesses.append(thickness)
+
+        # Average thickness per cluster, if any columns were processed
+        if column_thicknesses:
+            cluster_thicknesses.append(np.mean(column_thicknesses))
+
+    # 1.5 times the mean thickness across clusters (see the original version below)
+    if cluster_thicknesses:
+        mean_cluster_thickness = 1.5 * np.mean(cluster_thicknesses)
+
+    return mean_cluster_thickness
+
+
+# original version
+# def calculate_mean_cluster_thickness(x, y):
+#     # Calculate mean cluster thickness
+#     # TODO optimize
+#     n_clusters = list(x.keys())
+#     mean_cluster_thickness = 10
+#     for cluster in n_clusters:
+#         # individual columns of this cluster
+#         columns = np.unique(y[cluster])
+#         delta = 0
+#         for col in columns:
+#             # thickness of the cluster in each column
+#             tmp = x[cluster][y[cluster] == col]
+#             delta += np.max(tmp) - np.min(tmp)
+#         mean_cluster_thickness += delta / len(columns)
+
+#     mean_cluster_thickness *= 1.5 / len(n_clusters)
+#     return mean_cluster_thickness
+
+
+def delete(i, x, y, merge):
+    del x[i], y[i]
+    merge = merge[(merge[:, 0] != i) & (merge[:, 1] != i)]
+    return x, y, merge
+
+
+def combine(i, j, x, y, merge, mct, nrow, ncol, deg, threshold):
+    # Merge pixels
+    y[j] = np.concatenate((y[j], y[i]))
+    x[j] = np.concatenate((x[j], x[i]))
+    # Delete obsolete data
+    x, y, merge = delete(i, x, y, merge)
+    merge = merge[(merge[:, 0] != j) & (merge[:, 1] != j)]
+    # Update merge array
+    merge = update_merge_array(merge, x, y, j, mct, nrow, ncol, deg, threshold)
+    return x, y, merge
+
+
+def merge_clusters(
+    img,
+    x,
+    y,
+    n_clusters,
+    manual=True,
+    deg=2,
+    auto_merge_threshold=0.9,
+    merge_min_threshold=0.1,
+    plot_title=None,
+):
+    """Merge clusters that belong together
+
+    Parameters
+    ----------
+    img : array[nrow, ncol]
+        the image the order trace is based on
+    x : dict(int, array(int))
+        x coordinates of cluster points
+    y : dict(int, array(int))
+        y coordinates of cluster points
+    n_clusters : array(int)
+        cluster numbers
+    manual : bool, optional
+        if True ask before merging clusters (default: True)
+    deg : int, optional
+        polynomial degree used to rate cluster overlap (default: 2)
+    auto_merge_threshold : float, optional
+        overlap rating above which clusters are merged without asking (default: 0.9)
+    merge_min_threshold : float, optional
+        minimum overlap rating for a pair to be considered at all (default: 0.1)
+    plot_title : str, optional
+        title prefix for the interactive merge plots
+
+    Returns
+    -------
+    x : dict(int: array)
+        x coordinates of clusters, key=cluster id
+    y : dict(int: array)
+        y coordinates of clusters, key=cluster id
+    n_clusters : list(int)
+        ids of the remaining clusters
+    """
+
+    nrow, ncol = img.shape
+    mct = calculate_mean_cluster_thickness(x, y)
+
+    merge = create_merge_array(x, y, mct, nrow, ncol, deg, merge_min_threshold)
+
+    if manual:
+        plt.ion()
+
+    k = 0
+    while k < len(merge):
+        i, j, overlap, _, _ = merge[k]
+        i, j = int(i), int(j)
+
+        if overlap >= auto_merge_threshold and auto_merge_threshold != 1:
+            answer = "y"
+        elif manual:
+            title = f"Probability: {overlap}"
+            if plot_title is not None:
+                title = f"{plot_title}\n{title}"
+            plot_order(i, j, x, y, img, deg, title=title)
+            while True:
+                answer = input("Merge? [y/n/r/g]")
+                if answer in ("y", "n", "r", "g"):
+                    break
+        else:
+            answer = "n"
+
+        if answer == "y":
+            # just merge automatically
+            logger.info("Merging orders %i and %i", i, j)
+            x, y, merge = combine(
+                i, j, x, y, merge, mct, nrow, ncol, deg, merge_min_threshold
+            )
+        elif answer == "n":
+            k += 1
+        elif answer == "r":
+            x, y, merge = delete(i, x, y, merge)
+        elif answer == "g":
+            x, y, merge = delete(j, x, y, merge)
+
+    if manual:
+        plt.close()
+        plt.ioff()
+
+    n_clusters = list(x.keys())
+    return x, y, n_clusters
+
+
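With `manual=False` only pairs rated at or above `auto_merge_threshold` are merged; everything else is left alone, with no plots or prompts. A self-contained sketch with two halves of one synthetic trace (the data and thresholds are fabricated for illustration):

    import numpy as np
    from pyreduce.trace import merge_clusters

    # two halves of the same trace, 3 pixels thick, split by a column gap
    img = np.zeros((100, 400))
    rows = np.array([49, 50, 51])
    y_left, y_right = np.arange(0, 180), np.arange(220, 400)
    x = {0: np.repeat(rows, y_left.size), 1: np.repeat(rows, y_right.size)}
    y = {0: np.tile(y_left, 3), 1: np.tile(y_right, 3)}

    x, y, n = merge_clusters(
        img, x, y, [0, 1],
        manual=False,              # no prompts, no plots
        auto_merge_threshold=0.8,  # merge pairs rated above this
    )
    # n now holds a single cluster id: the two halves were joined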
+def fit_polynomials_to_clusters(x, y, clusters, degree, regularization=0):
+    """Fit a polynomial of degree `degree` to the points x, y of each cluster
+
+    Parameters
+    ----------
+    x : dict(int: array)
+        x coordinates separated by cluster
+    y : dict(int: array)
+        y coordinates separated by cluster
+    clusters : list(int)
+        cluster labels, equivalent to x.keys() or y.keys()
+    degree : int
+        degree of polynomial fit
+
+    Returns
+    -------
+    orders : dict(int, array[degree+1])
+        coefficients of polynomial fit for each cluster
+    """
+
+    orders = {c: fit(x[c], y[c], degree, regularization) for c in clusters}
+    return orders
+
+
+def plot_orders(im, x, y, clusters, orders, order_range, title=None):
+    """Plot orders and image"""
+
+    cluster_img = np.zeros(im.shape, dtype=im.dtype)
+    for c in clusters:
+        cluster_img[x[c], y[c]] = c + 1
+    cluster_img = np.ma.masked_array(cluster_img, mask=cluster_img == 0)
+
+    plt.subplot(121)
+    # Handle non-finite values for plotting
+    plot_im = np.where(np.isfinite(im), im, np.nan)
+    valid = np.isfinite(plot_im)
+    if np.any(valid):
+        bot, top = np.percentile(plot_im[valid], (1, 99))
+        if bot >= top:
+            bot, top = None, None
+    else:
+        bot, top = None, None
+    plt.imshow(plot_im, origin="lower", vmin=bot, vmax=top)
+    plt.title("Input Image + Order polynomials")
+    plt.xlabel("x [pixel]")
+    plt.ylabel("y [pixel]")
+    plt.ylim([0, im.shape[0]])
+
+    if orders is not None:
+        for i, order in enumerate(orders):
+            x = np.arange(*order_range[i], 1)
+            y = np.polyval(order, x)
+            plt.plot(x, y)
+
+    plt.subplot(122)
+    plt.imshow(cluster_img, cmap=plt.get_cmap("tab20"), origin="upper")
+    plt.title("Detected Clusters + Order Polynomials")
+    plt.xlabel("x [pixel]")
+    plt.ylabel("y [pixel]")
+
+    if orders is not None:
+        for i, order in enumerate(orders):
+            x = np.arange(*order_range[i], 1)
+            y = np.polyval(order, x)
+            plt.plot(x, y)
+
+    plt.ylim([0, im.shape[0]])
+    if title is not None:
+        plt.suptitle(title)
+    util.show_or_save("orders_trace")
+
+
+def plot_order(i, j, x, y, img, deg, title=""):
+    """Plot a single order"""
+    _, ncol = img.shape
+
+    order_i = fit(x[i], y[i], deg)
+    order_j = fit(x[j], y[j], deg)
+
+    xp = np.arange(ncol)
+    yi = np.polyval(order_i, xp)
+    yj = np.polyval(order_j, xp)
+
+    xmin = min(np.min(x[i]), np.min(x[j])) - 50
+    xmax = max(np.max(x[i]), np.max(x[j])) + 50
+    ymin = min(np.min(y[i]), np.min(y[j])) - 50
+    ymax = max(np.max(y[i]), np.max(y[j])) + 50
+
+    # x holds row indices, y holds column indices
+    xxmin = min(max(0, xmin), img.shape[0] - 2)
+    xxmax = min(xmax, img.shape[0] - 1)
+    yymin = min(max(0, ymin), img.shape[1] - 2)
+    yymax = min(ymax, img.shape[1] - 1)
+
+    vmin, vmax = np.percentile(img[xxmin:xxmax, yymin:yymax], (5, 95))
+
+    plt.clf()
+    plt.title(title)
+    plt.imshow(img, vmin=vmin, vmax=vmax)
+    plt.plot(xp, yi, "r")
+    plt.plot(xp, yj, "g")
+    plt.plot(y[i], x[i], "r.")
+    plt.plot(y[j], x[j], "g.")
+    plt.xlim([ymin, ymax])
+    plt.ylim([xmin, xmax])
+    util.show_or_save(f"orders_single_{i}_{j}")
+
+
+def trace(
+    im,
+    min_cluster=None,
+    min_width=None,
+    filter_x=0,
+    filter_y=None,
+    filter_type="boxcar",
+    noise=None,
+    degree=4,
+    border_width=None,
+    degree_before_merge=2,
+    regularization=0,
+    closing_shape=(5, 5),
+    opening_shape=(2, 2),
+    plot=False,
+    plot_title=None,
+    manual=True,
+    auto_merge_threshold=0.9,
+    merge_min_threshold=0.1,
+    sigma=0,
+    debug_dir=None,
+):
+    """Identify and trace orders
+
+    Parameters
+    ----------
+    im : array[nrow, ncol]
+        order definition image
+    min_cluster : int, optional
+        minimum cluster size in pixels (default: ncol // 4)
+    min_width : int or float, optional
+        minimum width of a trace in columns; a float is interpreted as a
+        fraction of the image height im.shape[0] (default: 0.25)
+    filter_x : int, optional
+        Smoothing width along x-axis/dispersion direction (default: 0, no smoothing).
+        Useful for noisy data or thin fiber traces.
+    filter_y : int, optional
+        Smoothing width along y-axis/cross-dispersion direction (default: auto).
+        Used to estimate local background. For thin closely-spaced traces, use small values.
+    filter_type : str, optional
+        Type of smoothing filter: "boxcar" (default), "gaussian", or "whittaker".
+        Boxcar is a uniform moving average. Whittaker preserves edges better.
+    noise : float, optional
+        noise to filter out (default: estimated from the background)
+    degree : int, optional
+        polynomial degree of the order fit (default: 4)
+    border_width : int, optional
+        number of pixels at the borders of the image to ignore for order tracing
+        (default: estimated from the central column)
+    plot : bool, optional
+        whether to plot the final order fits (default: False)
+    manual : bool, optional
+        whether to manually select clusters to merge (strongly recommended) (default: True)
+    debug_dir : str, optional
+        if set, write intermediate images (filtered, background, mask) to this directory
+
+    Returns
+    -------
+    orders : array[nord, degree+1]
+        order tracing coefficients (in numpy order, i.e. largest exponent first)
+    column_range : array[nord, 2]
+        first and last(+1) column covered by each traced order
+    """
+
+    # Convert to signed integer, to avoid underflow problems
+    im = np.asanyarray(im)
+    im = im.astype(int)
+
+    if filter_y is None:
+        col = im[:, im.shape[1] // 2]
+        col = median_filter(col, 5)
+        threshold = np.percentile(col, 90)
+        npeaks = find_peaks(col, height=threshold)[0].size
+        filter_y = im.shape[0] // (npeaks * 2)
+        logger.info("Median filter size (y), estimated: %i", filter_y)
+    elif filter_y <= 0:
+        raise ValueError(f"Expected filter_y > 0, but got {filter_y}")
+
+    if border_width is None:
+        # find width of orders, based on the central column
+        col = im[:, im.shape[1] // 2]
+        col = median_filter(col, 5)
+        idx = np.argmax(col)
+        width = peak_widths(col, [idx])[0][0]
+        border_width = int(np.ceil(width))
+        logger.info("Image border width, estimated: %i", border_width)
+    elif border_width < 0:
+        raise ValueError(f"Expected border width >= 0, but got {border_width}")
+
+    if min_cluster is None:
+        min_cluster = im.shape[1] // 4
+        logger.info("Minimum cluster size, estimated: %i", min_cluster)
+    elif not np.isscalar(min_cluster):
+        raise TypeError(f"Expected scalar minimum cluster size, but got {min_cluster}")
+
+    if min_width is None:
+        min_width = 0.25
+    if min_width == 0:
+        pass
+    elif isinstance(min_width, (float, np.floating)):
+        min_width = int(min_width * im.shape[0])
+        logger.info("Minimum trace width: %i", min_width)
+
+    # Validate filter_type
+    valid_filters = ("boxcar", "gaussian", "whittaker")
+    if filter_type not in valid_filters:
+        raise ValueError(
+            f"filter_type must be one of {valid_filters}, got {filter_type}"
+        )
+
+    # Prepare image for thresholding
+    # Convert masked values to NaN, interpolate, then back to regular ndarray
+    if np.ma.is_masked(im):
+        im_clean = np.ma.filled(im.astype(float), fill_value=np.nan)
+        kernel = Gaussian2DKernel(x_stddev=1.5, y_stddev=2.5)
+        im_clean = np.asarray(interpolate_replace_nans(im_clean, kernel))
+        im_clean = np.nan_to_num(im_clean, nan=0.0)
+    else:
+        im_clean = np.asarray(im, dtype=float)
+
+    # Select filter function based on filter_type
+    if filter_type == "boxcar":
+
+        def smooth(data, size, axis):
+            return uniform_filter1d(data, int(size), axis=axis, mode="nearest")
+
+    elif filter_type == "gaussian":
+
+        def smooth(data, size, axis):
+            return gaussian_filter1d(data, size, axis=axis)
+
+    else:  # whittaker
+
+        def smooth(data, size, axis):
+            return whittaker_smooth(data, size, axis=axis)
+
+    # Optionally smooth along x (dispersion) to reduce noise
+    # Applied to both signal and background so we detect y-structure only
+    if filter_x > 0:
+        im_clean = smooth(im_clean, filter_x, axis=1)
+
+    # Estimate local background by smoothing along y (cross-dispersion)
+    background = smooth(im_clean, filter_y, axis=0)
+
+    if noise is None:
+        tmp = np.abs(background.flatten())
+        noise = np.percentile(tmp, 5)
+        logger.info("Background noise, estimated: %f", noise)
+    elif not np.isscalar(noise):
+        raise TypeError(f"Expected scalar noise level, but got {noise}")
+
+    # Threshold: pixels above local background are signal
+    mask = im_clean > background + noise
+    mask_initial = mask.copy()
+    # remove borders
+    if border_width != 0:
+        mask[:border_width, :] = mask[-border_width:, :] = False
+        mask[:, :border_width] = mask[:, -border_width:] = False
+    # remove masked areas with no clusters
+    mask = np.ma.filled(mask, fill_value=False)
+    # close gaps in between clusters
+    struct = np.full(closing_shape, 1)
+    mask = binary_closing(mask, struct, border_value=1)
+    # remove small lonely clusters
+    struct = np.full(opening_shape, 1)
+    # struct = generate_binary_structure(2, 1)
+    mask = binary_opening(mask, struct)
+
+    # Write debug output if requested
+    if debug_dir is not None:
+        import os
+
+        from astropy.io import fits
+
+        os.makedirs(debug_dir, exist_ok=True)
+        fits.writeto(
+            os.path.join(debug_dir, "trace_filtered.fits"),
+            im_clean.astype(np.float32),
+            overwrite=True,
+        )
+        fits.writeto(
+            os.path.join(debug_dir, "trace_background.fits"),
+            background.astype(np.float32),
+            overwrite=True,
+        )
+        fits.writeto(
+            os.path.join(debug_dir, "trace_mask_initial.fits"),
+            mask_initial.astype(np.uint8),
+            overwrite=True,
+        )
+        fits.writeto(
+            os.path.join(debug_dir, "trace_mask_final.fits"),
+            mask.astype(np.uint8),
+            overwrite=True,
+        )
+        logger.info("Wrote debug images to %s", debug_dir)
+
+    # label clusters
+    clusters, _ = label(mask)
+
+    # remove small clusters
+    sizes = np.bincount(clusters.ravel())
+    mask_sizes = sizes > min_cluster
+    mask_sizes[0] = True  # This is the background, which we don't need to remove
+    clusters[~mask_sizes[clusters]] = 0
+
+    # Reorganize x, y, clusters into a more convenient "pythonic" format:
+    # x, y become dictionaries, with an entry for each order
+    # n is just a list of all orders (ignore cluster == 0)
+    n = np.unique(clusters)
+    n = n[n != 0]
+    x = {i: np.where(clusters == c)[0] for i, c in enumerate(n)}
+    y = {i: np.where(clusters == c)[1] for i, c in enumerate(n)}
+
+    def best_fit_degree(x, y):
+        L1 = np.sum((np.polyval(np.polyfit(y, x, 1), y) - x) ** 2)
+        L2 = np.sum((np.polyval(np.polyfit(y, x, 2), y) - x) ** 2)
+
+        # aic1 = 2 + 2 * np.log(L1) + 4 / (x.size - 2)
+        # aic2 = 4 + 2 * np.log(L2) + 12 / (x.size - 3)
+
+        if L1 < L2:
+            return 1
+        else:
+            return 2
+
+    if sigma > 0:
+        cluster_degrees = {i: best_fit_degree(x[i], y[i]) for i in x.keys()}
+        bias = {i: np.polyfit(y[i], x[i], deg=cluster_degrees[i])[-1] for i in x.keys()}
+        n = list(x.keys())
+        yt = np.concatenate([y[i] for i in n])
+        xt = np.concatenate([x[i] - bias[i] for i in n])
+        coef = np.polyfit(yt, xt, deg=degree_before_merge)
+
+        res = np.polyval(coef, yt)
+        cutoff = sigma * (res - xt).std()
+
+        # DEBUG plot
+        # uy = np.unique(yt)
+        # mask = np.abs(res - xt) > cutoff
+        # plt.plot(yt, xt, ".")
+        # plt.plot(yt[mask], xt[mask], "r.")
+        # plt.plot(uy, np.polyval(coef, uy))
+        # plt.show()
+
+        m = {
+            i: np.abs(np.polyval(coef, y[i]) - (x[i] - bias[i])) < cutoff
+            for i in x.keys()
+        }
+
+        k = max(x.keys()) + 1
+        for i in list(x.keys()):
+            new_img = np.zeros(im.shape, dtype=int)
+            new_img[x[i][~m[i]], y[i][~m[i]]] = 1
+            clusters, _ = label(new_img)
+
+            x[i] = x[i][m[i]]
+            y[i] = y[i][m[i]]
+            if len(x[i]) == 0:
+                del x[i], y[i]
+
+            nnew = np.max(clusters)
+            if nnew != 0:
+                xidx, yidx = np.indices(im.shape)
+                for j in range(1, nnew + 1):
+                    xn = xidx[clusters == j]
+                    yn = yidx[clusters == j]
+                    if xn.size >= min_cluster:
+                        x[k] = xn
+                        y[k] = yn
+                        k += 1
+        # plt.imshow(clusters, origin="lower")
+        # plt.show()
+
+    if plot:  # pragma: no cover
+        title = "Identified clusters"
+        if plot_title is not None:
+            title = f"{plot_title}\n{title}"
+        plt.title(title)
+        plt.xlabel("x [pixel]")
+        plt.ylabel("y [pixel]")
+        clusters = np.ma.zeros(im.shape, dtype=int)
+        for i in x.keys():
+            clusters[x[i], y[i]] = i + 1
+        clusters[clusters == 0] = np.ma.masked
+
+        plt.imshow(clusters, origin="lower", cmap="prism")
+        util.show_or_save("orders_clusters")
+
+    # Merge clusters, if there are even any possible mergers left
+    x, y, n = merge_clusters(
+        im,
+        x,
+        y,
+        n,
+        manual=manual,
+        deg=degree_before_merge,
+        auto_merge_threshold=auto_merge_threshold,
+        merge_min_threshold=merge_min_threshold,
+        plot_title=plot_title,
+    )
+
+    if min_width > 0:
+        sizes = {k: v.max() - v.min() for k, v in y.items()}
+        mask_sizes = {k: v > min_width for k, v in sizes.items()}
+        for k, v in mask_sizes.items():
+            if not v:
+                del x[k]
+                del y[k]
+        n = x.keys()
+
+    orders = fit_polynomials_to_clusters(x, y, n, degree)
+
+    # sort orders from bottom to top, using relative position
+
+    def compare(i, j):
+        _, xi, i_left, i_right = i
+        _, xj, j_left, j_right = j
+
+        if i_right < j_left or j_right < i_left:
+            return xi.mean() - xj.mean()
+
+        left = max(i_left, j_left)
+        right = min(i_right, j_right)
+
+        return xi[left:right].mean() - xj[left:right].mean()
+
+    xp = np.arange(im.shape[1])
+    keys = [(c, np.polyval(orders[c], xp), y[c].min(), y[c].max()) for c in x.keys()]
+    keys = sorted(keys, key=cmp_to_key(compare))
+    key = [k[0] for k in keys]
+
+    n = np.arange(len(n), dtype=int)
+    x = {c: x[key[c]] for c in n}
+    y = {c: y[key[c]] for c in n}
+    orders = np.array([orders[key[c]] for c in n])
+
+    column_range = np.array([[np.min(y[i]), np.max(y[i]) + 1] for i in n])
+
+    if plot:  # pragma: no cover
+        plot_orders(im, x, y, n, orders, column_range, title=plot_title)
+
+    return orders, column_range
+
+
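End to end, the function can be driven non-interactively. A minimal sketch on a synthetic flat field (every number below is invented for the example):

    import numpy as np
    from pyreduce.trace import trace

    # three bright, slightly curved traces, 3 pixels thick
    nrow, ncol = 200, 400
    img = np.zeros((nrow, ncol))
    cols = np.arange(ncol)
    for center in (50, 100, 150):
        rows = (center + 5e-5 * (cols - ncol / 2) ** 2).astype(int)
        for dr in (-1, 0, 1):
            img[rows + dr, cols] = 1000.0

    orders, column_range = trace(
        img,
        min_cluster=200,  # ignore tiny clusters
        filter_y=20,      # cross-dispersion smoothing width
        noise=10,         # threshold above the local background
        manual=False,     # no interactive merging
    )
    # orders: (n_traces, degree + 1) polyval coefficients, bottom to top
    # column_range: (n_traces, 2) first and last(+1) usable column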
+def merge_traces(
+    traces_a,
+    column_range_a,
+    traces_b,
+    column_range_b,
+    order_centers=None,
+    order_numbers=None,
+    ncols=None,
+):
+    """
+    Merge two sets of traces from different illumination patterns.
+
+    Traces are assigned to spectral orders based on their y-position at x=ncols/2
+    compared to order_centers. Within each order, traces are sorted by y-position
+    and assigned fiber IDs.
+
+    Parameters
+    ----------
+    traces_a : array (n_traces_a, degree+1)
+        Polynomial coefficients from first illumination set (even fibers)
+    column_range_a : array (n_traces_a, 2)
+        Column ranges for first set
+    traces_b : array (n_traces_b, degree+1)
+        Polynomial coefficients from second illumination set (odd fibers)
+    column_range_b : array (n_traces_b, 2)
+        Column ranges for second set
+    order_centers : array-like, optional
+        Expected y-positions of order centers at x=ncols/2
+    order_numbers : array-like, optional
+        Actual order numbers corresponding to each center. If None, uses 0-based indices.
+    ncols : int, optional
+        Number of columns in the image (for center calculation)
+
+    Returns
+    -------
+    traces_by_order : dict
+        {order_num: array (n_fibers, degree+1)} traces per order
+    column_range_by_order : dict
+        {order_num: array (n_fibers, 2)} column ranges per order
+    fiber_ids_by_order : dict
+        {order_num: array (n_fibers,)} fiber indices per order (0-74)
+    """
+    if len(traces_a) == 0 and len(traces_b) == 0:
+        return {}, {}, {}
+
+    # Combine all traces
+    if len(traces_a) > 0 and len(traces_b) > 0:
+        traces = np.vstack([traces_a, traces_b])
+        column_range = np.vstack([column_range_a, column_range_b])
+        is_even = np.concatenate(
+            [np.ones(len(traces_a), dtype=bool), np.zeros(len(traces_b), dtype=bool)]
+        )
+    elif len(traces_a) > 0:
+        traces = traces_a
+        column_range = column_range_a
+        is_even = np.ones(len(traces_a), dtype=bool)
+    else:
+        traces = traces_b
+        column_range = column_range_b
+        is_even = np.zeros(len(traces_b), dtype=bool)
+
+    # Evaluate y-position at center column
+    if ncols is None:
+        ncols = int(np.max(column_range[:, 1]))
+    x_center = ncols // 2
+    y_positions = np.array([np.polyval(t, x_center) for t in traces])
+
+    # Assign each trace to nearest order center
+    if order_centers is None:
+        # No order centers - put all in order 0
+        order_ids = np.zeros(len(traces), dtype=int)
+    else:
+        order_centers = np.array(order_centers)
+        center_indices = np.array(
+            [np.argmin(np.abs(order_centers - y)) for y in y_positions]
+        )
+        if order_numbers is not None:
+            order_numbers = np.array(order_numbers)
+            order_ids = order_numbers[center_indices]
+        else:
+            order_ids = center_indices
+
+    # Group by order, sort by y within each order, assign fiber IDs
+    traces_by_order = {}
+    column_range_by_order = {}
+    fiber_ids_by_order = {}
+
+    for order_idx in np.unique(order_ids):
+        mask = order_ids == order_idx
+        order_traces = traces[mask]
+        order_cr = column_range[mask]
+        order_y = y_positions[mask]
+        order_is_even = is_even[mask]
+
+        # Sort by y-position within this order
+        sort_idx = np.argsort(order_y)
+        order_traces = order_traces[sort_idx]
+        order_cr = order_cr[sort_idx]
+        order_is_even = order_is_even[sort_idx]
+
+        # Assign fiber IDs: even fibers get 1, 3, 5, ... odd get 2, 4, 6, ...
+        fiber_ids = np.zeros(len(order_traces), dtype=int)
+        even_count = 0
+        odd_count = 0
+        for i, is_e in enumerate(order_is_even):
+            if is_e:
+                fiber_ids[i] = even_count * 2 + 1
+                even_count += 1
+            else:
+                fiber_ids[i] = odd_count * 2 + 2
+                odd_count += 1
+
+        traces_by_order[order_idx] = order_traces
+        column_range_by_order[order_idx] = order_cr
+        fiber_ids_by_order[order_idx] = fiber_ids
+
+    return traces_by_order, column_range_by_order, fiber_ids_by_order
+
+
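A sketch with two fibers per order from the even set and one from the odd set (coefficients are linear `[slope, intercept]` in `np.polyval` order; all values invented):

    import numpy as np
    from pyreduce.trace import merge_traces

    traces_a = np.array([[0.0, 100.0], [0.0, 104.0],   # order near y=100
                         [0.0, 200.0], [0.0, 204.0]])  # order near y=200
    traces_b = np.array([[0.0, 102.0], [0.0, 202.0]])
    cr_a = np.array([[0, 400]] * 4)
    cr_b = np.array([[0, 400]] * 2)

    by_order, cr_by_order, fibers = merge_traces(
        traces_a, cr_a, traces_b, cr_b,
        order_centers=[100, 200],
        order_numbers=[17, 18],  # real echelle order numbers
        ncols=400,
    )
    # by_order[17] holds three traces sorted by y; fibers[17] -> [1, 2, 3]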
+def group_and_refit(
+    traces_by_order, column_range_by_order, fiber_ids_by_order, groups, degree=4
+):
+    """
+    Group physical fiber traces into logical fibers and refit polynomials.
+
+    For each spectral order and each fiber group, evaluates all member
+    polynomials at each column, averages the y-positions, and fits a new
+    polynomial.
+
+    Parameters
+    ----------
+    traces_by_order : dict
+        {order_idx: array (n_fibers, degree+1)} traces per order
+    column_range_by_order : dict
+        {order_idx: array (n_fibers, 2)} column ranges per order
+    fiber_ids_by_order : dict
+        {order_idx: array (n_fibers,)} fiber IDs per order (0-74)
+    groups : dict
+        Mapping of group name to fiber index range, e.g.:
+        {'A': (0, 36), 'cal': (36, 38), 'B': (38, 75)}
+    degree : int
+        Polynomial degree for refitted traces
+
+    Returns
+    -------
+    logical_traces : dict
+        {group_name: array (n_orders, degree+1)} polynomials per group
+    logical_column_range : array (n_orders, 2)
+        Column range per order
+    fiber_counts : dict
+        {group_name: dict {order_idx: int}} fiber counts per order
+    """
+    order_indices = sorted(traces_by_order.keys())
+
+    logical_traces = {name: [] for name in groups.keys()}
+    logical_column_range = []
+    fiber_counts = {name: {} for name in groups.keys()}
+
+    for order_idx in order_indices:
+        traces = traces_by_order[order_idx]
+        column_range = column_range_by_order[order_idx]
+        fiber_ids = fiber_ids_by_order[order_idx]
+
+        # Find shared column range for this order
+        col_min = np.max(column_range[:, 0])
+        col_max = np.min(column_range[:, 1])
+        x_eval = np.arange(col_min, col_max)
+        logical_column_range.append([col_min, col_max])
+
+        for group_name, (start, end) in groups.items():
+            # Find traces belonging to this group
+            mask = (fiber_ids >= start) & (fiber_ids < end)
+            group_traces = traces[mask]
+
+            if len(group_traces) == 0:
+                logger.warning(
+                    "No traces for group %s in order %d", group_name, order_idx
+                )
+                # Use NaN coefficients for missing groups
+                logical_traces[group_name].append(np.full(degree + 1, np.nan))
+                fiber_counts[group_name][order_idx] = 0
+                continue
+
+            # Evaluate all traces at each column and average
+            y_values = np.array([np.polyval(t, x_eval) for t in group_traces])
+            y_mean = np.mean(y_values, axis=0)
+
+            # Fit new polynomial to averaged positions
+            poly = Polynomial.fit(x_eval, y_mean, deg=degree, domain=[])
+            coeffs = poly.coef[::-1]  # Convert to np.polyval order
+
+            logical_traces[group_name].append(coeffs)
+            fiber_counts[group_name][order_idx] = len(group_traces)
+
+    # Convert lists to arrays
+    for name in groups.keys():
+        logical_traces[name] = np.array(logical_traces[name])
+
+    logical_column_range = np.array(logical_column_range)
+
+    return logical_traces, logical_column_range, fiber_counts
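Continuing the `merge_traces` sketch above, the three fibers in each order can be collapsed into two logical fibers: a science bundle "A" (fiber ids 1 and 2) and a calibration fiber "cal" (id 3). The group ranges are half-open and, like everything else here, invented for illustration:

    import numpy as np
    from pyreduce.trace import group_and_refit, merge_traces

    traces_a = np.array([[0.0, 100.0], [0.0, 104.0],
                         [0.0, 200.0], [0.0, 204.0]])
    traces_b = np.array([[0.0, 102.0], [0.0, 202.0]])
    cr = np.array([0, 400])

    by_order, cr_by_order, fibers = merge_traces(
        traces_a, np.tile(cr, (4, 1)), traces_b, np.tile(cr, (2, 1)),
        order_centers=[100, 200], ncols=400,
    )

    groups = {"A": (1, 3), "cal": (3, 4)}  # half-open fiber id ranges
    logical, cr_logical, counts = group_and_refit(
        by_order, cr_by_order, fibers, groups, degree=2
    )
    # logical["A"].shape == (2, 3): one averaged polynomial per order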