pyreduce-astro 0.6.0b5__cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (170) hide show
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +106 -0
  3. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.exp +0 -0
  4. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.lib +0 -0
  5. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.exp +0 -0
  6. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.lib +0 -0
  7. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.exp +0 -0
  8. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.lib +0 -0
  9. pyreduce/clib/Release/_slitfunc_2d.obj +0 -0
  10. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.exp +0 -0
  11. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.lib +0 -0
  12. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.exp +0 -0
  13. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.lib +0 -0
  14. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.exp +0 -0
  15. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.lib +0 -0
  16. pyreduce/clib/Release/_slitfunc_bd.obj +0 -0
  17. pyreduce/clib/__init__.py +0 -0
  18. pyreduce/clib/_slitfunc_2d.cp311-win_amd64.pyd +0 -0
  19. pyreduce/clib/_slitfunc_2d.cp312-win_amd64.pyd +0 -0
  20. pyreduce/clib/_slitfunc_2d.cp313-win_amd64.pyd +0 -0
  21. pyreduce/clib/_slitfunc_bd.cp311-win_amd64.pyd +0 -0
  22. pyreduce/clib/_slitfunc_bd.cp312-win_amd64.pyd +0 -0
  23. pyreduce/clib/_slitfunc_bd.cp313-win_amd64.pyd +0 -0
  24. pyreduce/clib/build_extract.py +75 -0
  25. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  26. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  27. pyreduce/clib/slit_func_bd.c +362 -0
  28. pyreduce/clib/slit_func_bd.h +17 -0
  29. pyreduce/clipnflip.py +147 -0
  30. pyreduce/combine_frames.py +855 -0
  31. pyreduce/configuration.py +186 -0
  32. pyreduce/continuum_normalization.py +329 -0
  33. pyreduce/cwrappers.py +404 -0
  34. pyreduce/datasets.py +231 -0
  35. pyreduce/echelle.py +413 -0
  36. pyreduce/estimate_background_scatter.py +129 -0
  37. pyreduce/extract.py +1361 -0
  38. pyreduce/extraction_width.py +77 -0
  39. pyreduce/instruments/__init__.py +0 -0
  40. pyreduce/instruments/andes.json +61 -0
  41. pyreduce/instruments/andes.py +102 -0
  42. pyreduce/instruments/common.json +46 -0
  43. pyreduce/instruments/common.py +675 -0
  44. pyreduce/instruments/crires_plus.json +63 -0
  45. pyreduce/instruments/crires_plus.py +103 -0
  46. pyreduce/instruments/filters.py +195 -0
  47. pyreduce/instruments/harpn.json +136 -0
  48. pyreduce/instruments/harpn.py +201 -0
  49. pyreduce/instruments/harps.json +155 -0
  50. pyreduce/instruments/harps.py +310 -0
  51. pyreduce/instruments/instrument_info.py +140 -0
  52. pyreduce/instruments/instrument_schema.json +221 -0
  53. pyreduce/instruments/jwst_miri.json +53 -0
  54. pyreduce/instruments/jwst_miri.py +29 -0
  55. pyreduce/instruments/jwst_niriss.json +52 -0
  56. pyreduce/instruments/jwst_niriss.py +98 -0
  57. pyreduce/instruments/lick_apf.json +53 -0
  58. pyreduce/instruments/lick_apf.py +35 -0
  59. pyreduce/instruments/mcdonald.json +59 -0
  60. pyreduce/instruments/mcdonald.py +123 -0
  61. pyreduce/instruments/metis_ifu.json +63 -0
  62. pyreduce/instruments/metis_ifu.py +45 -0
  63. pyreduce/instruments/metis_lss.json +65 -0
  64. pyreduce/instruments/metis_lss.py +45 -0
  65. pyreduce/instruments/micado.json +53 -0
  66. pyreduce/instruments/micado.py +45 -0
  67. pyreduce/instruments/neid.json +51 -0
  68. pyreduce/instruments/neid.py +154 -0
  69. pyreduce/instruments/nirspec.json +56 -0
  70. pyreduce/instruments/nirspec.py +215 -0
  71. pyreduce/instruments/nte.json +47 -0
  72. pyreduce/instruments/nte.py +42 -0
  73. pyreduce/instruments/uves.json +59 -0
  74. pyreduce/instruments/uves.py +46 -0
  75. pyreduce/instruments/xshooter.json +66 -0
  76. pyreduce/instruments/xshooter.py +39 -0
  77. pyreduce/make_shear.py +606 -0
  78. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  79. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  80. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  81. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  82. pyreduce/masks/mask_elodie.fits.gz +0 -0
  83. pyreduce/masks/mask_feros3.fits.gz +0 -0
  84. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  85. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  86. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  87. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  88. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  89. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  90. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  91. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  92. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  93. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  94. pyreduce/masks/mask_nes.fits.gz +0 -0
  95. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  96. pyreduce/masks/mask_sarg.fits.gz +0 -0
  97. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  98. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  99. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  100. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  101. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  102. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  103. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  104. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  105. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  106. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  107. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  108. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  109. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  110. pyreduce/rectify.py +138 -0
  111. pyreduce/reduce.py +2205 -0
  112. pyreduce/settings/settings_ANDES.json +89 -0
  113. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  114. pyreduce/settings/settings_HARPN.json +73 -0
  115. pyreduce/settings/settings_HARPS.json +69 -0
  116. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  117. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  118. pyreduce/settings/settings_LICK_APF.json +62 -0
  119. pyreduce/settings/settings_MCDONALD.json +58 -0
  120. pyreduce/settings/settings_METIS_IFU.json +77 -0
  121. pyreduce/settings/settings_METIS_LSS.json +77 -0
  122. pyreduce/settings/settings_MICADO.json +78 -0
  123. pyreduce/settings/settings_NEID.json +73 -0
  124. pyreduce/settings/settings_NIRSPEC.json +58 -0
  125. pyreduce/settings/settings_NTE.json +60 -0
  126. pyreduce/settings/settings_UVES.json +54 -0
  127. pyreduce/settings/settings_XSHOOTER.json +78 -0
  128. pyreduce/settings/settings_pyreduce.json +178 -0
  129. pyreduce/settings/settings_schema.json +827 -0
  130. pyreduce/tools/__init__.py +0 -0
  131. pyreduce/tools/combine.py +117 -0
  132. pyreduce/trace_orders.py +645 -0
  133. pyreduce/util.py +1288 -0
  134. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  135. pyreduce/wavecal/atlas/thar.fits +4946 -13
  136. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  137. pyreduce/wavecal/atlas/une.fits +0 -0
  138. pyreduce/wavecal/convert.py +38 -0
  139. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  140. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  141. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  142. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  143. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  144. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  145. pyreduce/wavecal/harps_red_2D.npz +0 -0
  146. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  147. pyreduce/wavecal/mcdonald.npz +0 -0
  148. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  149. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  150. pyreduce/wavecal/nirspec_K2.npz +0 -0
  151. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  152. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  153. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  154. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  155. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  156. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  157. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  158. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  159. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  160. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  161. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  162. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  163. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  164. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  165. pyreduce/wavecal/xshooter_nir.npz +0 -0
  166. pyreduce/wavelength_calibration.py +1873 -0
  167. pyreduce_astro-0.6.0b5.dist-info/METADATA +113 -0
  168. pyreduce_astro-0.6.0b5.dist-info/RECORD +170 -0
  169. pyreduce_astro-0.6.0b5.dist-info/WHEEL +4 -0
  170. pyreduce_astro-0.6.0b5.dist-info/licenses/LICENSE +674 -0
@@ -0,0 +1,645 @@
1
+ """
2
+ Find clusters of pixels with signal
3
+ And combine them into continuous orders
4
+ """
5
+
6
+ import logging
7
+ from functools import cmp_to_key
8
+ from itertools import combinations
9
+
10
+ import matplotlib.pyplot as plt
11
+ import numpy as np
12
+ from numpy.polynomial.polynomial import Polynomial
13
+ from scipy.ndimage import binary_closing, binary_opening, grey_closing, label
14
+ from scipy.ndimage.filters import gaussian_filter1d, median_filter
15
+ from scipy.signal import find_peaks, peak_widths
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
def fit(x, y, deg, regularization=0):
    """Fit a polynomial x = p(y) and return numpy-ordered coefficients.

    If ``deg`` is the string "best", the degree is chosen automatically
    via ``best_fit``; otherwise a fixed-degree least-squares fit is used.
    The ``regularization`` argument is accepted for interface
    compatibility but not used by the plain fit.
    """
    if deg == "best":
        return best_fit(x, y)
    # domain=[] keeps the raw (unscaled) coefficients; reversing yields
    # the highest-power-first order expected by np.polyval
    poly = Polynomial.fit(y, x, deg=deg, domain=[])
    return poly.coef[::-1]
27
+
28
+
29
def best_fit(x, y):
    """Pick the polynomial fit (degree 0..4) with the lowest AIC-like score.

    The score is 2*degree + chi-squared; the search stops at the first
    degree whose score is worse than the previous best.
    """
    best_coeff = None
    best_score = np.inf
    for degree in range(5):
        candidate = fit(x, y, degree)
        residual = np.polyval(candidate, y) - x
        score = 2 * degree + np.sum(residual**2)
        if score > best_score:
            # increasing the degree made things worse: stop searching
            break
        best_coeff, best_score = candidate, score
    return best_coeff
41
+
42
+
43
def determine_overlap_rating(xi, yi, xj, yj, mean_cluster_thickness, nrow, ncol, deg=2):
    """Estimate how likely two pixel clusters belong to the same order.

    Each cluster is fit with a polynomial; both fits are evaluated over both
    clusters' column ranges, and the overlap counts the columns where the
    two fits agree to within ``mean_cluster_thickness`` (and stay inside
    the image).

    Parameters
    ----------
    xi, yi : array(int)
        row / column coordinates of cluster i
    xj, yj : array(int)
        row / column coordinates of cluster j
    mean_cluster_thickness : float
        typical cluster thickness, used as the agreement tolerance
    nrow, ncol : int
        image dimensions
    deg : int, optional
        polynomial degree of the comparison fits (default: 2)

    Returns
    -------
    overlap : float
        merge likelihood, roughly in [0, 1]
    overlap_region : list(int)
        first / last overlapping column (-1 where there is none)
    """
    # i and j are the indices of the 2 clusters
    i_left, i_right = yi.min(), yi.max()
    j_left, j_right = yj.min(), yj.max()

    # The number of pixels in the smaller cluster
    # this limits the accuracy of the fit
    n_min = min(i_right - i_left, j_right - j_left)

    # Fit a polynomial to each cluster
    order_i = fit(xi, yi, deg)
    order_j = fit(xj, yj, deg)

    # Get polynomial points inside cluster limits for each cluster and polynomial
    y_ii = np.polyval(order_i, np.arange(i_left, i_right))
    y_ij = np.polyval(order_i, np.arange(j_left, j_right))
    y_jj = np.polyval(order_j, np.arange(j_left, j_right))
    y_ji = np.polyval(order_j, np.arange(i_left, i_right))

    # difference of polynomials within each cluster limit
    diff_i = np.abs(y_ii - y_ji)
    diff_j = np.abs(y_ij - y_jj)

    ind_i = np.where((diff_i < mean_cluster_thickness) & (y_ji >= 0) & (y_ji < nrow))
    ind_j = np.where((diff_j < mean_cluster_thickness) & (y_ij >= 0) & (y_ij < nrow))

    # TODO: There should probably be some kind of normaliztion, that scales with the size of the cluster?
    # or possibly only use the closest pixels to determine overlap, since the polynomial is badly constrained outside of the bounds.
    overlap = min(n_min, len(ind_i[0])) + min(n_min, len(ind_j[0]))
    # BUGFIX: a cluster confined to a single column has n_min == 0, which
    # previously raised ZeroDivisionError here; clamp the divisor to 1
    # (overlap is then 0, since both min() terms above are 0)
    overlap /= 2 * max(n_min, 1)
    # scale by the column gap between the clusters
    # NOTE(review): when the clusters do not overlap in columns, the
    # difference below is negative, so the factor is > 1 — confirm the
    # intended sign of this adjustment
    if i_right < j_left:
        overlap *= 1 - (i_right - j_left) / ncol
    elif j_right < i_left:
        overlap *= 1 - (j_right - i_left) / ncol

    overlap_region = [-1, -1]
    if len(ind_i[0]) > 0:
        overlap_region[0] = np.min(ind_i[0]) + i_left
    if len(ind_j[0]) > 0:
        overlap_region[1] = np.max(ind_j[0]) + j_left

    return overlap, overlap_region
86
+
87
+
88
def create_merge_array(x, y, mean_cluster_thickness, nrow, ncol, deg, threshold):
    """Rate every pair of clusters for merging.

    Returns an array of rows [i, j, overlap, region_start, region_end],
    restricted to pairs above ``threshold`` and sorted by descending
    overlap rating.
    """
    rows = []
    for i, j in combinations(list(x.keys()), 2):
        overlap, region = determine_overlap_rating(
            x[i], y[i], x[j], y[j], mean_cluster_thickness, nrow, ncol, deg=deg
        )
        rows.append([i, j, overlap, region[0], region[1]])
    # reshape keeps the (0, 5) shape even when there are no pairs at all
    merge = np.array(rows, dtype=float).reshape(-1, 5)
    merge = merge[merge[:, 2] > threshold]
    order = np.argsort(merge[:, 2])[::-1]
    return merge[order]
100
+
101
+
102
def update_merge_array(
    merge, x, y, j, mean_cluster_thickness, nrow, ncol, deg, threshold
):
    """Re-rate cluster j against all other clusters.

    New rows above ``threshold`` are appended to the merge table, which
    is then re-sorted by descending overlap rating.
    """
    j = int(j)
    fresh = []
    for i in x.keys():
        if i == j:
            continue
        overlap, region = determine_overlap_rating(
            x[i], y[i], x[j], y[j], mean_cluster_thickness, nrow, ncol, deg=deg
        )
        # keep only pairs with more than marginal overlap
        if overlap > threshold:
            fresh.append([i, j, overlap, *region])
    if not fresh:
        return merge
    merge = np.concatenate((merge, np.array(fresh)))
    return merge[np.argsort(merge[:, 2])[::-1]]
122
+
123
+
124
def calculate_mean_cluster_thickness(x, y):
    """Estimate a typical cluster thickness (rows spanned per column).

    For every cluster (skipping a background label 0, if present) the
    vertical extent max(x) - min(x) is averaged over its columns; the
    per-cluster averages are then reduced to one scale. Returns 10 as a
    fallback when no cluster yields a measurement.
    """
    per_cluster = []

    for label_ in x.keys():
        if label_ == 0:
            # background label, not a real cluster
            continue

        rows = x[label_]
        cols = y[label_]

        widths = []
        for column in np.unique(cols):
            selection = cols == column
            if np.any(selection):
                in_column = rows[selection]
                widths.append(in_column.max() - in_column.min())

        if widths:
            per_cluster.append(np.mean(widths))

    if not per_cluster:
        return 10  # default thickness when nothing could be measured

    # scale by 1.5 and divide by the number of clusters
    # (matches the established behaviour of this estimator)
    return 1.5 * np.mean(per_cluster) / len(per_cluster)
159
+
160
+
161
+ # original version
162
+ # def calculate_mean_cluster_thickness(x, y):
163
+ # # Calculate mean cluster thickness
164
+ # # TODO optimize
165
+ # n_clusters = list(x.keys())
166
+ # mean_cluster_thickness = 10
167
+ # for cluster in n_clusters:
168
+ # # individual columns of this cluster
169
+ # columns = np.unique(y[cluster])
170
+ # delta = 0
171
+ # for col in columns:
172
+ # # thickness of the cluster in each column
173
+ # tmp = x[cluster][y[cluster] == col]
174
+ # delta += np.max(tmp) - np.min(tmp)
175
+ # mean_cluster_thickness += delta / len(columns)
176
+
177
+ # mean_cluster_thickness *= 1.5 / len(n_clusters)
178
+ # return mean_cluster_thickness
179
+
180
+
181
def delete(i, x, y, merge):
    """Remove cluster i: drop its pixels and every merge-table row
    that references it."""
    x.pop(i)
    y.pop(i)
    keep = (merge[:, 0] != i) & (merge[:, 1] != i)
    return x, y, merge[keep]
185
+
186
+
187
def combine(i, j, x, y, merge, mct, nrow, ncol, deg, threshold):
    """Merge cluster i into cluster j and refresh the merge table."""
    # Absorb the pixels of i into j
    x[j] = np.concatenate((x[j], x[i]))
    y[j] = np.concatenate((y[j], y[i]))
    # Drop cluster i, then discard all rows that still mention i or j
    x, y, merge = delete(i, x, y, merge)
    keep = (merge[:, 0] != j) & (merge[:, 1] != j)
    # Re-rate the grown cluster j against everything else
    merge = update_merge_array(merge[keep], x, y, j, mct, nrow, ncol, deg, threshold)
    return x, y, merge
197
+
198
+
199
def merge_clusters(
    img,
    x,
    y,
    n_clusters,
    manual=True,
    deg=2,
    auto_merge_threshold=0.9,
    merge_min_threshold=0.1,
    plot_title=None,
):
    """Merge clusters that belong together

    Parameters
    ----------
    img : array[nrow, ncol]
        the image the order trace is based on
    x : dict(int, array(int))
        x coordinates of cluster points
    y : dict(int, array(int))
        y coordinates of cluster points
    n_clusters : array(int)
        cluster numbers
    manual : bool, optional
        if True ask before merging orders
    deg : int, optional
        polynomial degree used for the overlap comparison fits (default: 2)
    auto_merge_threshold : float, optional
        overlap rating above which a pair is merged without asking;
        set to 1 to disable auto-merging entirely (default: 0.9)
    merge_min_threshold : float, optional
        minimum overlap rating for a pair to be considered (default: 0.1)
    plot_title : str, optional
        prefix for the interactive plot titles

    Returns
    -------
    x : dict(int: array)
        x coordinates of clusters, key=cluster id
    y : dict(int: array)
        y coordinates of clusters, key=cluster id
    n_clusters : list(int)
        remaining cluster ids (keys of x/y)
    """

    nrow, ncol = img.shape
    # typical vertical cluster extent, used as the agreement tolerance
    mct = calculate_mean_cluster_thickness(x, y)

    # candidate pairs [i, j, overlap, region...], sorted by descending overlap
    merge = create_merge_array(x, y, mct, nrow, ncol, deg, merge_min_threshold)

    if manual:
        plt.ion()  # interactive mode so the figure updates while prompting

    k = 0
    while k < len(merge):
        i, j, overlap, _, _ = merge[k]
        i, j = int(i), int(j)

        if overlap >= auto_merge_threshold and auto_merge_threshold != 1:
            # rating is high enough to merge without asking
            answer = "y"
        elif manual:
            title = f"Probability: {overlap}"
            if plot_title is not None:
                title = f"{plot_title}\n{title}"
            plot_order(i, j, x, y, img, deg, title=title)
            while True:
                if manual:
                    answer = input("Merge? [y/n]")
                # NOTE(review): an empty reply also satisfies this membership
                # test ("" in "ynrg" is True); it then matches no action below
                # and the same pair is shown again
                if answer in "ynrg":
                    break
        else:
            # non-interactive and below the auto-merge threshold: keep both
            answer = "n"

        if answer == "y":
            # just merge automatically
            logger.info("Merging orders %i and %i", i, j)
            x, y, merge = combine(
                i, j, x, y, merge, mct, nrow, ncol, deg, merge_min_threshold
            )
        elif answer == "n":
            # keep both clusters, advance to the next candidate pair
            k += 1
        elif answer == "r":
            # discard cluster i (the red one in the plot)
            x, y, merge = delete(i, x, y, merge)
        elif answer == "g":
            # discard cluster j (the green one in the plot)
            x, y, merge = delete(j, x, y, merge)

    if manual:
        plt.close()
        plt.ioff()

    n_clusters = list(x.keys())
    return x, y, n_clusters
286
+
287
+
288
def fit_polynomials_to_clusters(x, y, clusters, degree, regularization=0):
    """Fit a polynomial of the given degree to every cluster.

    Parameters
    ----------
    x : dict(int: array)
        x coordinates separated by cluster
    y : dict(int: array)
        y coordinates separated by cluster
    clusters : list(int)
        cluster labels, equivalent to x.keys() or y.keys()
    degree : int
        degree of polynomial fit
    regularization : float, optional
        regularization strength forwarded to the fit (default: 0)

    Returns
    -------
    orders : dict(int, array[degree+1])
        coefficients of polynomial fit for each cluster
    """
    orders = {}
    for cluster in clusters:
        orders[cluster] = fit(x[cluster], y[cluster], degree, regularization)
    return orders
309
+
310
+
311
def plot_orders(im, x, y, clusters, orders, order_range, title=None):
    """Plot orders and image

    Left panel: the input image with the order polynomials drawn on top.
    Right panel: the detected cluster pixels (one color per cluster) with
    the same polynomials.

    Parameters
    ----------
    im : array[nrow, ncol]
        image the trace is based on
    x, y : dict(int: array)
        row / column coordinates of the cluster pixels
    clusters : iterable(int)
        cluster labels (keys into x and y)
    orders : array or None
        polynomial coefficients per order; skipped if None
    order_range : array[nord, 2]
        column range over which to evaluate each polynomial
    title : str, optional
        figure suptitle
    """

    # image with each cluster's pixels labelled c + 1 (0 = background, masked)
    cluster_img = np.zeros(im.shape, dtype=im.dtype)
    for c in clusters:
        cluster_img[x[c], y[c]] = c + 1
    cluster_img = np.ma.masked_array(cluster_img, mask=cluster_img == 0)

    plt.subplot(121)
    # robust contrast limits from the 1st/99th percentile
    bot, top = np.percentile(im, (1, 99))
    plt.imshow(im, origin="lower", vmin=bot, vmax=top)
    plt.title("Input Image + Order polynomials")
    plt.xlabel("x [pixel]")
    plt.ylabel("y [pixel]")
    plt.ylim([0, im.shape[0]])

    if orders is not None:
        for i, order in enumerate(orders):
            # evaluate each order over its own column range
            x = np.arange(*order_range[i], 1)
            y = np.polyval(order, x)
            plt.plot(x, y)

    plt.subplot(122)
    # NOTE(review): this panel uses origin="upper" while the left panel uses
    # origin="lower" — confirm the vertical flip is intended
    plt.imshow(cluster_img, cmap=plt.get_cmap("tab20"), origin="upper")
    plt.title("Detected Clusters + Order Polynomials")
    plt.xlabel("x [pixel]")
    plt.ylabel("y [pixel]")

    if orders is not None:
        for i, order in enumerate(orders):
            x = np.arange(*order_range[i], 1)
            y = np.polyval(order, x)
            plt.plot(x, y)

    plt.ylim([0, im.shape[0]])
    if title is not None:
        plt.suptitle(title)
    plt.show()
349
+
350
+
351
def plot_order(i, j, x, y, img, deg, title=""):
    """Plot a single pair of clusters with their polynomial fits.

    Cluster i is drawn in red and cluster j in green on top of the image,
    zoomed in on the region the two clusters cover. Used by
    merge_clusters for the interactive merge decision.
    """
    _, ncol = img.shape

    # polynomial fits x = p(y) for both clusters
    order_i = fit(x[i], y[i], deg)
    order_j = fit(x[j], y[j], deg)

    xp = np.arange(ncol)
    yi = np.polyval(order_i, xp)
    yj = np.polyval(order_j, xp)

    # bounding box of both clusters, padded by 50 pixels
    # (x holds row coordinates, y holds column coordinates)
    xmin = min(np.min(x[i]), np.min(x[j])) - 50
    xmax = max(np.max(x[i]), np.max(x[j])) + 50
    ymin = min(np.min(y[i]), np.min(y[j])) - 50
    ymax = max(np.max(y[i]), np.max(y[j])) + 50

    # clip the box to the image for the contrast window below
    # NOTE(review): the y* (column) values are clamped with shape[0] and used
    # as the row slice, while the x* (row) values are clamped with shape[1]
    # and used as the column slice — this looks swapped for non-square
    # images; confirm against the plotting convention used here
    yymin = min(max(0, ymin), img.shape[0] - 2)
    yymax = min(ymax, img.shape[0] - 1)
    xxmin = min(max(0, xmin), img.shape[1] - 2)
    xxmax = min(xmax, img.shape[1] - 1)

    # contrast limits from the zoom region only
    vmin, vmax = np.percentile(img[yymin:yymax, xxmin:xxmax], (5, 95))

    plt.clf()
    plt.title(title)
    plt.imshow(img, vmin=vmin, vmax=vmax)
    plt.plot(xp, yi, "r")
    plt.plot(xp, yj, "g")
    # raw cluster pixels: columns on the horizontal axis, rows vertical
    plt.plot(y[i], x[i], "r.")
    plt.plot(y[j], x[j], "g.")
    plt.xlim([ymin, ymax])
    plt.ylim([xmin, xmax])
    plt.show()
384
+
385
+
386
def mark_orders(
    im,
    min_cluster=None,
    min_width=None,
    filter_size=None,
    noise=None,
    opower=4,
    border_width=None,
    degree_before_merge=2,
    regularization=0,
    closing_shape=(5, 5),
    opening_shape=(2, 2),
    plot=False,
    plot_title=None,
    manual=True,
    auto_merge_threshold=0.9,
    merge_min_threshold=0.1,
    sigma=0,
):
    """Identify and trace orders

    Pipeline: threshold the image against a smoothed background, label
    connected pixel clusters, optionally reject outlier pixels (sigma),
    merge clusters that belong to the same order, fit polynomials, and
    sort the orders from bottom to top.

    Parameters
    ----------
    im : array[nrow, ncol]
        order definition image
    min_cluster : int, optional
        minimum cluster size in pixels (default: ncol // 4)
    min_width : int or float, optional
        minimum column span of an order; a float is interpreted as a
        fraction of the image height, 0 disables the cut (default: 0.25)
    filter_size : int, optional
        size of the running filter (estimated from the peak count if None)
    noise : float, optional
        noise to filter out (estimated from the blurred image if None)
    opower : int, optional
        polynomial degree of the order fit (default: 4)
    border_width : int, optional
        number of pixels at the bottom and top borders of the image to ignore for order tracing (estimated if None)
    degree_before_merge : int, optional
        polynomial degree used while comparing/merging clusters (default: 2)
    regularization : float, optional
        accepted for interface compatibility; not used in this function
    closing_shape : tuple(int, int), optional
        structure element for the binary closing (default: (5, 5))
    opening_shape : tuple(int, int), optional
        structure element for the binary opening (default: (2, 2))
    plot : bool, optional
        wether to plot the final order fits (default: False)
    plot_title : str, optional
        title prefix for the plots
    manual : bool, optional
        wether to manually select clusters to merge (strongly recommended) (default: True)
    auto_merge_threshold : float, optional
        overlap rating above which clusters merge without asking (default: 0.9)
    merge_min_threshold : float, optional
        minimum overlap rating to consider a merge at all (default: 0.1)
    sigma : float, optional
        if > 0, reject cluster pixels more than sigma standard deviations
        from a global trend before merging (default: 0, disabled)

    Returns
    -------
    orders : array[nord, opower+1]
        order tracing coefficients (in numpy order, i.e. largest exponent first)
    column_range : array[nord, 2]
        first and last(+1) column covered by each order
    """

    # Convert to signed integer, to avoid underflow problems
    im = np.asanyarray(im)
    im = im.astype(int)

    if filter_size is None:
        # NOTE(review): the central column index is computed from shape[0]
        # (the row count) — for non-square images this is not the central
        # column; confirm whether shape[1] // 2 was intended
        col = im[:, im.shape[0] // 2]
        col = median_filter(col, 5)
        threshold = np.percentile(col, 90)
        # number of bright peaks in the central column ~ number of orders
        npeaks = find_peaks(col, height=threshold)[0].size
        filter_size = im.shape[0] // (npeaks * 2)
        logger.info("Median filter size, estimated: %i", filter_size)
    elif filter_size <= 0:
        raise ValueError(f"Expected filter size > 0, but got {filter_size}")

    if border_width is None:
        # find width of orders, based on central column
        # (same shape[0] // 2 column index as above — see note there)
        col = im[:, im.shape[0] // 2]
        col = median_filter(col, 5)
        idx = np.argmax(col)
        width = peak_widths(col, [idx])[0][0]
        border_width = int(np.ceil(width))
        logger.info("Image border width, estimated: %i", border_width)
    elif border_width < 0:
        raise ValueError(f"Expected border width > 0, but got {border_width}")

    if min_cluster is None:
        min_cluster = im.shape[1] // 4
        logger.info("Minimum cluster size, estimated: %i", min_cluster)
    elif not np.isscalar(min_cluster):
        raise TypeError(f"Expected scalar minimum cluster size, but got {min_cluster}")

    if min_width is None:
        min_width = 0.25
    if min_width == 0:
        # explicitly disabled
        pass
    elif isinstance(min_width, (float, np.floating)):
        # fraction of the image height -> absolute pixel count
        min_width = int(min_width * im.shape[0])
        logger.info("Minimum order width, estimated: %i", min_width)

    # im[im < 0] = np.ma.masked
    blurred = np.ma.filled(im, fill_value=0)
    # close small dark gaps before smoothing
    blurred = grey_closing(blurred, 5)
    # blur image along columns, and use the median + blurred + noise as threshold
    blurred = gaussian_filter1d(blurred, filter_size, axis=0)

    if noise is None:
        tmp = np.abs(blurred.flatten())
        noise = np.percentile(tmp, 5)
        logger.info("Background noise, estimated: %f", noise)
    elif not np.isscalar(noise):
        raise TypeError(f"Expected scalar noise level, but got {noise}")

    # pixels significantly above the smoothed background
    mask = im > blurred + noise
    # remove borders
    if border_width != 0:
        mask[:border_width, :] = mask[-border_width:, :] = False
        mask[:, :border_width] = mask[:, -border_width:] = False
    # remove masked areas with no clusters
    mask = np.ma.filled(mask, fill_value=False)
    # close gaps inbetween clusters
    struct = np.full(closing_shape, 1)
    mask = binary_closing(mask, struct, border_value=1)
    # remove small lonely clusters
    struct = np.full(opening_shape, 1)
    # struct = generate_binary_structure(2, 1)
    mask = binary_opening(mask, struct)

    # label clusters
    clusters, _ = label(mask)

    # remove small clusters
    sizes = np.bincount(clusters.ravel())
    mask_sizes = sizes > min_cluster
    mask_sizes[0] = True  # This is the background, which we don't need to remove
    clusters[~mask_sizes[clusters]] = 0

    # # Reorganize x, y, clusters into a more convenient "pythonic" format
    # # x, y become dictionaries, with an entry for each order
    # # n is just a list of all orders (ignore cluster == 0)
    n = np.unique(clusters)
    n = n[n != 0]
    x = {i: np.where(clusters == c)[0] for i, c in enumerate(n)}
    y = {i: np.where(clusters == c)[1] for i, c in enumerate(n)}

    def best_fit_degree(x, y):
        # choose degree 1 or 2, whichever has the smaller chi-squared
        L1 = np.sum((np.polyval(np.polyfit(y, x, 1), y) - x) ** 2)
        L2 = np.sum((np.polyval(np.polyfit(y, x, 2), y) - x) ** 2)

        # aic1 = 2 + 2 * np.log(L1) + 4 / (x.size - 2)
        # aic2 = 4 + 2 * np.log(L2) + 12 / (x.size - 3)

        if L1 < L2:
            return 1
        else:
            return 2

    if sigma > 0:
        # Outlier rejection: remove each cluster's vertical offset (bias),
        # fit one global trend to all clusters, and drop pixels further
        # than sigma standard deviations from that trend.
        degree = {i: best_fit_degree(x[i], y[i]) for i in x.keys()}
        bias = {i: np.polyfit(y[i], x[i], deg=degree[i])[-1] for i in x.keys()}
        n = list(x.keys())
        yt = np.concatenate([y[i] for i in n])
        xt = np.concatenate([x[i] - bias[i] for i in n])
        coef = np.polyfit(yt, xt, deg=degree_before_merge)

        res = np.polyval(coef, yt)
        cutoff = sigma * (res - xt).std()

        # DEBUG plot
        # uy = np.unique(yt)
        # mask = np.abs(res - xt) > cutoff
        # plt.plot(yt, xt, ".")
        # plt.plot(yt[mask], xt[mask], "r.")
        # plt.plot(uy, np.polyval(coef, uy))
        # plt.show()
        #

        # per-cluster keep-mask of pixels within the cutoff
        m = {
            i: np.abs(np.polyval(coef, y[i]) - (x[i] - bias[i])) < cutoff
            for i in x.keys()
        }

        # NOTE(review): cluster keys start at 0 (from enumerate above), but
        # this loop starts at 1 — cluster 0 is never filtered; confirm
        k = max(x.keys()) + 1
        for i in range(1, k):
            # re-label the rejected pixels; large enough leftovers become
            # new clusters of their own
            new_img = np.zeros(im.shape, dtype=int)
            new_img[x[i][~m[i]], y[i][~m[i]]] = 1
            clusters, _ = label(new_img)

            x[i] = x[i][m[i]]
            y[i] = y[i][m[i]]
            if len(x[i]) == 0:
                # nothing left of this cluster after the rejection
                del x[i], y[i]

            nnew = np.max(clusters)
            if nnew != 0:
                xidx, yidx = np.indices(im.shape)
                for j in range(1, nnew + 1):
                    xn = xidx[clusters == j]
                    yn = yidx[clusters == j]
                    if xn.size >= min_cluster:
                        x[k] = xn
                        y[k] = yn
                        k += 1
                # plt.imshow(clusters, origin="lower")
                # plt.show()

    if plot:  # pragma: no cover
        title = "Identified clusters"
        if plot_title is not None:
            title = f"{plot_title}\n{title}"
        plt.title(title)
        plt.xlabel("x [pixel]")
        plt.ylabel("y [pixel]")
        clusters = np.ma.zeros(im.shape, dtype=int)
        for i in x.keys():
            clusters[x[i], y[i]] = i + 1
        clusters[clusters == 0] = np.ma.masked

        plt.imshow(clusters, origin="lower", cmap="prism")
        plt.show()

    # Merge clusters, if there are even any possible mergers left
    x, y, n = merge_clusters(
        im,
        x,
        y,
        n,
        manual=manual,
        deg=degree_before_merge,
        auto_merge_threshold=auto_merge_threshold,
        merge_min_threshold=merge_min_threshold,
        plot_title=plot_title,
    )

    if min_width > 0:
        # discard clusters whose column span is too short to be an order
        sizes = {k: v.max() - v.min() for k, v in y.items()}
        mask_sizes = {k: v > min_width for k, v in sizes.items()}
        for k, v in mask_sizes.items():
            if not v:
                del x[k]
                del y[k]
        n = x.keys()

    orders = fit_polynomials_to_clusters(x, y, n, opower)

    # sort orders from bottom to top, using relative position

    def compare(i, j):
        # each item is (cluster id, fitted rows over all columns, col min, col max)
        _, xi, i_left, i_right = i
        _, xj, j_left, j_right = j

        if i_right < j_left or j_right < i_left:
            # no common columns: compare the mean fitted row overall
            return xi.mean() - xj.mean()

        # compare only over the shared column range
        left = max(i_left, j_left)
        right = min(i_right, j_right)

        return xi[left:right].mean() - xj[left:right].mean()

    xp = np.arange(im.shape[1])
    keys = [(c, np.polyval(orders[c], xp), y[c].min(), y[c].max()) for c in x.keys()]
    keys = sorted(keys, key=cmp_to_key(compare))
    key = [k[0] for k in keys]

    # renumber clusters 0..nord-1 in the sorted order
    n = np.arange(len(n), dtype=int)
    x = {c: x[key[c]] for c in n}
    y = {c: y[key[c]] for c in n}
    orders = np.array([orders[key[c]] for c in n])

    # first and last(+1) column of each order
    column_range = np.array([[np.min(y[i]), np.max(y[i]) + 1] for i in n])

    if plot:  # pragma: no cover
        plot_orders(im, x, y, n, orders, column_range, title=plot_title)

    return orders, column_range