hyper-py-photometry 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hyper_py/__init__.py +1 -0
- hyper_py/bkg_multigauss.py +524 -0
- hyper_py/bkg_single.py +477 -0
- hyper_py/config.py +43 -0
- hyper_py/create_background_slices.py +160 -0
- hyper_py/data_output.py +132 -0
- hyper_py/detection.py +142 -0
- hyper_py/extract_cubes.py +42 -0
- hyper_py/fitting.py +562 -0
- hyper_py/gaussfit.py +519 -0
- hyper_py/groups.py +66 -0
- hyper_py/hyper.py +150 -0
- hyper_py/logger.py +73 -0
- hyper_py/map_io.py +73 -0
- hyper_py/paths_io.py +122 -0
- hyper_py/photometry.py +114 -0
- hyper_py/run_hyper.py +45 -0
- hyper_py/single_map.py +716 -0
- hyper_py/survey.py +70 -0
- hyper_py/visualization.py +150 -0
- hyper_py_photometry-0.1.0.dist-info/METADATA +514 -0
- hyper_py_photometry-0.1.0.dist-info/RECORD +26 -0
- hyper_py_photometry-0.1.0.dist-info/WHEEL +5 -0
- hyper_py_photometry-0.1.0.dist-info/entry_points.txt +4 -0
- hyper_py_photometry-0.1.0.dist-info/licenses/LICENSE +13 -0
- hyper_py_photometry-0.1.0.dist-info/top_level.txt +1 -0
hyper_py/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from hyper_py.hyper import start_hyper
|
|
@@ -0,0 +1,524 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import warnings
|
|
3
|
+
|
|
4
|
+
import numpy as np
|
|
5
|
+
import matplotlib.pyplot as plt
|
|
6
|
+
from astropy.io import fits
|
|
7
|
+
from astropy.modeling import fitting, models
|
|
8
|
+
from astropy.stats import SigmaClip, sigma_clipped_stats
|
|
9
|
+
from astropy.utils.exceptions import AstropyUserWarning
|
|
10
|
+
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
|
|
11
|
+
from astropy.wcs import WCS
|
|
12
|
+
from photutils.aperture import CircularAperture
|
|
13
|
+
from sklearn.linear_model import HuberRegressor, TheilSenRegressor
|
|
14
|
+
from scipy.ndimage import find_objects, label
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def multigauss_background(minimize_method, image, header, xcen, ycen, nx, ny, all_sources_xcen, all_sources_ycen, aper_sup, max_fwhm_extent,
                          box_sizes, pol_orders_separate, suffix, group_id, count_source_blended_indexes=None, config=None, logger=None, logger_file_only=None):
    """
    Estimate polynomial background in masked cutout, looping over box sizes and polynomial orders.

    For each candidate box size the routine:
      1. extracts a cutout centred on the source group (or uses the whole map
         when the ``background.fix_min_box`` config option is 0) and subtracts
         its sigma-clipped median,
      2. masks "external" sources (catalogue sources falling inside the box
         that are not part of this group) by fitting each with a 2D elliptical
         Gaussian and blanking pixels above an adaptive threshold,
      3. interpolates NaNs if the fraction of NaN edge pixels is small enough,
      4. masks the group's own ("main") sources with the same Gaussian scheme,
      5. fits 2D polynomials of each order in ``pol_orders_separate`` to the
         remaining background pixels using the configured fitters
         (least squares / Huber / Theil-Sen) and keeps the combination that
         minimises the requested metric (``minimize_method``: "nmse", "bic"
         or "redchi"), evaluated on a pixel mask shared across all runs so
         the metric values are comparable.

    Parameters
    ----------
    minimize_method : str — model-selection metric ("nmse", "bic", "redchi";
        anything else falls back to "nmse").
    image : 2D array — the full map.
    header : FITS header of ``image`` (used to build the cutout WCS header).
    xcen, ycen : array-like — pixel coordinates of this group's sources.
    nx, ny : int — full-map dimensions used to clip the cutout bounds.
    all_sources_xcen, all_sources_ycen : array-like — pixel coordinates of
        ALL catalogue sources (used to find external sources inside the box).
    max_fwhm_extent : float — characteristic source size in pixels; drives
        Gaussian-fit cutout sizes, stddev bounds and edge thickness.
    box_sizes : list — candidate cutout box sizes in pixels; when
        fix_min_box == 0 it is instead read as [nx, ny] of the whole map.
    pol_orders_separate : list of int — polynomial orders to try.
    config : config object providing ``get(section, key, default)``.
    logger_file_only : logger used for warnings/info written to file only.

    NOTE(review): ``aper_sup``, ``suffix``, ``group_id``, ``logger`` and
    ``count_source_blended_indexes`` are accepted but never used here —
    presumably kept for interface symmetry with other background routines;
    confirm against callers.

    Returns
    -------
    16-tuple:
        (best_cutout_masked, best_cutout_masked_full, best_header,
         best_bg_model, best_mask_bg, best_x0, best_y0, best_xx, best_yy,
         best_xmin, best_xmax, best_ymin, best_ymax, best_box_sizes,
         best_order, best_params)
    where the first element is the externally-masked cutout with the best
    background subtracted, and best_bg_model has the clipped median added
    back.  If every fit fails, returns the last cutout with zero background,
    ``None`` header, zeros for the geometry slots and an empty coeff dict.
    """


    # ---------- SELECT WHICH FITTERS TO USE ----------
    # Each entry is (name, huber_epsilon_or_None, regressor_or_None);
    # a None regressor means plain np.linalg.lstsq.
    bg_fitters = config.get("fit_options", "bg_fitters", ["least_squares"])
    huber_epsilons = config.get("fit_options", "huber_epsilons", [1.35])

    fitters = []
    if "least_squares" in bg_fitters:
        fitters.append(("LeastSquares", None, None))  # Will use np.linalg.lstsq

    if "huber" in bg_fitters:
        for eps in huber_epsilons:
            reg = HuberRegressor(fit_intercept=False, max_iter=100, epsilon=eps)
            fitters.append((f"Huber_{eps}", eps, reg))

    if "theilsen" in bg_fitters:
        reg = TheilSenRegressor(fit_intercept=False, max_subpopulation=1e4, random_state=42)
        fitters.append(("TheilSen", None, reg))


    # --- identify if trun on whole map (fix_min_box = 0) or not --- #
    fix_min_box = config.get("background", "fix_min_box", 3)  # minimum padding value (multiple of FWHM)


    # - Initialize parameters - #
    best_params = {}
    best_order = None
    best_min = np.inf

    # Mask of the FIRST processed box, reused so the selection metric is
    # always evaluated on the same pixels regardless of the current box size.
    cutout_reference_mask = None


    for box in box_sizes:
        if fix_min_box != 0:
            half_box = box // 2 -1
            # Cutout bounds clipped to the map; span covers min..max of the
            # group's source positions plus half_box padding on each side.
            xmin = max(0, int(np.min(xcen)) - half_box)
            xmax = min(nx, int(np.max(xcen)) + half_box + 1)
            ymin = max(0, int(np.min(ycen)) - half_box)
            ymax = min(ny, int(np.max(ycen)) + half_box + 1)

            cutout = np.array(image[ymin:ymax, xmin:xmax], dtype=np.float64)
        else:
            # Whole-map mode: box_sizes is interpreted as [nx, ny] here.
            xmin = 0
            xmax = box_sizes[0]
            ymin = 0
            ymax = box_sizes[1]
            cutout = np.array(image)

        if cutout.size == 0 or np.isnan(cutout).all():
            continue


        # - first regularize mean background - #
        # Subtract the sigma-clipped median so the polynomial fit works on a
        # zero-centred surface; the median is added back to the model later.
        valid_cutout = ~np.isnan(cutout)
        mean_valid_cutout, median_valid_cutout, std_valid_cutout = sigma_clipped_stats(cutout[valid_cutout], sigma=3.0, maxiters=5)
        cutout = cutout - median_valid_cutout


        yy, xx = np.indices(cutout.shape)
        # Source positions in cutout pixel coordinates.
        x0 = xcen - xmin
        y0 = ycen - ymin


        # ---Initialize mask: True = valid pixel for background fitting --- #
        # mask_bg = np.ones_like(cutout, dtype=bool)
        # NOTE(review): this finite-pixel mask is overwritten by the all-True
        # mask a few lines below; kept byte-identical as in the original.
        mask_bg = np.isfinite(cutout)


        all_sources_to_mask = []
        all_sources_to_mask.extend(zip(x0, y0))

        main_sources = []
        main_sources.extend(zip(x0, y0))

        external_sources = []

        #--- Identify external sources inside box and add to main source - only if the background is not estimated over the whole map ---#
        mask_bg = np.ones_like(cutout, dtype=bool)

        if fix_min_box != 0:
            # Convert reference Gaussians to a set of (x0, y0) pixel tuples
            x0_int = np.array(x0).astype(int)
            y0_int = np.array(y0).astype(int)
            reference_positions = set(zip(x0_int, y0_int))

            # Loop over all other sources
            for i in range(len(all_sources_xcen)):
                sx = all_sources_xcen[i]
                sy = all_sources_ycen[i]

                ex = int(sx - xmin)
                ey = int(sy - ymin)

                # Keep only catalogue sources that fall inside this cutout
                # and are not one of the group's own sources.
                if (ex, ey) not in reference_positions:
                    if xmin <= sx <= xmax and ymin <= sy <= ymax:
                        all_sources_to_mask.append((ex, ey))
                        external_sources.append((ex, ey))


        # --- Mask all external sources using simple 2D Gaussian fitting --- #
        cut_local = cutout
        for xc, yc in external_sources:
            xc_int = int(round(xc))
            yc_int = int(round(yc))

            # Define small cutout around each source (e.g. max_fwhm_extent)
            fit_size = round(max_fwhm_extent/2.)  # half-size
            xfit_min = max(0, xc_int - fit_size)
            xfit_max = min(cut_local.shape[1], xc_int + fit_size + 1)
            yfit_min = max(0, yc_int - fit_size)
            yfit_max = min(cut_local.shape[0], yc_int + fit_size + 1)

            data_fit = cut_local[yfit_min:yfit_max, xfit_min:xfit_max]
            if data_fit.size < max_fwhm_extent*2 or np.all(np.isnan(data_fit)) or np.nanmax(data_fit) <= 0:
                continue  # skip this source if empty or invalid

            yy_sub, xx_sub = np.mgrid[yfit_min:yfit_max, xfit_min:xfit_max]

            # Define and fit elliptical Gaussian
            g_init = models.Gaussian2D(
                amplitude=np.nanmax(data_fit),
                x_mean=xc,
                y_mean=yc,
                x_stddev=max_fwhm_extent,
                y_stddev=max_fwhm_extent,
                theta=0.0,
                bounds={'x_stddev': (max_fwhm_extent/4., max_fwhm_extent*2), 'y_stddev': (max_fwhm_extent/4., max_fwhm_extent*2), 'theta': (-np.pi/2, np.pi/2)}
            )

            fit_p = fitting.LevMarLSQFitter()
            try:
                g_fit = fit_p(g_init, xx_sub, yy_sub, data_fit)
            except Exception:
                continue  # skip if fit fails

            # Evaluate fitted model over full local cutout
            yy_full, xx_full = np.indices(cut_local.shape)
            model_vals = g_fit(xx_full, yy_full)

            # # Mask pixels above 2-FWHM threshold for external sources (≈ 0.1353 × peak) - or below it if too many NaNs
            threshold_factor = 0.1353
            max_fraction = 0.3
            max_extent_pix = max_fwhm_extent *2  # in pixels

            # Loop to find acceptable threshold
            # The loop terminates because doubling threshold_factor eventually
            # empties the mask (fraction 0, extent 0 pass both checks).
            while True:
                threshold = g_fit.amplitude.value * threshold_factor
                mask_above = model_vals > threshold
                frac_above_thresh = np.sum(mask_above) / model_vals.size

                if frac_above_thresh <= max_fraction:
                    # Check the maximum spatial extent of the mask
                    labeled, _ = label(mask_above)
                    slices = find_objects(labeled)

                    max_extent = 0
                    for slc in slices:
                        if slc is None:
                            continue
                        dy = slc[0].stop - slc[0].start
                        dx = slc[1].stop - slc[1].start
                        extent = np.sqrt(dx**2 + dy**2)
                        max_extent = max(max_extent, extent)

                    if max_extent <= max_extent_pix:
                        break  # both conditions passed
                    else:
                        # increase threshold to shrink extent
                        threshold_factor *= 2
                else:
                    # too many pixels above threshold
                    threshold_factor *= 2

            # Apply the final mask
            mask_bg[model_vals > threshold] = False



        ### --- From now on, all photometry and background estimation is done on cutout_masked from external sources --- ###
        # --- Apply external sources mask → set masked pixels to np.nan --- #
        cutout_masked = np.copy(cutout)
        cutout_masked[~mask_bg] = np.nan



        # # ---- interpolate NaNs at the edges of the maps --- #
        # --- Count NaNs in edge pixels ---
        edge_thickness = round(max_fwhm_extent)  # pixels to define the edge region

        edge_mask = np.zeros_like(cutout_masked, dtype=bool)
        edge_mask[:edge_thickness, :] = True    # top edge
        edge_mask[-edge_thickness:, :] = True   # bottom edge
        edge_mask[:, :edge_thickness] = True    # left edge
        edge_mask[:, -edge_thickness:] = True   # right edge

        n_edge_total = np.sum(edge_mask)
        n_edge_nan = np.sum(edge_mask & ~np.isfinite(cutout_masked))
        nan_fraction = n_edge_nan / n_edge_total

        # --- Only interpolate if edge NaNs < threshold ---
        nan_threshold = 0.3  # allow up to 30% NaNs in edge region

        if nan_fraction < nan_threshold:
            # Grow the interpolation kernel until no NaNs remain (or give up
            # at max_sigma).
            sigma = 2.0
            max_sigma = 10.0
            success = False

            with warnings.catch_warnings():
                warnings.simplefilter("ignore", AstropyUserWarning)

                while sigma <= max_sigma:
                    kernel = Gaussian2DKernel(x_stddev=sigma)
                    interpolated_map = interpolate_replace_nans(cutout_masked, kernel)

                    if np.all(np.isfinite(interpolated_map)):
                        success = True
                        break
                    else:
                        sigma += 1.0  # Increase kernel size and retry

            if success:
                cutout_masked = interpolated_map
                # All pixels are now finite, so the background mask is reset.
                mask_bg = np.ones_like(cutout_masked, dtype=bool)
            else:
                logger_file_only.warning("⚠️ NaNs remain after interpolation even with enlarged kernel!")

        else:
            logger_file_only.warning(f"⚠️ Too many NaNs at edges (fraction: {nan_fraction:.2f}) — interpolation skipped.")




        # --- Mask all main sources using simple 2D Gaussian fitting for background estimation purposes --- #
        # Same adaptive-threshold scheme as for external sources above, but
        # accumulated into mask_bg_all so the main sources are excluded only
        # from the background FIT (not from the returned masked cutout).
        mask_bg_all = np.copy(mask_bg)

        cut_local = cutout_masked
        for xc, yc in main_sources:
            xc_int = int(round(xc))
            yc_int = int(round(yc))

            # Define small cutout around each source (e.g. 2*max_fwhm_extent)
            fit_size = round(max_fwhm_extent/2.)  # half-size
            xfit_min = max(0, xc_int - fit_size)
            xfit_max = min(cut_local.shape[1], xc_int + fit_size + 1)
            yfit_min = max(0, yc_int - fit_size)
            yfit_max = min(cut_local.shape[0], yc_int + fit_size + 1)

            data_fit = cut_local[yfit_min:yfit_max, xfit_min:xfit_max]
            if data_fit.size < max_fwhm_extent*2 or np.all(np.isnan(data_fit)) or np.nanmax(data_fit) <= 0:
                continue  # skip this source if empty or invalid

            yy_sub, xx_sub = np.mgrid[yfit_min:yfit_max, xfit_min:xfit_max]

            # Define and fit elliptical Gaussian
            g_init = models.Gaussian2D(
                amplitude=np.nanmax(data_fit),
                x_mean=xc,
                y_mean=yc,
                x_stddev=max_fwhm_extent,
                y_stddev=max_fwhm_extent,
                theta=0.0,
                bounds={'x_stddev': (max_fwhm_extent/4., max_fwhm_extent*2), 'y_stddev': (max_fwhm_extent/4., max_fwhm_extent*2), 'theta': (-np.pi/2, np.pi/2)}
            )

            fit_p = fitting.LevMarLSQFitter()
            try:
                g_fit = fit_p(g_init, xx_sub, yy_sub, data_fit)
            except Exception:
                continue  # skip if fit fails

            # Evaluate fitted model over full local cutout
            yy_full, xx_full = np.indices(cut_local.shape)
            model_vals = g_fit(xx_full, yy_full)


            # # Mask pixels above 2-FWHM threshold for external sources (≈ 0.1353 × peak) - or below it if too many NaNs
            threshold_factor = 0.1353
            max_fraction = 0.3
            max_extent_pix = max_fwhm_extent *2  # in pixels

            # Loop to find acceptable threshold
            while True:
                threshold = g_fit.amplitude.value * threshold_factor
                mask_above = model_vals > threshold
                frac_above_thresh = np.sum(mask_above) / model_vals.size

                if frac_above_thresh <= max_fraction:
                    # Check the maximum spatial extent of the mask
                    labeled, _ = label(mask_above)
                    slices = find_objects(labeled)

                    max_extent = 0
                    for slc in slices:
                        if slc is None:
                            continue
                        dy = slc[0].stop - slc[0].start
                        dx = slc[1].stop - slc[1].start
                        extent = np.sqrt(dx**2 + dy**2)
                        max_extent = max(max_extent, extent)

                    if max_extent <= max_extent_pix:
                        break  # both conditions passed
                    else:
                        # increase threshold to shrink extent
                        threshold_factor *= 2
                else:
                    # too many pixels above threshold
                    threshold_factor *= 2

            # Apply the final mask
            mask_bg_all[model_vals > threshold] = False



        # --- Apply main sources mask → set masked pixels to np.nan --- #
        cutout_masked_all = np.copy(cutout_masked)
        cutout_masked_all[~mask_bg_all] = np.nan



        # - Estimate good pixels for background estimation only in cutout_masked_all - #
        y_bg, x_bg = np.where(mask_bg_all)
        z_bg = cutout_masked_all[y_bg, x_bg]

        sigma_clip = SigmaClip(sigma=3.0, maxiters=10)
        clipped = sigma_clip(z_bg)
        valid = ~clipped.mask

        x_valid = x_bg[valid]
        y_valid = y_bg[valid]
        z_valid = clipped.data[valid]


        # - identify the reference mask to estimate best_min from the first run - #
        if cutout_reference_mask is None:
            cutout_reference_mask = np.copy(cutout_masked_all)
            ref_ny, ref_nx = cutout_reference_mask.shape
            ref_box_size = box


        # ------------------ Loop over polynomial orders ------------------
        for order in pol_orders_separate:
            # Build design matrix
            # Columns are x^dx * y^dy for all dx+dy <= order.
            terms = []
            param_names = []
            for dx in range(order + 1):
                for dy in range(order + 1 - dx):
                    terms.append((x_valid ** dx) * (y_valid ** dy))
                    param_names.append(f"c{dx}_{dy}")

            A = np.vstack(terms).T
            add_intercept = False
            # NOTE(review): dx=dy=0 always produces "c0_0", so this branch
            # looks unreachable for order >= 0 — confirm intent.
            if "c0_0" not in param_names:
                A = np.column_stack([np.ones_like(z_valid), A])
                param_names = ["c0_0"] + param_names
                add_intercept = True


            # --- run chosen fitter algorithm --- #
            for method_name, eps, reg in fitters:
                try:
                    if reg is None:
                        # Least-squares case
                        coeffs, _, _, _ = np.linalg.lstsq(A, z_valid, rcond=None)
                    else:
                        reg.fit(A, z_valid)
                        coeffs = reg.coef_
                        if add_intercept:
                            coeffs[0] = reg.intercept_
                except Exception as e:
                    logger_file_only.warning(f"[FAIL] {method_name} fit failed (order={order}, ε={eps}): {e}")
                    continue

                # Rebuild coeff_dict
                coeff_dict = dict(zip(param_names, coeffs))


                # --- Estimate best_min on common mask size for all runs --- #
                # Re-centre a reference-sized window on the group so metrics
                # from different box sizes are computed over the same area.
                if fix_min_box != 0:
                    half_ref_box_x = ref_nx // 2 -1
                    half_ref_box_y = ref_ny // 2 -1

                    x_start = max(0, round(np.mean(x0)) - half_ref_box_x)
                    x_end = x_start + ref_nx  #max(ref_nx, round(np.mean(x0)) + half_ref_box_x +1)
                    y_start = max(0, round(np.mean(y0)) - half_ref_box_y)
                    y_end = y_start + ref_ny  #max(ref_ny, round(np.mean(y0)) + half_ref_box_y +1)

                    # Shift the window back inside the cutout if it overruns.
                    if (x_end > cutout_masked_all.shape[1]):
                        x_start = x_start - (x_end - cutout_masked_all.shape[1])
                        x_end = cutout_masked_all.shape[1]

                    if (y_end > cutout_masked_all.shape[0]):
                        y_start = y_start - (y_end - cutout_masked_all.shape[0])
                        y_end = cutout_masked_all.shape[0]

                    cutout_eval = cutout_masked_all[y_start:y_end, x_start:x_end]

                else:
                    xmin = 0
                    xmax = box_sizes[0]
                    ymin = 0
                    ymax = box_sizes[1]
                    cutout_eval = cutout_masked_all


                # --- Crop current cutout to match reference size ---
                shared_valid_mask = np.isfinite(cutout_reference_mask) & np.isfinite(cutout_eval)


                if np.count_nonzero(shared_valid_mask) < 10:
                    continue  # Not enough shared pixels

                yy_best_min, xx_best_min = np.where(shared_valid_mask)
                z_valid_best_min = cutout_eval[yy_best_min, xx_best_min]
                x_valid_best_min = xx_best_min
                y_valid_best_min = yy_best_min


                # Evaluate the fitted polynomial on the shared-mask pixels.
                bg_model_local_valid_best_min = np.zeros_like(z_valid_best_min)
                for pname, val in coeff_dict.items():
                    dx, dy = map(int, pname[1:].split("_"))
                    bg_model_local_valid_best_min += val * (x_valid_best_min ** dx) * (y_valid_best_min ** dy)

                # Then compute your residual and metric
                residual_valid_best_min = bg_model_local_valid_best_min - z_valid_best_min


                mse = np.mean(residual_valid_best_min ** 2)
                # NOTE(review): norm and n_points use z_valid (the fit sample),
                # not the shared-mask sample the residual was computed on —
                # confirm this mixing is intentional.
                norm = np.mean(z_valid ** 2) + 1e-12
                nmse = mse / norm

                k_params = len(coeff_dict)
                n_points = len(z_valid)
                bic = n_points * np.log(mse) + k_params * np.log(n_points)

                std_res = np.nanstd(residual_valid_best_min)
                std_res = std_res if std_res > 0 else 1e-10
                redchi = np.sum((residual_valid_best_min / std_res) ** 2) / (n_points - k_params)

                # Evaluate metric
                if minimize_method == "nmse":
                    my_min = nmse
                elif minimize_method == "bic":
                    my_min = bic
                elif minimize_method == "redchi":
                    my_min = redchi
                else:
                    my_min = nmse  # fallback


                if my_min < best_min:
                    # Evaluate full model only once now
                    bg_model_full = np.zeros_like(xx, dtype=np.float64)
                    for pname, val in coeff_dict.items():
                        dx, dy = map(int, pname[1:].split("_"))
                        bg_model_full += val * (xx ** dx) * (yy ** dy)

                    #- save cutout header -#
                    # Shift CRPIX so the cutout WCS maps pixels to the same sky
                    # coordinates as the parent image.
                    cutout_wcs = WCS(header).deepcopy()
                    cutout_wcs.wcs.crpix[0] -= xmin  # CRPIX1
                    cutout_wcs.wcs.crpix[1] -= ymin  # CRPIX2
                    cutout_header = cutout_wcs.to_header()
                    #- preserve other non-WCS cards (e.g. instrument, DATE-OBS) -#
                    cutout_header.update({k: header[k] for k in header if k not in cutout_header and k not in ['COMMENT', 'HISTORY']})

                    best_cutout = cutout
                    best_cutout_masked = cutout_masked
                    best_cutout_masked_full = cutout_masked_all
                    best_bg_model = bg_model_full
                    best_median_cutout = median_valid_cutout
                    best_header = cutout_header
                    best_mask_bg = mask_bg
                    best_x0 = x0
                    best_y0 = y0
                    best_xx = xx
                    best_yy = yy
                    best_xmin = xmin
                    best_xmax = xmax
                    best_ymin = ymin
                    best_ymax = ymax
                    best_params = coeff_dict
                    best_order = order
                    best_box_sizes = [box]
                    best_method = method_name
                    best_eps = eps
                    best_min = my_min


    # ------------------ Final background subtraction ------------------
    if best_order is None:
        # If no valid background was found, return unmodified cutout
        logger_file_only.warning("[WARNING] Background fit failed; returning original cutout.")
        return cutout_masked, np.zeros_like(cutout), None, np.zeros_like(cutout), np.zeros_like(cutout), 0, 0, 0, 0, 0, 0, 0, 0, [box], 0, {}

    else:
        # Subtract background from the original cutout
        best_cutout -= best_bg_model
        best_cutout_masked -= best_bg_model
        # Re-add the sigma-clipped median subtracted at the start so the
        # returned model is in the original image units.
        best_bg_model = best_bg_model + best_median_cutout

        logger_file_only.info(f"[INFO] Background subtracted using order {best_order} polynomial.")

        return best_cutout_masked, best_cutout_masked_full, best_header, best_bg_model, best_mask_bg, best_x0, best_y0, best_xx, best_yy, best_xmin, best_xmax, best_ymin, best_ymax, best_box_sizes, best_order, best_params
|