hyper-py-photometry 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hyper_py/fitting.py ADDED
@@ -0,0 +1,562 @@
+ import os
+ import warnings
+
+ import numpy as np
+ from astropy.io import fits
+ from astropy.modeling import fitting, models
+ from astropy.stats import SigmaClip, sigma_clipped_stats
+ from astropy.utils.exceptions import AstropyUserWarning
+ from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
+ from astropy.wcs import WCS
+ from lmfit import minimize, Parameters
+ from photutils.aperture import CircularAperture
+ from sklearn.linear_model import HuberRegressor, TheilSenRegressor
+ from scipy.ndimage import gaussian_filter
+
+ from hyper_py.visualization import plot_fit_summary
+ from .bkg_multigauss import multigauss_background
+
+ from scipy.spatial.distance import pdist
+ import matplotlib.pyplot as plt
+
+
+ def fit_group_with_background(image, xcen, ycen, all_sources_xcen, all_sources_ycen, group_indices, map_struct, config,
+                               suffix, logger, logger_file_only, group_id, count_source_blended_indexes):
+
+     header = map_struct['header']
+     ny, nx = image.shape
+
+     # --- Load config parameters ---
+     dir_root = config.get("paths", "output")["dir_root"]
+     beam_pix = map_struct['beam_dim']/map_struct['pix_dim']/2.3548  # beam sigma size in pixels
+     fwhm_beam_pix = map_struct['beam_dim']/map_struct['pix_dim']  # beam FWHM size in pixels
+     aper_inf = config.get("photometry", "aper_inf", 1.0) * beam_pix
+     aper_sup = config.get("photometry", "aper_sup", 2.0) * beam_pix
+     max_fwhm_extent = aper_sup * 2.3548  # twice major FWHM in pixels
+
+     convert_mjy = config.get("units", "convert_mJy")
+
+     fit_cfg = config.get("fit_options", {})
+     minimize_method = config.get("fit_options", "min_method", "redchi")
+     weight_choice = fit_cfg.get("weights", None)
+     weight_power_snr = fit_cfg.get("power_snr", 1.0)
+
+     use_l2 = fit_cfg.get("use_l2_regularization", False)
+     lambda_l2 = fit_cfg.get("lambda_l2", 1e-3)
+
+     fit_gauss_and_bg_together = config.get("background", "fit_gauss_and_bg_together", False)
+     fix_min_box = config.get("background", "fix_min_box", 3)  # minimum padding value (multiple of FWHM)
+     fix_max_box = config.get("background", "fix_max_box", 5)  # maximum padding value (multiple of FWHM)
+     orders = config.get("background", "polynomial_orders", [0, 1, 2]) if fit_gauss_and_bg_together else [0]
+     fit_separately = config.get("background", "fit_gauss_and_bg_separately", False)
+     pol_orders_separate = config.get("background", "pol_orders_separate", [0])
+
+     try:
+         lambda_l2 = float(lambda_l2)
+     except Exception as e:
+         logger.warning(f"[WARNING] lambda_l2 is not a float: {lambda_l2} → {e}")
+         lambda_l2 = 1e-3  # fallback
+
+     # === Determine box size === #
+     if fix_min_box == 0:
+         # Use entire map size directly
+         box_sizes = list((ny, nx))
+     else:
+         positions = np.column_stack([xcen, ycen])
+         max_dist = np.max(pdist(positions)) if len(positions) > 1 else 0.0
+         # box size is a multiplicative factor of the fwhm_beam_pix + maximum source size: max_fwhm_extent*2 + distance between common sources (max_dist)
+         dynamic_min_box = int(np.ceil(fix_min_box*fwhm_beam_pix)*2 + max_fwhm_extent*2 + max_dist)
+         dynamic_max_box = int(np.ceil(fix_max_box*fwhm_beam_pix)*2 + max_fwhm_extent*2 + max_dist)
+         box_sizes = list(range(dynamic_min_box + 1, dynamic_max_box + 2, 2))  # ensure odd
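+         # Worked example (illustrative values only): with fwhm_beam_pix = 4 pix,
+         # max_fwhm_extent = 8 pix, max_dist = 10 pix, fix_min_box = 3 and fix_max_box = 5,
+         # dynamic_min_box = 12*2 + 16 + 10 = 50 and dynamic_max_box = 20*2 + 16 + 10 = 66,
+         # so box_sizes = [51, 53, ..., 67] (odd widths only).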
+
+     # - initialize map and header - #
+     header = map_struct['header']
+     ny, nx = image.shape
+
+     # - initialize params - #
+     best_result = None
+     best_min = np.inf
+     best_cutout = None
+     best_header = None
+     best_slice = None
+     best_order = None
+     best_box = None
+     cutout_masked = None  # defined up front so the fallback return below never references an undefined name
+
+     # === Estimate separated background masking also external sources === #
+     if fit_separately:
+         cutout_after_bg, cutout_full_with_bg, cutout_header, bg_model, mask_bg, xcen_cut, ycen_cut, xx, yy, xmin, xmax, ymin, ymax, box_sizes_after_bg, back_order, poly_params = multigauss_background(
+             minimize_method=minimize_method,
+             image=image,
+             header=header,
+             xcen=xcen,
+             ycen=ycen,
+             nx=nx,
+             ny=ny,
+             all_sources_xcen=all_sources_xcen,
+             all_sources_ycen=all_sources_ycen,
+             aper_sup=aper_sup,
+             max_fwhm_extent=max_fwhm_extent,
+             box_sizes=box_sizes,
+             pol_orders_separate=pol_orders_separate,
+             suffix=suffix,
+             group_id=group_id,
+             count_source_blended_indexes=count_source_blended_indexes,
+             config=config,
+             logger=logger,
+             logger_file_only=logger_file_only
+         )
+
+         # - save original map without background - #
+         cutout = np.copy(cutout_after_bg)
+         cutout_masked = cutout_after_bg
+         cutout_masked_full = cutout_full_with_bg
+         box_sizes = box_sizes_after_bg
+     else:
+         bg_model = None
+
+     # --- Run over the various box sizes (if fit_separately = True this is the best size identified in the background fit) --- #
+     for box in box_sizes:
+
+         if not fit_separately:
+             if fix_min_box != 0:
+                 half_box = box // 2 - 1
+                 xmin = max(0, int(np.min(xcen)) - half_box)
+                 xmax = min(nx, int(np.max(xcen)) + half_box + 1)
+                 ymin = max(0, int(np.min(ycen)) - half_box)
+                 ymax = min(ny, int(np.max(ycen)) + half_box + 1)
+
+                 cutout = image[ymin:ymax, xmin:xmax].copy()
+             else:
+                 xmin = 0
+                 xmax = nx  # full map extent along x
+                 ymin = 0
+                 ymax = ny  # full map extent along y
+                 cutout = image
+
+             if cutout.size == 0 or np.isnan(cutout).all():
+                 continue
+
+             # - save cutout header - #
+             cutout_wcs = WCS(header).deepcopy()
+             cutout_wcs.wcs.crpix[0] -= xmin  # CRPIX1
+             cutout_wcs.wcs.crpix[1] -= ymin  # CRPIX2
+             cutout_header = cutout_wcs.to_header()
+             # - preserve other non-WCS cards (e.g. instrument, DATE-OBS) - #
+             cutout_header.update({k: header[k] for k in header if k not in cutout_header and k not in ['COMMENT', 'HISTORY']})
+
+             yy, xx = np.indices(cutout.shape)
+
+             # --- estimate cutout rms and weighting scheme --- #
+             xcen_cut = xcen - xmin
+             ycen_cut = ycen - ymin
+
+             # --- Identify external sources inside box --- #
+             mask = np.ones_like(cutout, dtype=bool)  # True = valid, False = masked
+             external_sources = []
+             for i in range(len(all_sources_xcen)):
+                 if i in group_indices:
+                     continue  # skip sources belonging to current group
+                 sx = all_sources_xcen[i]
+                 sy = all_sources_ycen[i]
+
+                 if xmin <= sx <= xmax and ymin <= sy <= ymax and fix_min_box != 0:
+                     ex = sx - xmin
+                     ey = sy - ymin
+                     external_sources.append((ex, ey))  # local cutout coords
+
+                     # Define a bounding box around the source, clipped to cutout size
+                     masking_radius = max_fwhm_extent/2.  # radius
+                     masking_radius_pix = np.round(masking_radius)
+
+                     xmin_box = max(0, int(ex - masking_radius_pix))
+                     xmax_box = min(nx, int(ex + masking_radius_pix + 1))
+                     ymin_box = max(0, int(ey - masking_radius_pix))
+                     ymax_box = min(ny, int(ey + masking_radius_pix + 1))
+
+                     # Blank the local region around the external source
+                     mask[ymin_box:ymax_box, xmin_box:xmax_box] = False
+
+             # --- Apply external sources mask → set masked pixels to np.nan --- #
+             cutout_masked = np.copy(cutout)
+             cutout_masked_full = np.copy(cutout)  # keep the unmasked cutout so the *_full products are also defined in this branch
+             mask_bg = np.ones_like(cutout_masked, dtype=bool)
+             mask_bg[np.isnan(cutout_masked)] = False
+             mask_bg[~mask] = False  # mask external sources etc.
+
+             ### --- From now on, all photometry and background estimation is done on cutout_masked from external sources --- ###
+             cutout_masked[~mask_bg] = np.nan
+
+         # Mask NaNs before computing stats
+         valid = ~np.isnan(cutout_masked)
+         mean_bg, median_bg, std_bg = sigma_clipped_stats(cutout_masked[valid], sigma=3.0, maxiters=10)
+
+         # Create rms map and propagate NaNs
+         cutout_rms = np.full_like(cutout_masked, std_bg)
+         cutout_rms[~valid] = np.nan
+
+         weights = None
+         if weight_choice == "inverse_rms":
+             weights = 1.0 / (cutout_rms + mean_bg)
+         elif weight_choice == "snr":
+             weights = cutout_masked / (cutout_rms + mean_bg)
+         elif weight_choice == "power_snr":
+             weights = (cutout_masked / (cutout_rms + mean_bg))**weight_power_snr
+         elif weight_choice == "map":
+             weights = cutout_masked
+         elif weight_choice == "mask":
+             mask_stats = ~SigmaClip(sigma=3.0)(cutout_masked).mask
+             weights = mask_stats.astype(float)
+
+
+         for order in orders:
+             try:
+                 vary = config.get("fit_options", "vary", True)
+                 params = Parameters()
+
+                 # --- Add Gaussian components ---
+                 for i, (xc, yc) in enumerate(zip(xcen_cut, ycen_cut)):
+
+                     # --- Local peak near (xc, yc) in cutout_masked (3x3 neighbourhood around the rounded position) ---
+                     prefix = f"g{i}_"
+                     local_peak = np.nanmax(cutout_masked[int(yc)-1:int(yc)+2, int(xc)-1:int(xc)+2])
+
+                     # - peak in cutout masked is well-defined after background subtraction (fit_separately = True) - #
+                     if fit_separately:
+                         params.add(f"{prefix}amplitude", value=local_peak, min=0.4*local_peak, max=1.3*local_peak)
+                     else:
+                         params.add(f"{prefix}amplitude", value=local_peak, min=0.2*local_peak, max=1.5*local_peak)
+
+                     if vary:
+                         params.add(f"{prefix}x0", value=xc, min=xc - 1, max=xc + 1)
+                         params.add(f"{prefix}y0", value=yc, min=yc - 1, max=yc + 1)
+                     else:
+                         params.add(f"{prefix}x0", value=xc, vary=False)
+                         params.add(f"{prefix}y0", value=yc, vary=False)
+
+                     params.add(f"{prefix}sx", value=(aper_inf+aper_sup)/2., min=aper_inf, max=aper_sup)
+                     params.add(f"{prefix}sy", value=(aper_inf+aper_sup)/2., min=aper_inf, max=aper_sup)
+                     params.add(f"{prefix}theta", value=0.0, min=-np.pi/2, max=np.pi/2)
+
+                 # --- Add full 2D polynomial background (including cross terms) ---
+                 if fit_gauss_and_bg_together:
+                     max_order_all = max(orders)
+
+                     for dx in range(max_order_all + 1):
+                         for dy in range(max_order_all + 1 - dx):
+                             pname = f"c{dx}_{dy}"
+                             val = median_bg if (dx == 0 and dy == 0) else 1e-5
+                             params.add(pname, value=val, vary=(dx + dy <= order))
+
+
+                 def model_fn(p, x, y):
+                     model = np.zeros_like(x, dtype=float)
+                     for i in range(len(xcen_cut)):
+                         prefix = f"g{i}_"
+                         A = p[f"{prefix}amplitude"]
+                         x0 = p[f"{prefix}x0"]
+                         y0 = p[f"{prefix}y0"]
+                         sx = p[f"{prefix}sx"]
+                         sy = p[f"{prefix}sy"]
+                         th = p[f"{prefix}theta"]
+                         a = (np.cos(th)**2)/(2*sx**2) + (np.sin(th)**2)/(2*sy**2)
+                         b = -np.sin(2*th)/(4*sx**2) + np.sin(2*th)/(4*sy**2)
+                         c = (np.sin(th)**2)/(2*sx**2) + (np.cos(th)**2)/(2*sy**2)
+                         model += A * np.exp(- (a*(x - x0)**2 + 2*b*(x - x0)*(y - y0) + c*(y - y0)**2))
+
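+                     # The coefficients a, b and c above are the standard parameterization of a
+                     # rotated elliptical 2D Gaussian: theta rotates the (sx, sy) axes, and for
+                     # theta = 0 the exponent reduces to (x-x0)**2/(2*sx**2) + (y-y0)**2/(2*sy**2).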
+                     if fit_gauss_and_bg_together:
+                         # Add the 2D polynomial background evaluated with the current coefficients
+                         max_order_all = max(orders)
+                         for dx in range(max_order_all + 1):
+                             for dy in range(max_order_all + 1 - dx):
+                                 pname = f"c{dx}_{dy}"
+                                 model += p[pname] * (x**dx) * (y**dy)
+
+                     # Final check
+                     model = np.where(np.isfinite(model), model, 0.0)
+                     return model
+
+                 def residual(params, x, y, data, weights=None):
+                     model = model_fn(params, x, y)
+                     resid = np.asarray(model - data, dtype=np.float64)  # Ensure float array
+
+                     if weights is not None:
+                         resid *= weights
+
+                     # Ensure residual is a clean float64 array
+                     resid = np.asarray(resid, dtype=np.float64).ravel()
+
+                     if use_l2 and fit_gauss_and_bg_together:
+                         penalty_values = [
+                             float(params[p].value)
+                             for p in params if p.startswith("c")
+                         ]
+
+                         if penalty_values:
+                             penalty_resid = lambda_l2 * np.array(penalty_values, dtype=np.float64)
+                             return np.concatenate([resid.ravel(), penalty_resid.ravel()])
+                     return resid
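+
+                 # Appending lambda_l2 * c_ij to the residual vector is a simple ridge (L2)
+                 # penalty on the background coefficients: the least-squares solver then
+                 # minimizes ||resid||**2 + lambda_l2**2 * sum(c_ij**2), damping the
+                 # polynomial background when use_l2 is enabled.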
+
+                 # --- Extract extra minimize kwargs from config ---
+                 fit_cfg = config.get("fit_options", {})
+                 minimize_keys = ["max_nfev", "xtol", "ftol", "gtol", "calc_covar", "loss", "f_scale"]
+                 minimize_kwargs = {}
+
+                 for key in minimize_keys:
+                     val = fit_cfg.get(key)
+                     if val is not None:
+                         if key == "calc_covar":
+                             minimize_kwargs[key] = bool(val)
+                         elif key == "max_nfev":
+                             minimize_kwargs[key] = int(val)
+                         elif key == "loss":  # must be a string
+                             minimize_kwargs[key] = str(val)
+                         else:
+                             minimize_kwargs[key] = float(val)
+
+                 # --- Call minimize with dynamic kwargs ONLY across good pixels (masked sources within each box) ---
+                 valid = ~np.isnan(cutout_masked)
+                 x_valid = xx.ravel()[valid.ravel()]
+                 y_valid = yy.ravel()[valid.ravel()]
+                 data_valid = cutout_masked.ravel()[valid.ravel()]
+                 weights_valid = weights.ravel()[valid.ravel()] if weights is not None else None
+
+                 result = minimize(
+                     residual,
+                     params,
+                     args=(x_valid, y_valid, data_valid),
+                     kws={'weights': weights_valid},
+                     method=fit_cfg.get("fit_method", "leastsq"),
+                     **minimize_kwargs
+                 )
+
+                 # --- Evaluate reduced chi**2, BIC and NMSE (Normalized Mean Squared Error) statistics --- #
+                 if result.success:
+                     # Evaluate model on grid #
+                     model_eval = model_fn(result.params, xx, yy)
+
+                     # Compute normalized mean squared error only on valid pixels
+                     valid_mask = np.isfinite(cutout_masked) & np.isfinite(model_eval)
+                     resid_vals = (model_eval - cutout_masked)[valid_mask]
+                     mse = np.mean(resid_vals**2)
+
+                     norm = np.mean(cutout_masked[valid_mask]**2) + 1e-12
+                     nmse = mse / norm
+
+                     redchi = result.redchi
+                     bic = result.bic
+
+                     if minimize_method == "nmse":
+                         my_min = nmse
+                     elif minimize_method == "bic":
+                         my_min = bic
+                     else:  # "redchi" (default) or any unrecognized choice
+                         my_min = redchi
+                     logger_file_only.info(f"[SUCCESS] Fit (box={cutout_masked.shape[1], cutout_masked.shape[0]}, order={order}) → reduced chi² = {result.redchi:.5f}, NMSE = {nmse:.2e}, BIC = {bic:.2e}")
+                 else:
+                     nmse = np.nan
+                     redchi = np.nan
+                     bic = np.nan
+                     my_min = np.nan
+                     logger_file_only.error(f"[FAILURE] Fit failed (box={cutout_masked.shape[1], cutout_masked.shape[0]}, order={order})")
+
+                 if my_min < best_min:
+                     best_result = result
+                     best_nmse = nmse
+                     best_redchi = redchi
+                     best_bic = bic
+                     if fit_separately:
+                         best_order = back_order
+                     else:
+                         best_order = order
+                     best_cutout = cutout_masked
+                     best_cutout_masked_full = cutout_masked_full
+                     best_header = cutout_header
+
+                     if bg_model is not None:
+                         bg_model = np.where(np.isfinite(cutout_masked), bg_model, np.nan)
+                     best_bg_model = bg_model
+
+                     best_slice = (slice(ymin, ymax), slice(xmin, xmax))
+                     bg_mean = median_bg
+                     best_box = (cutout_masked.shape[1], cutout_masked.shape[0])
+                     best_min = my_min
+
+             except Exception as e:
+                 logger.error(f"[ERROR] Fit failed (box={cutout_masked.shape[1], cutout_masked.shape[0]}, order={order}): {e}")
+                 continue
+
+     if best_result is not None:
+         fit_status = 1  # 1 if True, 0 if False
+
+         yy, xx = np.indices(best_cutout.shape)
+         bg_vals = model_fn(best_result.params, xx, yy)
+         gauss_vals = np.zeros_like(bg_vals)
+
+         for i in range(len(xcen)):
+             prefix = f"g{i}_"
+             A = best_result.params[f"{prefix}amplitude"]
+             x0 = best_result.params[f"{prefix}x0"]
+             y0 = best_result.params[f"{prefix}y0"]
+             sx = best_result.params[f"{prefix}sx"]
+             sy = best_result.params[f"{prefix}sy"]
+             th = best_result.params[f"{prefix}theta"]
+             a = (np.cos(th)**2)/(2*sx**2) + (np.sin(th)**2)/(2*sy**2)
+             b = -np.sin(2*th)/(4*sx**2) + np.sin(2*th)/(4*sy**2)
+             c = (np.sin(th)**2)/(2*sx**2) + (np.cos(th)**2)/(2*sy**2)
+             gauss_vals += A * np.exp(- (a*(xx - x0)**2 + 2*b*(xx - x0)*(yy - y0) + c*(yy - y0)**2))
+
+         bg_component = bg_vals - gauss_vals if fit_gauss_and_bg_together else np.zeros_like(bg_vals)
+         bg_mean = np.mean(bg_component) if fit_gauss_and_bg_together else 0.0
+
+         model_eval = model_fn(best_result.params, xx, yy)
+         residual_map = best_cutout - model_eval
+
+         # --- save best fit in fits format --- #
+         try:
+             fits_fitting = config.get("fits_output", "fits_fitting", False)
+             fits_output_dir_fitting = os.path.join(dir_root, config.get("fits_output", "fits_output_dir_fitting", "fits/fitting"))
+         except Exception:
+             fits_fitting = False
+
+         if fits_fitting:
+             def save_fits(array, output_dir, label_name, extension_name, header=None):
+                 # Ensure the output directory exists
+                 os.makedirs(output_dir, exist_ok=True)
+
+                 # Create the FITS filename based on the label and extension type
+                 filename = f"{output_dir}/{label_name}_{extension_name}.fits"
+
+                 # Create a PrimaryHDU object and write the array into the FITS file
+                 hdu = fits.PrimaryHDU(data=array, header=header)
+                 if convert_mjy:
+                     hdu.header['BUNIT'] = 'mJy/pixel'
+                 else:
+                     hdu.header['BUNIT'] = 'Jy/pixel'
+                 hdul = fits.HDUList([hdu])
+
+                 # Write the FITS file
+                 hdul.writeto(filename, overwrite=True)
+
+             save_fits(best_cutout, fits_output_dir_fitting, f"HYPER_MAP_{suffix}_ID_{count_source_blended_indexes[0]}_{count_source_blended_indexes[1]}", "cutout", header=best_header)
+             save_fits(best_cutout_masked_full, fits_output_dir_fitting, f"HYPER_MAP_{suffix}_ID_{count_source_blended_indexes[0]}_{count_source_blended_indexes[1]}", "cutout_masked_full", header=best_header)
+             save_fits(model_eval, fits_output_dir_fitting, f"HYPER_MAP_{suffix}_ID_{count_source_blended_indexes[0]}_{count_source_blended_indexes[1]}", "model", header=best_header)
+             save_fits(residual_map, fits_output_dir_fitting, f"HYPER_MAP_{suffix}_ID_{count_source_blended_indexes[0]}_{count_source_blended_indexes[1]}", "residual", header=best_header)
+
+         # --- visualize best fit in png format --- #
+         try:
+             visualize = config.get("visualization", "visualize_fitting")
+         except Exception:
+             visualize = False
+
+         try:
+             output_dir_vis = os.path.join(dir_root, config.get("visualization", "output_dir_fitting", "images/fitting"))
+         except Exception:
+             output_dir_vis = "Images/Fitting"
+
+         if visualize:
+             logger_file_only.info("2D and 3D visualization of the Gaussian fits and residual ON")
+
+             plot_fit_summary(
+                 cutout=best_cutout,
+                 cutout_masked_full=best_cutout_masked_full,
+                 model=model_eval,
+                 residual=residual_map,
+                 output_dir=output_dir_vis,
+                 label_name=f"HYPER_MAP_{suffix}_ID_{count_source_blended_indexes[0]}_{count_source_blended_indexes[1]}" if group_id is not None else "group",
+                 box_size=best_box,
+                 poly_order=best_order,
+                 nmse=best_nmse
+             )
+
+         # --- Optionally save separated background model as FITS --- #
+         try:
+             fits_bg_separate = config.get("fits_output", "fits_bg_separate", False)
+             fits_output_dir_bg_separate = os.path.join(dir_root, config.get("fits_output", "fits_output_dir_bg_separate", "fits/bg_separate"))
+         except Exception:
+             fits_bg_separate = False
+
+         if fits_bg_separate:
+             # Ensure the output directory exists
+             os.makedirs(fits_output_dir_bg_separate, exist_ok=True)
+
+             label_name = f"HYPER_MAP_{suffix}_ID_{count_source_blended_indexes[0]}_{count_source_blended_indexes[1]}"
+             filename = f"{fits_output_dir_bg_separate}/{label_name}_bg_masked3D.fits"
+
+             # Create a PrimaryHDU object and write the array into the FITS file
+             convert_mjy = config.get("units", "convert_mJy")
+
+             hdu = fits.PrimaryHDU(data=bg_model, header=cutout_header)
+             if convert_mjy:
+                 hdu.header['BUNIT'] = 'mJy/pixel'
+             else:
+                 hdu.header['BUNIT'] = 'Jy/pixel'
+             hdu.writeto(filename, overwrite=True)
+
+         # --- Visualize 3D separated background estimation in png format --- #
+         try:
+             visualize_bg = config.get("visualization", "visualize_bg_separate", False)
+         except Exception:
+             visualize_bg = False
+
+         if visualize_bg and best_bg_model is not None:
+             logger_file_only.info("[INFO] Plotting 3D background model from masked map subtraction...")
+
+             output_dir_vis = os.path.join(dir_root, config.get("visualization", "output_dir_bg_separate", "plots/bg_separate"))
+             os.makedirs(output_dir_vis, exist_ok=True)
+
+             fig = plt.figure(figsize=(6, 5))
+             ax = fig.add_subplot(111, projection='3d')
+             ax.plot_surface(xx, yy, best_bg_model, cmap="viridis", linewidth=0, antialiased=True)
+
+             ax.set_xlabel("X (pix)", fontsize=8, fontweight="bold")
+             ax.set_ylabel("Y (pix)", fontsize=8, fontweight="bold")
+             ax.set_zlabel("Flux (Jy)", fontsize=8, fontweight="bold")
+             for label in (ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()):
+                 label.set_fontsize(8)
+                 label.set_fontweight("bold")
+
+             ax.set_title("Initial Background Model from Masked Map", fontsize=10, fontweight="bold")
+             plt.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.12)
+             label_str = f"HYPER_MAP_{suffix}_ID_{count_source_blended_indexes[0]}_{count_source_blended_indexes[1]}" if count_source_blended_indexes is not None else "group"
+             outname = os.path.join(output_dir_vis, f"{label_str}_bg_masked3D.png")
+             plt.savefig(outname, dpi=300, bbox_inches="tight")
+             plt.close()
+
+         return fit_status, best_result, model_fn, best_order, best_cutout, best_cutout_masked_full, best_slice, best_header, bg_mean, best_bg_model, best_box, best_nmse, best_redchi, best_bic
+
+     else:
+         # Ensure return is always complete
+         return 0, None, None, None, cutout_masked, None, (None, None), None, None, None, None, None, None, None