lime-stable 2.0.2__tar.gz → 2.2.dev1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. {lime_stable-2.0.2/src/lime_stable.egg-info → lime_stable-2.2.dev1}/PKG-INFO +2 -2
  2. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/pyproject.toml +3 -2
  3. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/archives/read_fits.py +69 -21
  4. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/changelog.txt +11 -4
  5. lime_stable-2.2.dev1/src/lime/fitting/redshift.py +338 -0
  6. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/io.py +30 -20
  7. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/lime.toml +1 -1
  8. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/observations.py +174 -27
  9. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/plotting/bokeh_plots.py +169 -50
  10. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/plotting/format.py +9 -5
  11. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/plotting/plots.py +140 -65
  12. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/plotting/plots_interactive.py +8 -8
  13. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/plotting/theme_lime.toml +21 -2
  14. lime_stable-2.2.dev1/src/lime/resources/generator_db.py +140 -0
  15. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/retrieve/line_bands.py +37 -8
  16. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/tools.py +10 -42
  17. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/transitions.py +291 -68
  18. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/workflow.py +419 -168
  19. {lime_stable-2.0.2 → lime_stable-2.2.dev1/src/lime_stable.egg-info}/PKG-INFO +2 -2
  20. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime_stable.egg-info/requires.txt +1 -1
  21. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_io.py +1 -1
  22. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_spectrum.py +1 -1
  23. lime_stable-2.0.2/src/lime/fitting/redshift.py +0 -317
  24. lime_stable-2.0.2/src/lime/resources/generator_db.py +0 -133
  25. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/LICENSE.rst +0 -0
  26. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/MANIFEST.in +0 -0
  27. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/README.md +0 -0
  28. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/setup.cfg +0 -0
  29. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/__init__.py +0 -0
  30. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/archives/__init__.py +0 -0
  31. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/archives/tables.py +0 -0
  32. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/fitting/__init__.py +0 -0
  33. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/fitting/lines.py +0 -0
  34. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/inference/detection.py +0 -0
  35. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/inference/intensity_threshold.py +0 -0
  36. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/plotting/__init__.py +0 -0
  37. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/plotting/utils.py +0 -0
  38. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/resources/__init__.py +0 -0
  39. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/resources/generator_logo.py +0 -0
  40. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/resources/lines_database_v2.0.0.txt +0 -0
  41. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/resources/types_params.txt +0 -0
  42. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/retrieve/__init__.py +0 -0
  43. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime/rsrc_manager.py +0 -0
  44. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime_stable.egg-info/SOURCES.txt +0 -0
  45. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime_stable.egg-info/dependency_links.txt +0 -0
  46. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/src/lime_stable.egg-info/top_level.txt +0 -0
  47. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_astro.py +0 -0
  48. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_cube.py +0 -0
  49. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_line.py +0 -0
  50. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_model.py +0 -0
  51. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_plots.py +0 -0
  52. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_read_fits.py +0 -0
  53. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_redshift.py +0 -0
  54. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_resources.py +0 -0
  55. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_sample.py +0 -0
  56. {lime_stable-2.0.2 → lime_stable-2.2.dev1}/tests/test_tools.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lime-stable
3
- Version: 2.0.2
3
+ Version: 2.2.dev1
4
4
  Summary: Line measuring algorithm for astronomical spectra
5
5
  Author-email: Vital Fernández <vgf@stsci.edu>
6
6
  License: GPL-3.0-or-later
@@ -18,7 +18,7 @@ Requires-Dist: scipy~=1.16
18
18
  Requires-Dist: tomli>=2.0.0; python_version < "3.11"
19
19
  Provides-Extra: full
20
20
  Requires-Dist: asdf~=4.1; extra == "full"
21
- Requires-Dist: aspect-stable~=0.4.1; extra == "full"
21
+ Requires-Dist: aspect-stable~=0.7.dev1; extra == "full"
22
22
  Requires-Dist: bokeh~=3.8; extra == "full"
23
23
  Requires-Dist: mplcursors~=0.6; extra == "full"
24
24
  Requires-Dist: openpyxl~=3.1; extra == "full"
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "lime-stable"
7
- version = "2.0.2"
7
+ version = "2.2.dev1"
8
8
  readme = { file = "README.md", content-type = "text/markdown" }
9
9
  requires-python = ">=3.11"
10
10
  license = {text = "GPL-3.0-or-later"}
@@ -24,7 +24,7 @@ dependencies = ["astropy~=7.1",
24
24
 
25
25
  [project.optional-dependencies]
26
26
  full = ["asdf~=4.1",
27
- "aspect-stable~=0.4.1",
27
+ "aspect-stable~=0.7.dev1",
28
28
  "bokeh~=3.8",
29
29
  "mplcursors~=0.6",
30
30
  "openpyxl~=3.1",
@@ -46,4 +46,5 @@ pythonpath = ["src"]
46
46
  mpl-baseline-path = 'tests/baseline'
47
47
  mpl-results-path = 'tests/outputs'
48
48
  mpl-results-always = false
49
+ markers = ["mpl: matplotlib image comparison tests"]
49
50
  addopts = "-p no:asdf_schema_tester"
@@ -265,6 +265,7 @@ def check_fits_instructions(fits_source, online_provider=False):
265
265
  fits_reader = getattr(fits_manager, fits_source)
266
266
  else:
267
267
  source_type = 'instrument' if online_provider is False else 'survey'
268
+ # TODO show instruments supported
268
269
  raise LiMe_Error(f'Input {source_type} "{fits_source}" is not recognized. LiMe observation cannot be created.')
269
270
 
270
271
  else:
@@ -375,6 +376,40 @@ def load_fits(fits_address, data_ext_list=None, hdr_ext_list=None, url_check=Fal
375
376
  return data_list, header_list
376
377
 
377
378
 
379
+ def join_spectra_matrix(wave, flux, err=None):
380
+
381
+ # Make sure the wavelength array is increasing
382
+ # for i in range(wave.shape[0]):
383
+ # if wave[i, 0] > wave[i, -1]:
384
+ # wave[i] = wave[i, ::-1]
385
+ # flux[i] = flux[i, ::-1]
386
+ # err[i] = err[i, ::-1] if err is not None else None
387
+
388
+ # Make sure the passes are sorted:
389
+ key = np.nanmean(wave, axis=1)
390
+ order = np.argsort(key)
391
+ wave, flux, err = wave[order], flux[order], err[order] if err is not None else None
392
+
393
+ # Get dimensions
394
+ n_passes, n_pix = wave.shape
395
+
396
+ join_wl = 0.5 * (wave[:-1, -1] + wave[1:, 0])
397
+ cut_idx = np.sum(wave[:-1] <= join_wl[:, None], axis=1)
398
+ cut_idx = np.append(cut_idx, n_pix)
399
+
400
+ pix = np.arange(n_pix)
401
+ mask = pix < cut_idx[:, None]
402
+
403
+ wave_1d = wave[mask]
404
+ flux_1d = flux[mask]
405
+ err_1d = err[mask] if err is not None else None
406
+
407
+ lengths = np.sum(mask, axis=1)
408
+ starts = np.concatenate(([0], np.cumsum(lengths[:-1])))
409
+
410
+ return wave_1d, flux_1d, err_1d
411
+
412
+
378
413
  class OpenFits:
379
414
 
380
415
  def __init__(self, file_address, file_source=None, load_function=None, lime_object=None):
@@ -785,30 +820,42 @@ class OpenFits:
785
820
 
786
821
  """
787
822
 
788
- # Check dimensions of array
823
+ # Read the array
789
824
  data_list, header_list = load_fits(fits_address, data_ext_list, hdr_ext_list, url_check=False)
825
+
826
+ # Warning in the case of empty observartion
827
+ if len(data_list[0]['WAVELENGTH'].squeeze()) == 0:
828
+ _logger.critical(f'Input COS observation does not have scientific data ({fits_address}).')
829
+
830
+ # One single array with the data
790
831
  if data_list[0]['WAVELENGTH'].squeeze().ndim == 1:
791
832
  wave_arr = data_list[0]['WAVELENGTH'].squeeze()
792
833
  flux_arr = data_list[0]['FLUX'].squeeze()
793
834
  err_arr = data_list[0]['ERROR'].squeeze()
794
835
 
836
+ # Multiple passes
795
837
  else:
796
- # Get common middle index for joining the spectra
797
- # wave_matrix = data_list[0]['WAVELENGTH'][::-1]
798
- idcs_common = np.nonzero(data_list[0]['WAVELENGTH'][1, :] > data_list[0]['WAVELENGTH'][0, 0])[0]
799
- center_idx = idcs_common.shape[0] // 2
800
-
801
- # Create empty containers
802
- wave_arr = np.empty(data_list[0]['WAVELENGTH'].size - center_idx, data_list[0]['WAVELENGTH'].dtype) # TODO check for additional extension to join the spectra
803
- flux_arr = np.empty(data_list[0]['FLUX'].size - center_idx, data_list[0]['FLUX'].dtype) # dtype=data_list[0]['FLUX'].dtype)
804
- err_arr = np.empty(data_list[0]['ERROR'].size - center_idx, data_list[0]['ERROR'].dtype) # dtype=data_list[0]['ERROR'].dtype)
805
-
806
- # Fill with the array data
807
- arr_size = data_list[0]['WAVELENGTH'].shape[1]
808
- for key_arr, cont_arr in zip(['WAVELENGTH', 'FLUX', 'ERROR'], [wave_arr, flux_arr, err_arr]):
809
- cont_arr[0:arr_size - center_idx] = data_list[0][key_arr][1][0:arr_size - center_idx]
810
- cont_arr[arr_size - center_idx:] = data_list[0][key_arr][0]
811
- # print(key_arr, np.any(np.isnan(cont_arr)))
838
+
839
+ wave_arr, flux_arr, err_arr = join_spectra_matrix(data_list[0]['WAVELENGTH'],
840
+ data_list[0]['FLUX'],
841
+ data_list[0]['ERROR'])
842
+
843
+ # # Get common middle index for joining the spectra
844
+ # # wave_matrix = data_list[0]['WAVELENGTH'][::-1]
845
+ # idcs_common = np.nonzero(data_list[0]['WAVELENGTH'][1, :] > data_list[0]['WAVELENGTH'][0, 0])[0]
846
+ # center_idx = idcs_common.shape[0] // 2
847
+ #
848
+ # # Create empty containers
849
+ # wave_arr = np.empty(data_list[0]['WAVELENGTH'].size - center_idx, data_list[0]['WAVELENGTH'].dtype) # TODO check for additional extension to join the spectra
850
+ # flux_arr = np.empty(data_list[0]['FLUX'].size - center_idx, data_list[0]['FLUX'].dtype) # dtype=data_list[0]['FLUX'].dtype)
851
+ # err_arr = np.empty(data_list[0]['ERROR'].size - center_idx, data_list[0]['ERROR'].dtype) # dtype=data_list[0]['ERROR'].dtype)
852
+ #
853
+ # # Fill with the array data
854
+ # arr_size = data_list[0]['WAVELENGTH'].shape[1]
855
+ # for key_arr, cont_arr in zip(['WAVELENGTH', 'FLUX', 'ERROR'], [wave_arr, flux_arr, err_arr]):
856
+ # cont_arr[0:arr_size - center_idx] = data_list[0][key_arr][1][0:arr_size - center_idx]
857
+ # cont_arr[arr_size - center_idx:] = data_list[0][key_arr][0]
858
+ # # print(key_arr, np.any(np.isnan(cont_arr)))
812
859
 
813
860
  # Spectrum properties
814
861
  params_dict = SPECTRUM_FITS_PARAMS['cos']
@@ -960,7 +1007,7 @@ class OpenFits:
960
1007
  return wave_array, flux_cube, err_cube, header_list, fits_params
961
1008
 
962
1009
  @staticmethod
963
- def kcwi(fits_address, data_ext_list=(1, 2), hdr_ext_list=(1,2), **kwargs):
1010
+ def kcwi(fits_address, data_ext_list=(0, 1, 2), hdr_ext_list=(1,2), **kwargs):
964
1011
 
965
1012
  """
966
1013
 
@@ -993,13 +1040,14 @@ class OpenFits:
993
1040
  wave_array = np.linspace(w_min, w_max, pixels, endpoint=False)
994
1041
 
995
1042
  flux_cube = data_list[0]
996
- err_cube = data_list[1]
997
- pixel_mask_cube = np.isnan(flux_cube)
1043
+ err_cube = np.sqrt(data_list[1])
1044
+ mask_cube = (data_list[2] == 1) | np.isnan(flux_cube)
1045
+ # mask_cube = np.isnan(flux_cube)
998
1046
 
999
1047
  wcs = WCS(header_list[0])
1000
1048
 
1001
1049
  # Fits properties
1002
- fits_params = {**CUBE_FITS_PARAMS['kcwi'], 'pixel_mask': pixel_mask_cube, 'wcs': wcs}
1050
+ fits_params = {**CUBE_FITS_PARAMS['kcwi'], 'pixel_mask': mask_cube, 'wcs': wcs}
1003
1051
 
1004
1052
  return wave_array, flux_cube, err_cube, header_list, fits_params
1005
1053
 
@@ -69,7 +69,7 @@ LiMe minor update - 1.2.1 - 10/29/2024
69
69
  - Corrected bug on the latex label generation in the lime.line_bands command not reproducing the expected format
70
70
  - The unit_conversion function for Cube observations now uses the format from the previous update.
71
71
 
72
- LiMe medium update - 1.3.0 - 10/29/2024
72
+ LiMe medium update - 1.3.0 - 12/10/2024
73
73
  - This update includes ASPECT as an optional dependency and has adapted several functions to include the possibility to use its predictions
74
74
  - Added the Spectrum.retrieve attribute to group tasks which return data for the user related to the spectrum
75
75
  - Added the Spectrum.retrieve.line_bands to return the line bands which match the observation redshift, wave interval and components detection.
@@ -86,7 +86,7 @@ LiMe medium update - 1.3.0 - 10/29/2024
86
86
  - The air_to_vacuum_function now only applies the theoretical relation, its inputs and outputs are wavelength arrays.
87
87
  - The new lime.Line.update_label function can be used to update the line label by reviewing the line properties.
88
88
 
89
- LiMe Mayor update - 2.0.1 - XX/XX/XXXX
89
+ LiMe Mayor update - 2.0.1 - 10/20/2025
90
90
  - Change installer convention from "setup.py + requirements.txt" to pyproject.toml
91
91
  - Updated dependencies to current version of packages and for python 3.12
92
92
  - Renamed cfg.toml to lime.toml
@@ -114,5 +114,12 @@ LiMe Mayor update - 2.0.1 - XX/XX/XXXX
114
114
  - Change "output_address" argument name to "fname" across functions
115
115
  - Now the ``cont_source`` argument in the ``.fit`` functions gives the option to fit the line continuum using the ``central`` and ``adjacent`` bands in addition to the ``fit`` continuum.
116
116
 
117
- LiMe Mayor update - 2.0.2 - XX/XX/XXXX
118
- - Updated the documentation to show the line continua fitting options.
117
+ LiMe minor update - 2.0.4 - 12/05/2025
118
+ - Added function to rebin a spectrum given a wavelength range, a number of pixels, or a pixel width (no documentation entry yet): lime.Spectrum.retrieve.rebinned
119
+ - The lime.save_cfg function's first two arguments have changed order (the output file location now comes first, followed by the parameter dictionary)
120
+ - The default colors for the continuum fitting plots and peak detection plots have changed
121
+ - Added support for KCWI cubes
122
+ - Further development for the functions involving redshift and line fitting using aspect
123
+ - The spatial masking functions no longer require the 'PARAM', 'PARAMIDX', 'PARAMVAL' and 'NUMSPAXE' entries in the .fits headers to plot the mask overlays
124
+
125
+
@@ -0,0 +1,338 @@
1
+ import logging
2
+
3
+ import matplotlib.pyplot as plt
4
+ import numpy as np
5
+ from scipy.optimize import minimize, linear_sum_assignment
6
+
7
+ from lime.io import LiMe_Error
8
+ from lime.fitting.lines import compute_inst_sigma_array, gaussian_model
9
+ from lime.plotting.plots import redshift_key_evaluation, redshift_permu_evaluation
10
+
11
+ try:
12
+ import aspect
13
+ aspect_check = True
14
+ except ImportError:
15
+ aspect_check = False
16
+
17
+ _logger = logging.getLogger('LiMe')
18
+
19
+ c_KMpS = 299792.458
20
+ k_gFWHM = 2 * np.sqrt(2 * np.log(2))
21
+
22
+
23
+ def comp_counter(arr_mask: np.ndarray) -> int:
24
+ return np.sum(~arr_mask[:-1] & arr_mask[1:]) + arr_mask[0]
25
+
26
+
27
+ def compute_gaussian_ridges(redshift, lines_lambda, wave_matrix, amp_arr, band_vsigma, resol_arr, n_sigma=3):
28
+
29
+ # Compute the observed line wavelengths
30
+ obs_lambda = lines_lambda * (1 + redshift)
31
+ obs_lambda = obs_lambda[(obs_lambda > wave_matrix[0, 0]) & (obs_lambda < wave_matrix[0, -1])]
32
+
33
+ if obs_lambda.size > 1:
34
+
35
+ # Compute Gaussian centroids
36
+ idcs_obs = np.searchsorted(wave_matrix[0, :], obs_lambda)
37
+ mu_lines = wave_matrix[0, :][idcs_obs]
38
+
39
+ # Compute Gaussian sigmas
40
+ sigma_lines = mu_lines * (band_vsigma / c_KMpS) + mu_lines / (resol_arr[idcs_obs] * k_gFWHM)
41
+
42
+ # Compute the Gaussian bands
43
+ x_matrix = wave_matrix[:idcs_obs.size, :]
44
+ gauss_matrix = gaussian_model(x_matrix, amp_arr, mu_lines[:, None], sigma_lines[:, None])
45
+ gauss_arr = gauss_matrix.sum(axis=0)
46
+
47
+ # Set maximum to 1:
48
+ idcs_one = gauss_arr > 1
49
+ gauss_arr[idcs_one] = 1
50
+
51
+ else:
52
+ gauss_arr = None
53
+
54
+ return gauss_arr
55
+
56
+
57
+ # def redshift_xor_method(spec, bands, z_min, z_max, z_nsteps, pred_arr, components_number, res_power, sigma_factor,
58
+ # sig_digits=2, plot_results=False):
59
+ #
60
+ # # Use the detection bands if provided
61
+ # if (pred_arr is not None) and (components_number is not None):
62
+ # idcs_lines = np.isin(pred_arr, components_number)
63
+ # else:
64
+ # idcs_lines = None
65
+ #
66
+ # # Continue with measurement
67
+ # if idcs_lines is not None:
68
+ #
69
+ # # Extract the data
70
+ # pixel_mask = spec.flux.mask
71
+ # wave_arr = spec.wave.data
72
+ # flux_arr = spec.flux.data
73
+ #
74
+ # # Loop throught the redshift steps
75
+ # if not np.all(pixel_mask):
76
+ #
77
+ # # Compute the resolution params
78
+ # sigma_arr = compute_inst_sigma_array(wave_arr, res_power)
79
+ # sigma_arr = sigma_arr if sigma_factor is None else sigma_arr * sigma_factor
80
+ #
81
+ # # Lines selection
82
+ # theo_lambda = bands.wavelength.to_numpy()
83
+ #
84
+ # # Parameters for the brute analysis
85
+ # z_arr = np.linspace(z_min, z_max, z_nsteps)
86
+ # wave_matrix = np.tile(wave_arr, (theo_lambda.size, 1))
87
+ # xor_sum = np.zeros(z_arr.size)
88
+ #
89
+ # # Invert the mask
90
+ # data_mask = ~pixel_mask
91
+ #
92
+ # # Revert the data
93
+ # for i, z_i in enumerate(z_arr):
94
+ # gauss_arr = compute_gaussian_ridges(z_i, theo_lambda, wave_matrix, 1, sigma_arr)
95
+ # xor_sum[i] = 0 if gauss_arr is None else np.sum(idcs_lines[data_mask] * gauss_arr[data_mask])
96
+ #
97
+ # z_infer = np.round(z_arr[np.argmax(xor_sum)], decimals=sig_digits)
98
+ #
99
+ # # No lines or all masked
100
+ # else:
101
+ # z_infer = None
102
+ #
103
+ # if plot_results and (z_infer is not None):
104
+ # gauss_arr_max = compute_gaussian_ridges(z_infer, theo_lambda, wave_matrix, 1, sigma_arr)
105
+ # redshift_key_evaluation(spec, z_infer, data_mask, gauss_arr_max, z_arr, xor_sum)
106
+ #
107
+ # # Do not attempt measurement
108
+ # else:
109
+ # z_infer = None
110
+ #
111
+ # return z_infer
112
+
113
+
114
+ def redshift_key_method(spec, bands, z_min, z_max, delta_z, pred_arr, components_number, band_vsigma,
115
+ method, sig_digits=2, detection_only=True, plot_results=False):
116
+
117
+ # Use the detection bands if provided
118
+ if (pred_arr is not None) and (components_number is not None):
119
+ idcs_lines = np.isin(pred_arr, components_number)
120
+ else:
121
+ idcs_lines = None
122
+
123
+ # For flux method give the option for fitting redshift without detection
124
+ method_flux = True if method == 'key' else False
125
+ if method_flux and not detection_only:
126
+ idcs_lines = np.ones(idcs_lines.shape).astype(bool)
127
+ else:
128
+ if np.all(idcs_lines):
129
+ _logger.warning('All the spectrum pixels match the input redshift components criteria')
130
+
131
+ # Continue with measurement
132
+ z_infer = None
133
+ if idcs_lines is not None:
134
+
135
+ # If there is only one line return nan
136
+ if not (method_flux and not detection_only):
137
+ # match np.count_nonzero(np.diff(np.r_[False, idcs_lines])):
138
+ match comp_counter(idcs_lines):
139
+ case 0:
140
+ return None # No components
141
+ case 1:
142
+ return np.nan # No components
143
+
144
+ # Extract the data
145
+ wave_arr = spec.wave.data
146
+ flux_arr = spec.flux.data
147
+
148
+ # Compute the resolving power if necessary
149
+ if spec.res_power is not None:
150
+ res_power = spec.res_power
151
+ else:
152
+ delta_lambda = np.ediff1d(wave_arr, to_end=0)
153
+ delta_lambda[-1] = delta_lambda[-2]
154
+ res_power = wave_arr / delta_lambda
155
+
156
+ # Lines selection
157
+ theo_lambda = bands.wavelength.to_numpy()
158
+
159
+ # Compute the redshift range
160
+ if delta_z is None:
161
+ delta_arr = np.diff(wave_arr)
162
+ delta_z = np.median(delta_arr)/np.median(wave_arr)
163
+ z_arr = np.arange(z_min, z_max + 0.5 * delta_z, delta_z)
164
+
165
+ # Parameters for the brute analysis
166
+ wave_matrix = np.tile(wave_arr, (theo_lambda.size, 1))
167
+ flux_sum = np.zeros(z_arr.size)
168
+
169
+ # Combine line and pixel_mask
170
+ mask = ~spec.flux.mask & idcs_lines
171
+
172
+ # Loop through the redshift steps
173
+ for i, z_i in enumerate(z_arr):
174
+
175
+ # Generate the redshift key
176
+ gauss_arr = compute_gaussian_ridges(z_i, theo_lambda, wave_matrix, 1, band_vsigma, res_power)
177
+
178
+ # Null gauss case
179
+ if gauss_arr is None:
180
+ flux_sum[i] = 0
181
+
182
+ # Compute cumulative flux or pixel-number sum
183
+ else:
184
+ # Check more than one line
185
+ if comp_counter((gauss_arr * mask) > 0.001) >= 2:
186
+ if method_flux:
187
+ flux_sum[i] = np.sum(flux_arr[mask] * gauss_arr[mask])
188
+ else:
189
+ flux_sum[i] = np.sum(idcs_lines[mask] * gauss_arr[mask])
190
+
191
+ z_infer = np.round(z_arr[np.argmax(flux_sum)], decimals=sig_digits)
192
+
193
+ if plot_results and (z_infer is not None):
194
+ gauss_arr_max = compute_gaussian_ridges(z_infer, theo_lambda, wave_matrix, 1, band_vsigma, res_power)
195
+ redshift_key_evaluation(spec, method, z_infer, mask, gauss_arr_max, z_arr, flux_sum, theo_lambda)
196
+
197
+ return z_infer
198
+
199
+
200
+ def permutation_objective_function(redshift, obs_arr, theo_arr):
201
+
202
+ adjusted_observed = obs_arr / (1 + redshift)
203
+ cost_matrix = np.abs(adjusted_observed[:, None] - theo_arr[None, :])
204
+
205
+ # Find the best matching subset using linear sum assignment
206
+ row_ind, col_ind = linear_sum_assignment(cost_matrix)
207
+ residual = np.sum(cost_matrix[row_ind, col_ind])
208
+
209
+ return
210
+
211
+
212
+ def permutation_residual(redshift, obs_arr, theo_arr):
213
+
214
+ return permutation_objective_function(redshift, obs_arr, theo_arr)
215
+
216
+
217
+ def compute_residual(Z, observed, theoretical):
218
+ """
219
+ Computes the residual for a given redshift Z.
220
+
221
+ Parameters:
222
+ - Z: Redshift value.
223
+ - observed: Observed transitions (array-like).
224
+ - theoretical: Theoretical transitions (array-like).
225
+
226
+ Returns:
227
+ - Residual value (float).
228
+ """
229
+ adjusted_observed = observed / (1 + Z)
230
+ cost_matrix = np.abs(adjusted_observed[:, None] - theoretical[None, :])
231
+
232
+ # Find the best matching subset using linear sum assignment
233
+ row_ind, col_ind = linear_sum_assignment(cost_matrix)
234
+ residual = np.sum(cost_matrix[row_ind, col_ind])
235
+
236
+ return residual
237
+
238
+
239
+ def redshift_permutation_method(spec, bands, z_min, z_max, pred_arr, components_number, plot_results):
240
+
241
+ # Use the detection array
242
+ if (pred_arr is not None) and (components_number is not None):
243
+ idcs_lines = np.isin(pred_arr, components_number)
244
+ else:
245
+ idcs_lines = None
246
+
247
+ # Decide if proceed
248
+ measure_check = True if np.any(idcs_lines) else False
249
+ if measure_check:
250
+
251
+ # Identify where changes occur (edges of ones and zeros)
252
+ edges = np.diff(np.concatenate(([0], idcs_lines, [0])))
253
+ start_indices = np.where(edges == 1)[0]
254
+ end_indices = np.where(edges == -1)[0] - 1
255
+
256
+ # Calculate observed wavelengths
257
+ central_indices = [(start + end) // 2 for start, end in zip(start_indices, end_indices)]
258
+ pixel_mask = spec.flux.mask
259
+ data_mask = ~pixel_mask
260
+ wave_arr = spec.wave.data if pixel_mask is not None else spec.wave
261
+ wave_obs = wave_arr[central_indices]
262
+
263
+ # Calculate theo wavelengths
264
+ wave_theo = bands.wavelength.to_numpy()
265
+
266
+ # Run the permutation
267
+ # result = minimize(permutation_residual, x0=5, bounds=[(z_min, z_max)])
268
+ result = minimize(lambda Z: compute_residual(Z, wave_obs, wave_theo), x0=0.01, bounds=[(z_min, z_max)],
269
+ method='L-BFGS-B')
270
+ z_infer = result.x[0]
271
+
272
+ # Recompute cost matrix and find the best matching subset
273
+ adjusted_observed = wave_obs / (1 + z_infer)
274
+ cost_matrix = np.abs(adjusted_observed[:, None] - wave_theo[None, :])
275
+ row_ind, col_ind = linear_sum_assignment(cost_matrix)
276
+
277
+ # Extract the best matching subset of theoretical transitions
278
+ best_matching_subset = wave_theo[col_ind]
279
+
280
+ if plot_results:
281
+ idcs_theo = (wave_theo * (1 + z_infer) >= wave_arr[0]) & (wave_theo * (1 + z_infer) <= wave_arr[-1])
282
+ redshift_permu_evaluation(spec, z_infer, wave_obs, wave_theo[idcs_theo] * (1 + z_infer))
283
+
284
+
285
+ else:
286
+ z_infer = None
287
+
288
+ return z_infer
289
+
290
+
291
+ class RedshiftFitting:
292
+
293
+ def __init__(self):
294
+
295
+ return
296
+
297
+ def redshift(self, bands, z_min=0, z_max=12, delta_z=None, mode='key', comps_list=['emission', 'doublet-em'],
298
+ res_power=None, detection_only=True, band_vsigma=70, sig_digits=2, plot_results=False):
299
+
300
+ '''
301
+ bands, z_min, z_max, z_nsteps, idcs_lines, res_power, sigma_factor, sig_digits=2,
302
+ detection_only=True, plot_results=False
303
+ '''
304
+
305
+ # Check that ASPECT is available
306
+ if not aspect_check:
307
+ _logger.info("ASPECT has not been installed the redshift measurements won't be constrained to lines")
308
+
309
+ # Get the features array
310
+ pred_arr, conf_arr = None, None
311
+ if aspect_check:
312
+ if self._spec.infer.pred_arr is None:
313
+ _logger.warning("The observation does not have a components detection array please run ASPECT")
314
+ else:
315
+ pred_arr, conf_arr = self._spec.infer.pred_arr, self._spec.infer.conf_arr
316
+
317
+ # Resolving power # TODO this should be read at another point...
318
+ res_power = self._spec.res_power if res_power is None else res_power
319
+
320
+ # Get the reference for the components
321
+ components_number = np.empty(len(comps_list)).astype(int)
322
+ for i, comp in enumerate(comps_list):
323
+ components_number[i] = aspect.cfg['shape_number'][comp]
324
+
325
+ # Set the type of fitting and the components to use
326
+ match mode:
327
+ case 'key' | 'xor':
328
+ z_infer = redshift_key_method(self._spec, bands, z_min, z_max, delta_z, pred_arr, components_number,
329
+ band_vsigma, mode, sig_digits=sig_digits,
330
+ detection_only=detection_only, plot_results=plot_results)
331
+ case 'permute':
332
+ z_infer = redshift_permutation_method(self._spec, bands, z_min, z_max, pred_arr, components_number,
333
+ plot_results=plot_results)
334
+ case _:
335
+ raise KeyError(f'Input redshift fitting technique "{mode}" is not recognized, please use: '
336
+ f'"key" or "xor"')
337
+
338
+ return z_infer
@@ -122,19 +122,6 @@ def hdu_to_log_df(file_path, page_name):
122
122
  hdu_log = hdul[page_name].data
123
123
 
124
124
  df_log = pd.DataFrame.from_records(data=hdu_log, index='index')
125
- #
126
- # # Change 'nan' to np.nan
127
- # if 'group_label' in df_log:
128
- # idcs_nan_str = df_log['group_label'] == 'nan'
129
- # df_log.loc[idcs_nan_str, 'group_label'] = np.nan
130
-
131
- # log_df = Table.read(file_path, page_name, character_as_bytes=False).to_pandas()
132
- # log_df.set_index('index', inplace=True)
133
- #
134
- # # Change 'nan' to np.nan
135
- # if 'group_label' in log_df:
136
- # idcs_nan_str = log_df['group_label'] == 'nan'
137
- # log_df.loc[idcs_nan_str, 'group_label'] = np.nan
138
125
 
139
126
  return df_log
140
127
 
@@ -154,7 +141,6 @@ def parse_lime_cfg(toml_cfg, fit_cfg_suffix='_line_fitting'):
154
141
  return toml_cfg
155
142
 
156
143
 
157
- # Function to load configuration file
158
144
  def load_cfg(file_address, fit_cfg_suffix='_line_fitting'):
159
145
 
160
146
  """
@@ -219,8 +205,7 @@ def load_cfg(file_address, fit_cfg_suffix='_line_fitting'):
219
205
  return cfg_lime
220
206
 
221
207
 
222
- # Function to save SpecSyzer configuration file
223
- def save_cfg(param_dict, output_file, section_name=None, clear_section=False):
208
+ def save_cfg(output_file, param_dict, section_name=None, clear_section=False):
224
209
 
225
210
  """
226
211
  This function safes the input dictionary into a configuration file. If no section is provided the input dictionary
@@ -231,11 +216,36 @@ def save_cfg(param_dict, output_file, section_name=None, clear_section=False):
231
216
  output_path = Path(output_file)
232
217
 
233
218
  if output_path.suffix == '.toml':
219
+
234
220
  # TODO review convert numpy arrays and floats64
235
221
  if toml_check:
236
- toml_str = toml.dumps(param_dict)
237
- with open(output_path, "w") as f:
238
- f.write(toml_str)
222
+
223
+ # Section dict or the default dictionary
224
+ output_data = param_dict if section_name is None else {section_name: param_dict}
225
+
226
+ # If the file does not exist create a new file
227
+ if not output_path.is_file():
228
+ with open(output_file, "w") as f:
229
+ toml.dump(output_data, f)
230
+
231
+ # Load the file and add the new section
232
+ else:
233
+ with open(output_path, 'r') as f:
234
+ full_config = toml.load(f)
235
+
236
+ # Update the section data
237
+ full_config.update(output_data)
238
+ # if section_name is not None:
239
+ # full_config.update(output_data)
240
+ #
241
+ # # Add the new data
242
+ # else:
243
+ # full_config.update(output_data)
244
+
245
+ # Save the new data
246
+ with open(output_file, "w") as f:
247
+ toml.dump(full_config, f)
248
+
239
249
  else:
240
250
  raise LiMe_Error(f'toml library is not installed. Toml files cannot be saved')
241
251
 
@@ -549,7 +559,7 @@ def save_frame(fname, dataframe, page='FRAME', parameters='all', header=None, co
549
559
 
550
560
  lineLogHDU = log_to_HDU(lines_log, ext_name=page, column_dtypes=column_dtypes, header_dict=header)
551
561
 
552
- if log_path.is_file(): # TODO this strategy is slow for many 2_guides
562
+ if log_path.is_file(): # TODO this strategy is slow for many configuration
553
563
  try:
554
564
  fits.update(log_path, data=lineLogHDU.data, header=lineLogHDU.header, extname=lineLogHDU.name, verify=True)
555
565
  except KeyError:
@@ -1,6 +1,6 @@
1
1
  [metadata]
2
2
  name = 'lime-stable'
3
- version = "2.0.2"
3
+ version = "2.2.dev1"
4
4
 
5
5
  # =====================
6
6
  # Spectrum / Long-slit