ACID-code-v2 0.2.3a2__tar.gz → 0.3.0a1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/.github/workflows/publish.yml +1 -1
  2. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/.gitignore +0 -1
  3. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/PKG-INFO +2 -2
  4. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/README.md +1 -1
  5. acid_code_v2-0.3.0a1/archive/LSD_archive.py +572 -0
  6. acid_code_v2-0.3.0a1/archive/mcmc_plotting_archive.py +100 -0
  7. acid_code_v2-0.3.0a1/example/tutorial_code.py +44 -0
  8. acid_code_v2-0.3.0a1/src/ACID_code_v2/ACID.py +967 -0
  9. acid_code_v2-0.3.0a1/src/ACID_code_v2/LSD.py +553 -0
  10. acid_code_v2-0.3.0a1/src/ACID_code_v2/__init__.py +2 -0
  11. acid_code_v2-0.3.0a1/src/ACID_code_v2/mcmc_utils.py +97 -0
  12. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/src/ACID_code_v2.egg-info/PKG-INFO +2 -2
  13. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/src/ACID_code_v2.egg-info/SOURCES.txt +3 -0
  14. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/tests.py +13 -10
  15. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/tutorial_test.py +23 -18
  16. acid_code_v2-0.2.3a2/example/tutorial_code.py +0 -43
  17. acid_code_v2-0.2.3a2/src/ACID_code_v2/ACID.py +0 -940
  18. acid_code_v2-0.2.3a2/src/ACID_code_v2/LSD.py +0 -1124
  19. acid_code_v2-0.2.3a2/src/ACID_code_v2/__init__.py +0 -2
  20. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/.gitattributes +0 -0
  21. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/.other_scripts/5_residual_profiles.py +0 -0
  22. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/.other_scripts/8_master_out_profiles.py +0 -0
  23. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/.readthedocs.yaml +0 -0
  24. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/.vscode/launch.json +0 -0
  25. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/LICENSE +0 -0
  26. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/acid.yml +0 -0
  27. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/docs/ACID.rst +0 -0
  28. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/docs/conf.py +0 -0
  29. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/docs/index.rst +0 -0
  30. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/docs/installation.rst +0 -0
  31. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/docs/make.bat +0 -0
  32. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/docs/requirements.txt +0 -0
  33. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/docs/using_ACID.rst +0 -0
  34. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/example/.make_syn_dat.py +0 -0
  35. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/example/example_linelist.txt +0 -0
  36. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/example/sample_spec_1.fits +0 -0
  37. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/example/sample_spec_2.fits +0 -0
  38. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/example/sample_spec_3.fits +0 -0
  39. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/pyproject.toml +0 -0
  40. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/setup.cfg +0 -0
  41. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/src/ACID_code_v2/utils.py +0 -0
  42. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/src/ACID_code_v2.egg-info/dependency_links.txt +0 -0
  43. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/src/ACID_code_v2.egg-info/requires.txt +0 -0
  44. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/src/ACID_code_v2.egg-info/top_level.txt +0 -0
  45. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/.DS_Store +0 -0
  46. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-28T21:11:56.678_blaze_B.fits +0 -0
  47. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-28T21:11:56.678_flat_A.fits +0 -0
  48. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-28T21:11:56.678_flat_B.fits +0 -0
  49. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-28T21:11:56.678_lamp_A.fits +0 -0
  50. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-28T21:11:56.678_lamp_B.fits +0 -0
  51. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-28T21:11:56.678_order_profile.fits +0 -0
  52. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_bis_G2_A.fits +0 -0
  53. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_bis_G2_B.fits +0 -0
  54. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_bis_K5_A.fits +0 -0
  55. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_ccf_G2_A.fits +0 -0
  56. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_ccf_G2_B.fits +0 -0
  57. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_ccf_K5_A.fits +0 -0
  58. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_e2ds_A.fits +0 -0
  59. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_e2ds_B.fits +0 -0
  60. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_s1d_A.fits +0 -0
  61. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:02:50.897_s1d_B.fits +0 -0
  62. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_bis_G2_A.fits +0 -0
  63. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_bis_G2_B.fits +0 -0
  64. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_bis_K5_A.fits +0 -0
  65. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_ccf_G2_A.fits +0 -0
  66. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_ccf_G2_B.fits +0 -0
  67. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_ccf_K5_A.fits +0 -0
  68. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_e2ds_A.fits +0 -0
  69. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_e2ds_B.fits +0 -0
  70. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_s1d_A.fits +0 -0
  71. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:08:22.216_s1d_B.fits +0 -0
  72. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_bis_G2_A.fits +0 -0
  73. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_bis_G2_B.fits +0 -0
  74. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_bis_K5_A.fits +0 -0
  75. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_ccf_G2_A.fits +0 -0
  76. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_ccf_G2_B.fits +0 -0
  77. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_ccf_K5_A.fits +0 -0
  78. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_e2ds_A.fits +0 -0
  79. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_e2ds_B.fits +0 -0
  80. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_s1d_A.fits +0 -0
  81. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:13:54.107_s1d_B.fits +0 -0
  82. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_bis_G2_A.fits +0 -0
  83. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_bis_G2_B.fits +0 -0
  84. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_bis_K5_A.fits +0 -0
  85. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_ccf_G2_A.fits +0 -0
  86. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_ccf_G2_B.fits +0 -0
  87. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_ccf_K5_A.fits +0 -0
  88. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_e2ds_A.fits +0 -0
  89. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_e2ds_B.fits +0 -0
  90. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_s1d_A.fits +0 -0
  91. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:19:25.377_s1d_B.fits +0 -0
  92. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_bis_G2_A.fits +0 -0
  93. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_bis_G2_B.fits +0 -0
  94. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_bis_K5_A.fits +0 -0
  95. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_ccf_G2_A.fits +0 -0
  96. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_ccf_G2_B.fits +0 -0
  97. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_ccf_K5_A.fits +0 -0
  98. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_e2ds_A.fits +0 -0
  99. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_e2ds_B.fits +0 -0
  100. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_s1d_A.fits +0 -0
  101. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:24:57.238_s1d_B.fits +0 -0
  102. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_bis_G2_A.fits +0 -0
  103. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_bis_G2_B.fits +0 -0
  104. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_bis_K5_A.fits +0 -0
  105. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_ccf_G2_A.fits +0 -0
  106. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_ccf_G2_B.fits +0 -0
  107. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_ccf_K5_A.fits +0 -0
  108. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_e2ds_A.fits +0 -0
  109. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_e2ds_B.fits +0 -0
  110. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_s1d_A.fits +0 -0
  111. {acid_code_v2-0.2.3a2 → acid_code_v2-0.3.0a1}/tests/data/HARPS.2007-08-29T00:30:28.617_s1d_B.fits +0 -0
@@ -10,7 +10,7 @@ jobs:
10
10
  - uses: actions/checkout@v3
11
11
  - uses: actions/setup-python@v4
12
12
  with:
13
- python-version: '3.11'
13
+ python-version: '3.13'
14
14
  - run: pip install build twine
15
15
  - run: python -m build
16
16
  - run: twine upload dist/* -u __token__ -p ${{ secrets.PYPI_TOKEN }}
@@ -26,5 +26,4 @@ wheels/
26
26
  *.egg-info/
27
27
  .installed.cfg
28
28
  *.egg
29
- unfinished/
30
29
  packager/
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ACID_code_v2
3
- Version: 0.2.3a2
3
+ Version: 0.3.0a1
4
4
  Summary: Returns line profiles from input spectra by fitting the stellar continuum and performing LSD
5
5
  Author: Lucy Dolan
6
6
  Author-email: Benjamin Cadell <bcadell01@qub.ac.uk>
@@ -26,7 +26,7 @@ Dynamic: license-file
26
26
  A.C.I.D v2 (Accurate Continuum fItting and Deconvolution)
27
27
  ==============================================================
28
28
 
29
- ACID_v2 is a fork of ACID (https://github.com/ldolan05/ACID) from the work of Lucy Dolan for her PhD. ACID_v2 improves on ACID by:
29
+ ACID_v2 (https://github.com/Benjamin-Cadell/ACID_v2) is a fork of ACID (https://github.com/ldolan05/ACID) from the work of Lucy Dolan for her PhD. ACID_v2 improves on ACID by:
30
30
  - Updating packages and code to work with newer and stable versions of python.
31
31
  - Improving memory management so that ACID can be run on MacOS without crashes (ie extending compatibility to all POSIX systems)
32
32
  - Adding additional kwargs to ACID to tailor output, including verbosity settings, MCMC number of steps, multiprocessing switch, and more.
@@ -1,7 +1,7 @@
1
1
  A.C.I.D v2 (Accurate Continuum fItting and Deconvolution)
2
2
  ==============================================================
3
3
 
4
- ACID_v2 is a fork of ACID (https://github.com/ldolan05/ACID) from the work of Lucy Dolan for her PhD. ACID_v2 improves on ACID by:
4
+ ACID_v2 (https://github.com/Benjamin-Cadell/ACID_v2) is a fork of ACID (https://github.com/ldolan05/ACID) from the work of Lucy Dolan for her PhD. ACID_v2 improves on ACID by:
5
5
  - Updating packages and code to work with newer and stable versions of python.
6
6
  - Improving memory management so that ACID can be run on MacOS without crashes (ie extending compatibility to all POSIX systems)
7
7
  - Adding additional kwargs to ACID to tailor output, including verbosity settings, MCMC number of steps, multiprocessing switch, and more.
@@ -0,0 +1,572 @@
1
+ # Old LSD alpha matrix code, iterative step:
2
+ # The below I think works similarly to above, but uses only for loops, rather than using a for loop
3
+ # that only involves numpy calculations. With testing, the above is much faster.
4
+ else:
5
+ warnings.warn('Large wavelength ranges give large computation time. Seperate wavelength range into smaller chunks for faster computation.', DeprecationWarning, stacklevel=2)
6
+ alpha = np.zeros((len(blankwaves), len(velocities)))
7
+
8
+ for j in tqdm(range(0, len(blankwaves)), desc='Calculating alpha matrix'):
9
+ for i in (range(0, len(self.wavelengths_expected))):
10
+ vdiff = ((blankwaves[j] - self.wavelengths_expected[i]) * ckms) / self.wavelengths_expected[i]
11
+ if vdiff <= (np.max(velocities) + deltav) and vdiff >= (np.min(velocities) - deltav):
12
+ diff = blankwaves[j] - self.wavelengths_expected[i]
13
+ vel = const.c / 1e3 * (diff / self.wavelengths_expected[i])
14
+ for k in range(0, len(velocities)):
15
+ x = (velocities[k] - vel) / deltav
16
+ if -1. < x and x < 0.:
17
+ delta_x = (1 + x)
18
+ alpha[j, k] = alpha[j, k] + depths_expected[i] * delta_x
19
+ elif 0. <= x and x < 1.:
20
+ delta_x = (1 - x)
21
+ alpha[j, k] = alpha[j, k] + depths_expected[i] * delta_x
22
+ else:
23
+ pass
24
+
25
+ # Vectorized old part:
26
+ # The below was simplified using np.clip (as shown above)
27
+ alpha_mask_1 = np.logical_and(-1. < x, x < 0.)
28
+ alpha_mask_2 = np.logical_and(0. <= x, x < 1.)
29
+ delta_x_1 = 1 + x
30
+ delta_x_2 = 1 - x
31
+ delta_x_1[alpha_mask_1==False] = 0
32
+ delta_x_2[alpha_mask_2==False] = 0
33
+ # Update alpha array using calculated delta_x values
34
+ alpha = np.zeros((len(blankwaves), len(velocities)))
35
+ alpha += (depths_expected[:, np.newaxis] * delta_x_1).sum(axis=1)
36
+ alpha += (depths_expected[:, np.newaxis] * delta_x_2).sum(axis=1)
37
+
38
+ # Old blaze correct code part 1:
39
+ # ## test - s1d interpolated onto e2ds wavelength grid ##
40
+ # hdu_e2ds=fits.open('%s'%file.replace('s1d', 'e2ds'))
41
+ # spec_e2ds=hdu_e2ds[0].data
42
+ # header_e2ds=hdu_e2ds[0].header
43
+
44
+ # wave_e2ds=get_wave(spec_e2ds, header_e2ds)*(1.+brv/2.99792458e5)
45
+
46
+ # # plt.figure()
47
+ # # plt.scatter(np.arange(len(wave_e2ds[order][:-1])), wave_e2ds[order][1:]-wave_e2ds[order][:-1], label = 'e2ds wave (after berv)')
48
+ # # plt.scatter(np.arange(len(wave_e2ds[order][:-1])), get_wave(spec_e2ds, header_e2ds)[order][1:]-get_wave(spec_e2ds, header_e2ds)[order][:-1], label = 'e2ds wave (before berv)')
49
+ # # # plt.scatter(np.arange(len(wavelengths[:-1])), wavelengths[:-1]-wavelengths[1:], label = 's1d wave')
50
+ # # plt.legend()
51
+ # # plt.show()
52
+
53
+ # # id = np.logical_and(wave_e2ds<np.max(wavelengths), wave_e2ds>np.min(wavelengths))
54
+ # # print(wave_e2ds*u.AA)
55
+ # # print(wavelengths*u.AA)
56
+ # # print(fluxes*u.photon)
57
+
58
+ # blaze_file = glob.glob('%sblaze_folder/**blaze_A*.fits'%(directory))
59
+ # # print('%sblaze_folder/**blaze_A*.fits'%(directory))
60
+ # # print(blaze_file)
61
+ # blaze_file = blaze_file[0]
62
+ # blaze =fits.open('%s'%blaze_file)
63
+ # blaze_func = blaze[0].data
64
+ # spec_e2ds = spec_e2ds/blaze_func
65
+
66
+ # diff_arr = wavelengths[1:] - wavelengths[:-1]
67
+ # print(diff_arr)
68
+ # wavelengths = wavelengths[:-1]
69
+ # fluxes = fluxes[:-1]/diff_arr
70
+
71
+ # s1d_spec = Spectrum1D(spectral_axis = wavelengths*u.AA, flux = fluxes*u.Unit('photon AA-1'))
72
+ # fluxcon = FluxConservingResampler()
73
+ # new_spec = fluxcon(s1d_spec, wave_e2ds[order]*u.AA)
74
+
75
+ # wavelengths = new_spec.spectral_axis
76
+ # fluxes = new_spec.flux
77
+
78
+ # wavelengths = wavelengths[10:len(wave_e2ds[order])-9]/u.AA
79
+ # fluxes = fluxes[10:len(wave_e2ds[order])-9]/u.Unit('photon AA-1')
80
+ # flux_error_order = flux_error_order[10:len(wave_e2ds[order])-10]
81
+
82
+ # diff_arr = wavelengths[1:] - wavelengths[:-1]
83
+ # print(diff_arr)
84
+ # wavelengths = wavelengths[:-1]
85
+ # fluxes = fluxes[:-1]*diff_arr
86
+
87
+ # print(wavelengths)
88
+ # print(fluxes)
89
+
90
+ # plt.figure()
91
+ # plt.title('interpolated s1d comapred to actual e2ds spectrum')
92
+ # plt.plot(wavelengths, fluxes, label = 'interpolated s1d on e2ds wave grid')
93
+ # plt.plot(wave_e2ds[order], spec_e2ds[order], label = 'e2ds spectrum')
94
+ # plt.legend()
95
+ # plt.show()
96
+
97
+
98
+ # ## end of test ##
99
+
100
+ # ## test2 - synthetic s1d spectrum - using wavelength grid ##
101
+ # def gauss(x1, rv, sd, height, cont):
102
+ # y1 = height*np.exp(-(x1-rv)**2/(2*sd**2)) + cont
103
+ # return y1
104
+
105
+ # wavelength_grid = wavelengths
106
+ # flux_grid = np.ones(wavelength_grid.shape)
107
+
108
+ # linelist = '/home/lsd/Documents/fulllinelist0001.txt'
109
+
110
+ # linelist_expected = np.genfromtxt('%s'%linelist, skip_header=4, delimiter=',', usecols=(1,9))
111
+ # wavelengths_expected1 =np.array(linelist_expected[:,0])
112
+ # depths_expected1 = np.array(linelist_expected[:,1])
113
+ # # print(len(depths_expected1))
114
+
115
+ # wavelength_min = np.min(wavelengths)
116
+ # wavelength_max = np.max(wavelengths)
117
+
118
+ # print(wavelength_min, wavelength_max)
119
+
120
+ # wavelengths_expected=[]
121
+ # depths_expected=[]
122
+ # no_line =[]
123
+ # for some in range(0, len(wavelengths_expected1)):
124
+ # line_min = 0.25
125
+ # if wavelengths_expected1[some]>=wavelength_min and wavelengths_expected1[some]<=wavelength_max and depths_expected1[some]>=line_min:
126
+ # wavelengths_expected.append(wavelengths_expected1[some])
127
+ # #depths_expected.append(depths_expected1[some]+random.uniform(-0.1, 0.1))
128
+ # depths_expected.append(depths_expected1[some])
129
+ # else:
130
+ # pass
131
+
132
+ # count_range = np.array([len(wavelengths_expected)]*10)*np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
133
+ # count_range = np.array(count_range, dtype = int)
134
+ # print(count_range)
135
+ # vgrid = np.linspace(-21,18,48)
136
+ # try: ccf = fits.open(file.replace('s1d', 'ccf_K5'))
137
+ # except: ccf = fits.open(file.replace('s1d', 'ccf_G2'))
138
+ # rv = ccf[0].header['ESO DRS CCF RV']
139
+
140
+
141
+ # for line in count_range:
142
+ # mid_wave = wavelengths_expected[line]
143
+ # wgrid = 2.99792458e5*mid_wave/(2.99792458e5-vgrid)
144
+ # id = np.logical_and(wavelength_grid<np.max(wgrid), wavelength_grid>np.min(wgrid))
145
+ # prof_wavelength_grid = wavelength_grid[id]
146
+ # prof_v_grid = ((prof_wavelength_grid - mid_wave)*2.99792458e5)/prof_wavelength_grid
147
+ # prof = gauss(prof_v_grid, rv, 2.47, -depths_expected[line], 1.)
148
+ # # plt.figure()
149
+ # # plt.plot(prof_wavelength_grid, prof)
150
+ # # plt.show()
151
+ # flux_grid[id] = prof
152
+
153
+ # coeffs=np.polyfit(wavelengths, fluxes/fluxes[0], 3)
154
+ # poly = np.poly1d(coeffs*fluxes[0])
155
+ # fit = poly(wavelengths)
156
+
157
+ # wavelengths = wavelength_grid
158
+ # fluxes = flux_grid * fit
159
+
160
+ # Old blaze correct code part 2:
161
+
162
+ # plt.figure()
163
+ # plt.figure('blaze for orders 28, 29 and 30')
164
+ # plt.plot(wave[28], blaze[0].data[28])
165
+ # plt.plot(wave[29], blaze[0].data[29])
166
+ # plt.plot(wave[30], blaze[0].data[30])
167
+
168
+ # plt.figure()
169
+ # plt.title('blaze for orders 28, 29 and 30 summed together')
170
+ # pixel_grid = np.linspace(np.min(wave[28]), np.max(wave[30]), len(np.unique(wave[28:30])))
171
+ # blaze_sum = np.zeros(pixel_grid.shape)
172
+
173
+ # f28 = interp1d(wave[28], blaze[0].data[28], kind = 'linear', bounds_error=False, fill_value = 'extrapolate')
174
+ # f29 = interp1d(wave[29], blaze[0].data[29], kind = 'linear', bounds_error=False, fill_value = 'extrapolate')
175
+ # f30 = interp1d(wave[30], blaze[0].data[30], kind = 'linear', bounds_error=False, fill_value = 'extrapolate')
176
+
177
+ # idx28 = np.logical_and(pixel_grid>np.min(wave[28]), pixel_grid<np.max(wave[28]))
178
+ # idx29 = np.logical_and(pixel_grid>np.min(wave[29]), pixel_grid<np.max(wave[29]))
179
+ # idx30 = np.logical_and(pixel_grid>np.min(wave[30]), pixel_grid<np.max(wave[30]))
180
+
181
+ # blaze_28 = f28(pixel_grid[idx28])
182
+ # blaze_29 = f29(pixel_grid[idx29])
183
+ # blaze_30 = f30(pixel_grid[idx30])
184
+
185
+ # wave28 = pixel_grid[idx28]
186
+ # wave29 = pixel_grid[idx29]
187
+ # wave30 = pixel_grid[idx30]
188
+
189
+ # for pixel in range(len(pixel_grid)):
190
+ # wavell = pixel_grid[pixel]
191
+
192
+ # idx28 = tuple([wave28==wavell])
193
+ # idx29 = tuple([wave29==wavell])
194
+ # idx30 = tuple([wave30==wavell])
195
+
196
+ # try: b = blaze_28[idx28][0]
197
+ # except: b=0
198
+ # try: b1 = blaze_29[idx29][0]
199
+ # except: b1=0
200
+ # try: b2 = blaze_30[idx30][0]
201
+ # except: b2=0
202
+
203
+ # print(wavell, b, b1, b2)
204
+
205
+ # blaze_sum[pixel] = b + b1 + b2
206
+
207
+ # plt.plot(pixel_grid, blaze_sum)
208
+ # plt.show()
209
+
210
+ # plt.figure()
211
+ # plt.title('e2ds after blaze orders 28, 29 and 30')
212
+ # plt.plot(wave[28], spec[28])
213
+ # plt.plot(wave[29], spec[29])
214
+ # plt.plot(wave[30], spec[30])
215
+
216
+ ## TEST - adjusting e2ds spectrum onto s1d continuum ##
217
+
218
+ # ## first interpolate s1d onto e2ds wavelength grid ##
219
+ # s1d_file = fits.open(file.replace('e2ds', 's1d'))
220
+ # s1d_spec = s1d_file[0].data
221
+ # wave_s1d = s1d_file[0].header['CRVAL1']+(np.arange(s1d_spec.shape[0]))*s1d_file[0].header['CDELT1']
222
+
223
+ # wavelengths = wave_s1d
224
+ # fluxes = s1d_spec
225
+
226
+ # plt.figure()
227
+ # plt.plot(wavelengths, fluxes, label = 'e2ds spectrum - corrected to s1d continuum')
228
+ # plt.plot(wave_s1d, s1d_spec, label = 's1d spectrum')
229
+ # plt.legend()
230
+ # plt.show()
231
+
232
+ # plt.figure()
233
+ # plt.plot(wavelengths, fluxes, label = 's1d')
234
+ # plt.legend()
235
+
236
+ # diff_arr = wavelengths[1:] - wavelengths[:-1]
237
+ # # print(diff_arr)
238
+ # wavelengths = wavelengths[:-1]
239
+ # fluxes = fluxes[:-1]
240
+
241
+ # fluxes = fluxes/diff_arr
242
+
243
+ # fluxes = np.ones(fluxes.shape)
244
+ # for i in range(len(fluxes)):
245
+ # fluxes[i] = fluxes[i]*8000
246
+ # plt.figure()
247
+ # plt.plot(wavelengths, fluxes, label = 's1d in photons AA-1')
248
+ # #plt.legend()
249
+
250
+ # interpolate s1d onto e2ds wavelengths - non flux conserving
251
+ # s1dd_spec = Spectrum1D(spectral_axis = wavelengths*u.AA, flux = fluxes*u.Unit('photon AA-1'))
252
+ # fluxcon = FluxConservingResampler()
253
+ # extended_e2ds_wave = np.concatenate((wave[order], [wave[order][-1]+0.01]))
254
+
255
+ ## MM-LSD way
256
+
257
+ # fluxes = spec[order]
258
+ # flux_error_order = flux_error[order]
259
+ # wavelengths = wave[order]
260
+
261
+ # for i in range(len(fluxes)):
262
+ # if i ==0:
263
+ # # print(fluxes[i])
264
+ # fluxes[i] = fluxes[i]*(0.01/(wavelengths[1]-wavelengths[0]))
265
+ # # print(0.01/(2.99792458e5*(wavelengths[1]-wavelengths[0])))
266
+ # # print(fluxes[i])
267
+ # else:
268
+ # # print(fluxes[i])
269
+ # fluxes[i] = fluxes[i]*(0.01/(wavelengths[i]-wavelengths[i-1]))
270
+ # # print(0.01/(2.99792458e5*(wavelengths[1]-wavelengths[0])))
271
+ # # print(fluxes[i])
272
+
273
+ # ## end of MM-LSD way
274
+
275
+ # new_spec = fluxcon(s1dd_spec, wavelengths*u.AA)
276
+
277
+ # reference_wave = new_spec.spectral_axis/u.AA
278
+ # reference_flux = new_spec.flux/u.Unit('photon AA-1')
279
+
280
+ # # print(len(wavelengths_new))
281
+ # # print(len(wave[order]))
282
+
283
+ # # # plt.plot(wavelengths_new, fluxes, label = 'interpolated s1d in photons AA-1')
284
+ # # # plt.legend()
285
+
286
+ # #diff_arr = wavelengths_new[1:] - wavelengths_new[:-1]
287
+ # #reference_wave = wavelengths_new[:-1]
288
+ # #reference_flux = fluxes[:-1]*diff_arr
289
+
290
+ # # # print(len(reference_wave))
291
+ # # # print(len(wave[order]))
292
+
293
+ # # # print(reference_wave-wave[order])
294
+
295
+ # # # plt.figure()
296
+ # # # plt.plot(reference_wave, reference_flux, label = 'interpolated s1d in photons per bin')
297
+ # # # plt.legend()
298
+ # # # plt.show()
299
+ # # ## divide e2ds spectrum by interpolated s1d and fit polynomial to result
300
+
301
+ # reference_wave = np.array(reference_wave, dtype = float)
302
+ # reference_flux = np.array(reference_flux, dtype = float)
303
+ # div_frame = fluxes/reference_flux
304
+
305
+ # # plt.figure()
306
+ # # plt.plot(reference_wave, div_frame)
307
+ # # plt.show()
308
+
309
+ # # # ### creating windows to fit polynomial to
310
+ # # # binned = np.zeros(int(len(div_frame)/2))
311
+ # # # binned_waves = np.zeros(int(len(div_frame)/2))
312
+ # # # for i in range(0, len(div_frame)-1, 2):
313
+ # # # pos = int(i/2)
314
+ # # # binned[pos] = (div_frame[i]+div_frame[i+1])/2
315
+ # # # binned_waves[pos] = (reference_wave[i]+reference_wave[i+1])/2
316
+
317
+ # # # plt.plot(frame_wavelengths[n], frames_unadjusted[n], color = 'b', label = 'unadjusted')
318
+ # # # plt.figure()
319
+ # # # plt.plot(frame_wavelengths[n], frames[n])
320
+ # # # plt.show()
321
+
322
+ # ### fitting polynomial to div_frame
323
+ # coeffs=np.polyfit(reference_wave, div_frame, 3)
324
+ # poly = np.poly1d(coeffs)
325
+ # # print(coeffs)
326
+ # inputs = coeffs[::-1]
327
+ # # print(inputs)
328
+
329
+ # wavelengths = reference_wave
330
+
331
+ # fit = poly(wavelengths)
332
+
333
+ # # # plt.figure()
334
+ # # # plt.plot(reference_wave, reference_flux, label= 'reference')
335
+ # # # plt.plot(wave[order], spec[order], label = 'e2ds')
336
+ # # # plt.legend()
337
+
338
+ # # # plt.figure()
339
+ # # # plt.plot(reference_wave-wavelengths[:-1], label = 'reference_wave-wavelengths')
340
+ # # # plt.legend()
341
+ # # # plt.show()
342
+
343
+ # # plt.figure()
344
+ # # plt.scatter(reference_wave, div_frame, label = 'div flux')
345
+ # # plt.plot(wavelengths, fit, label = 'fit')
346
+ # # # plt.plot(reference_wave, poly(reference_wave), label = 'poly(reference_wave)')
347
+ # # plt.legend()
348
+ # # plt.show()
349
+
350
+ # fluxes = spec[order]/fit
351
+ # flux_error_order = flux_error[order]/fit
352
+
353
+ # # plt.figure()
354
+ # # plt.plot(wavelengths, fluxes/reference_flux, label = 'continuum adjusted e2ds/s1d')
355
+ # # plt.legend()
356
+ # # plt.show()
357
+
358
+ # # plt.figure()
359
+ # # plt.plot(wavelengths, fluxes-reference_flux, label = 'continuum adjusted e2ds-s1d')
360
+ # # plt.legend()
361
+ # # plt.show()
362
+ # # plt.figure()
363
+ # # plt.plot(wavelengths, spec[order], label = 'before')
364
+ # # plt.plot(wavelengths, fit)
365
+ # # plt.plot(wavelengths, fluxes, label= 'after')
366
+ # # plt.legend()
367
+ # # plt.show()
368
+
369
+ # # idx_full = np.logical_and(wave_s1d>np.min(wave[28]), wave_s1d<np.max(wave[30]))
370
+ # # plt.plot(wave_s1d[idx_full], s1d_spec[idx_full])
371
+ # # plt.show()
372
+
373
+ # blaze.close()
374
+ # # plt.figure('after blaze correction - e2ds vs s1d - after berv correction')
375
+ # # plt.plot(wave[order], spec[order], label = 'e2ds')
376
+ # # plt.plot(wave_s1d[wave_s1d>np.max(wave[order])], spec_s1d[wave_s1d>np.max(wave[order])], label = 's1d')
377
+ # # plt.show()
378
+
379
+
380
+
381
+ # # # test - e2ds interpolated onto s1d wavelength grid ##
382
+ # # hdu_s1d=fits.open('%s'%file.replace('e2ds', 's1d'))
383
+ # # spec_s1d=hdu_s1d[0].data
384
+ # # header_s1d=hdu_s1d[0].header
385
+
386
+ # # wave_s1d=header_s1d['CRVAL1']+(header_s1d['CRPIX1']+np.arange(spec_s1d.shape[0]))*header_s1d['CDELT1']
387
+ # # id = np.logical_and(wave_s1d<np.max(wavelengths), wave_s1d>np.min(wavelengths))
388
+ # # print(wave_s1d*u.AA)
389
+ # # print(wavelengths*u.AA)
390
+ # # print(fluxes*u.Unit('erg cm-2 s-1 AA-1'))
391
+ # # # plt.figure('s1d compared to interpolated e2ds')
392
+ # # # plt.title('s1d compared to interpolated e2ds')
393
+ # # # plt.plot(wave_s1d, spec_s1d, label = 's1d spectrum')
394
+
395
+ # # ## these fluxes are in photons per bin - I need them in photons per Angstrom
396
+ # # ## therefore i do flux/angstroms in pixel
397
+ # # diff_arr = wavelengths[1:] - wavelengths[:-1]
398
+ # # print(diff_arr)
399
+ # # wavelengths = wavelengths[:-1]
400
+ # # fluxes = fluxes[:-1]
401
+ # # # plt.figure('changing flux units')
402
+ # # # plt.plot(wavelengths, fluxes, label = 'flux per pixel')
403
+
404
+ # # fluxes = fluxes/diff_arr
405
+
406
+ # # # plt.plot(wavelengths, fluxes, label = ' flux per A')
407
+
408
+ # # e2ds_spec = Spectrum1D(spectral_axis = wavelengths*u.AA, flux = fluxes*u.Unit('photon AA-1'))
409
+ # # fluxcon = FluxConservingResampler()
410
+ # # new_spec = fluxcon(e2ds_spec, wave_s1d[id]*u.AA)
411
+
412
+ # # wavelengths = new_spec.spectral_axis
413
+ # # fluxes = new_spec.flux
414
+
415
+ # # wavelengths = wavelengths[:4097]/u.AA
416
+ # # fluxes = fluxes[:4097]/u.Unit('photon AA-1')
417
+
418
+ # # diff_arr = wavelengths[1:] - wavelengths[:-1]
419
+ # # wavelengths = wavelengths[:-1]
420
+ # # fluxes = fluxes[:-1]*diff_arr
421
+
422
+ # # # plt.figure('s1d compared to interpolated e2ds')
423
+ # # # plt.plot(wavelengths, fluxes, label = 'interpolated e2ds onto s1d')
424
+ # # # plt.xlim(np.min(wavelengths), np.max(wavelengths))
425
+ # # # plt.legend()
426
+ # # # plt.show()
427
+
428
+ # # print(wavelengths)
429
+ # # print(fluxes)
430
+
431
+ # #end of test ##
432
+
433
+ # # # test2 - synthetic e2ds spectrum - using wavelength grid ##
434
+ # # def gauss(x1, rv, sd, height, cont):
435
+ # # y1 = height*np.exp(-(x1-rv)**2/(2*sd**2)) + cont
436
+ # # return y1
437
+
438
+ # # wavelength_grid = wavelengths
439
+ # # flux_grid = np.ones(wavelength_grid.shape)
440
+
441
+ # # linelist = '/home/lsd/Documents/fulllinelist0001.txt'
442
+
443
+ # # linelist_expected = np.genfromtxt('%s'%linelist, skip_header=4, delimiter=',', usecols=(1,9))
444
+ # # wavelengths_expected1 =np.array(linelist_expected[:,0])
445
+ # # depths_expected1 = np.array(linelist_expected[:,1])
446
+ # # # print(len(depths_expected1))
447
+
448
+ # # wavelength_min = np.min(wavelengths)
449
+ # # wavelength_max = np.max(wavelengths)
450
+
451
+ # # print(wavelength_min, wavelength_max)
452
+
453
+ # # wavelengths_expected=[]
454
+ # # depths_expected=[]
455
+ # # no_line =[]
456
+ # # for some in range(0, len(wavelengths_expected1)):
457
+ # # line_min = 0.25
458
+ # # if wavelengths_expected1[some]>=wavelength_min and wavelengths_expected1[some]<=wavelength_max and depths_expected1[some]>=line_min:
459
+ # # wavelengths_expected.append(wavelengths_expected1[some])
460
+ # # #depths_expected.append(depths_expected1[some]+random.uniform(-0.1, 0.1))
461
+ # # depths_expected.append(depths_expected1[some])
462
+ # # else:
463
+ # # pass
464
+
465
+ # # count_range = np.array([len(wavelengths_expected)]*10)*np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
466
+ # # count_range = np.array(count_range, dtype = int)
467
+ # # print(count_range)
468
+ # # vgrid = np.linspace(-21,18,48)
469
+ # # try: ccf = fits.open(file.replace('e2ds', 'ccf_K5'))
470
+ # # except: ccf = fits.open(file.replace('e2ds', 'ccf_G2'))
471
+ # # rv = ccf[0].header['ESO DRS CCF RV']
472
+
473
+
474
+ # # for line in count_range:
475
+ # # mid_wave = wavelengths_expected[line]
476
+ # # wgrid = 2.99792458e5*mid_wave/(2.99792458e5-vgrid)
477
+ # # id = np.logical_and(wavelength_grid<np.max(wgrid), wavelength_grid>np.min(wgrid))
478
+ # # prof_wavelength_grid = wavelength_grid[id]
479
+ # # prof_v_grid = ((prof_wavelength_grid - mid_wave)*2.99792458e5)/prof_wavelength_grid
480
+ # # prof = gauss(prof_v_grid, rv, 2.47, -depths_expected[line], 1.)
481
+ # # # id = tuple([prof_v_grid<-0.99])
482
+ # # # plt.figure()
483
+ # # # plt.plot(prof_wavelength_grid, prof)
484
+ # # # plt.show()
485
+ # # flux_grid[id] = prof
486
+
487
+ # # # plt.figure()
488
+ # # # plt.plot(wavelength_grid, flux_grid)
489
+ # # # plt.show()
490
+
491
+ # # coeffs=np.polyfit(wavelengths, fluxes/fluxes[0], 3)
492
+ # # poly = np.poly1d(coeffs*fluxes[0])
493
+ # # fit = poly(wavelengths)
494
+
495
+ # # wavelengths = wavelength_grid
496
+ # # fluxes = flux_grid * fit
497
+
498
+ # # plt.figure()
499
+ # plt.plot(wavelengths, fluxes)
500
+ # plt.show()
501
+
502
+ # find overlapping regions
503
+ # last_wavelengths = wave[order-1]
504
+ # next_wavelengths = wave[order+1]
505
+ # last_spec = spec[order-1]
506
+ # next_spec = spec[order+1]
507
+ # last_error = flux_error[order-1]
508
+ # next_error = flux_error[order+1]
509
+ # min_overlap = np.min(wavelengths)
510
+ # max_overlap = np.max(wavelengths)
511
+
512
+
513
+ # idx_ = tuple([wavelengths>min_overlap])
514
+ # last_idx = np.logical_and(last_wavelengths>min_overlap, last_wavelengths<max_overlap)
515
+ # next_idx = np.logical_and(next_wavelengths>min_overlap, next_wavelengths<max_overlap)
516
+
517
+ # overlap = np.array(([list(last_wavelengths[last_idx]), list(last_spec[last_idx]), list(last_error[last_idx])], [list(next_wavelengths[next_idx]), list(next_spec[next_idx]), list(next_error[next_idx])]))
518
+
519
+ # # overlap[0, 0] = list(last_wavelengths[last_idx])
520
+ # # overlap[0, 1] = list(last_spec[last_idx])
521
+ # # overlap[1, 0] = list(next_wavelengths[next_idx])
522
+ # # overlap[1, 1] = list(next_spec[next_idx])
523
+
524
+ # print(overlap)
525
+ # plt.figure()
526
+ # plt.plot(wavelengths, fluxes)
527
+ # plt.plot(wavelengths[idx_overlap], fluxes[idx_overlap])
528
+ # plt.show()
529
+ # wavelengths = wavelengths[idx]
530
+ # fluxes = fluxes[idx]
531
+ # flux_error_order = flux_error_order[idx]
532
+
533
+ # Telluric correction:
534
+ ## telluric correction
535
+ # tapas = fits.open('/Users/lucydolan/Starbase/tapas_000001.fits')
536
+ # tapas_wvl = (tapas[1].data["wavelength"]) * 10.0
537
+ # tapas_trans = tapas[1].data["transmittance"]
538
+ # tapas.close()
539
+ # brv=header['ESO DRS BERV']
540
+ # tapas_wvl = tapas_wvl[::-1]/(1.+brv/2.99792458e5)
541
+ # tapas_trans = tapas_trans[::-1]
542
+
543
+ # background = upper_envelope(tapas_wvl, tapas_trans)
544
+ # f = interp1d(tapas_wvl, tapas_trans / background, bounds_error=False)
545
+
546
+ # plt.figure('telluric spec and real spec')
547
+ # plt.plot(wavelengths, continuumfit(wavelengths, fluxes, 3))
548
+ # plt.plot(wavelengths, f(wavelengths))
549
+ # plt.show()
550
+
551
+ # plt.figure()
552
+ # plt.plot(tapas_wvl, tapas_trans)
553
+ # plt.show()
554
+ # print('overlap accounted for')
555
+
556
+ # Overlap stuff?
557
+ # print(len(wavelengths))
558
+ # print(np.max(wavelengths), np.min(wavelengths))
559
+ # print(min_overlap)
560
+ # print(max_overlap)
561
+ # idx_overlap = np.logical_and(wavelengths>=min_overlap, wavelengths<=max_overlap)
562
+ # idx_overlap = tuple([idx_overlap==False])
563
+ # overlap = []
564
+
565
+ # plt.figure()
566
+ # plt.plot(wavelengths, fluxes, label = 'Flux')
567
+ # # plt.plot(wavelengths[idx_overlap], fluxes[idx_overlap])
568
+ # plt.show()
569
+
570
+ # plt.plot(wavelengths, fluxes, label = 's1d')
571
+ # plt.legend()
572
+ # plt.show()