arvi 0.2.4__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {arvi-0.2.4 → arvi-0.3.0}/.github/workflows/docs-gh-pages.yml +2 -2
  2. {arvi-0.2.4 → arvi-0.3.0}/.github/workflows/install.yml +2 -2
  3. {arvi-0.2.4 → arvi-0.3.0}/.github/workflows/python-publish.yml +3 -3
  4. {arvi-0.2.4/arvi.egg-info → arvi-0.3.0}/PKG-INFO +1 -1
  5. {arvi-0.2.4 → arvi-0.3.0}/arvi/__init__.py +1 -0
  6. {arvi-0.2.4 → arvi-0.3.0}/arvi/dace_wrapper.py +180 -102
  7. {arvi-0.2.4 → arvi-0.3.0}/arvi/gaia_wrapper.py +14 -4
  8. {arvi-0.2.4 → arvi-0.3.0}/arvi/instrument_specific.py +93 -54
  9. arvi-0.3.0/arvi/kepmodel_wrapper.py +296 -0
  10. arvi-0.3.0/arvi/kima_wrapper.py +130 -0
  11. {arvi-0.2.4 → arvi-0.3.0}/arvi/nasaexo_wrapper.py +7 -3
  12. {arvi-0.2.4 → arvi-0.3.0}/arvi/plots.py +1 -3
  13. {arvi-0.2.4 → arvi-0.3.0}/arvi/programs.py +8 -4
  14. {arvi-0.2.4 → arvi-0.3.0}/arvi/reports.py +108 -1
  15. {arvi-0.2.4 → arvi-0.3.0}/arvi/simbad_wrapper.py +53 -3
  16. arvi-0.3.0/arvi/sophie_wrapper.py +111 -0
  17. {arvi-0.2.4 → arvi-0.3.0}/arvi/stats.py +30 -5
  18. {arvi-0.2.4 → arvi-0.3.0}/arvi/timeseries.py +648 -218
  19. {arvi-0.2.4 → arvi-0.3.0}/arvi/utils.py +137 -10
  20. {arvi-0.2.4 → arvi-0.3.0/arvi.egg-info}/PKG-INFO +1 -1
  21. {arvi-0.2.4 → arvi-0.3.0}/arvi.egg-info/SOURCES.txt +2 -0
  22. arvi-0.3.0/tests/test_create_RV.py +43 -0
  23. arvi-0.2.4/arvi/kima_wrapper.py +0 -84
  24. arvi-0.2.4/tests/test_create_RV.py +0 -21
  25. {arvi-0.2.4 → arvi-0.3.0}/.github/dependabot.yml +0 -0
  26. {arvi-0.2.4 → arvi-0.3.0}/.gitignore +0 -0
  27. {arvi-0.2.4 → arvi-0.3.0}/LICENSE +0 -0
  28. {arvi-0.2.4 → arvi-0.3.0}/README.md +0 -0
  29. {arvi-0.2.4 → arvi-0.3.0}/arvi/HZ.py +0 -0
  30. {arvi-0.2.4 → arvi-0.3.0}/arvi/ariadne_wrapper.py +0 -0
  31. {arvi-0.2.4 → arvi-0.3.0}/arvi/berv.py +0 -0
  32. {arvi-0.2.4 → arvi-0.3.0}/arvi/binning.py +0 -0
  33. {arvi-0.2.4 → arvi-0.3.0}/arvi/config.py +0 -0
  34. {arvi-0.2.4 → arvi-0.3.0}/arvi/data/extra/HD86226_PFS1.rdb +0 -0
  35. {arvi-0.2.4 → arvi-0.3.0}/arvi/data/extra/HD86226_PFS2.rdb +0 -0
  36. {arvi-0.2.4 → arvi-0.3.0}/arvi/data/extra/metadata.json +0 -0
  37. {arvi-0.2.4 → arvi-0.3.0}/arvi/data/info.svg +0 -0
  38. {arvi-0.2.4 → arvi-0.3.0}/arvi/data/obs_affected_ADC_issues.dat +0 -0
  39. {arvi-0.2.4 → arvi-0.3.0}/arvi/data/obs_affected_blue_cryostat_issues.dat +0 -0
  40. {arvi-0.2.4 → arvi-0.3.0}/arvi/exofop_wrapper.py +0 -0
  41. {arvi-0.2.4 → arvi-0.3.0}/arvi/extra_data.py +0 -0
  42. {arvi-0.2.4 → arvi-0.3.0}/arvi/headers.py +0 -0
  43. {arvi-0.2.4 → arvi-0.3.0}/arvi/lbl_wrapper.py +0 -0
  44. {arvi-0.2.4 → arvi-0.3.0}/arvi/setup_logger.py +0 -0
  45. {arvi-0.2.4 → arvi-0.3.0}/arvi/spectra.py +0 -0
  46. {arvi-0.2.4 → arvi-0.3.0}/arvi/stellar.py +0 -0
  47. {arvi-0.2.4 → arvi-0.3.0}/arvi/translations.py +0 -0
  48. {arvi-0.2.4 → arvi-0.3.0}/arvi.egg-info/dependency_links.txt +0 -0
  49. {arvi-0.2.4 → arvi-0.3.0}/arvi.egg-info/requires.txt +0 -0
  50. {arvi-0.2.4 → arvi-0.3.0}/arvi.egg-info/top_level.txt +0 -0
  51. {arvi-0.2.4 → arvi-0.3.0}/docs/API.md +0 -0
  52. {arvi-0.2.4 → arvi-0.3.0}/docs/detailed.ipynb +0 -0
  53. {arvi-0.2.4 → arvi-0.3.0}/docs/downloading_data.md +0 -0
  54. {arvi-0.2.4 → arvi-0.3.0}/docs/index.md +0 -0
  55. {arvi-0.2.4 → arvi-0.3.0}/docs/logo/detective.png +0 -0
  56. {arvi-0.2.4 → arvi-0.3.0}/docs/logo/logo.png +0 -0
  57. {arvi-0.2.4 → arvi-0.3.0}/docs/stylesheets/extra.css +0 -0
  58. {arvi-0.2.4 → arvi-0.3.0}/mkdocs.yml +0 -0
  59. {arvi-0.2.4 → arvi-0.3.0}/pyproject.toml +0 -0
  60. {arvi-0.2.4 → arvi-0.3.0}/setup.cfg +0 -0
  61. {arvi-0.2.4 → arvi-0.3.0}/setup.py +0 -0
  62. {arvi-0.2.4 → arvi-0.3.0}/tests/HD10700-Bcor_ESPRESSO18.rdb +0 -0
  63. {arvi-0.2.4 → arvi-0.3.0}/tests/test_binning.py +0 -0
  64. {arvi-0.2.4 → arvi-0.3.0}/tests/test_config.py +0 -0
  65. {arvi-0.2.4 → arvi-0.3.0}/tests/test_import_object.py +0 -0
  66. {arvi-0.2.4 → arvi-0.3.0}/tests/test_simbad.py +0 -0
  67. {arvi-0.2.4 → arvi-0.3.0}/tests/test_stats.py +0 -0
.github/workflows/docs-gh-pages.yml
@@ -26,13 +26,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
 
       - name: Setup Pages
         uses: actions/configure-pages@v5
 
       - name: Set up Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           python-version: "3.10"
 
.github/workflows/install.yml
@@ -16,9 +16,9 @@ jobs:
         python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           python-version: ${{ matrix.python-version }}
 
.github/workflows/python-publish.yml
@@ -22,9 +22,9 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
       - name: Set up Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           python-version: '3.x'
       - name: Install dependencies
@@ -34,7 +34,7 @@ jobs:
       - name: Build package
         run: python -m build
       - name: Publish package
-        uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
+        uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e
         with:
           user: __token__
           password: ${{ secrets.PYPI_API_TOKEN }}
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: arvi
-Version: 0.2.4
+Version: 0.3.0
 Summary: The Automated RV Inspector
 Author-email: João Faria <joao.faria@unige.ch>
 License: MIT
arvi/__init__.py
@@ -20,6 +20,7 @@ def __getattr__(name: str):
     if name in (
         '_ipython_canary_method_should_not_exist_',
         '_ipython_display_',
+        '__custom_documentations__',
         '_repr_mimebundle_',
         '__wrapped__'
     ):
arvi/dace_wrapper.py
@@ -2,15 +2,15 @@ import os
 import sys
 import tarfile
 import collections
-from functools import lru_cache
+from functools import lru_cache, partial
 from itertools import islice
 import numpy as np
 
 from .setup_logger import setup_logger
-from .utils import create_directory, all_logging_disabled, stdout_disabled, tqdm
+from .utils import create_directory, all_logging_disabled, stdout_disabled, timer, tqdm
 
 
-def load_spectroscopy(user=None):
+def load_spectroscopy(user=None, verbose=True):
     logger = setup_logger()
     with all_logging_disabled():
         from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
@@ -19,7 +19,8 @@ def load_spectroscopy(user=None):
     from .config import config
     # requesting as public
     if config.request_as_public:
-        logger.warning('requesting DACE data as public')
+        if verbose:
+            logger.warning('requesting DACE data as public')
     with all_logging_disabled():
         dace = DaceClass(dace_rc_config_path='none')
     return SpectroscopyClass(dace_instance=dace)
@@ -86,11 +87,16 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
             i = [i for i, pipe in enumerate(pipelines) if ESPRESSO_mode in pipe][0]
             pipelines = [pipelines[i]]
 
+        # select NIRPS mode
+        if 'NIRPS' in inst:
+            if any(this_mode := [p for p in pipelines if NIRPS_mode in p]):
+                pipelines = this_mode
+
         if latest_pipeline:
             npipe = len(pipelines)
             if 'NIRPS' in inst and any(['LBL' in p for p in pipelines]):
                 # TODO: correctly load both CCF and LBL
-                pipelines = [pipelines[1]]
+                pipelines = [pipelines[0]]
             if 'HARPS' in inst and npipe > 1 and pipelines[1] == pipelines[0] + '-EGGS':
                 pipelines = pipelines[:2]
             else:
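The new NIRPS block relies on an assignment expression inside `any()`: the filtered list is bound to `this_mode` and tested for non-emptiness in one step. A standalone illustration of the pattern, with made-up pipeline names:

```python
# Pattern used in the NIRPS-mode selection above; pipeline names are made up.
pipelines = ['3.0.0 HA', '3.0.0 HE', 'LBL 3.0.0 HE']
NIRPS_mode = 'HE'

# bind the filtered list and test it in a single expression
if any(this_mode := [p for p in pipelines if NIRPS_mode in p]):
    pipelines = this_mode

print(pipelines)  # ['3.0.0 HE', 'LBL 3.0.0 HE']
```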
@@ -163,20 +169,20 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None,
     dict:
         dictionary with data from DACE
     """
-    Spectroscopy = load_spectroscopy(user)
-
+    Spectroscopy = load_spectroscopy(user, verbose)
     found_dace_id = False
-    try:
-        dace_id = get_dace_id(star, verbose=verbose, raise_error=True)
-        found_dace_id = True
-    except ValueError as e:
-        if main_id is not None:
-            try:
-                dace_id = get_dace_id(main_id, verbose=verbose, raise_error=True)
-                found_dace_id = True
-            except ValueError:
-                pass
-
+    with timer('dace_id query'):
+        try:
+            dace_id = get_dace_id(star, verbose=verbose, raise_error=True)
+            found_dace_id = True
+        except ValueError as e:
+            if main_id is not None:
+                try:
+                    dace_id = get_dace_id(main_id, verbose=verbose, raise_error=True)
+                    found_dace_id = True
+                except ValueError:
+                    pass
+
     if not found_dace_id:
         try:
             with all_logging_disabled():
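`timer` (newly imported from `arvi.utils`) now wraps the DACE-id lookup; its implementation is not part of this diff. A minimal sketch of such a context manager, assuming it only reports elapsed wall-clock time:

```python
# Hedged sketch: the real arvi.utils.timer is not shown in this diff.
import time
from contextlib import contextmanager

@contextmanager
def timer(label):
    start = time.perf_counter()
    try:
        yield
    finally:
        print(f'{label} took {time.perf_counter() - start:.3f} s')

with timer('dace_id query'):
    sum(range(1_000_000))  # stand-in for the actual DACE lookup
```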
@@ -188,10 +194,16 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None,
         msg = f'no {instrument} observations for {star}'
         raise ValueError(msg) from None
 
-    filters = {
-        "ins_name": {"contains": [instrument]},
-        "obj_id_daceid": {"contains": [dace_id]}
-    }
+    if (isinstance(instrument, str)):
+        filters = {
+            "ins_name": {"contains": [instrument]},
+            "obj_id_daceid": {"contains": [dace_id]}
+        }
+    elif (isinstance(instrument, (list, tuple, np.ndarray))):
+        filters = {
+            "ins_name": {"contains": instrument},
+            "obj_id_daceid": {"contains": [dace_id]}
+        }
     with all_logging_disabled():
         result = Spectroscopy.query_database(filters=filters)
@@ -202,63 +214,96 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None,
 
     for inst in np.unique(result['ins_name']):
         mask1 = result['ins_name'] == inst
-        r[inst] = {}
+        r[str(inst)] = {}
 
-        for pipe in np.unique(result['ins_drs_version'][mask1]):
-            mask2 = mask1 & (result['ins_drs_version'] == pipe)
-            r[inst][pipe] = {}
+        key2 = 'ins_drs_version'
+        n_key2 = len(np.unique(result[key2][mask1]))
+        if len(np.unique(result['pub_bibcode'][mask1])) >= n_key2:
+            key2 = 'pub_bibcode'
+
+        for pipe in np.unique(result[key2][mask1]):
+            mask2 = mask1 & (result[key2] == pipe)
+            r[str(inst)][str(pipe)] = {}
 
             for ins_mode in np.unique(result['ins_mode'][mask2]):
                 mask3 = mask2 & (result['ins_mode'] == ins_mode)
                 _nan = np.full(mask3.sum(), np.nan)
 
-                r[inst][pipe][ins_mode] = {
-                    'texp': result['texp'][mask3],
-                    'bispan': result['spectro_ccf_bispan'][mask3],
-                    'bispan_err': result['spectro_ccf_bispan_err'][mask3],
-                    'drift_noise': result['spectro_cal_drift_noise'][mask3],
-                    'rjd': result['obj_date_bjd'][mask3],
-                    'cal_therror': _nan,
-                    'fwhm': result['spectro_ccf_fwhm'][mask3],
-                    'fwhm_err': result['spectro_ccf_fwhm_err'][mask3],
-                    'rv': result['spectro_ccf_rv'][mask3],
-                    'rv_err': result['spectro_ccf_rv_err'][mask3],
-                    'berv': result['spectro_cal_berv'][mask3],
-                    'ccf_noise': _nan,
-                    'rhk': result['spectro_analysis_rhk'][mask3],
-                    'rhk_err': result['spectro_analysis_rhk_err'][mask3],
-                    'contrast': result['spectro_ccf_contrast'][mask3],
-                    'contrast_err': result['spectro_ccf_contrast_err'][mask3],
-                    'cal_thfile': result['spectro_cal_thfile'][mask3],
-                    'spectroFluxSn50': result['spectro_flux_sn50'][mask3],
-                    'protm08': result['spectro_analysis_protm08'][mask3],
-                    'protm08_err': result['spectro_analysis_protm08_err'][mask3],
-                    'caindex': result['spectro_analysis_ca'][mask3],
-                    'caindex_err': result['spectro_analysis_ca_err'][mask3],
-                    'pub_reference': result['pub_ref'][mask3],
-                    'drs_qc': result['spectro_drs_qc'][mask3],
-                    'haindex': result['spectro_analysis_halpha'][mask3],
-                    'haindex_err': result['spectro_analysis_halpha_err'][mask3],
-                    'protn84': result['spectro_analysis_protn84'][mask3],
-                    'protn84_err': result['spectro_analysis_protn84_err'][mask3],
-                    'naindex': result['spectro_analysis_na'][mask3],
-                    'naindex_err': result['spectro_analysis_na_err'][mask3],
-                    'snca2': _nan,
-                    'mask': result['spectro_ccf_mask'][mask3],
-                    'public': result['public'][mask3],
-                    'spectroFluxSn20': result['spectro_flux_sn20'][mask3],
-                    'sindex': result['spectro_analysis_smw'][mask3],
-                    'sindex_err': result['spectro_analysis_smw_err'][mask3],
-                    'drift_used': _nan,
-                    'ccf_asym': result['spectro_ccf_asym'][mask3],
-                    'ccf_asym_err': result['spectro_ccf_asym_err'][mask3],
-                    'date_night': result['date_night'][mask3],
-                    'raw_file': result['file_rootpath'][mask3],
-                    'prog_id': result['prog_id'][mask3],
-                    'th_ar': result['th_ar'][mask3],
-                    'th_ar1': result['th_ar1'][mask3],
-                    'th_ar2': result['th_ar2'][mask3],
+                translations = {
+                    'obj_date_bjd': 'rjd',
+                    'spectro_drs_qc': 'drs_qc',
+                    'spectro_cal_berv_mx': 'bervmax',
+                    'pub_ref': 'pub_reference',
+                    'file_rootpath': 'raw_file',
+                    'spectro_ccf_asym': 'ccf_asym',
+                    'spectro_ccf_asym_err': 'ccf_asym_err',
                 }
+                new_result = {}
+                for key in result.keys():
+                    if key in translations:
+                        new_key = translations[key]
+                    else:
+                        new_key = key
+                        new_key = new_key.replace('spectro_ccf_', '')
+                        new_key = new_key.replace('spectro_cal_', '')
+                        new_key = new_key.replace('spectro_analysis_', '')
+                    new_result[new_key] = result[key][mask3]
+
+                new_result['ccf_noise'] = np.sqrt(
+                    np.square(result['spectro_ccf_rv_err'][mask3]) - np.square(result['spectro_cal_drift_noise'][mask3])
+                )
+
+                r[str(inst)][str(pipe)][str(ins_mode)] = new_result
+
+                # r[str(inst)][str(pipe)][str(ins_mode)] = {
+                #     'texp': result['texp'][mask3],
+                #     'bispan': result['spectro_ccf_bispan'][mask3],
+                #     'bispan_err': result['spectro_ccf_bispan_err'][mask3],
+                #     'drift_noise': result['spectro_cal_drift_noise'][mask3],
+                #     'rjd': result['obj_date_bjd'][mask3],
+                #     'cal_therror': _nan,
+                #     'fwhm': result['spectro_ccf_fwhm'][mask3],
+                #     'fwhm_err': result['spectro_ccf_fwhm_err'][mask3],
+                #     'rv': result['spectro_ccf_rv'][mask3],
+                #     'rv_err': result['spectro_ccf_rv_err'][mask3],
+                #     'berv': result['spectro_cal_berv'][mask3],
+                #     'ccf_noise': np.sqrt(
+                #         np.square(result['spectro_ccf_rv_err'][mask3]) - np.square(result['spectro_cal_drift_noise'][mask3])
+                #     ),
+                #     'rhk': result['spectro_analysis_rhk'][mask3],
+                #     'rhk_err': result['spectro_analysis_rhk_err'][mask3],
+                #     'contrast': result['spectro_ccf_contrast'][mask3],
+                #     'contrast_err': result['spectro_ccf_contrast_err'][mask3],
+                #     'cal_thfile': result['spectro_cal_thfile'][mask3],
+                #     'spectroFluxSn50': result['spectro_flux_sn50'][mask3],
+                #     'protm08': result['spectro_analysis_protm08'][mask3],
+                #     'protm08_err': result['spectro_analysis_protm08_err'][mask3],
+                #     'caindex': result['spectro_analysis_ca'][mask3],
+                #     'caindex_err': result['spectro_analysis_ca_err'][mask3],
+                #     'pub_reference': result['pub_ref'][mask3],
+                #     'drs_qc': result['spectro_drs_qc'][mask3],
+                #     'haindex': result['spectro_analysis_halpha'][mask3],
+                #     'haindex_err': result['spectro_analysis_halpha_err'][mask3],
+                #     'protn84': result['spectro_analysis_protn84'][mask3],
+                #     'protn84_err': result['spectro_analysis_protn84_err'][mask3],
+                #     'naindex': result['spectro_analysis_na'][mask3],
+                #     'naindex_err': result['spectro_analysis_na_err'][mask3],
+                #     'snca2': _nan,
+                #     'mask': result['spectro_ccf_mask'][mask3],
+                #     'public': result['public'][mask3],
+                #     'spectroFluxSn20': result['spectro_flux_sn20'][mask3],
+                #     'sindex': result['spectro_analysis_smw'][mask3],
+                #     'sindex_err': result['spectro_analysis_smw_err'][mask3],
+                #     'drift_used': _nan,
+                #     'ccf_asym': result['spectro_ccf_asym'][mask3],
+                #     'ccf_asym_err': result['spectro_ccf_asym_err'][mask3],
+                #     'date_night': result['date_night'][mask3],
+                #     'raw_file': result['file_rootpath'][mask3],
+                #     'prog_id': result['prog_id'][mask3],
+                #     'th_ar': result['th_ar'][mask3],
+                #     'th_ar1': result['th_ar1'][mask3],
+                #     'th_ar2': result['th_ar2'][mask3],
+                # }
 
     # print(r.keys())
     # print([r[k].keys() for k in r.keys()])
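The hard-coded mapping is replaced by a generic renaming pass: a few keys are translated explicitly and the common `spectro_*` prefixes are stripped from the rest; `ccf_noise` is also no longer NaN but the photon-noise part of the RV uncertainty, obtained by removing the calibration drift noise in quadrature from `rv_err`. The renaming, shown standalone on fake DACE columns:

```python
# Standalone illustration of the renaming pass above, on fake DACE columns.
import numpy as np

translations = {'obj_date_bjd': 'rjd', 'pub_ref': 'pub_reference'}
result = {
    'obj_date_bjd': np.array([60000.1, 60001.2]),
    'spectro_ccf_fwhm': np.array([7.1, 7.2]),
    'spectro_analysis_rhk': np.array([-4.9, -5.0]),
}

new_result = {}
for key, values in result.items():
    new_key = translations.get(key, key)
    for prefix in ('spectro_ccf_', 'spectro_cal_', 'spectro_analysis_'):
        new_key = new_key.replace(prefix, '')
    new_result[new_key] = values

print(sorted(new_result))  # ['fwhm', 'rhk', 'rjd']
```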
@@ -268,7 +313,7 @@ def get_observations(star, instrument=None, user=None, main_id=None, verbose=Tru
 def get_observations(star, instrument=None, user=None, main_id=None, verbose=True):
     logger = setup_logger()
     if instrument is None:
-        Spectroscopy = load_spectroscopy(user)
+        Spectroscopy = load_spectroscopy(user, verbose)
 
         try:
             with stdout_disabled(), all_logging_disabled():
@@ -302,12 +347,14 @@ def get_observations(star, instrument=None, user=None, main_id=None, verbose=Tru
         result[inst] = dict(result[inst])
     #
 
-    instruments = list(result.keys())
+    instruments = list(map(str, result.keys()))
 
     if instrument is not None:
         # select only the provided instrument (if it's there)
-        instruments = [inst for inst in instruments if instrument in inst]
-
+        if (isinstance(instrument, str)):
+            instruments = [inst for inst in instruments if instrument in inst]
+        elif (isinstance(instrument, list)):
+            instruments = [inst for inst in instruments if any(i in inst for i in instrument)]
     if len(instruments) == 0:
         if instrument is None:
             msg = f'no observations for {star}'
@@ -331,30 +378,57 @@ def get_observations(star, instrument=None, user=None, main_id=None, verbose=Tru
 
     # else:
     #     return -1
 
-    # sort pipelines, must be extra careful with HARPS/HARPN pipeline version numbers
-    # got here with the help of DeepSeek
-    from re import match
-    def custom_sort_key(s):
-        s = s[0]
-        # Check for version number pattern (e.g., 3.2.5 or 3.2.5-EGGS)
-        version_match = match(r'^(\d+(?:\.\d+)*)(?:[-\s](.*))?$', s)
-        if version_match:
-            version_parts = list(map(int, version_match.group(1).split('.')))
-            if len(version_parts) == 2:
-                version_parts.insert(1, -1)
-            return (0, 1, version_parts)
-        # Check for scientific reference pattern (e.g., 2004A&A...)
-        year_match = match(r'^(\d{4})', s)
-        if year_match:
-            year = int(year_match.group(1))
-            return (1, year)
-        # For all other strings, sort alphabetically
-        return (2, s)
-
-    # from functools import cmp_to_key
+    # # sort pipelines, must be extra careful with HARPS/HARPN pipeline version numbers
+    # # got here with the help of DeepSeek
+    # # from functools import cmp_to_key
+    # from re import match
+    # def custom_sort_key(s):
+    #     s = s[0]
+    #     # Check for version number pattern (e.g., 3.2.5 or 3.2.5-EGGS)
+    #     version_match = match(r'^(\d+(?:\.\d+)*)(?:[-\s](.*))?$', s)
+    #     if version_match:
+    #         version_parts = list(map(int, version_match.group(1).split('.')))
+    #         if len(version_parts) == 2:
+    #             version_parts.insert(1, -1)
+    #         # if version_match.group(2) and 'LBL' in version_match.group(2):
+    #         #     version_parts.append(-1)
+    #         # else:
+    #         #     version_parts.append(0)
+    #         if version_match.group(2) is None:
+    #             version_parts.append('')
+    #         else:
+    #             version_parts.append(version_match.group(2))
+    #         return (0, 1, version_parts)
+    #     # Check for scientific reference pattern (e.g., 2004A&A...)
+    #     year_match = match(r'^(\d{4})', s)
+    #     if year_match:
+    #         year = int(year_match.group(1))
+    #         return (1, year)
+    #     # For all other strings, sort alphabetically
+    #     return (2, s)
+
+    def custom_key(val, strip_EGGS=False):
+        if strip_EGGS:
+            val = val.replace('-EGGS', '').replace(' EGGS', '')
+        key = 0
+        key -= 1 if '3.5' in val else 0
+        key -= 1 if 'EGGS' in val else 0
+        key -= 1 if ('UHR' in val or 'MR' in val) else 0
+        key -= 1 if 'LBL' in val else 0
+        return str(key) if key != 0 else val
+
     new_result = {}
     for inst in instruments:
-        new_result[inst] = dict(sorted(result[inst].items(), key=custom_sort_key, reverse=True))
+        # new_result[inst] = dict(
+        #     sorted(result[inst].items(), key=custom_sort_key, reverse=True)
+        # )
+        if all(['EGGS' in k for k in result[inst].keys()]):
+            custom_key = partial(custom_key, strip_EGGS=True)
+        # WARNING: not the same as reverse=True (not sure why)
+        sorted_keys = sorted(result[inst].keys(), key=custom_key)[::-1]
+        new_result[inst] = {}
+        for key in sorted_keys:
+            new_result[inst][key] = result[inst][key]
 
     if verbose:
         logger.info('RVs available from')
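The new `custom_key` replaces the regex-based sort: every "special" pipeline (EGGS, UHR/MR, LBL, the 3.5 pipeline) gets a negative score that sorts it before plain version strings, and reversing the sorted list then puts the plain, preferred pipeline first. For example:

```python
# How custom_key ranks pipeline names (names here are illustrative).
def custom_key(val, strip_EGGS=False):
    if strip_EGGS:
        val = val.replace('-EGGS', '').replace(' EGGS', '')
    key = 0
    key -= 1 if '3.5' in val else 0
    key -= 1 if 'EGGS' in val else 0
    key -= 1 if ('UHR' in val or 'MR' in val) else 0
    key -= 1 if 'LBL' in val else 0
    return str(key) if key != 0 else val

pipes = ['3.2.5', '3.2.5-EGGS', 'LBL 3.2.5']
print(sorted(pipes, key=custom_key)[::-1])
# ['3.2.5', 'LBL 3.2.5', '3.2.5-EGGS'] -- plain CCF pipeline comes out first
```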
@@ -388,7 +462,7 @@ def check_existing(output_directory, files, type):
     ]
 
     if type == 'S2D':
-        existing = [
+        existing += [
             f.partition('.fits')[0] for f in os.listdir(output_directory)
             if 'e2ds' in f
         ]
@@ -500,20 +574,21 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=
     """ Download CCFs / S1Ds / S2Ds from DACE """
     logger = setup_logger()
     raw_files = np.atleast_1d(raw_files)
+    raw_files_original = raw_files.copy()
 
     create_directory(output_directory)
 
     # check existing files to avoid re-downloading
     if not clobber:
         raw_files = check_existing(output_directory, raw_files, type)
-
+
     n = raw_files.size
 
     # any file left to download?
     if n == 0:
         if verbose:
             logger.info('no files to download')
-        return
+        return list(map(os.path.basename, raw_files_original))
 
     # avoid an empty chunk
     if chunk_size > n:
@@ -531,7 +606,9 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=
 
     if n < parallel_limit:
         iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
-        for files in tqdm(iterator, total=len(iterator)):
+        if len(iterator) > 1:
+            iterator = tqdm(iterator, total=len(iterator))
+        for files in iterator:
             download(files, type, output_directory, quiet=False, user=user)
         extract_fits(output_directory)
@@ -560,6 +637,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=
 
     sys.stdout.flush()
     logger.info('extracted .fits files')
+    return list(map(os.path.basename, raw_files_original))
 
 
 # def do_download_s1d(raw_files, output_directory, clobber=False, verbose=True):
arvi/gaia_wrapper.py
@@ -3,15 +3,14 @@ from io import StringIO
 from csv import DictReader
 import requests
 
-from astropy.coordinates import SkyCoord
-
 DATA_PATH = os.path.dirname(__file__)
 DATA_PATH = os.path.join(DATA_PATH, 'data')
 
 QUERY = """
 SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
        gaia_source.ra, gaia_source.dec,
-       gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
+       gaia_source.parallax, gaia_source.parallax_error,
+       gaia_source.pmra, gaia_source.pmdec,
        gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
        gaia_source.radial_velocity, gaia_source.radial_velocity_error
 FROM gaiadr3.gaia_source
@@ -29,7 +28,8 @@ CONTAINS(
 QUERY_ID = """
 SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
        gaia_source.ra, gaia_source.dec,
-       gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
+       gaia_source.parallax, gaia_source.parallax_error,
+       gaia_source.pmra, gaia_source.pmdec,
        gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
        gaia_source.radial_velocity, gaia_source.radial_velocity_error
 FROM gaiadr3.gaia_source
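Both ADQL queries now also select `parallax_error`, which feeds the new `plx_err` attribute further down. A hedged sketch of running such a query against the public Gaia TAP sync endpoint with the module's own `requests`/`csv` imports; the WHERE clause of arvi's `QUERY_ID` is not shown in this diff, and the source_id below (believed to be Barnard's star) is only an example:

```python
# Hedged sketch: self-contained ADQL query for parallax + parallax_error.
from io import StringIO
from csv import DictReader
import requests

adql = """
SELECT TOP 1 parallax, parallax_error
FROM gaiadr3.gaia_source
WHERE source_id = 4472832130942575872
"""
resp = requests.get('https://gea.esac.esa.int/tap-server/tap/sync',
                    params={'REQUEST': 'doQuery', 'LANG': 'ADQL',
                            'FORMAT': 'csv', 'QUERY': adql})
row = next(DictReader(StringIO(resp.text)))
print(row['parallax'], row['parallax_error'])
```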
@@ -78,6 +78,8 @@ class gaia:
         Args:
             star (str): The name of the star to query simbad
         """
+        from astropy.coordinates import SkyCoord
+
         self.star = star
 
         if simbad is None:
@@ -120,6 +122,7 @@ class gaia:
         self.pmdec = float(results['pmdec'])
         self.coords = SkyCoord(self.ra, self.dec, unit='deg')
         self.plx = float(results['parallax'])
+        self.plx_err = float(results['parallax_error'])
         try:
             self.radial_velocity = float(results['radial_velocity'])
         except ValueError:
@@ -131,5 +134,12 @@ class gaia:
 
         return
 
+    def distance(self):
+        """ Calculate the distance to the star as 1 / parallax [pc] """
+        from astropy import units as u
+        d = (self.plx * u.mas).to(u.parsec,
+                                  equivalencies=u.equivalencies.parallax())
+        return d
+
     def __repr__(self):
         return f'{self.star} (DR3 id={self.dr3_id})'
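The new `distance()` method converts the stored parallax through astropy's parallax equivalency, i.e. d [pc] = 1000 / plx [mas]. For instance:

```python
# The unit conversion behind gaia.distance(): 10 mas of parallax is 100 pc.
from astropy import units as u

plx = 10.0 * u.mas
d = plx.to(u.parsec, equivalencies=u.parallax())
print(d)  # 100.0 pc
```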