arvi 0.1.22__tar.gz → 0.1.24__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {arvi-0.1.22 → arvi-0.1.24}/.github/workflows/install.yml +1 -1
  2. {arvi-0.1.22 → arvi-0.1.24}/.gitignore +1 -1
  3. {arvi-0.1.22 → arvi-0.1.24}/PKG-INFO +1 -1
  4. {arvi-0.1.22 → arvi-0.1.24}/arvi/dace_wrapper.py +125 -45
  5. {arvi-0.1.22 → arvi-0.1.24}/arvi/kima_wrapper.py +18 -8
  6. arvi-0.1.24/arvi/programs.py +247 -0
  7. {arvi-0.1.22 → arvi-0.1.24}/arvi/spectra.py +6 -6
  8. {arvi-0.1.22 → arvi-0.1.24}/arvi/timeseries.py +136 -52
  9. {arvi-0.1.22 → arvi-0.1.24}/arvi/translations.py +5 -0
  10. {arvi-0.1.22 → arvi-0.1.24}/arvi/utils.py +1 -0
  11. {arvi-0.1.22 → arvi-0.1.24}/arvi.egg-info/PKG-INFO +1 -1
  12. arvi-0.1.22/arvi/programs.py +0 -159
  13. {arvi-0.1.22 → arvi-0.1.24}/.github/workflows/docs-gh-pages.yml +0 -0
  14. {arvi-0.1.22 → arvi-0.1.24}/.github/workflows/python-publish.yml +0 -0
  15. {arvi-0.1.22 → arvi-0.1.24}/LICENSE +0 -0
  16. {arvi-0.1.22 → arvi-0.1.24}/README.md +0 -0
  17. {arvi-0.1.22 → arvi-0.1.24}/arvi/HZ.py +0 -0
  18. {arvi-0.1.22 → arvi-0.1.24}/arvi/__init__.py +0 -0
  19. {arvi-0.1.22 → arvi-0.1.24}/arvi/ariadne_wrapper.py +0 -0
  20. {arvi-0.1.22 → arvi-0.1.24}/arvi/berv.py +0 -0
  21. {arvi-0.1.22 → arvi-0.1.24}/arvi/binning.py +0 -0
  22. {arvi-0.1.22 → arvi-0.1.24}/arvi/config.py +0 -0
  23. {arvi-0.1.22 → arvi-0.1.24}/arvi/data/extra/HD86226_PFS1.rdb +0 -0
  24. {arvi-0.1.22 → arvi-0.1.24}/arvi/data/extra/HD86226_PFS2.rdb +0 -0
  25. {arvi-0.1.22 → arvi-0.1.24}/arvi/data/extra/metadata.json +0 -0
  26. {arvi-0.1.22 → arvi-0.1.24}/arvi/data/info.svg +0 -0
  27. {arvi-0.1.22 → arvi-0.1.24}/arvi/data/obs_affected_ADC_issues.dat +0 -0
  28. {arvi-0.1.22 → arvi-0.1.24}/arvi/data/obs_affected_blue_cryostat_issues.dat +0 -0
  29. {arvi-0.1.22 → arvi-0.1.24}/arvi/extra_data.py +0 -0
  30. {arvi-0.1.22 → arvi-0.1.24}/arvi/gaia_wrapper.py +0 -0
  31. {arvi-0.1.22 → arvi-0.1.24}/arvi/headers.py +0 -0
  32. {arvi-0.1.22 → arvi-0.1.24}/arvi/instrument_specific.py +0 -0
  33. {arvi-0.1.22 → arvi-0.1.24}/arvi/lbl_wrapper.py +0 -0
  34. {arvi-0.1.22 → arvi-0.1.24}/arvi/nasaexo_wrapper.py +0 -0
  35. {arvi-0.1.22 → arvi-0.1.24}/arvi/plots.py +0 -0
  36. {arvi-0.1.22 → arvi-0.1.24}/arvi/reports.py +0 -0
  37. {arvi-0.1.22 → arvi-0.1.24}/arvi/setup_logger.py +0 -0
  38. {arvi-0.1.22 → arvi-0.1.24}/arvi/simbad_wrapper.py +0 -0
  39. {arvi-0.1.22 → arvi-0.1.24}/arvi/stats.py +0 -0
  40. {arvi-0.1.22 → arvi-0.1.24}/arvi/stellar.py +0 -0
  41. {arvi-0.1.22 → arvi-0.1.24}/arvi.egg-info/SOURCES.txt +0 -0
  42. {arvi-0.1.22 → arvi-0.1.24}/arvi.egg-info/dependency_links.txt +0 -0
  43. {arvi-0.1.22 → arvi-0.1.24}/arvi.egg-info/requires.txt +0 -0
  44. {arvi-0.1.22 → arvi-0.1.24}/arvi.egg-info/top_level.txt +0 -0
  45. {arvi-0.1.22 → arvi-0.1.24}/docs/API.md +0 -0
  46. {arvi-0.1.22 → arvi-0.1.24}/docs/detailed.md +0 -0
  47. {arvi-0.1.22 → arvi-0.1.24}/docs/index.md +0 -0
  48. {arvi-0.1.22 → arvi-0.1.24}/docs/logo/detective.png +0 -0
  49. {arvi-0.1.22 → arvi-0.1.24}/docs/logo/logo.png +0 -0
  50. {arvi-0.1.22 → arvi-0.1.24}/mkdocs.yml +0 -0
  51. {arvi-0.1.22 → arvi-0.1.24}/pyproject.toml +0 -0
  52. {arvi-0.1.22 → arvi-0.1.24}/setup.cfg +0 -0
  53. {arvi-0.1.22 → arvi-0.1.24}/setup.py +0 -0
  54. {arvi-0.1.22 → arvi-0.1.24}/tests/test_binning.py +0 -0
  55. {arvi-0.1.22 → arvi-0.1.24}/tests/test_import_object.py +0 -0
  56. {arvi-0.1.22 → arvi-0.1.24}/tests/test_simbad.py +0 -0
  57. {arvi-0.1.22 → arvi-0.1.24}/tests/test_stats.py +0 -0
@@ -16,7 +16,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
 
     steps:
     - uses: actions/checkout@v4
@@ -2,7 +2,7 @@
 *.pkl
 *.rdb
 arvi/data/KOBE-translate.csv
-
+*_downloads
 
 
 # Byte-compiled / optimized / DLL files
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: arvi
-Version: 0.1.22
+Version: 0.1.24
 Summary: The Automated RV Inspector
 Author-email: João Faria <joao.faria@unige.ch>
 License: MIT
@@ -1,7 +1,9 @@
 import os
+import sys
 import tarfile
 import collections
 from functools import lru_cache
+from itertools import islice
 import numpy as np
 from dace_query import DaceClass
 from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
@@ -22,22 +24,23 @@ def load_spectroscopy() -> SpectroscopyClass:
     return default_Spectroscopy
 
 @lru_cache()
-def get_dace_id(star):
+def get_dace_id(star, verbose=True):
     filters = {"obj_id_catname": {"equal": [star]}}
     try:
-        with stdout_disabled(), all_logging_disabled():
+        with all_logging_disabled():
             r = load_spectroscopy().query_database(filters=filters, limit=1)
         return r['obj_id_daceid'][0]
     except KeyError:
-        logger.error(f"Could not find DACE ID for {star}")
+        if verbose:
+            logger.error(f"Could not find DACE ID for {star}")
         raise ValueError from None
 
 def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
     arrays = []
-    instruments = list(result.keys())
+    instruments = [str(i) for i in result.keys()]
 
     for inst in instruments:
-        pipelines = list(result[inst].keys())
+        pipelines = [str(p) for p in result[inst].keys()]
 
         # select ESPRESSO mode, which is defined at the level of the pipeline
         if 'ESPRESSO' in inst:
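A subtlety of the new `verbose` argument on `get_dace_id`: `lru_cache()` keys on the full argument tuple, so calls that differ only in how `verbose` is passed are cached as separate entries and can each trigger a DACE query. A minimal standalone sketch, with a hypothetical `get_id` standing in for the real query:

```python
from functools import lru_cache

@lru_cache()
def get_id(star, verbose=True):
    print(f'querying {star}')  # stands in for the DACE query
    return star.lower()

get_id('HD10700')                 # cache miss: queries
get_id('HD10700')                 # cache hit: silent
get_id('HD10700', verbose=False)  # different key: queries again
```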
@@ -55,20 +58,27 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
             pipelines = [pipelines[i]]
 
         if latest_pipeline:
-            if verbose and len(pipelines) > 1:
+            npipe = len(pipelines)
+            if 'NIRPS' in inst and any(['LBL' in p for p in pipelines]):
+                # TODO: correctly load both CCF and LBL
+                pipelines = [pipelines[1]]
+            if 'HARPS' in inst and npipe > 1 and pipelines[1] == pipelines[0] + '-EGGS':
+                pipelines = pipelines[:2]
+            else:
+                pipelines = [pipelines[0]]
+
+            if verbose and npipe > 1:
                 logger.info(f'selecting latest pipeline ({pipelines[0]}) for {inst}')
 
-            pipelines = [pipelines[0]]
 
         for pipe in pipelines:
-            modes = list(result[inst][pipe].keys())
+            modes = [m for m in result[inst][pipe].keys()]
 
-
             # select NIRPS mode, which is defined at the level of the mode
-            if 'NIRPS' in inst:
+            if 'NIRPS' in inst and len(modes) > 1:
                 if NIRPS_mode in modes:
                     if verbose:
-                        logger.info(f'selecting mode {NIRPS_mode} for NIRPS')
+                        logger.info(f'selecting mode {NIRPS_mode} for NIRPS - {pipe}')
                     i = modes.index(NIRPS_mode)
                     modes = [modes[i]]
                 else:
@@ -77,15 +87,25 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
 
             # HARPS15 observations should not be separated by 'mode' if some are
             # done together with NIRPS
-            if 'HARPS15' in inst and 'HARPS+NIRPS' in modes:
-                m0 = modes[0]
-                data = {
-                    k: np.concatenate([result[inst][pipe][m][k] for m in modes])
-                    for k in result[inst][pipe][m0].keys()
-                }
-                arrays.append(
-                    ((inst, pipe, m0), data)
-                )
+            if 'HARPS15' in inst:
+                if 'HARPS+NIRPS' in modes:
+                    m0 = modes[0]
+                    data = {
+                        k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                        for k in result[inst][pipe][m0].keys()
+                    }
+                    arrays.append(
+                        ((str(inst), str(pipe), str(m0)), data)
+                    )
+                if 'EGGS+NIRPS' in modes:
+                    m0 = modes[0]
+                    data = {
+                        k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                        for k in result[inst][pipe][m0].keys()
+                    }
+                    arrays.append(
+                        ((str(inst + '_EGGS'), str(pipe), str(m0)), data)
+                    )
                 continue
 
             for mode in modes:
@@ -94,39 +114,61 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
                     raise ValueError
 
                 arrays.append(
-                    ((inst, pipe, mode), result[inst][pipe][mode])
+                    ((str(inst), str(pipe), str(mode)), result[inst][pipe][mode])
                 )
 
     return arrays
 
-def get_observations_from_instrument(star, instrument, main_id=None):
+def get_observations_from_instrument(star, instrument, main_id=None, verbose=True):
    """ Query DACE for all observations of a given star and instrument
 
    Args:
-        star (str): name of the star
-        instrument (str): instrument name
-        main_id (str, optional): Simbad main id of target to query DACE id. Defaults to None.
+        star (str):
+            name of the star
+        instrument (str):
+            instrument name
+        main_id (str, optional):
+            Simbad main id of target to query DACE id. Defaults to None.
+        verbose (bool, optional):
+            whether to print warnings. Defaults to True.
 
    Raises:
-        ValueError: If query for DACE id fails
+        ValueError:
+            If query for DACE id fails
 
    Returns:
-        dict: dictionary with data from DACE
+        dict:
+            dictionary with data from DACE
    """
+    Spectroscopy = load_spectroscopy()
+    found_dace_id = False
    try:
-        dace_id = get_dace_id(star)
+        dace_id = get_dace_id(star, verbose=verbose)
+        found_dace_id = True
    except ValueError as e:
        if main_id is not None:
-            dace_id = get_dace_id(main_id)
-        else:
-            raise e
+            try:
+                dace_id = get_dace_id(main_id, verbose=verbose)
+                found_dace_id = True
+            except ValueError:
+                pass
+
+    if not found_dace_id:
+        try:
+            with all_logging_disabled():
+                result = Spectroscopy.get_timeseries(target=star,
+                                                     sorted_by_instrument=True,
+                                                     output_format='numpy')
+            return result
+        except TypeError:
+            msg = f'no {instrument} observations for {star}'
+            raise ValueError(msg) from None
 
-    Spectroscopy = load_spectroscopy()
    filters = {
        "ins_name": {"contains": [instrument]},
        "obj_id_daceid": {"contains": [dace_id]}
    }
-    with stdout_disabled(), all_logging_disabled():
+    with all_logging_disabled():
        result = Spectroscopy.query_database(filters=filters)
 
    if len(result) == 0:
@@ -215,7 +257,7 @@ def get_observations(star, instrument=None, main_id=None, verbose=True):
            raise ValueError(msg) from None
    else:
        try:
-            result = get_observations_from_instrument(star, instrument, main_id)
+            result = get_observations_from_instrument(star, instrument, main_id, verbose)
        except ValueError:
            msg = f'no {instrument} observations for {star}'
            raise ValueError(msg) from None
@@ -320,16 +362,29 @@ def check_existing(output_directory, files, type):
 
    return np.array(missing)
 
-def download(files, type, output_directory):
+def download(files, type, output_directory, output_filename=None, quiet=True, pbar=None):
    """ Download files from DACE """
    Spectroscopy = load_spectroscopy()
-    with stdout_disabled(), all_logging_disabled():
+    if isinstance(files, str):
+        files = [files]
+    if quiet:
+        with all_logging_disabled():
+            Spectroscopy.download_files(files, file_type=type.lower(),
+                                        output_directory=output_directory,
+                                        output_filename=output_filename)
+    else:
        Spectroscopy.download_files(files, file_type=type.lower(),
-                                    output_directory=output_directory)
+                                    output_directory=output_directory,
+                                    output_filename=output_filename)
+    if pbar is not None:
+        pbar.update()
 
-def extract_fits(output_directory):
+
+def extract_fits(output_directory, filename=None):
    """ Extract fits files from tar.gz file """
-    file = os.path.join(output_directory, 'spectroscopy_download.tar.gz')
+    if filename is None:
+        filename = 'spectroscopy_download.tar.gz'
+    file = os.path.join(output_directory, filename)
    with tarfile.open(file, "r") as tar:
        files = []
        for member in tar.getmembers():
@@ -387,7 +442,7 @@ def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_le
 
 
 def do_download_filetype(type, raw_files, output_directory, clobber=False,
-                         verbose=True, chunk_size=20):
+                         verbose=True, chunk_size=20, parallel_limit=30):
    """ Download CCFs / S1Ds / S2Ds from DACE """
    raw_files = np.atleast_1d(raw_files)
 
@@ -407,7 +462,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
 
    # avoid an empty chunk
    if chunk_size > n:
-         chunk_size = n
+        chunk_size = n
 
    if verbose:
        if chunk_size < n:
@@ -419,11 +474,36 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
        msg = f"downloading {n} {type}s into '{output_directory}'..."
        logger.info(msg)
 
-    iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
-    for files in tqdm(iterator, total=len(iterator)):
-        download(files, type, output_directory)
-    extract_fits(output_directory)
+    if n < parallel_limit:
+        iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
+        for files in tqdm(iterator, total=len(iterator)):
+            download(files, type, output_directory, quiet=False)
+        extract_fits(output_directory)
 
+    else:
+        def chunker(it, size):
+            iterator = iter(it)
+            while chunk := list(islice(iterator, size)):
+                yield chunk
+
+        chunks = list(chunker(raw_files, chunk_size))
+        pbar = tqdm(total=len(chunks))
+        it1 = [
+            (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', True, pbar)
+            for i, files in enumerate(chunks)
+        ]
+        it2 = [(output_directory, f'spectroscopy_download{i+1}.tar.gz') for i in range(len(chunks))]
+
+        # import multiprocessing as mp
+        # with mp.Pool(4) as pool:
+        from multiprocessing.pool import ThreadPool
+
+        with ThreadPool(4) as pool:
+            pool.starmap(download, it1)
+            pool.starmap(extract_fits, it2)
+        print('')
+
+    sys.stdout.flush()
    logger.info('extracted .fits files')
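The parallel branch above splits the file list into chunks, downloads each chunk into its own numbered tarball from four threads, and only then extracts everything. A self-contained sketch of the same chunker-plus-`ThreadPool` pattern, with a hypothetical `fake_download` in place of the DACE call:

```python
from itertools import islice
from multiprocessing.pool import ThreadPool

def chunker(it, size):
    # yield successive lists of up to `size` items
    iterator = iter(it)
    while chunk := list(islice(iterator, size)):
        yield chunk

def fake_download(files, output_filename):
    # placeholder for the real download call
    print(f'{output_filename}: {len(files)} files')

files = [f'file{i:03d}.fits' for i in range(95)]
chunks = list(chunker(files, 20))
args = [(chunk, f'spectroscopy_download{i+1}.tar.gz')
        for i, chunk in enumerate(chunks)]

# threads (not processes) suffice here: the work is network-bound
with ThreadPool(4) as pool:
    pool.starmap(fake_download, args)
```

Giving each chunk a distinct `output_filename` is what makes the concurrent downloads safe: no two threads ever write to the same tarball.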
@@ -25,10 +25,11 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
    if not kima_available:
        raise ImportError('kima not available, please install with `pip install kima`')
 
-    time = [getattr(self, inst).mtime for inst in self.instruments]
-    vrad = [getattr(self, inst).mvrad for inst in self.instruments]
-    err = [getattr(self, inst).msvrad for inst in self.instruments]
-    data = RVData(time, vrad, err, instruments=self.instruments)
+    instruments = [inst for inst in self.instruments if self.NN[inst] > 1]
+    time = [getattr(self, inst).mtime for inst in instruments]
+    vrad = [getattr(self, inst).mvrad for inst in instruments]
+    err = [getattr(self, inst).msvrad for inst in instruments]
+    data = RVData(time, vrad, err, instruments=instruments)
 
    fix = kwargs.pop('fix', False)
    npmax = kwargs.pop('npmax', 1)
@@ -41,6 +42,13 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
    model.enforce_stability = kwargs.pop('enforce_stability', False)
    model.star_mass = kwargs.pop('star_mass', 1.0)
 
+    if kwargs.pop('gaussian_priors_individual_offsets', False):
+        from kima.pykima.utils import get_gaussian_priors_individual_offsets
+        model.individual_offset_prior = get_gaussian_priors_individual_offsets(data, use_std=True)
+
+    if kwargs.pop('kuma', False):
+        model.conditional.eprior = distributions.Kumaraswamy(0.867, 3.03)
+
    for k, v in priors.items():
        try:
            if 'conditional' in k:
@@ -55,9 +63,10 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
            logger.warning(msg)
            return
 
+    if run_directory is None:
+        run_directory = os.getcwd()
+
    if run:
-        if run_directory is None:
-            run_directory = os.getcwd()
 
        # TODO: use signature of kima.run to pop the correct kwargs
        # model_name = model.__class__.__name__
@@ -67,8 +76,9 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
        with chdir(run_directory):
            kima.run(model, **kwargs)
 
-        if load:
+    if load:
+        with chdir(run_directory):
            res = kima.load_results(model)
-            return data, model, res
+            return data, model, res
 
    return data, model
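With `run_directory` now resolved before both branches, running the model and later loading its results use the same directory. A hypothetical usage sketch, assuming `run_kima` is exposed as a method on the `RV` object (as its `self` parameter suggests); the star name and directory are illustrative:

```python
from arvi import RV

s = RV('HD10700')
# run the model and load its results from the same directory
data, model, res = s.run_kima(run=True, load=True,
                              run_directory='kima_runs/HD10700')
```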
@@ -0,0 +1,247 @@
+import os
+import multiprocessing
+from functools import partial, lru_cache
+from itertools import chain
+from collections import namedtuple
+from multiprocessing.pool import ThreadPool
+from tqdm import tqdm
+# import numpy as np
+
+from .setup_logger import logger
+from .timeseries import RV
+
+__all__ = ['ESPRESSO_GTO']
+
+path = os.path.join(os.path.dirname(__file__), 'data')
+
+
+def get_star(star, instrument=None, verbose=False, **kwargs):
+    return RV(star, instrument=instrument,
+              _raise_on_error=False, verbose=verbose, **kwargs)
+
+
+class LazyRV:
+    def __init__(self, stars: list, instrument: str = None,
+                 _parallel_limit=10):
+        self.stars = stars
+        if isinstance(self.stars, str):
+            self.stars = [self.stars]
+        self.instrument = instrument
+        self._saved = None
+        self._parallel_limit = _parallel_limit
+
+    @property
+    def N(self):
+        return len(self.stars)
+
+    def __repr__(self):
+        return f"RV({self.N} stars)"
+
+    def _get(self, **kwargs):
+        if self.N > self._parallel_limit:
+            # logger.info('Querying DACE...')
+            _get_star = partial(get_star, instrument=self.instrument, **kwargs)
+            with ThreadPool(8) as pool:
+                result = list(tqdm(pool.imap(_get_star, self.stars),
+                                   total=self.N, unit='star',
+                                   desc='Querying DACE (can take a while)'))
+            print('')
+        else:
+            result = []
+            logger.info('querying DACE...')
+            pbar = tqdm(self.stars, total=self.N, unit='star')
+            for star in pbar:
+                pbar.set_description(star)
+                result.append(get_star(star, self.instrument, **kwargs))
+
+        return result
+
+        # # use a with statement to ensure threads are cleaned up promptly
+        # with concurrent.futures.ThreadPoolExecutor(max_workers=8) as pool:
+        #     star_to_RV = {
+        #         pool.submit(get_star, star, self.instrument): star
+        #         for star in self.stars
+        #     }
+        #     logger.info('Querying DACE...')
+        #     pbar = tqdm(concurrent.futures.as_completed(star_to_RV),
+        #                 total=self.N, unit='star')
+        #     for future in pbar:
+        #         star = star_to_RV[future]
+        #         pbar.set_description(star)
+        #         try:
+        #             result.append(future.result())
+        #         except ValueError:
+        #             print(f'{star} generated an exception')
+        #             result.append(None)
+        # return result
+
+    def reload(self, **kwargs):
+        self._saved = self._get(**kwargs)
+        return self._saved
+
+    def __iter__(self):
+        return self._get()
+
+    def __call__(self, **kwargs):
+        if not self._saved:
+            self._saved = self._get(**kwargs)
+        return self._saved
+
+    @lru_cache(maxsize=10)
+    def __getitem__(self, index):
+        star = self.stars[index]
+        if self._saved is not None:
+            return self._saved[index]
+        return get_star(star, self.instrument, verbose=True)
+
+
+# sorted by spectral type
+WG1_stars = [
+    "HIP11533",   # F2
+    "HD63077",    # F9
+    "HD102365",   # G2
+    "HD160691",   # G3
+    "HD20794",    # G6
+    "HD115617",   # G6
+    "HD10700",    # G8
+    "HD69830",    # G8
+    "HD26965",    # K0
+    "HD100623",   # K0
+    "HD154088",   # K0
+    "HD72673",    # K1
+    "HD4628",     # K2
+    "HD191408",   # K2
+    "HD192310",   # K2
+    "HD16160",    # K3
+    "HD32147",    # K3
+    "HD22496",    # K5
+    "HIP93069",   # K5
+    "HD209100",   # K5
+    "HIP23708",   # K7
+    "HD152606",   # K8
+    "HD260655",   # M0
+    "HIP40239",   # M0
+    "HD304636",   # M0
+    "HIP85647",   # M0
+    "HD165222",   # M0
+    "HD191849",   # M0
+    "GJ191",      # M1
+    "HD42581",    # M1
+    "HIP42748",   # M1
+    "HIP65859",   # M1
+    "HIP86287",   # M1
+    "HD176029",   # M1
+    "GJ825",      # M1
+    "HD225213",   # M2
+    "GJ54",       # M2
+    "HIP22627",   # M2
+    "HIP51317",   # M2
+    "HD119850",   # M2
+    "GJ832",      # M2
+    "HD217987",   # M2
+    "GJ273",      # M3
+    "GJ388",      # M3
+    "HIP62452",   # M3
+    "HIP67164",   # M3
+    "HIP71253",   # M3
+    "GJ628",      # M3
+    "HIP85523",   # M3
+    "HIP86214",   # M3
+    "HIP92403",   # M3
+    "HIP113020",  # M3
+    "HIP1242",    # M4
+    "GJ83.1",     # M4
+    "HIP53020",   # M4
+    "Ross128",    # M4
+    "GJ699",      # M4
+    "GJ1002",     # M5
+    "GJ1061",     # M5
+    "GJ3618",     # M5
+    "Proxima",    # M5
+    "GJ406",      # M6
+]
+
+ESPRESSO_GTO_nt = namedtuple('ESPRESSO_GTO', ['WG1', 'WG2', 'WG3'])
+ESPRESSO_GTO = ESPRESSO_GTO_nt(
+    WG1=LazyRV(WG1_stars, instrument='ESPRESSO'),
+    WG2=LazyRV([], instrument='ESPRESSO'),  # TODO
+    WG3=LazyRV([], instrument='ESPRESSO'),  # TODO
+)
+ESPRESSO_GTO.WG1.__doc__ = 'RV observations for all WG1 targets. Call ESPRESSO_GTO.WG1() to load them.'
+ESPRESSO_GTO.WG2.__doc__ = 'RV observations for all WG2 targets. Call ESPRESSO_GTO.WG2() to load them.'
+ESPRESSO_GTO.WG3.__doc__ = 'RV observations for all WG3 targets. Call ESPRESSO_GTO.WG3() to load them.'
+
+
+import requests
+
+def _get_NIRPS_GTO_stars(WP=1):
+    from io import StringIO
+    import numpy as np
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/113/NIRPS/P113_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P113 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/114/NIRPS/P114_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P114 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/115/NIRPS/P115_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P115 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    def _get_stars_period(stars, WP):
+        stars = np.delete(stars, stars=='')
+
+        stars = np.char.replace(stars, '_', ' ')
+        stars = np.char.replace(stars, "Proxima Centauri", "Proxima")
+        stars = np.char.replace(stars, "Barnard's star", "GJ699")
+        stars = np.char.replace(stars, "Teegarden's Star", 'Teegarden')
+
+        if WP in (1, 'WP1'):
+            wp1_indices = slice(np.where(stars == 'WP1')[0][0] + 1, np.where(stars == 'WP2')[0][0])
+            return stars[wp1_indices]
+        elif WP == 2:
+            wp2_indices = slice(np.where(stars == 'WP2')[0][0] + 1, np.where(stars == 'WP3')[0][0])
+            return stars[wp2_indices]
+        elif WP == 3:
+            wp3_indices = slice(np.where(stars == 'WP3')[0][0] + 1, np.where(stars == 'Other Science 1')[0][0])
+            return stars[wp3_indices]
+        elif WP == 'OS1':
+            os1_indices = slice(np.where(stars == 'Other Science 1')[0][0] + 1, np.where(stars == 'Other Science 2')[0][0])
+            return stars[os1_indices]
+        elif WP == 'OS2':
+            os2_indices = slice(np.where(stars == 'Other Science 2')[0][0] + 1, None)
+            stars = np.char.replace(stars, 'MMU', 'No')
+            stars = np.char.replace(stars, 'Cl*', '')
+            return stars[os2_indices]
+
+    stars_P113 = _get_stars_period(stars_P113, WP)
+    stars_P114 = _get_stars_period(stars_P114, WP)
+    stars_P115 = _get_stars_period(stars_P115, WP)
+    return np.union1d(np.union1d(stars_P113, stars_P114), stars_P115)
+
+try:
+    NIRPS_GTO_WP1_stars = _get_NIRPS_GTO_stars(WP=1)
+    NIRPS_GTO_WP2_stars = _get_NIRPS_GTO_stars(WP=2)
+    NIRPS_GTO_WP3_stars = _get_NIRPS_GTO_stars(WP=3)
+    NIRPS_GTO_OS1_stars = _get_NIRPS_GTO_stars(WP='OS1')
+    NIRPS_GTO_OS2_stars = _get_NIRPS_GTO_stars(WP='OS2')
+except requests.ConnectionError:
+    from .setup_logger import logger
+    logger.error('Cannot download NIRPS GTO protected target list')
+else:
+    NIRPS_GTO_nt = namedtuple('NIRPS_GTO', ['WP1', 'WP2', 'WP3', 'OS1', 'OS2'])
+    NIRPS_GTO_nt.__doc__ = 'RV observations for all NIRPS GTO targets. See NIRPS_GTO.WP1, NIRPS_GTO.WP2, ...'
+    NIRPS_GTO = NIRPS_GTO_nt(
+        WP1=LazyRV(NIRPS_GTO_WP1_stars, instrument='NIRPS'),
+        WP2=LazyRV(NIRPS_GTO_WP2_stars, instrument='NIRPS'),
+        WP3=LazyRV(NIRPS_GTO_WP3_stars, instrument='NIRPS'),
+        OS1=LazyRV(NIRPS_GTO_OS1_stars, instrument='NIRPS'),
+        OS2=LazyRV(NIRPS_GTO_OS2_stars, instrument='NIRPS'),
+    )
+    NIRPS_GTO.WP1.__doc__ = 'RV observations for all WP1 targets. Call NIRPS_GTO.WP1() to load them.'
+    NIRPS_GTO.WP2.__doc__ = 'RV observations for all WP2 targets. Call NIRPS_GTO.WP2() to load them.'
+    NIRPS_GTO.WP3.__doc__ = 'RV observations for all WP3 targets. Call NIRPS_GTO.WP3() to load them.'
+    NIRPS_GTO.OS1.__doc__ = 'RV observations for all OS1 targets. Call NIRPS_GTO.OS1() to load them.'
+    NIRPS_GTO.OS2.__doc__ = 'RV observations for all OS2 targets. Call NIRPS_GTO.OS2() to load them.'
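The new `LazyRV` wrapper defers the DACE queries until the collection is actually used: the namedtuple fields are cheap to create at import time, and the first call runs `_get` (threaded above `_parallel_limit` stars) and memoizes the result in `_saved`. A usage sketch based on the docstrings above:

```python
from arvi.programs import ESPRESSO_GTO

print(ESPRESSO_GTO.WG1)         # RV(62 stars) -- nothing queried yet
wg1 = ESPRESSO_GTO.WG1()        # first call queries DACE and caches the list
wg1_again = ESPRESSO_GTO.WG1()  # returns the cached list
star3 = ESPRESSO_GTO.WG1[3]     # a single target, without loading the rest
```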
@@ -72,7 +72,7 @@ def fit_gaussian_to_line(wave, flux, center_wavelength, around=0.15 * u.angstrom
    ]).T
 
    try:
-        popt, pcov = curve_fit(gaussian, w, f, p0=[-f.ptp(), center_wavelength.value, 0.1, f.max()],
+        popt, pcov = curve_fit(gaussian, w, f, p0=[-np.ptp(f), center_wavelength.value, 0.1, f.max()],
                               bounds=(lower, upper))
    except RuntimeError as e:
        logger.warning(f'fit_gaussian_to_line: {e}')
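The `-f.ptp()` to `-np.ptp(f)` change is the NumPy 2.0-compatible spelling: the `ndarray.ptp()` method was removed in NumPy 2.0 (NEP 52), while the `np.ptp()` function remains. For example:

```python
import numpy as np

f = np.array([0.5, 2.0, 1.25])
print(np.ptp(f))  # 1.5 (peak-to-peak: max - min), works on NumPy 1.x and 2.x
# f.ptp() raises AttributeError on NumPy >= 2.0
```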
@@ -115,7 +115,7 @@ def detrend(w, f):
 def build_master(self, limit=None, plot=True):
    files = sorted(glob(f'{self.star}_downloads/*S1D_A.fits'))
    if self.verbose:
-        logger.info(f'Found {len(files)} S1D files')
+        logger.info(f'found {len(files)} S1D files')
 
    files = files[:limit]
 
@@ -168,8 +168,8 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
    ]
 
    if self.verbose:
-        logger.info(f'Found {len(lines)} lines in linelist')
-        logger.info('Measuring EWs...')
+        logger.info(f'found {len(lines)} lines in linelist')
+        logger.info('measuring EWs...')
 
    EW = []
    pbar = tqdm(linelist)
@@ -183,14 +183,14 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
    EW = np.array(EW)[~np.isnan(EW)]
 
    if self.verbose:
-        logger.info('Determining stellar parameters (can take a few minutes)...')
+        logger.info('determining stellar parameters (can take a few minutes)...')
 
    callback = lambda p, r, A: print('current parameters:', p)
    result = Korg.Fit.ews_to_stellar_parameters(lines, EW, callback=callback)
    par, stat_err, sys_err = result
 
    if self.verbose:
-        logger.info(f'Best fit stellar parameters:')
+        logger.info(f'best-fit stellar parameters:')
        logger.info(f'  Teff: {par[0]:.0f} ± {sys_err[0]:.0f} K')
        logger.info(f'  logg: {par[1]:.2f} ± {sys_err[1]:.2f} dex')
        logger.info(f'  m/H : {par[3]:.2f} ± {sys_err[3]:.2f} dex')