arvi 0.1.23__tar.gz → 0.1.24__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {arvi-0.1.23 → arvi-0.1.24}/.github/workflows/install.yml +1 -1
- {arvi-0.1.23 → arvi-0.1.24}/.gitignore +1 -1
- {arvi-0.1.23 → arvi-0.1.24}/PKG-INFO +1 -1
- {arvi-0.1.23 → arvi-0.1.24}/arvi/dace_wrapper.py +115 -38
- {arvi-0.1.23 → arvi-0.1.24}/arvi/kima_wrapper.py +18 -8
- arvi-0.1.24/arvi/programs.py +247 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/spectra.py +6 -6
- {arvi-0.1.23 → arvi-0.1.24}/arvi/timeseries.py +109 -32
- {arvi-0.1.23 → arvi-0.1.24}/arvi/translations.py +5 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi.egg-info/PKG-INFO +1 -1
- arvi-0.1.23/arvi/programs.py +0 -159
- {arvi-0.1.23 → arvi-0.1.24}/.github/workflows/docs-gh-pages.yml +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/.github/workflows/python-publish.yml +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/LICENSE +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/README.md +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/HZ.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/__init__.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/ariadne_wrapper.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/berv.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/binning.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/config.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/data/extra/HD86226_PFS1.rdb +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/data/extra/HD86226_PFS2.rdb +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/data/extra/metadata.json +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/data/info.svg +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/data/obs_affected_ADC_issues.dat +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/data/obs_affected_blue_cryostat_issues.dat +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/extra_data.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/gaia_wrapper.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/headers.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/instrument_specific.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/lbl_wrapper.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/nasaexo_wrapper.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/plots.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/reports.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/setup_logger.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/simbad_wrapper.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/stats.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/stellar.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi/utils.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi.egg-info/SOURCES.txt +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi.egg-info/dependency_links.txt +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi.egg-info/requires.txt +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/arvi.egg-info/top_level.txt +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/docs/API.md +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/docs/detailed.md +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/docs/index.md +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/docs/logo/detective.png +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/docs/logo/logo.png +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/mkdocs.yml +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/pyproject.toml +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/setup.cfg +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/setup.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/tests/test_binning.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/tests/test_import_object.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/tests/test_simbad.py +0 -0
- {arvi-0.1.23 → arvi-0.1.24}/tests/test_stats.py +0 -0
{arvi-0.1.23 → arvi-0.1.24}/arvi/dace_wrapper.py

```diff
@@ -1,7 +1,9 @@
 import os
+import sys
 import tarfile
 import collections
 from functools import lru_cache
+from itertools import islice
 import numpy as np
 from dace_query import DaceClass
 from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
```
```diff
@@ -22,14 +24,15 @@ def load_spectroscopy() -> SpectroscopyClass:
     return default_Spectroscopy


 @lru_cache()
-def get_dace_id(star):
+def get_dace_id(star, verbose=True):
     filters = {"obj_id_catname": {"equal": [star]}}
     try:
-        with …
+        with all_logging_disabled():
             r = load_spectroscopy().query_database(filters=filters, limit=1)
             return r['obj_id_daceid'][0]
     except KeyError:
-        …
+        if verbose:
+            logger.error(f"Could not find DACE ID for {star}")
         raise ValueError from None

 def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
```
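A note on the memoization above: `functools.lru_cache` keys the cache on the full argument tuple, so the new `verbose` parameter becomes part of the cache key and the same star queried with different `verbose` values produces two cache entries. A minimal standalone sketch of that behavior (not arvi code):

```python
from functools import lru_cache

@lru_cache()
def get_id(star, verbose=True):
    print(f'querying {star}...')  # only runs on a cache miss
    return hash(star)             # stand-in for the actual DACE query

get_id('HD10700')                 # miss: prints and "queries"
get_id('HD10700')                 # hit: cached, nothing printed
get_id('HD10700', verbose=False)  # different key: queries again
```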
```diff
@@ -55,15 +58,19 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
             pipelines = [pipelines[i]]

         if latest_pipeline:
+            npipe = len(pipelines)
             if 'NIRPS' in inst and any(['LBL' in p for p in pipelines]):
                 # TODO: correctly load both CCF and LBL
                 pipelines = [pipelines[1]]
+            if 'HARPS' in inst and npipe > 1 and pipelines[1] == pipelines[0] + '-EGGS':
+                pipelines = pipelines[:2]
             else:
                 pipelines = [pipelines[0]]

-            if verbose and …
+            if verbose and npipe > 1:
                 logger.info(f'selecting latest pipeline ({pipelines[0]}) for {inst}')

+
         for pipe in pipelines:
             modes = [m for m in result[inst][pipe].keys()]

```
```diff
@@ -80,15 +87,25 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):

             # HARPS15 observations should not be separated by 'mode' if some are
             # done together with NIRPS
-            if 'HARPS15' in inst…
-…
-…
-…
-…
-…
-…
-                (…
-…
+            if 'HARPS15' in inst:
+                if 'HARPS+NIRPS' in modes:
+                    m0 = modes[0]
+                    data = {
+                        k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                        for k in result[inst][pipe][m0].keys()
+                    }
+                    arrays.append(
+                        ((str(inst), str(pipe), str(m0)), data)
+                    )
+                if 'EGGS+NIRPS' in modes:
+                    m0 = modes[0]
+                    data = {
+                        k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                        for k in result[inst][pipe][m0].keys()
+                    }
+                    arrays.append(
+                        ((str(inst + '_EGGS'), str(pipe), str(m0)), data)
+                    )
                 continue

             for mode in modes:
```
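The HARPS15 branch above merges the per-mode result dictionaries by concatenating each quantity across modes, keyed on the first mode's fields. A standalone sketch of that pattern with hypothetical data (not arvi code):

```python
import numpy as np

# hypothetical per-mode arrays for one instrument/pipeline
result = {
    'HARPS+NIRPS': {'rjd': np.array([1.0, 2.0]), 'rv': np.array([5.1, 5.2])},
    'HARPS':       {'rjd': np.array([3.0]),      'rv': np.array([5.3])},
}
modes = list(result.keys())
m0 = modes[0]
data = {
    k: np.concatenate([result[m][k] for m in modes])  # stack each field over modes
    for k in result[m0].keys()
}
print(data['rjd'])  # [1. 2. 3.]
```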
```diff
@@ -97,39 +114,61 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
                     raise ValueError

                 arrays.append(
-                    ((inst, pipe, mode), result[inst][pipe][mode])
+                    ((str(inst), str(pipe), str(mode)), result[inst][pipe][mode])
                 )

     return arrays

-def get_observations_from_instrument(star, instrument, main_id=None):
+def get_observations_from_instrument(star, instrument, main_id=None, verbose=True):
     """ Query DACE for all observations of a given star and instrument

     Args:
-        star (str): …
-…
-…
+        star (str):
+            name of the star
+        instrument (str):
+            instrument name
+        main_id (str, optional):
+            Simbad main id of target to query DACE id. Defaults to None.
+        verbose (bool, optional):
+            whether to print warnings. Defaults to True.

     Raises:
-        ValueError: …
+        ValueError:
+            If query for DACE id fails

     Returns:
-        dict: …
+        dict:
+            dictionary with data from DACE
     """
+    Spectroscopy = load_spectroscopy()
+    found_dace_id = False
     try:
-        dace_id = get_dace_id(star)
+        dace_id = get_dace_id(star, verbose=verbose)
+        found_dace_id = True
     except ValueError as e:
         if main_id is not None:
-…
-…
-…
+            try:
+                dace_id = get_dace_id(main_id, verbose=verbose)
+                found_dace_id = True
+            except ValueError:
+                pass
+
+        if not found_dace_id:
+            try:
+                with all_logging_disabled():
+                    result = Spectroscopy.get_timeseries(target=star,
+                                                         sorted_by_instrument=True,
+                                                         output_format='numpy')
+                return result
+            except TypeError:
+                msg = f'no {instrument} observations for {star}'
+                raise ValueError(msg) from None

-    Spectroscopy = load_spectroscopy()
     filters = {
         "ins_name": {"contains": [instrument]},
         "obj_id_daceid": {"contains": [dace_id]}
     }
-    with …
+    with all_logging_disabled():
         result = Spectroscopy.query_database(filters=filters)

     if len(result) == 0:
```
```diff
@@ -218,7 +257,7 @@ def get_observations(star, instrument=None, main_id=None, verbose=True):
             raise ValueError(msg) from None
     else:
         try:
-            result = get_observations_from_instrument(star, instrument, main_id)
+            result = get_observations_from_instrument(star, instrument, main_id, verbose)
         except ValueError:
             msg = f'no {instrument} observations for {star}'
             raise ValueError(msg) from None
```
```diff
@@ -323,16 +362,29 @@ def check_existing(output_directory, files, type):

     return np.array(missing)

-def download(files, type, output_directory):
+def download(files, type, output_directory, output_filename=None, quiet=True, pbar=None):
     """ Download files from DACE """
     Spectroscopy = load_spectroscopy()
-…
+    if isinstance(files, str):
+        files = [files]
+    if quiet:
+        with all_logging_disabled():
+            Spectroscopy.download_files(files, file_type=type.lower(),
+                                        output_directory=output_directory,
+                                        output_filename=output_filename)
+    else:
         Spectroscopy.download_files(files, file_type=type.lower(),
-                                output_directory=output_directory…
+                                    output_directory=output_directory,
+                                    output_filename=output_filename)
+    if pbar is not None:
+        pbar.update()

-…
+
+def extract_fits(output_directory, filename=None):
     """ Extract fits files from tar.gz file """
-…
+    if filename is None:
+        filename = 'spectroscopy_download.tar.gz'
+    file = os.path.join(output_directory, filename)
     with tarfile.open(file, "r") as tar:
         files = []
         for member in tar.getmembers():
```
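With these changes, `download` accepts a single filename as well as a list, silences dace_query's logging by default (`quiet=True`), and `extract_fits` can target a specific archive name. A hypothetical call, assuming DACE credentials are already configured for `dace_query` (the raw-file name below is made up for illustration):

```python
from arvi.dace_wrapper import download, extract_fits

download('r.ESPRE.2023-01-01T00:00:00.000.fits',  # hypothetical raw file
         type='S1D', output_directory='downloads')  # quiet=True by default
extract_fits('downloads')  # unpacks the default spectroscopy_download.tar.gz
```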
```diff
@@ -390,7 +442,7 @@ def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_le…


 def do_download_filetype(type, raw_files, output_directory, clobber=False,
-                         verbose=True, chunk_size=20):
+                         verbose=True, chunk_size=20, parallel_limit=30):
     """ Download CCFs / S1Ds / S2Ds from DACE """
     raw_files = np.atleast_1d(raw_files)

```
```diff
@@ -410,7 +462,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,

     # avoid an empty chunk
     if chunk_size > n:
-       chunk_size = n
+        chunk_size = n

     if verbose:
         if chunk_size < n:
```
```diff
@@ -422,11 +474,36 @@
         msg = f"downloading {n} {type}s into '{output_directory}'..."
         logger.info(msg)

-…
-…
-…
-…
+    if n < parallel_limit:
+        iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
+        for files in tqdm(iterator, total=len(iterator)):
+            download(files, type, output_directory, quiet=False)
+            extract_fits(output_directory)

+    else:
+        def chunker(it, size):
+            iterator = iter(it)
+            while chunk := list(islice(iterator, size)):
+                yield chunk
+
+        chunks = list(chunker(raw_files, chunk_size))
+        pbar = tqdm(total=len(chunks))
+        it1 = [
+            (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', True, pbar)
+            for i, files in enumerate(chunks)
+        ]
+        it2 = [(output_directory, f'spectroscopy_download{i+1}.tar.gz') for i in range(len(chunks))]
+
+        # import multiprocessing as mp
+        # with mp.Pool(4) as pool:
+        from multiprocessing.pool import ThreadPool
+
+        with ThreadPool(4) as pool:
+            pool.starmap(download, it1)
+            pool.starmap(extract_fits, it2)
+        print('')
+
+    sys.stdout.flush()
     logger.info('extracted .fits files')


```
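The parallel branch above splits the file list into chunks with a walrus-operator generator over `itertools.islice`, and gives each chunk its own numbered archive (`spectroscopy_download{i+1}.tar.gz`) so the four worker threads do not overwrite each other's tar.gz. A minimal standalone sketch of the pattern (not arvi code; `fetch` stands in for the real download):

```python
from itertools import islice
from multiprocessing.pool import ThreadPool

def chunker(it, size):
    iterator = iter(it)
    while chunk := list(islice(iterator, size)):  # an empty chunk ends the loop
        yield chunk

def fetch(chunk, archive_name):  # stand-in for download()
    return f'{archive_name}: {len(chunk)} files'

chunks = list(chunker(range(95), 20))  # 5 chunks: 20+20+20+20+15
args = [(c, f'download{i+1}.tar.gz') for i, c in enumerate(chunks)]
with ThreadPool(4) as pool:
    print(pool.starmap(fetch, args))
```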
{arvi-0.1.23 → arvi-0.1.24}/arvi/kima_wrapper.py

```diff
@@ -25,10 +25,11 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
     if not kima_available:
         raise ImportError('kima not available, please install with `pip install kima`')

-…
-…
-…
-…
+    instruments = [inst for inst in self.instruments if self.NN[inst] > 1]
+    time = [getattr(self, inst).mtime for inst in instruments]
+    vrad = [getattr(self, inst).mvrad for inst in instruments]
+    err = [getattr(self, inst).msvrad for inst in instruments]
+    data = RVData(time, vrad, err, instruments=instruments)

     fix = kwargs.pop('fix', False)
     npmax = kwargs.pop('npmax', 1)
```
```diff
@@ -41,6 +42,13 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
     model.enforce_stability = kwargs.pop('enforce_stability', False)
     model.star_mass = kwargs.pop('star_mass', 1.0)

+    if kwargs.pop('gaussian_priors_individual_offsets', False):
+        from kima.pykima.utils import get_gaussian_priors_individual_offsets
+        model.individual_offset_prior = get_gaussian_priors_individual_offsets(data, use_std=True)
+
+    if kwargs.pop('kuma', False):
+        model.conditional.eprior = distributions.Kumaraswamy(0.867, 3.03)
+
     for k, v in priors.items():
         try:
             if 'conditional' in k:
```
```diff
@@ -55,9 +63,10 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
             logger.warning(msg)
             return

+    if run_directory is None:
+        run_directory = os.getcwd()
+
     if run:
-        if run_directory is None:
-            run_directory = os.getcwd()

         # TODO: use signature of kima.run to pop the correct kwargs
         # model_name = model.__class__.__name__
```
```diff
@@ -67,8 +76,9 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
         with chdir(run_directory):
             kima.run(model, **kwargs)

-…
+    if load:
+        with chdir(run_directory):
             res = kima.load_results(model)
-…
+        return data, model, res

     return data, model
```
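A hypothetical end-to-end call, assuming `run_kima` is exposed on a loaded RV object `s` (as its `self` parameter suggests) and that kima is installed; the two kwargs flagged below are the ones added in this release:

```python
data, model = s.run_kima(run=True, run_directory='kima_run',
                         npmax=1, star_mass=0.8,
                         gaussian_priors_individual_offsets=True,  # new in 0.1.24
                         kuma=True)  # new in 0.1.24: Kumaraswamy(0.867, 3.03) ecc. prior
data, model, res = s.run_kima(load=True, run_directory='kima_run')
```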
arvi-0.1.24/arvi/programs.py
NEW

```diff
@@ -0,0 +1,247 @@
+import os
+import multiprocessing
+from functools import partial, lru_cache
+from itertools import chain
+from collections import namedtuple
+from multiprocessing.pool import ThreadPool
+from tqdm import tqdm
+# import numpy as np
+
+from .setup_logger import logger
+from .timeseries import RV
+
+__all__ = ['ESPRESSO_GTO']
+
+path = os.path.join(os.path.dirname(__file__), 'data')
+
+
+def get_star(star, instrument=None, verbose=False, **kwargs):
+    return RV(star, instrument=instrument,
+              _raise_on_error=False, verbose=verbose, **kwargs)
+
+
+class LazyRV:
+    def __init__(self, stars: list, instrument: str = None,
+                 _parallel_limit=10):
+        self.stars = stars
+        if isinstance(self.stars, str):
+            self.stars = [self.stars]
+        self.instrument = instrument
+        self._saved = None
+        self._parallel_limit = _parallel_limit
+
+    @property
+    def N(self):
+        return len(self.stars)
+
+    def __repr__(self):
+        return f"RV({self.N} stars)"
+
+    def _get(self, **kwargs):
+        if self.N > self._parallel_limit:
+            # logger.info('Querying DACE...')
+            _get_star = partial(get_star, instrument=self.instrument, **kwargs)
+            with ThreadPool(8) as pool:
+                result = list(tqdm(pool.imap(_get_star, self.stars),
+                                   total=self.N, unit='star',
+                                   desc='Querying DACE (can take a while)'))
+            print('')
+        else:
+            result = []
+            logger.info('querying DACE...')
+            pbar = tqdm(self.stars, total=self.N, unit='star')
+            for star in pbar:
+                pbar.set_description(star)
+                result.append(get_star(star, self.instrument, **kwargs))
+
+        return result
+
+        # # use a with statement to ensure threads are cleaned up promptly
+        # with concurrent.futures.ThreadPoolExecutor(max_workers=8) as pool:
+        #     star_to_RV = {
+        #         pool.submit(get_star, star, self.instrument): star
+        #         for star in self.stars
+        #     }
+        #     logger.info('Querying DACE...')
+        #     pbar = tqdm(concurrent.futures.as_completed(star_to_RV),
+        #                 total=self.N, unit='star')
+        #     for future in pbar:
+        #         star = star_to_RV[future]
+        #         pbar.set_description(star)
+        #         try:
+        #             result.append(future.result())
+        #         except ValueError:
+        #             print(f'{star} generated an exception')
+        #             result.append(None)
+        # return result
+
+    def reload(self, **kwargs):
+        self._saved = self._get(**kwargs)
+        return self._saved
+
+    def __iter__(self):
+        return self._get()
+
+    def __call__(self, **kwargs):
+        if not self._saved:
+            self._saved = self._get(**kwargs)
+        return self._saved
+
+    @lru_cache(maxsize=10)
+    def __getitem__(self, index):
+        star = self.stars[index]
+        if self._saved is not None:
+            return self._saved[index]
+        return get_star(star, self.instrument, verbose=True)
+
+
+# sorted by spectral type
+WG1_stars = [
+    "HIP11533",   # F2
+    "HD63077",    # F9
+    "HD102365",   # G2
+    "HD160691",   # G3
+    "HD20794",    # G6
+    "HD115617",   # G6
+    "HD10700",    # G8
+    "HD69830",    # G8
+    "HD26965",    # K0
+    "HD100623",   # K0
+    "HD154088",   # K0
+    "HD72673",    # K1
+    "HD4628",     # K2
+    "HD191408",   # K2
+    "HD192310",   # K2
+    "HD16160",    # K3
+    "HD32147",    # K3
+    "HD22496",    # K5
+    "HIP93069",   # K5
+    "HD209100",   # K5
+    "HIP23708",   # K7
+    "HD152606",   # K8
+    "HD260655",   # M0
+    "HIP40239",   # M0
+    "HD304636",   # M0
+    "HIP85647",   # M0
+    "HD165222",   # M0
+    "HD191849",   # M0
+    "GJ191",      # M1
+    "HD42581",    # M1
+    "HIP42748",   # M1
+    "HIP65859",   # M1
+    "HIP86287",   # M1
+    "HD176029",   # M1
+    "GJ825",      # M1
+    "HD225213",   # M2
+    "GJ54",       # M2
+    "HIP22627",   # M2
+    "HIP51317",   # M2
+    "HD119850",   # M2
+    "GJ832",      # M2
+    "HD217987",   # M2
+    "GJ273",      # M3
+    "GJ388",      # M3
+    "HIP62452",   # M3
+    "HIP67164",   # M3
+    "HIP71253",   # M3
+    "GJ628",      # M3
+    "HIP85523",   # M3
+    "HIP86214",   # M3
+    "HIP92403",   # M3
+    "HIP113020",  # M3
+    "HIP1242",    # M4
+    "GJ83.1",     # M4
+    "HIP53020",   # M4
+    "Ross128",    # M4
+    "GJ699",      # M4
+    "GJ1002",     # M5
+    "GJ1061",     # M5
+    "GJ3618",     # M5
+    "Proxima",    # M5
+    "GJ406",      # M6
+]
+
+ESPRESSO_GTO_nt = namedtuple('ESPRESSO_GTO', ['WG1', 'WG2', 'WG3'])
+ESPRESSO_GTO = ESPRESSO_GTO_nt(
+    WG1=LazyRV(WG1_stars, instrument='ESPRESSO'),
+    WG2=LazyRV([], instrument='ESPRESSO'),  # TODO
+    WG3=LazyRV([], instrument='ESPRESSO'),  # TODO
+)
+ESPRESSO_GTO.WG1.__doc__ = 'RV observations for all WG1 targets. Call ESPRESSO_GTO.WG1() to load them.'
+ESPRESSO_GTO.WG2.__doc__ = 'RV observations for all WG2 targets. Call ESPRESSO_GTO.WG2() to load them.'
+ESPRESSO_GTO.WG3.__doc__ = 'RV observations for all WG3 targets. Call ESPRESSO_GTO.WG3() to load them.'
+
+
+import requests
+
+def _get_NIRPS_GTO_stars(WP=1):
+    from io import StringIO
+    import numpy as np
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/113/NIRPS/P113_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P113 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/114/NIRPS/P114_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P114 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/115/NIRPS/P115_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P115 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    def _get_stars_period(stars, WP):
+        stars = np.delete(stars, stars=='')
+
+        stars = np.char.replace(stars, '_', ' ')
+        stars = np.char.replace(stars, "Proxima Centauri", "Proxima")
+        stars = np.char.replace(stars, "Barnard's star", "GJ699")
+        stars = np.char.replace(stars, "Teegarden's Star", 'Teegarden')
+
+        if WP in (1, 'WP1'):
+            wp1_indices = slice(np.where(stars == 'WP1')[0][0] + 1, np.where(stars == 'WP2')[0][0])
+            return stars[wp1_indices]
+        elif WP == 2:
+            wp2_indices = slice(np.where(stars == 'WP2')[0][0] + 1, np.where(stars == 'WP3')[0][0])
+            return stars[wp2_indices]
+        elif WP == 3:
+            wp3_indices = slice(np.where(stars == 'WP3')[0][0] + 1, np.where(stars == 'Other Science 1')[0][0])
+            return stars[wp3_indices]
+        elif WP == 'OS1':
+            os1_indices = slice(np.where(stars == 'Other Science 1')[0][0] + 1, np.where(stars == 'Other Science 2')[0][0])
+            return stars[os1_indices]
+        elif WP == 'OS2':
+            os2_indices = slice(np.where(stars == 'Other Science 2')[0][0] + 1, None)
+            stars = np.char.replace(stars, 'MMU', 'No')
+            stars = np.char.replace(stars, 'Cl*', '')
+            return stars[os2_indices]
+
+    stars_P113 = _get_stars_period(stars_P113, WP)
+    stars_P114 = _get_stars_period(stars_P114, WP)
+    stars_P115 = _get_stars_period(stars_P115, WP)
+    return np.union1d(np.union1d(stars_P113, stars_P114), stars_P115)
+
+try:
+    NIRPS_GTO_WP1_stars = _get_NIRPS_GTO_stars(WP=1)
+    NIRPS_GTO_WP2_stars = _get_NIRPS_GTO_stars(WP=2)
+    NIRPS_GTO_WP3_stars = _get_NIRPS_GTO_stars(WP=3)
+    NIRPS_GTO_OS1_stars = _get_NIRPS_GTO_stars(WP='OS1')
+    NIRPS_GTO_OS2_stars = _get_NIRPS_GTO_stars(WP='OS2')
+except requests.ConnectionError:
+    from .setup_logger import logger
+    logger.error('Cannot download NIRPS GTO protected target list')
+else:
+    NIRPS_GTO_nt = namedtuple('NIRPS_GTO', ['WP1', 'WP2', 'WP3', 'OS1', 'OS2'])
+    NIRPS_GTO_nt.__doc__ = 'RV observations for all NIRPS GTO targets. See NIRPS_GTO.WP1, NIRPS_GTO.WP2, ...'
+    NIRPS_GTO = NIRPS_GTO_nt(
+        WP1=LazyRV(NIRPS_GTO_WP1_stars, instrument='NIRPS'),
+        WP2=LazyRV(NIRPS_GTO_WP2_stars, instrument='NIRPS'),
+        WP3=LazyRV(NIRPS_GTO_WP3_stars, instrument='NIRPS'),
+        OS1=LazyRV(NIRPS_GTO_OS1_stars, instrument='NIRPS'),
+        OS2=LazyRV(NIRPS_GTO_OS2_stars, instrument='NIRPS'),
+    )
+    NIRPS_GTO.WP1.__doc__ = 'RV observations for all WP1 targets. Call NIRPS_GTO.WP1() to load them.'
+    NIRPS_GTO.WP2.__doc__ = 'RV observations for all WP2 targets. Call NIRPS_GTO.WP2() to load them.'
+    NIRPS_GTO.WP3.__doc__ = 'RV observations for all WP3 targets. Call NIRPS_GTO.WP3() to load them.'
+    NIRPS_GTO.OS1.__doc__ = 'RV observations for all OS1 targets. Call NIRPS_GTO.OS1() to load them.'
+    NIRPS_GTO.OS2.__doc__ = 'RV observations for all OS2 targets. Call NIRPS_GTO.OS2() to load them.'
```
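Per the docstrings set above, a `LazyRV` defers the DACE queries until it is called; calling caches the result, `reload()` forces a fresh query, and indexing loads a single star. A usage sketch:

```python
from arvi.programs import ESPRESSO_GTO

ESPRESSO_GTO.WG1                 # repr only: "RV(62 stars)", nothing queried yet
rvs = ESPRESSO_GTO.WG1()         # queries DACE (threaded for more than 10 stars) and caches
one = ESPRESSO_GTO.WG1[0]        # single star; memoized via lru_cache
rvs = ESPRESSO_GTO.WG1()         # cached list returned, no new queries
rvs = ESPRESSO_GTO.WG1.reload()  # force a fresh query
```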
{arvi-0.1.23 → arvi-0.1.24}/arvi/spectra.py

```diff
@@ -72,7 +72,7 @@ def fit_gaussian_to_line(wave, flux, center_wavelength, around=0.15 * u.angstrom…
     ]).T

     try:
-        popt, pcov = curve_fit(gaussian, w, f, p0=[-…
+        popt, pcov = curve_fit(gaussian, w, f, p0=[-np.ptp(f), center_wavelength.value, 0.1, f.max()],
                                bounds=(lower, upper))
     except RuntimeError as e:
         logger.warning(f'fit_gaussian_to_line: {e}')
```
```diff
@@ -115,7 +115,7 @@ def detrend(w, f):
     def build_master(self, limit=None, plot=True):
         files = sorted(glob(f'{self.star}_downloads/*S1D_A.fits'))
         if self.verbose:
-            logger.info(f'…
+            logger.info(f'found {len(files)} S1D files')

         files = files[:limit]

```
```diff
@@ -168,8 +168,8 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
         ]

         if self.verbose:
-            logger.info(f'…
-            logger.info('…
+            logger.info(f'found {len(lines)} lines in linelist')
+            logger.info('measuring EWs...')

         EW = []
         pbar = tqdm(linelist)
```
```diff
@@ -183,14 +183,14 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
         EW = np.array(EW)[~np.isnan(EW)]

         if self.verbose:
-            logger.info('…
+            logger.info('determining stellar parameters (can take a few minutes)...')

         callback = lambda p, r, A: print('current parameters:', p)
         result = Korg.Fit.ews_to_stellar_parameters(lines, EW, callback=callback)
         par, stat_err, sys_err = result

         if self.verbose:
-            logger.info(f'…
+            logger.info(f'best-fit stellar parameters:')
             logger.info(f'  Teff: {par[0]:.0f} ± {sys_err[0]:.0f} K')
             logger.info(f'  logg: {par[1]:.2f} ± {sys_err[1]:.2f} dex')
             logger.info(f'  m/H : {par[3]:.2f} ± {sys_err[3]:.2f} dex')
```
{arvi-0.1.23 → arvi-0.1.24}/arvi/timeseries.py

```diff
@@ -193,7 +193,7 @@ class RV:
         time_stamp = datetime.now(timezone.utc)  #.isoformat().split('.')[0]
         self._last_dace_query = time_stamp

-        _replacements = (('-', '_'), ('.', '_'), ('__', '_'))
+        _replacements = (('-', '_'), ('.', '_'), (' ', '_'), ('__', '_'))
         def do_replacements(s):
             for a, b in _replacements:
                 s = s.replace(a, b)
```
```diff
@@ -444,7 +444,7 @@ class RV:

         s.instruments = [inst]
         s.pipelines = [pipe]
-        s.modes = [mode]
+        s.modes = [str(mode)]

         return s

```
```diff
@@ -598,29 +598,47 @@ class RV:
             _quantities.append('rhk')
             _quantities.append('rhk_err')

-…
-…
+            # try to find BISPAN and uncertainty
+            if (v := find_column(data, ['bispan'])) is not False:
+                _s.bispan = v
+                _s.bispan_err = np.full_like(time, np.nan)
+                if (sv := find_column(data, ['sbispan'])) is not False:
+                    _s.bispan_err = sv
+            else:
+                _s.bispan = np.full_like(time, np.nan)
+                _s.bispan_err = np.full_like(time, np.nan)
+
+            _quantities.append('bispan')
+            _quantities.append('bispan_err')
+
+            # try to find BERV
+            if (v := find_column(data, ['berv', 'HIERARCH ESO QC BERV'])) is not False:
+                _s.berv = v
+            else:
+                _s.berv = np.full_like(time, np.nan)
+            _quantities.append('berv')

             # other quantities, but all NaNs
-            for q in ['…
+            for q in ['caindex', 'ccf_asym', 'contrast', 'haindex', 'naindex', 'sindex']:
                 setattr(_s, q, np.full_like(time, np.nan))
                 setattr(_s, q + '_err', np.full_like(time, np.nan))
                 _quantities.append(q)
                 _quantities.append(q + '_err')
-            for q in ['…
+            for q in ['texp', ]:
                 setattr(_s, q, np.full_like(time, np.nan))
                 _quantities.append(q)
             for q in ['ccf_mask', 'date_night', 'prog_id', 'raw_file', 'pub_reference']:
                 setattr(_s, q, np.full(time.size, ''))
                 _quantities.append(q)
-            for q in ['drs_qc']:
+            for q in ['drs_qc', ]:
                 setattr(_s, q, np.full(time.size, True))
                 _quantities.append(q)

             _s.extra_fields = ExtraFields()
-            for …
-            if …
-…
+            for name in data.dtype.names:
+                if name not in _quantities:
+                    name_ = name.replace(' ', '_')
+                    setattr(_s.extra_fields, name_, data[name])
                 # _quantities.append(field)

             #! end hack
```
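The blocks above lean on a `find_column` helper that evidently returns the matching column or `False`; comparing the walrus-bound result with `is not False` matters because truth-testing a NumPy array directly would raise an ambiguity error. A standalone sketch with a stand-in `find_column` (the real helper lives in timeseries.py):

```python
import numpy as np

def find_column(data, names):
    """Return the first matching column of a structured array, or False."""
    for name in names:
        if name in data.dtype.names:
            return data[name]
    return False

data = np.zeros(3, dtype=[('rjd', float), ('berv', float)])
if (v := find_column(data, ['berv', 'HIERARCH ESO QC BERV'])) is not False:
    print('berv found:', v)
else:
    print('berv missing; filling with NaN')
```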
```diff
@@ -628,12 +646,12 @@ class RV:
             _s.mask = np.ones_like(time, dtype=bool)
             _s.obs = np.full_like(time, i + 1)

-            _s.instruments = [instrument]
+            _s.instruments = [str(instrument)]
             _s._quantities = np.array(_quantities)
             setattr(s, instrument, _s)

         s._child = False
-        s.instruments = list(instruments)
+        s.instruments = list(map(str, instruments))
         s._build_arrays()

         if kwargs.get('do_adjust_means', False):
```
```diff
@@ -726,15 +744,17 @@ class RV:
         return s

     @classmethod
-    @lru_cache(maxsize=…
+    @lru_cache(maxsize=60)
     def from_KOBE_file(cls, star, **kwargs):
         assert 'KOBE' in star, f'{star} is not a KOBE star?'
         import requests
         from requests.auth import HTTPBasicAuth
         from io import BytesIO
         import tarfile
+        from time import time as pytime
         from astropy.io import fits
         from .config import config
+        from .utils import get_data_path

         try:
             config.kobe_password
```
```diff
@@ -743,23 +763,11 @@
             return

         tar = None
+        local_targz_file = os.path.join(get_data_path(), 'KOBE_fitsfiles.tar.gz')
         fits_file = f'{star}_RVs.fits'
-        resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
-                            auth=HTTPBasicAuth('kobeteam', config.kobe_password))
-
-        if resp.status_code != 200:
-            # something went wrong, try to extract the file by downloading the
-            # full tar.gz archive
-
-            logger.warning(f'could not find "{fits_file}" on server, trying to download full archive')
-            resp = requests.get('https://kobe.caha.es/internal/fitsfiles.tar.gz',
-                                auth=HTTPBasicAuth('kobeteam', config.kobe_password))
-
-            if resp.status_code != 200:
-                logger.error(f'KOBE file not found for {star}')
-                return

-…
+        if os.path.exists(local_targz_file) and os.path.getmtime(local_targz_file) > pytime() - 60*60*2:
+            tar = tarfile.open(local_targz_file)

             if fits_file not in tar.getnames():
                 logger.error(f'KOBE file not found for {star}')
```
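The freshness test above reuses a locally cached archive only if it was modified within the last two hours (`60*60*2` seconds). The same check, restated as a small standalone helper (a sketch, not arvi code):

```python
import os
from time import time

def cached_archive_is_fresh(path, max_age=60 * 60 * 2):
    """True if `path` exists and was modified less than `max_age` seconds ago."""
    return os.path.exists(path) and os.path.getmtime(path) > time() - max_age
```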
```diff
@@ -768,8 +776,36 @@
                 hdul = fits.open(tar.extractfile(fits_file))

         else:
-…
-…
+            resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
+                                auth=HTTPBasicAuth('kobeteam', config.kobe_password))
+
+            if resp.status_code != 200:
+                # something went wrong, try to extract the file by downloading the
+                # full tar.gz archive
+
+                logger.warning(f'could not find "{fits_file}" on server, trying to download full archive')
+                resp = requests.get('https://kobe.caha.es/internal/fitsfiles.tar.gz',
+                                    auth=HTTPBasicAuth('kobeteam', config.kobe_password))
+
+                if resp.status_code != 200:
+                    logger.error(f'KOBE file not found for {star}')
+                    return
+
+                # save tar.gz file for later
+                with open(local_targz_file, 'wb') as tg:
+                    tg.write(resp.content)
+
+                tar = tarfile.open(fileobj=BytesIO(resp.content))
+
+                if fits_file not in tar.getnames():
+                    logger.error(f'KOBE file not found for {star}')
+                    return
+
+                hdul = fits.open(tar.extractfile(fits_file))
+
+            else:
+                # found the file on the server, read it directly
+                hdul = fits.open(BytesIO(resp.content))

         s = cls(star, _child=True)

```
```diff
@@ -780,26 +816,47 @@
         s.vrad_preNZP = hdul[1].data['RVd']
         s.vrad_preNZP_err = hdul[1].data['eRVd']

+        s.fwhm = hdul[1].data['FWHM']
+        s.fwhm_err = hdul[1].data['eFWHM']
+
+        s.crx = hdul[1].data['CRX']
+        s.crx_err = hdul[1].data['eCRX']
+        s.dlw = hdul[1].data['DLW']
+        s.dlw_err = hdul[1].data['eDLW']
+        s.contrast = hdul[1].data['CONTRAST']
+        s.contrast_err = hdul[1].data['eCONTRAST']
+        s.bispan = hdul[1].data['BIS']
+        s.bispan_err = hdul[1].data['eBIS']
+
+
         s.drift = hdul[1].data['drift']
         s.drift_err = hdul[1].data['e_drift']

         s.nzp = hdul[1].data['NZP']
         s.nzp_err = hdul[1].data['eNZP']

+        s.texp = hdul[1].data['ExpTime']
         s.berv = hdul[1].data['BERV']
+        s.units = 'km/s'

+        s.obs = np.ones_like(s.time, dtype=int)
         s.mask = np.full_like(s.time, True, dtype=bool)
         s.instruments = ['CARMENES']
+        s._quantities = np.array(['berv', ])

         # so meta!
         setattr(s, 'CARMENES', s)

         s._kobe_result = hdul[1].data

+        s.mask = s._kobe_result['rvflag']
+        s._propagate_mask_changes()
+
         if tar is not None:
             tar.close()
         hdul.close()

+        s._child = False
         return s

```
```diff
@@ -1190,6 +1247,13 @@
         if (self.time < bjd).any():
             ind = np.where(self.time < bjd)[0]
             self.remove_point(ind)
+
+    def remove_between_bjds(self, bjd1, bjd2):
+        """ Remove observations between two BJDs """
+        to_remove = (self.time > bjd1) & (self.time < bjd2)
+        if to_remove.any():
+            ind = np.where(to_remove)[0]
+            self.remove_point(ind)

     def choose_n_points(self, n, seed=None, instrument=None):
         """ Randomly choose `n` observations and mask out the remaining ones
```
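A usage sketch for the new method, assuming `s` is a loaded `arvi.RV` object; note that both bounds are strict, so points exactly at `bjd1` or `bjd2` are kept:

```python
s.remove_between_bjds(2459000.0, 2459100.0)  # drops points with 2459000 < t < 2459100
```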
```diff
@@ -1700,8 +1764,21 @@
             s = getattr(self, inst)
             s.vrad *= factor
             s.svrad *= factor
-…
-…
+            try:
+                s.fwhm *= factor
+                s.fwhm_err *= factor
+            except AttributeError:
+                pass
+
+            for q in (
+                'bispan',
+                'nzp', 'vrad_preNZP',
+            ):
+                try:
+                    setattr(s, q, getattr(s, q) * factor)
+                    setattr(s, f'{q}_err', getattr(s, f'{q}_err') * factor)
+                except AttributeError:
+                    pass

         self._build_arrays()
         self.units = new_units
```
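The unit-conversion loop above scales each optional quantity only when the instrument actually carries it, using `getattr`/`setattr` inside a `try`/`except AttributeError`. A minimal standalone sketch of that scale-if-present pattern (not arvi code):

```python
class Series:
    pass

s = Series()
s.bispan, s.bispan_err = 1.0, 0.1  # quantity present; nzp/vrad_preNZP absent
factor = 1000                       # e.g. km/s -> m/s

for q in ('bispan', 'nzp', 'vrad_preNZP'):
    try:
        setattr(s, q, getattr(s, q) * factor)
        setattr(s, f'{q}_err', getattr(s, f'{q}_err') * factor)
    except AttributeError:
        pass  # quantity missing for this series: skip it

print(s.bispan, s.bispan_err)  # 1000.0 100.0
```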
arvi-0.1.23/arvi/programs.py
DELETED

```diff
@@ -1,159 +0,0 @@
-import os
-import multiprocessing
-from functools import partial
-from itertools import chain
-from collections import namedtuple
-from tqdm import tqdm
-# import numpy as np
-
-from .setup_logger import logger
-from .timeseries import RV
-
-__all__ = ['ESPRESSO_GTO']
-
-path = os.path.join(os.path.dirname(__file__), 'data')
-
-
-def get_star(star, instrument=None):
-    return RV(star, instrument=instrument,
-              _raise_on_error=False, verbose=False, load_extra_data=False)
-
-
-class LazyRV:
-    def __init__(self, stars: list, instrument: str = None,
-                 _parallel_limit=10):
-        self.stars = stars
-        if isinstance(self.stars, str):
-            self.stars = [self.stars]
-        self.instrument = instrument
-        self._saved = None
-        self._parallel_limit = _parallel_limit
-
-    @property
-    def N(self):
-        return len(self.stars)
-
-    def __repr__(self):
-        return f"RV({self.N} stars)"
-
-    def _get(self):
-        if self.N > self._parallel_limit:
-            # logger.info('Querying DACE...')
-            _get_star = partial(get_star, instrument=self.instrument)
-            with multiprocessing.Pool() as pool:
-                result = list(tqdm(pool.imap(_get_star, self.stars),
-                                   total=self.N, unit='star', desc='Querying DACE'))
-            # result = pool.map(get_star, self.stars)
-        else:
-            result = []
-            logger.info('Querying DACE...')
-            pbar = tqdm(self.stars, total=self.N, unit='star')
-            for star in pbar:
-                pbar.set_description(star)
-                result.append(get_star(star, self.instrument))
-
-        return result
-
-        # # use a with statement to ensure threads are cleaned up promptly
-        # with concurrent.futures.ThreadPoolExecutor(max_workers=8) as pool:
-        #     star_to_RV = {
-        #         pool.submit(get_star, star, self.instrument): star
-        #         for star in self.stars
-        #     }
-        #     logger.info('Querying DACE...')
-        #     pbar = tqdm(concurrent.futures.as_completed(star_to_RV),
-        #                 total=self.N, unit='star')
-        #     for future in pbar:
-        #         star = star_to_RV[future]
-        #         pbar.set_description(star)
-        #         try:
-        #             result.append(future.result())
-        #         except ValueError:
-        #             print(f'{star} generated an exception')
-        #             result.append(None)
-        # return result
-
-    def __iter__(self):
-        return self._get()
-
-    def __call__(self):
-        if not self._saved:
-            self._saved = self._get()
-        return self._saved
-
-
-# sorted by spectral type
-WG1_stars = [
-    "HIP11533",   # F2
-    "HD63077",    # F9
-    "HD102365",   # G2
-    "HD160691",   # G3
-    "HD20794",    # G6
-    "HD115617",   # G6
-    "HD10700",    # G8
-    "HD69830",    # G8
-    "HD26965",    # K0
-    "HD100623",   # K0
-    "HD154088",   # K0
-    "HD72673",    # K1
-    "HD4628",     # K2
-    "HD191408",   # K2
-    "HD192310",   # K2
-    "HD16160",    # K3
-    "HD32147",    # K3
-    "HD22496",    # K5
-    "HIP93069",   # K5
-    "HD209100",   # K5
-    "HIP23708",   # K7
-    "HD152606",   # K8
-    "HD260655",   # M0
-    "HIP40239",   # M0
-    "HD304636",   # M0
-    "HIP85647",   # M0
-    "HD165222",   # M0
-    "HD191849",   # M0
-    "GJ191",      # M1
-    "HD42581",    # M1
-    "HIP42748",   # M1
-    "HIP65859",   # M1
-    "HIP86287",   # M1
-    "HD176029",   # M1
-    "GJ825",      # M1
-    "HD225213",   # M2
-    "GJ54",       # M2
-    "HIP22627",   # M2
-    "HIP51317",   # M2
-    "HD119850",   # M2
-    "GJ832",      # M2
-    "HD217987",   # M2
-    "GJ273",      # M3
-    "GJ388",      # M3
-    "HIP62452",   # M3
-    "HIP67164",   # M3
-    "HIP71253",   # M3
-    "GJ628",      # M3
-    "HIP85523",   # M3
-    "HIP86214",   # M3
-    "HIP92403",   # M3
-    "HIP113020",  # M3
-    "HIP1242",    # M4
-    "GJ83.1",     # M4
-    "HIP53020",   # M4
-    "Ross128",    # M4
-    "GJ699",      # M4
-    "GJ1002",     # M5
-    "GJ1061",     # M5
-    "GJ3618",     # M5
-    "Proxima",    # M5
-    "GJ406",      # M6
-]
-
-ESPRESSO_GTO_nt = namedtuple('ESPRESSO_GTO', ['WG1', 'WG2', 'WG3'])
-ESPRESSO_GTO = ESPRESSO_GTO_nt(
-    WG1=LazyRV(WG1_stars, instrument='ESPRESSO'),
-    WG2=LazyRV([], instrument='ESPRESSO'),  # TODO
-    WG3=LazyRV([], instrument='ESPRESSO'),  # TODO
-)
-ESPRESSO_GTO.WG1.__doc__ = 'RV observations for all WG1 targets. Call ESPRESSO_GTO.WG1() to load them.'
-ESPRESSO_GTO.WG2.__doc__ = 'RV observations for all WG2 targets. Call ESPRESSO_GTO.WG2() to load them.'
-ESPRESSO_GTO.WG3.__doc__ = 'RV observations for all WG3 targets. Call ESPRESSO_GTO.WG3() to load them.'
```