arvi 0.1.23__py3-none-any.whl → 0.1.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of arvi might be problematic.
- arvi/dace_wrapper.py +114 -39
- arvi/kima_wrapper.py +18 -8
- arvi/programs.py +100 -12
- arvi/spectra.py +6 -6
- arvi/timeseries.py +109 -32
- arvi/translations.py +5 -0
- {arvi-0.1.23.dist-info → arvi-0.1.25.dist-info}/METADATA +2 -2
- {arvi-0.1.23.dist-info → arvi-0.1.25.dist-info}/RECORD +11 -11
- {arvi-0.1.23.dist-info → arvi-0.1.25.dist-info}/WHEEL +1 -1
- {arvi-0.1.23.dist-info → arvi-0.1.25.dist-info}/LICENSE +0 -0
- {arvi-0.1.23.dist-info → arvi-0.1.25.dist-info}/top_level.txt +0 -0
arvi/dace_wrapper.py
CHANGED
@@ -1,7 +1,9 @@
 import os
+import sys
 import tarfile
 import collections
 from functools import lru_cache
+from itertools import islice
 import numpy as np
 from dace_query import DaceClass
 from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
@@ -22,14 +24,15 @@ def load_spectroscopy() -> SpectroscopyClass:
     return default_Spectroscopy

 @lru_cache()
-def get_dace_id(star):
+def get_dace_id(star, verbose=True):
     filters = {"obj_id_catname": {"equal": [star]}}
     try:
-        with
+        with all_logging_disabled():
             r = load_spectroscopy().query_database(filters=filters, limit=1)
         return r['obj_id_daceid'][0]
     except KeyError:
-
+        if verbose:
+            logger.error(f"Could not find DACE ID for {star}")
         raise ValueError from None

 def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
@@ -55,15 +58,19 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
             pipelines = [pipelines[i]]

     if latest_pipeline:
+        npipe = len(pipelines)
         if 'NIRPS' in inst and any(['LBL' in p for p in pipelines]):
             # TODO: correctly load both CCF and LBL
             pipelines = [pipelines[1]]
+        if 'HARPS' in inst and npipe > 1 and pipelines[1] == pipelines[0] + '-EGGS':
+            pipelines = pipelines[:2]
         else:
             pipelines = [pipelines[0]]

-        if verbose and
+        if verbose and npipe > 1:
             logger.info(f'selecting latest pipeline ({pipelines[0]}) for {inst}')

+
     for pipe in pipelines:
         modes = [m for m in result[inst][pipe].keys()]
@@ -80,16 +87,24 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):

         # HARPS15 observations should not be separated by 'mode' if some are
         # done together with NIRPS
-        if 'HARPS15' in inst
-
-
-
-
-
-
-            ((inst, pipe, m0), data)
-
-
+        if 'HARPS15' in inst:
+            if 'HARPS+NIRPS' in modes:
+                m0 = modes[0]
+                data = {
+                    k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                    for k in result[inst][pipe][m0].keys()
+                }
+                arrays.append( ((str(inst), str(pipe), str(m0)), data) )
+                continue
+
+            if 'EGGS+NIRPS' in modes:
+                m0 = modes[0]
+                data = {
+                    k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                    for k in result[inst][pipe][m0].keys()
+                }
+                arrays.append( ((str(inst + '_EGGS'), str(pipe), str(m0)), data) )
+                continue

         for mode in modes:
             if 'rjd' not in result[inst][pipe][mode]:
@@ -97,39 +112,61 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
                 raise ValueError

             arrays.append(
-                ((inst, pipe, mode), result[inst][pipe][mode])
+                ((str(inst), str(pipe), str(mode)), result[inst][pipe][mode])
             )

     return arrays

-def get_observations_from_instrument(star, instrument, main_id=None):
+def get_observations_from_instrument(star, instrument, main_id=None, verbose=True):
     """ Query DACE for all observations of a given star and instrument

     Args:
-        star (str):
-
-
+        star (str):
+            name of the star
+        instrument (str):
+            instrument name
+        main_id (str, optional):
+            Simbad main id of target to query DACE id. Defaults to None.
+        verbose (bool, optional):
+            whether to print warnings. Defaults to True.

     Raises:
-        ValueError:
+        ValueError:
+            If query for DACE id fails

     Returns:
-        dict:
+        dict:
+            dictionary with data from DACE
     """
+    Spectroscopy = load_spectroscopy()
+    found_dace_id = False
     try:
-        dace_id = get_dace_id(star)
+        dace_id = get_dace_id(star, verbose=verbose)
+        found_dace_id = True
     except ValueError as e:
         if main_id is not None:
-
-
-
+            try:
+                dace_id = get_dace_id(main_id, verbose=verbose)
+                found_dace_id = True
+            except ValueError:
+                pass
+
+    if not found_dace_id:
+        try:
+            with all_logging_disabled():
+                result = Spectroscopy.get_timeseries(target=star,
+                                                     sorted_by_instrument=True,
+                                                     output_format='numpy')
+            return result
+        except TypeError:
+            msg = f'no {instrument} observations for {star}'
+            raise ValueError(msg) from None

-    Spectroscopy = load_spectroscopy()
     filters = {
         "ins_name": {"contains": [instrument]},
         "obj_id_daceid": {"contains": [dace_id]}
     }
-    with
+    with all_logging_disabled():
         result = Spectroscopy.query_database(filters=filters)

     if len(result) == 0:
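Aside: the new HARPS15 branch above merges the per-mode dictionaries of arrays into a single dictionary keyed on the first mode. A minimal sketch of that concatenation pattern, on toy data rather than the real DACE result structure:

import numpy as np

# toy stand-in for result[inst][pipe]: one dict of arrays per mode
result = {
    'HARPS': {'rjd': np.array([1., 2.]), 'rv': np.array([10., 11.])},
    'HARPS+NIRPS': {'rjd': np.array([3.]), 'rv': np.array([12.])},
}

modes = list(result.keys())
m0 = modes[0]
# concatenate every column across modes, as the diff does
data = {
    k: np.concatenate([result[m][k] for m in modes])
    for k in result[m0].keys()
}
print(data['rjd'])  # [1. 2. 3.]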
@@ -218,7 +255,7 @@ def get_observations(star, instrument=None, main_id=None, verbose=True):
             raise ValueError(msg) from None
     else:
         try:
-            result = get_observations_from_instrument(star, instrument, main_id)
+            result = get_observations_from_instrument(star, instrument, main_id, verbose)
         except ValueError:
             msg = f'no {instrument} observations for {star}'
             raise ValueError(msg) from None
@@ -323,16 +360,29 @@ def check_existing(output_directory, files, type):

     return np.array(missing)

-def download(files, type, output_directory):
+def download(files, type, output_directory, output_filename=None, quiet=True, pbar=None):
     """ Download files from DACE """
     Spectroscopy = load_spectroscopy()
-
+    if isinstance(files, str):
+        files = [files]
+    if quiet:
+        with all_logging_disabled():
+            Spectroscopy.download_files(files, file_type=type.lower(),
+                                        output_directory=output_directory,
+                                        output_filename=output_filename)
+    else:
         Spectroscopy.download_files(files, file_type=type.lower(),
-                                    output_directory=output_directory
+                                    output_directory=output_directory,
+                                    output_filename=output_filename)
+    if pbar is not None:
+        pbar.update()

-
+
+def extract_fits(output_directory, filename=None):
     """ Extract fits files from tar.gz file """
-
+    if filename is None:
+        filename = 'spectroscopy_download.tar.gz'
+    file = os.path.join(output_directory, filename)
     with tarfile.open(file, "r") as tar:
         files = []
         for member in tar.getmembers():
@@ -390,7 +440,7 @@ def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_le


 def do_download_filetype(type, raw_files, output_directory, clobber=False,
-                         verbose=True, chunk_size=20):
+                         verbose=True, chunk_size=20, parallel_limit=30):
     """ Download CCFs / S1Ds / S2Ds from DACE """
     raw_files = np.atleast_1d(raw_files)
@@ -410,7 +460,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,

     # avoid an empty chunk
     if chunk_size > n:
-        chunk_size = n
+        chunk_size = n

     if verbose:
         if chunk_size < n:
@@ -422,11 +472,36 @@
         msg = f"downloading {n} {type}s into '{output_directory}'..."
         logger.info(msg)

-
-
-
-
+    if n < parallel_limit:
+        iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
+        for files in tqdm(iterator, total=len(iterator)):
+            download(files, type, output_directory, quiet=False)
+        extract_fits(output_directory)

+    else:
+        def chunker(it, size):
+            iterator = iter(it)
+            while chunk := list(islice(iterator, size)):
+                yield chunk
+
+        chunks = list(chunker(raw_files, chunk_size))
+        pbar = tqdm(total=len(chunks))
+        it1 = [
+            (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', True, pbar)
+            for i, files in enumerate(chunks)
+        ]
+        it2 = [(output_directory, f'spectroscopy_download{i+1}.tar.gz') for i in range(len(chunks))]
+
+        # import multiprocessing as mp
+        # with mp.Pool(4) as pool:
+        from multiprocessing.pool import ThreadPool
+
+        with ThreadPool(4) as pool:
+            pool.starmap(download, it1)
+            pool.starmap(extract_fits, it2)
+        print('')
+
+    sys.stdout.flush()
     logger.info('extracted .fits files')
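The parallel path introduced above splits the file list into chunks and hands each chunk to a ThreadPool. A self-contained sketch of that batching, with a stub in place of the real DACE download call (the stub name and file names are illustrative only):

from itertools import islice
from multiprocessing.pool import ThreadPool

def chunker(it, size):
    # yield successive lists of at most `size` items (same helper as in the diff)
    iterator = iter(it)
    while chunk := list(islice(iterator, size)):
        yield chunk

def fake_download(files, archive_name):
    # stand-in for arvi's download(); just reports what it would fetch
    print(f'{archive_name}: {len(files)} files')

files = [f'raw_{i:03d}.fits' for i in range(95)]
chunks = list(chunker(files, 20))
args = [(chunk, f'spectroscopy_download{i+1}.tar.gz') for i, chunk in enumerate(chunks)]

with ThreadPool(4) as pool:
    pool.starmap(fake_download, args)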
arvi/kima_wrapper.py
CHANGED
@@ -25,10 +25,11 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
     if not kima_available:
         raise ImportError('kima not available, please install with `pip install kima`')

-
-
-
-
+    instruments = [inst for inst in self.instruments if self.NN[inst] > 1]
+    time = [getattr(self, inst).mtime for inst in instruments]
+    vrad = [getattr(self, inst).mvrad for inst in instruments]
+    err = [getattr(self, inst).msvrad for inst in instruments]
+    data = RVData(time, vrad, err, instruments=instruments)

     fix = kwargs.pop('fix', False)
     npmax = kwargs.pop('npmax', 1)
@@ -41,6 +42,13 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
     model.enforce_stability = kwargs.pop('enforce_stability', False)
     model.star_mass = kwargs.pop('star_mass', 1.0)

+    if kwargs.pop('gaussian_priors_individual_offsets', False):
+        from kima.pykima.utils import get_gaussian_priors_individual_offsets
+        model.individual_offset_prior = get_gaussian_priors_individual_offsets(data, use_std=True)
+
+    if kwargs.pop('kuma', False):
+        model.conditional.eprior = distributions.Kumaraswamy(0.867, 3.03)
+
     for k, v in priors.items():
         try:
             if 'conditional' in k:
@@ -55,9 +63,10 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
             logger.warning(msg)
             return

+    if run_directory is None:
+        run_directory = os.getcwd()
+
     if run:
-        if run_directory is None:
-            run_directory = os.getcwd()

         # TODO: use signature of kima.run to pop the correct kwargs
         # model_name = model.__class__.__name__
@@ -67,8 +76,9 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
         with chdir(run_directory):
             kima.run(model, **kwargs)

-
+    if load:
+        with chdir(run_directory):
             res = kima.load_results(model)
-
+            return data, model, res

     return data, model
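run_kima now assembles its input from per-instrument arrays, keeping only instruments with more than one observation. A rough illustration of the list-of-arrays shape it builds (plain numpy stand-ins for arvi's per-instrument attributes; the RVData constructor comes from kima and is left commented out):

import numpy as np

class FakeSeries:
    # stand-in for the per-instrument objects arvi exposes as attributes
    def __init__(self, n):
        self.mtime = np.sort(np.random.uniform(58000, 59000, n))
        self.mvrad = np.random.normal(0, 3, n)
        self.msvrad = np.full(n, 0.5)

series = {'ESPRESSO': FakeSeries(40), 'NIRPS': FakeSeries(25), 'HARPS': FakeSeries(1)}
NN = {inst: s.mtime.size for inst, s in series.items()}

# keep only instruments with more than one point, as the diff does
instruments = [inst for inst in series if NN[inst] > 1]
time = [series[inst].mtime for inst in instruments]
vrad = [series[inst].mvrad for inst in instruments]
err = [series[inst].msvrad for inst in instruments]
# with kima installed (import path assumed):
# from kima import RVData
# data = RVData(time, vrad, err, instruments=instruments)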
arvi/programs.py
CHANGED
@@ -1,8 +1,9 @@
 import os
 import multiprocessing
-from functools import partial
+from functools import partial, lru_cache
 from itertools import chain
 from collections import namedtuple
+from multiprocessing.pool import ThreadPool
 from tqdm import tqdm
 # import numpy as np

@@ -14,9 +15,9 @@ __all__ = ['ESPRESSO_GTO']
 path = os.path.join(os.path.dirname(__file__), 'data')


-def get_star(star, instrument=None):
+def get_star(star, instrument=None, verbose=False, **kwargs):
     return RV(star, instrument=instrument,
-              _raise_on_error=False, verbose=
+              _raise_on_error=False, verbose=verbose, **kwargs)


 class LazyRV:
@@ -36,21 +37,22 @@ class LazyRV:
     def __repr__(self):
         return f"RV({self.N} stars)"

-    def _get(self):
+    def _get(self, **kwargs):
         if self.N > self._parallel_limit:
             # logger.info('Querying DACE...')
-            _get_star = partial(get_star, instrument=self.instrument)
-            with
+            _get_star = partial(get_star, instrument=self.instrument, **kwargs)
+            with ThreadPool(8) as pool:
                 result = list(tqdm(pool.imap(_get_star, self.stars),
-                                   total=self.N, unit='star',
-
+                                   total=self.N, unit='star',
+                                   desc='Querying DACE (can take a while)'))
+            print('')
         else:
             result = []
-            logger.info('
+            logger.info('querying DACE...')
             pbar = tqdm(self.stars, total=self.N, unit='star')
             for star in pbar:
                 pbar.set_description(star)
-                result.append(get_star(star, self.instrument))
+                result.append(get_star(star, self.instrument, **kwargs))

         return result

@@ -73,14 +75,25 @@ class LazyRV:
         #     result.append(None)
         # return result

+    def reload(self, **kwargs):
+        self._saved = self._get(**kwargs)
+        return self._saved
+
     def __iter__(self):
         return self._get()

-    def __call__(self):
+    def __call__(self, **kwargs):
         if not self._saved:
-            self._saved = self._get()
+            self._saved = self._get(**kwargs)
         return self._saved

+    @lru_cache(maxsize=10)
+    def __getitem__(self, index):
+        star = self.stars[index]
+        if self._saved is not None:
+            return self._saved[index]
+        return get_star(star, self.instrument, verbose=True)
+

 # sorted by spectral type
 WG1_stars = [
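LazyRV defers the DACE queries until the collection is called, caches the result, and the new reload() forces a refresh. A reduced sketch of that lazy/cached behaviour, with a stub loader standing in for get_star:

class LazyList:
    # reduced version of LazyRV's caching behaviour
    def __init__(self, stars):
        self.stars = stars
        self._saved = None

    def _get(self, **kwargs):
        # stub loader standing in for get_star / the DACE queries
        return [f'RV({star})' for star in self.stars]

    def __call__(self, **kwargs):
        if not self._saved:
            self._saved = self._get(**kwargs)
        return self._saved

    def reload(self, **kwargs):
        self._saved = self._get(**kwargs)
        return self._saved

targets = LazyList(['HD10700', 'HD20794'])
print(targets())  # triggers the (stub) query once
print(targets())  # served from cache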
@@ -157,3 +170,78 @@ ESPRESSO_GTO = ESPRESSO_GTO_nt(
 ESPRESSO_GTO.WG1.__doc__ = 'RV observations for all WG1 targets. Call ESPRESSO_GTO.WG1() to load them.'
 ESPRESSO_GTO.WG2.__doc__ = 'RV observations for all WG2 targets. Call ESPRESSO_GTO.WG2() to load them.'
 ESPRESSO_GTO.WG3.__doc__ = 'RV observations for all WG3 targets. Call ESPRESSO_GTO.WG3() to load them.'
+
+
+import requests
+
+def _get_NIRPS_GTO_stars(WP=1):
+    from io import StringIO
+    import numpy as np
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/113/NIRPS/P113_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P113 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/114/NIRPS/P114_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P114 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/115/NIRPS/P115_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P115 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    def _get_stars_period(stars, WP):
+        stars = np.delete(stars, stars=='')
+
+        stars = np.char.replace(stars, '_', ' ')
+        stars = np.char.replace(stars, "Proxima Centauri", "Proxima")
+        stars = np.char.replace(stars, "Barnard's star", "GJ699")
+        stars = np.char.replace(stars, "Teegarden's Star", 'Teegarden')
+
+        if WP in (1, 'WP1'):
+            wp1_indices = slice(np.where(stars == 'WP1')[0][0] + 1, np.where(stars == 'WP2')[0][0])
+            return stars[wp1_indices]
+        elif WP == 2:
+            wp2_indices = slice(np.where(stars == 'WP2')[0][0] + 1, np.where(stars == 'WP3')[0][0])
+            return stars[wp2_indices]
+        elif WP == 3:
+            wp3_indices = slice(np.where(stars == 'WP3')[0][0] + 1, np.where(stars == 'Other Science 1')[0][0])
+            return stars[wp3_indices]
+        elif WP == 'OS1':
+            os1_indices = slice(np.where(stars == 'Other Science 1')[0][0] + 1, np.where(stars == 'Other Science 2')[0][0])
+            return stars[os1_indices]
+        elif WP == 'OS2':
+            os2_indices = slice(np.where(stars == 'Other Science 2')[0][0] + 1, None)
+            stars = np.char.replace(stars, 'MMU', 'No')
+            stars = np.char.replace(stars, 'Cl*', '')
+            return stars[os2_indices]
+
+    stars_P113 = _get_stars_period(stars_P113, WP)
+    stars_P114 = _get_stars_period(stars_P114, WP)
+    stars_P115 = _get_stars_period(stars_P115, WP)
+    return np.union1d(np.union1d(stars_P113, stars_P114), stars_P115)
+
+try:
+    NIRPS_GTO_WP1_stars = _get_NIRPS_GTO_stars(WP=1)
+    NIRPS_GTO_WP2_stars = _get_NIRPS_GTO_stars(WP=2)
+    NIRPS_GTO_WP3_stars = _get_NIRPS_GTO_stars(WP=3)
+    NIRPS_GTO_OS1_stars = _get_NIRPS_GTO_stars(WP='OS1')
+    NIRPS_GTO_OS2_stars = _get_NIRPS_GTO_stars(WP='OS2')
+except requests.ConnectionError:
+    from .setup_logger import logger
+    logger.error('Cannot download NIRPS GTO protected target list')
+else:
+    NIRPS_GTO_nt = namedtuple('NIRPS_GTO', ['WP1', 'WP2', 'WP3', 'OS1', 'OS2'])
+    NIRPS_GTO_nt.__doc__ = 'RV observations for all NIRPS GTO targets. See NIRPS_GTO.WP1, NIRPS_GTO.WP2, ...'
+    NIRPS_GTO = NIRPS_GTO_nt(
+        WP1=LazyRV(NIRPS_GTO_WP1_stars, instrument='NIRPS'),
+        WP2=LazyRV(NIRPS_GTO_WP2_stars, instrument='NIRPS'),
+        WP3=LazyRV(NIRPS_GTO_WP3_stars, instrument='NIRPS'),
+        OS1=LazyRV(NIRPS_GTO_OS1_stars, instrument='NIRPS'),
+        OS2=LazyRV(NIRPS_GTO_OS2_stars, instrument='NIRPS'),
+    )
+    NIRPS_GTO.WP1.__doc__ = 'RV observations for all WP1 targets. Call NIRPS_GTO.WP1() to load them.'
+    NIRPS_GTO.WP2.__doc__ = 'RV observations for all WP2 targets. Call NIRPS_GTO.WP2() to load them.'
+    NIRPS_GTO.WP3.__doc__ = 'RV observations for all WP3 targets. Call NIRPS_GTO.WP3() to load them.'
+    NIRPS_GTO.OS1.__doc__ = 'RV observations for all OS1 targets. Call NIRPS_GTO.OS1() to load them.'
+    NIRPS_GTO.OS2.__doc__ = 'RV observations for all OS2 targets. Call NIRPS_GTO.OS2() to load them.'
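The added _get_NIRPS_GTO_stars slices a single CSV column into sections using marker rows ('WP1', 'WP2', 'Other Science 1', ...). The same slicing in isolation, on a toy array:

import numpy as np

# toy first column: marker rows followed by their targets
stars = np.array(['WP1', 'Proxima', 'GJ699', 'WP2', 'AU Mic', 'WP3', 'TOI-123'])

def section(stars, start, stop=None):
    # slice between a marker row and the next one (or the end)
    i0 = np.where(stars == start)[0][0] + 1
    i1 = np.where(stars == stop)[0][0] if stop is not None else None
    return stars[slice(i0, i1)]

print(section(stars, 'WP1', 'WP2'))  # ['Proxima' 'GJ699']
print(section(stars, 'WP3'))         # ['TOI-123']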
arvi/spectra.py
CHANGED
@@ -72,7 +72,7 @@ def fit_gaussian_to_line(wave, flux, center_wavelength, around=0.15 * u.angstrom
     ]).T

     try:
-        popt, pcov = curve_fit(gaussian, w, f, p0=[-
+        popt, pcov = curve_fit(gaussian, w, f, p0=[-np.ptp(f), center_wavelength.value, 0.1, f.max()],
                                bounds=(lower, upper))
     except RuntimeError as e:
         logger.warning(f'fit_gaussian_to_line: {e}')
@@ -115,7 +115,7 @@ def detrend(w, f):
 def build_master(self, limit=None, plot=True):
     files = sorted(glob(f'{self.star}_downloads/*S1D_A.fits'))
     if self.verbose:
-        logger.info(f'
+        logger.info(f'found {len(files)} S1D files')

     files = files[:limit]

@@ -168,8 +168,8 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
     ]

     if self.verbose:
-        logger.info(f'
-        logger.info('
+        logger.info(f'found {len(lines)} lines in linelist')
+        logger.info('measuring EWs...')

     EW = []
     pbar = tqdm(linelist)
@@ -183,14 +183,14 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
     EW = np.array(EW)[~np.isnan(EW)]

     if self.verbose:
-        logger.info('
+        logger.info('determining stellar parameters (can take a few minutes)...')

     callback = lambda p, r, A: print('current parameters:', p)
     result = Korg.Fit.ews_to_stellar_parameters(lines, EW, callback=callback)
     par, stat_err, sys_err = result

     if self.verbose:
-        logger.info(f'
+        logger.info(f'best-fit stellar parameters:')
         logger.info(f' Teff: {par[0]:.0f} ± {sys_err[0]:.0f} K')
         logger.info(f' logg: {par[1]:.2f} ± {sys_err[1]:.2f} dex')
         logger.info(f' m/H : {par[3]:.2f} ± {sys_err[3]:.2f} dex')
arvi/timeseries.py
CHANGED
@@ -193,7 +193,7 @@ class RV:
         time_stamp = datetime.now(timezone.utc) #.isoformat().split('.')[0]
         self._last_dace_query = time_stamp

-        _replacements = (('-', '_'), ('.', '_'), ('__', '_'))
+        _replacements = (('-', '_'), ('.', '_'), (' ', '_'), ('__', '_'))
         def do_replacements(s):
             for a, b in _replacements:
                 s = s.replace(a, b)
@@ -444,7 +444,7 @@ class RV:

         s.instruments = [inst]
         s.pipelines = [pipe]
-        s.modes = [mode]
+        s.modes = [str(mode)]

         return s

@@ -598,29 +598,47 @@ class RV:
             _quantities.append('rhk')
             _quantities.append('rhk_err')

-
-
+            # try to find BISPAN and uncertainty
+            if (v := find_column(data, ['bispan'])) is not False:
+                _s.bispan = v
+                _s.bispan_err = np.full_like(time, np.nan)
+                if (sv := find_column(data, ['sbispan'])) is not False:
+                    _s.bispan_err = sv
+            else:
+                _s.bispan = np.full_like(time, np.nan)
+                _s.bispan_err = np.full_like(time, np.nan)
+
+            _quantities.append('bispan')
+            _quantities.append('bispan_err')
+
+            # try to find BERV
+            if (v := find_column(data, ['berv', 'HIERARCH ESO QC BERV'])) is not False:
+                _s.berv = v
+            else:
+                _s.berv = np.full_like(time, np.nan)
+            _quantities.append('berv')

             # other quantities, but all NaNs
-            for q in ['
+            for q in ['caindex', 'ccf_asym', 'contrast', 'haindex', 'naindex', 'sindex']:
                 setattr(_s, q, np.full_like(time, np.nan))
                 setattr(_s, q + '_err', np.full_like(time, np.nan))
                 _quantities.append(q)
                 _quantities.append(q + '_err')
-            for q in ['
+            for q in ['texp', ]:
                 setattr(_s, q, np.full_like(time, np.nan))
                 _quantities.append(q)
             for q in ['ccf_mask', 'date_night', 'prog_id', 'raw_file', 'pub_reference']:
                 setattr(_s, q, np.full(time.size, ''))
                 _quantities.append(q)
-            for q in ['drs_qc']:
+            for q in ['drs_qc', ]:
                 setattr(_s, q, np.full(time.size, True))
                 _quantities.append(q)

             _s.extra_fields = ExtraFields()
-            for
-            if
-
+            for name in data.dtype.names:
+                if name not in _quantities:
+                    name_ = name.replace(' ', '_')
+                    setattr(_s.extra_fields, name_, data[name])
             # _quantities.append(field)

         #! end hack
@@ -628,12 +646,12 @@ class RV:
             _s.mask = np.ones_like(time, dtype=bool)
             _s.obs = np.full_like(time, i + 1)

-            _s.instruments = [instrument]
+            _s.instruments = [str(instrument)]
             _s._quantities = np.array(_quantities)
             setattr(s, instrument, _s)

         s._child = False
-        s.instruments = list(instruments)
+        s.instruments = list(map(str, instruments))
         s._build_arrays()

         if kwargs.get('do_adjust_means', False):
@@ -726,15 +744,17 @@ class RV:
         return s

     @classmethod
-    @lru_cache(maxsize=
+    @lru_cache(maxsize=60)
     def from_KOBE_file(cls, star, **kwargs):
         assert 'KOBE' in star, f'{star} is not a KOBE star?'
         import requests
         from requests.auth import HTTPBasicAuth
         from io import BytesIO
         import tarfile
+        from time import time as pytime
         from astropy.io import fits
         from .config import config
+        from .utils import get_data_path

         try:
             config.kobe_password
@@ -743,23 +763,11 @@ class RV:
             return

         tar = None
+        local_targz_file = os.path.join(get_data_path(), 'KOBE_fitsfiles.tar.gz')
         fits_file = f'{star}_RVs.fits'
-        resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
-                            auth=HTTPBasicAuth('kobeteam', config.kobe_password))
-
-        if resp.status_code != 200:
-            # something went wrong, try to extract the file by downloading the
-            # full tar.gz archive
-
-            logger.warning(f'could not find "{fits_file}" on server, trying to download full archive')
-            resp = requests.get('https://kobe.caha.es/internal/fitsfiles.tar.gz',
-                                auth=HTTPBasicAuth('kobeteam', config.kobe_password))
-
-            if resp.status_code != 200:
-                logger.error(f'KOBE file not found for {star}')
-                return

-
+        if os.path.exists(local_targz_file) and os.path.getmtime(local_targz_file) > pytime() - 60*60*2:
+            tar = tarfile.open(local_targz_file)

         if fits_file not in tar.getnames():
             logger.error(f'KOBE file not found for {star}')
@@ -768,8 +776,36 @@ class RV:
             hdul = fits.open(tar.extractfile(fits_file))

         else:
-
-
+            resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
+                                auth=HTTPBasicAuth('kobeteam', config.kobe_password))
+
+            if resp.status_code != 200:
+                # something went wrong, try to extract the file by downloading the
+                # full tar.gz archive
+
+                logger.warning(f'could not find "{fits_file}" on server, trying to download full archive')
+                resp = requests.get('https://kobe.caha.es/internal/fitsfiles.tar.gz',
+                                    auth=HTTPBasicAuth('kobeteam', config.kobe_password))
+
+                if resp.status_code != 200:
+                    logger.error(f'KOBE file not found for {star}')
+                    return
+
+                # save tar.gz file for later
+                with open(local_targz_file, 'wb') as tg:
+                    tg.write(resp.content)
+
+                tar = tarfile.open(fileobj=BytesIO(resp.content))
+
+                if fits_file not in tar.getnames():
+                    logger.error(f'KOBE file not found for {star}')
+                    return
+
+                hdul = fits.open(tar.extractfile(fits_file))
+
+            else:
+                # found the file on the server, read it directly
+                hdul = fits.open(BytesIO(resp.content))

         s = cls(star, _child=True)

@@ -780,26 +816,47 @@ class RV:
         s.vrad_preNZP = hdul[1].data['RVd']
         s.vrad_preNZP_err = hdul[1].data['eRVd']

+        s.fwhm = hdul[1].data['FWHM']
+        s.fwhm_err = hdul[1].data['eFWHM']
+
+        s.crx = hdul[1].data['CRX']
+        s.crx_err = hdul[1].data['eCRX']
+        s.dlw = hdul[1].data['DLW']
+        s.dlw_err = hdul[1].data['eDLW']
+        s.contrast = hdul[1].data['CONTRAST']
+        s.contrast_err = hdul[1].data['eCONTRAST']
+        s.bispan = hdul[1].data['BIS']
+        s.bispan_err = hdul[1].data['eBIS']
+
+
         s.drift = hdul[1].data['drift']
         s.drift_err = hdul[1].data['e_drift']

         s.nzp = hdul[1].data['NZP']
         s.nzp_err = hdul[1].data['eNZP']

+        s.texp = hdul[1].data['ExpTime']
         s.berv = hdul[1].data['BERV']
+        s.units = 'km/s'

+        s.obs = np.ones_like(s.time, dtype=int)
         s.mask = np.full_like(s.time, True, dtype=bool)
         s.instruments = ['CARMENES']
+        s._quantities = np.array(['berv', ])

         # so meta!
         setattr(s, 'CARMENES', s)

         s._kobe_result = hdul[1].data

+        s.mask = s._kobe_result['rvflag']
+        s._propagate_mask_changes()
+
         if tar is not None:
             tar.close()
         hdul.close()

+        s._child = False
         return s

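from_KOBE_file now reuses a locally saved archive when it is less than two hours old instead of re-downloading on every call. The freshness test reduces to an os.path.getmtime comparison; a small sketch with a hypothetical cache path:

import os
import time

def is_fresh(path, max_age_s=2 * 60 * 60):
    # reuse a cached file only if it exists and is younger than max_age_s
    return os.path.exists(path) and os.path.getmtime(path) > time.time() - max_age_s

cache = '/tmp/KOBE_fitsfiles.tar.gz'  # hypothetical location
if is_fresh(cache):
    print('reusing local archive')
else:
    print('downloading again')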
@@ -1190,6 +1247,13 @@
         if (self.time < bjd).any():
             ind = np.where(self.time < bjd)[0]
             self.remove_point(ind)
+
+    def remove_between_bjds(self, bjd1, bjd2):
+        """ Remove observations between two BJDs """
+        to_remove = (self.time > bjd1) & (self.time < bjd2)
+        if to_remove.any():
+            ind = np.where(to_remove)[0]
+            self.remove_point(ind)

     def choose_n_points(self, n, seed=None, instrument=None):
         """ Randomly choose `n` observations and mask out the remaining ones
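remove_between_bjds converts a boolean window selection into indices for remove_point. The selection itself, in isolation on a toy time array:

import numpy as np

time = np.array([2459000.5, 2459010.5, 2459020.5, 2459030.5])
bjd1, bjd2 = 2459005.0, 2459025.0

# same selection the method performs before calling remove_point
to_remove = (time > bjd1) & (time < bjd2)
ind = np.where(to_remove)[0]
print(ind)  # [1 2]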
@@ -1700,8 +1764,21 @@
             s = getattr(self, inst)
             s.vrad *= factor
             s.svrad *= factor
-
-
+            try:
+                s.fwhm *= factor
+                s.fwhm_err *= factor
+            except AttributeError:
+                pass
+
+            for q in (
+                'bispan',
+                'nzp', 'vrad_preNZP',
+            ):
+                try:
+                    setattr(s, q, getattr(s, q) * factor)
+                    setattr(s, f'{q}_err', getattr(s, f'{q}_err') * factor)
+                except AttributeError:
+                    pass

         self._build_arrays()
         self.units = new_units
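The units conversion now rescales optional quantities too, silently skipping any attribute a given series lacks. The getattr/setattr loop guarded by AttributeError is the core pattern; a compact sketch with made-up attribute values:

class Series:
    pass

s = Series()
s.vrad, s.svrad = 1.0, 0.1            # always present
s.bispan, s.bispan_err = 0.25, 0.125  # optional quantity

factor = 1000  # e.g. km/s -> m/s
for q in ('bispan', 'nzp'):  # 'nzp' is absent here and silently skipped
    try:
        setattr(s, q, getattr(s, q) * factor)
        setattr(s, f'{q}_err', getattr(s, f'{q}_err') * factor)
    except AttributeError:
        pass

print(s.bispan, s.bispan_err)  # 250.0 125.0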
arvi/translations.py
CHANGED
{arvi-0.1.23.dist-info → arvi-0.1.25.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: arvi
-Version: 0.1.23
+Version: 0.1.25
 Summary: The Automated RV Inspector
 Author-email: João Faria <joao.faria@unige.ch>
 License: MIT
@@ -10,6 +10,7 @@ Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: mock; python_version < "3.3"
 Requires-Dist: numpy
 Requires-Dist: scipy
 Requires-Dist: matplotlib
@@ -19,7 +20,6 @@ Requires-Dist: loguru
 Requires-Dist: tqdm
 Requires-Dist: pySWEETCat
 Requires-Dist: kepmodel
-Requires-Dist: mock; python_version < "3.3"

 <p align="center">
   <img width = "140" src="https://github.com/j-faria/arvi/blob/main/docs/logo/logo.png?raw=true"/>
{arvi-0.1.23.dist-info → arvi-0.1.25.dist-info}/RECORD
CHANGED
@@ -4,24 +4,24 @@ arvi/ariadne_wrapper.py,sha256=YvilopJa9T4NwPcj3Nah_U8smSeSAU5-HYZMb_GJ-BQ,2232
 arvi/berv.py,sha256=eKnpuPC1w45UrUEyFRbs9F9j3bXz3kxYzNXbnRgvFQM,17596
 arvi/binning.py,sha256=jbemJ-bM3aqoOsqMo_OhWt_co-JAQ0nhdG_GpTsrRsw,15403
 arvi/config.py,sha256=W-v8NNhRd_PROu0wCMilXmOhYcju4xbUalugd5u7SRU,1881
-arvi/dace_wrapper.py,sha256=
+arvi/dace_wrapper.py,sha256=cH7pQgKZDdK1Buahyl_D__H9WqVzBNMJuV6Ebw48amo,21270
 arvi/extra_data.py,sha256=WEEaYeLh52Zdv0uyHO72Ys5MWS3naTAP4wJV2BJ1mbk,2551
 arvi/gaia_wrapper.py,sha256=icm3LJjG9pjP47_bM30NFyocUQO3X3SHS5yQ-Dwcr5w,4653
 arvi/headers.py,sha256=uvdJebw1M5YkGjE3vJJwYBOnLikib75uuZE9FXB5JJM,1673
 arvi/instrument_specific.py,sha256=-pbm2Vk3iK_1K7nDa1avlJOKHBcXllwILI4lQn-Ze-A,7761
-arvi/kima_wrapper.py,sha256=
+arvi/kima_wrapper.py,sha256=BvNTVqzM4lMNhLCyBFVh3T84hHfGKAFpgiYiOi4lh0g,2731
 arvi/lbl_wrapper.py,sha256=_ViGVkpakvuBR_xhu9XJRV5EKHpj5Go6jBZGJZMIS2Y,11850
 arvi/nasaexo_wrapper.py,sha256=mWt7eHgSZe4MBKCmUvMPTyUPGuiwGTqKugNBvmjOg9s,7306
 arvi/plots.py,sha256=WUm-sqN0aZTNXvE1kYpvmHTW9QPWqSCpKhNjwaqxjEk,29628
-arvi/programs.py,sha256=
+arvi/programs.py,sha256=BW7xBNKLei7NVLLW3_lsVskwzkaIoNRiHK2jn9Tn2ZM,8879
 arvi/reports.py,sha256=ayPdZ4HZO9iCDdnADQ18gQPJh79o-1UYG7TYkvm9Lrc,4051
 arvi/setup_logger.py,sha256=pBzaRTn0hntozjbaRVx0JIbWGuENkvYUApa6uB-FsRo,279
 arvi/simbad_wrapper.py,sha256=iAAwEMcr1Hgu6lnDctmaCC1TLPCB8yAfHG0wxh9K9C8,5791
-arvi/spectra.py,sha256=
+arvi/spectra.py,sha256=ebF1ocodTastLx0CyqLSpE8EZNDXBF8riyfxMr3L6H0,7491
 arvi/stats.py,sha256=ilzzGL9ew-SyVa9eEdrYCpD3DliOAwhoNUg9LIlHjzU,2583
 arvi/stellar.py,sha256=veuL_y9kJvvApU_jqYQqP3EkcRnQffTc8Us6iT5UrFI,3790
-arvi/timeseries.py,sha256=
-arvi/translations.py,sha256=
+arvi/timeseries.py,sha256=NdmSSYeDdS-cYBXPt8NCeKS1jLdv8LP6Zh561KRGfZc,77328
+arvi/translations.py,sha256=PUSrn4zvYO2MqGzUxlFGwev_tBkgJaJrIYs6NKHzbWo,951
 arvi/utils.py,sha256=LImV8iPjG8ZKjPCT9lp25_pDb-51ZZk42Hc8bzZt7M0,6568
 arvi/data/info.svg,sha256=0IMI6W-eFoTD8acnury79WJJakpBwLa4qKS4JWpsXiI,489
 arvi/data/obs_affected_ADC_issues.dat,sha256=tn93uOL0eCTYhireqp1wG-_c3CbxPA7C-Rf-pejVY8M,10853
@@ -29,8 +29,8 @@ arvi/data/obs_affected_blue_cryostat_issues.dat,sha256=z4AK17xfz8tGTDv1FjRvQFnio
 arvi/data/extra/HD86226_PFS1.rdb,sha256=vfAozbrKHM_j8dYkCBJsuHyD01KEM1asghe2KInwVao,3475
 arvi/data/extra/HD86226_PFS2.rdb,sha256=F2P7dB6gVyzCglUjNheB0hIHVClC5RmARrGwbrY1cfo,4114
 arvi/data/extra/metadata.json,sha256=C69hIw6CohyES6BI9vDWjxwSz7N4VOYX0PCgjXtYFmU,178
-arvi-0.1.
-arvi-0.1.
-arvi-0.1.
-arvi-0.1.
-arvi-0.1.
+arvi-0.1.25.dist-info/LICENSE,sha256=6JfQgl7SpM55t0EHMFNMnNh-AdkpGW25MwMiTnhdWQg,1068
+arvi-0.1.25.dist-info/METADATA,sha256=kB1PpEDCZNesQG7KRzONb36uILEryuieL3QK8DbJAkU,1852
+arvi-0.1.25.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+arvi-0.1.25.dist-info/top_level.txt,sha256=4EeiKDVLD45ztuflTGfQ3TU8GVjJg5Y95xS5XjI-utU,5
+arvi-0.1.25.dist-info/RECORD,,
{arvi-0.1.23.dist-info → arvi-0.1.25.dist-info}/LICENSE
File without changes
{arvi-0.1.23.dist-info → arvi-0.1.25.dist-info}/top_level.txt
File without changes