arvi 0.1.25__tar.gz → 0.1.27__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- arvi-0.1.27/.github/dependabot.yml +6 -0
- {arvi-0.1.25 → arvi-0.1.27}/.github/workflows/docs-gh-pages.yml +5 -5
- {arvi-0.1.25 → arvi-0.1.27}/.github/workflows/install.yml +0 -3
- {arvi-0.1.25 → arvi-0.1.27}/PKG-INFO +2 -2
- {arvi-0.1.25 → arvi-0.1.27}/arvi/dace_wrapper.py +48 -28
- {arvi-0.1.25 → arvi-0.1.27}/arvi/extra_data.py +21 -5
- {arvi-0.1.25 → arvi-0.1.27}/arvi/gaia_wrapper.py +16 -3
- {arvi-0.1.25 → arvi-0.1.27}/arvi/plots.py +11 -6
- {arvi-0.1.25 → arvi-0.1.27}/arvi/simbad_wrapper.py +83 -4
- {arvi-0.1.25 → arvi-0.1.27}/arvi/timeseries.py +269 -53
- {arvi-0.1.25 → arvi-0.1.27}/arvi.egg-info/PKG-INFO +2 -2
- {arvi-0.1.25 → arvi-0.1.27}/arvi.egg-info/SOURCES.txt +3 -0
- arvi-0.1.27/tests/HD10700-Bcor_ESPRESSO18.rdb +3 -0
- arvi-0.1.27/tests/test_create_RV.py +21 -0
- {arvi-0.1.25 → arvi-0.1.27}/.github/workflows/python-publish.yml +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/.gitignore +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/LICENSE +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/README.md +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/HZ.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/__init__.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/ariadne_wrapper.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/berv.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/binning.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/config.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/data/extra/HD86226_PFS1.rdb +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/data/extra/HD86226_PFS2.rdb +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/data/extra/metadata.json +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/data/info.svg +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/data/obs_affected_ADC_issues.dat +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/data/obs_affected_blue_cryostat_issues.dat +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/headers.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/instrument_specific.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/kima_wrapper.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/lbl_wrapper.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/nasaexo_wrapper.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/programs.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/reports.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/setup_logger.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/spectra.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/stats.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/stellar.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/translations.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi/utils.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi.egg-info/dependency_links.txt +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi.egg-info/requires.txt +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/arvi.egg-info/top_level.txt +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/docs/API.md +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/docs/detailed.md +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/docs/index.md +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/docs/logo/detective.png +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/docs/logo/logo.png +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/mkdocs.yml +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/pyproject.toml +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/setup.cfg +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/setup.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/tests/test_binning.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/tests/test_import_object.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/tests/test_simbad.py +0 -0
- {arvi-0.1.25 → arvi-0.1.27}/tests/test_stats.py +0 -0
.github/workflows/docs-gh-pages.yml

@@ -26,13 +26,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@
+        uses: actions/checkout@v4

       - name: Setup Pages
-        uses: actions/configure-pages@
+        uses: actions/configure-pages@v5

       - name: Set up Python
-        uses: actions/setup-python@
+        uses: actions/setup-python@v5
         with:
           python-version: "3.10"

@@ -48,7 +48,7 @@ jobs:
           mkdocs build

       - name: Upload artifact
-        uses: actions/upload-pages-artifact@
+        uses: actions/upload-pages-artifact@v3

   # Deployment job
   deploy:
@@ -60,4 +60,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@
+        uses: actions/deploy-pages@v4
arvi/dace_wrapper.py

@@ -11,16 +11,33 @@ from .setup_logger import logger
 from .utils import create_directory, all_logging_disabled, stdout_disabled, tqdm


-def load_spectroscopy() -> SpectroscopyClass:
+def load_spectroscopy(user=None) -> SpectroscopyClass:
     from .config import config
+    # requesting as public
     if config.request_as_public:
         with all_logging_disabled():
             dace = DaceClass(dace_rc_config_path='none')
         return SpectroscopyClass(dace_instance=dace)
+    # DACERC environment variable is set, should point to a dacerc file with credentials
     if 'DACERC' in os.environ:
         dace = DaceClass(dace_rc_config_path=os.environ['DACERC'])
         return SpectroscopyClass(dace_instance=dace)
-    #
+    # user provided, should be a section in ~/.dacerc
+    if user is not None:
+        import configparser
+        import tempfile
+        config = configparser.ConfigParser()
+        config.read(os.path.expanduser('~/.dacerc'))
+        if user not in config.sections():
+            raise ValueError(f'Section for user "{user}" not found in ~/.dacerc')
+        with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
+            new_config = configparser.ConfigParser()
+            new_config['user'] = config[user]
+            new_config.write(f)
+        dace = DaceClass(dace_rc_config_path=f.name)
+        logger.info(f'using credentials for user {user} in ~/.dacerc')
+        return SpectroscopyClass(dace_instance=dace)
+    # default
     return default_Spectroscopy

 @lru_cache()
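With the new `user` argument, a single ~/.dacerc can hold credentials for several accounts, one INI section each; `load_spectroscopy` copies the chosen section into a temporary file under the standard `[user]` name before handing it to `DaceClass`. A minimal sketch of the idea (the section name `alice` and the key inside it are illustrative, not part of arvi):

    # ~/.dacerc (INI format), with an extra per-account section:
    #   [user]
    #   ... default credentials ...
    #   [alice]
    #   ... alice's credentials ...
    from arvi.dace_wrapper import get_observations

    # fetch observations with alice's credentials instead of the default ones
    result = get_observations('HD10700', instrument='ESPRESSO', user='alice')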
@@ -70,7 +87,6 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
     if verbose and npipe > 1:
         logger.info(f'selecting latest pipeline ({pipelines[0]}) for {inst}')

-
     for pipe in pipelines:
         modes = [m for m in result[inst][pipe].keys()]

@@ -85,24 +101,19 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
         if verbose:
             logger.warning(f'no observations for requested NIRPS mode ({NIRPS_mode})')

-        #
-        # done together with NIRPS
-        if '
+        # HARPS observations should not be separated by 'mode' if some are
+        # done together with NIRPS, but should be separated by 'EGGS' mode
+        if 'HARPS' in inst:
+            m0 = modes[0]
+            data = {
+                k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                for k in result[inst][pipe][m0].keys()
+            }
             if 'HARPS+NIRPS' in modes:
-                m0 = modes[0]
-                data = {
-                    k: np.concatenate([result[inst][pipe][m][k] for m in modes])
-                    for k in result[inst][pipe][m0].keys()
-                }
                 arrays.append( ((str(inst), str(pipe), str(m0)), data) )
                 continue

-            if 'EGGS+NIRPS' in modes:
-                m0 = modes[0]
-                data = {
-                    k: np.concatenate([result[inst][pipe][m][k] for m in modes])
-                    for k in result[inst][pipe][m0].keys()
-                }
+            if 'EGGS+NIRPS' in modes or 'EGGS' in modes:
                 arrays.append( ((str(inst + '_EGGS'), str(pipe), str(m0)), data) )
                 continue

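The merge step above builds one data dictionary per instrument by concatenating the per-mode arrays key by key. A self-contained sketch of that dict comprehension (keys and values are invented stand-ins for the DACE result):

    import numpy as np

    # per-mode results, as in result[inst][pipe]
    res = {'HARPS':       {'rjd': np.array([1.0, 2.0]), 'rv': np.array([5.1, 5.2])},
           'HARPS+NIRPS': {'rjd': np.array([3.0]),      'rv': np.array([5.3])}}
    modes = list(res.keys())
    m0 = modes[0]
    data = {k: np.concatenate([res[m][k] for m in modes]) for k in res[m0].keys()}
    print(data['rjd'])   # [1. 2. 3.] -> all modes merged into a single time series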
@@ -117,7 +128,7 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H

     return arrays

-def get_observations_from_instrument(star, instrument, main_id=None, verbose=True):
+def get_observations_from_instrument(star, instrument, user=None, main_id=None, verbose=True):
     """ Query DACE for all observations of a given star and instrument

     Args:
@@ -125,6 +136,8 @@ def get_observations_from_instrument(star, instrument, main_id=None, verbose=Tru
             name of the star
         instrument (str):
             instrument name
+        user (str, optional):
+            DACERC user name. Defaults to None.
         main_id (str, optional):
             Simbad main id of target to query DACE id. Defaults to None.
         verbose (bool, optional):
@@ -138,7 +151,7 @@ def get_observations_from_instrument(star, instrument, main_id=None, verbose=Tru
         dict:
             dictionary with data from DACE
     """
-    Spectroscopy = load_spectroscopy()
+    Spectroscopy = load_spectroscopy(user)
     found_dace_id = False
     try:
         dace_id = get_dace_id(star, verbose=verbose)
@@ -239,9 +252,9 @@ def get_observations_from_instrument(star, instrument, main_id=None, verbose=Tru
     # print([r[k1][k2].keys() for k1 in r.keys() for k2 in r[k1].keys()])
     return r

-def get_observations(star, instrument=None, main_id=None, verbose=True):
+def get_observations(star, instrument=None, user=None, main_id=None, verbose=True):
     if instrument is None:
-        Spectroscopy = load_spectroscopy()
+        Spectroscopy = load_spectroscopy(user)
         try:
             with stdout_disabled(), all_logging_disabled():
                 result = Spectroscopy.get_timeseries(target=star,
@@ -255,7 +268,7 @@ def get_observations(star, instrument=None, main_id=None, verbose=True):
             raise ValueError(msg) from None
     else:
         try:
-            result = get_observations_from_instrument(star, instrument, main_id, verbose)
+            result = get_observations_from_instrument(star, instrument, user, main_id, verbose)
         except ValueError:
             msg = f'no {instrument} observations for {star}'
             raise ValueError(msg) from None
@@ -339,6 +352,12 @@ def check_existing(output_directory, files, type):
         if type in f
     ]

+    if type == 'S2D':
+        existing = [
+            f.partition('.fits')[0] for f in os.listdir(output_directory)
+            if 'e2ds' in f
+        ]
+
     # also check for lowercase type
     existing += [
         f.partition('.fits')[0] for f in os.listdir(output_directory)
@@ -351,7 +370,8 @@ def check_existing(output_directory, files, type):

     # remove type of file (e.g. _CCF_A)
     existing = [f.partition('_')[0] for f in existing]
-
+    existing = np.unique(existing)
+
     missing = []
     for file in files:
         if any(other in file for other in existing):
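`check_existing` now deduplicates file stems before comparing, since the same raw-file prefix can appear once per product type on disk. A small sketch of the stem matching (file names invented):

    import numpy as np

    on_disk = ['r.ESPRE.2024-01-01_CCF_A.fits', 'r.ESPRE.2024-01-01_S1D_A.fits']
    requested = ['r.ESPRE.2024-01-01', 'r.ESPRE.2024-01-02']
    existing = np.unique([f.partition('_')[0] for f in on_disk])
    missing = [f for f in requested if not any(e in f for e in existing)]
    print(missing)   # only the 2024-01-02 file still needs to be downloaded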
@@ -360,9 +380,9 @@ def check_existing(output_directory, files, type):

     return np.array(missing)

-def download(files, type, output_directory, output_filename=None, quiet=True, pbar=None):
+def download(files, type, output_directory, output_filename=None, user=None, quiet=True, pbar=None):
     """ Download files from DACE """
-    Spectroscopy = load_spectroscopy()
+    Spectroscopy = load_spectroscopy(user)
     if isinstance(files, str):
         files = [files]
     if quiet:
@@ -439,7 +459,7 @@ def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_le
             logger.warning(f'file not found: {file}')


-def do_download_filetype(type, raw_files, output_directory, clobber=False,
+def do_download_filetype(type, raw_files, output_directory, clobber=False, user=None,
                          verbose=True, chunk_size=20, parallel_limit=30):
     """ Download CCFs / S1Ds / S2Ds from DACE """
     raw_files = np.atleast_1d(raw_files)
@@ -475,7 +495,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
     if n < parallel_limit:
         iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
         for files in tqdm(iterator, total=len(iterator)):
-            download(files, type, output_directory, quiet=False)
+            download(files, type, output_directory, quiet=False, user=user)
         extract_fits(output_directory)

     else:
@@ -487,7 +507,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
         chunks = list(chunker(raw_files, chunk_size))
         pbar = tqdm(total=len(chunks))
         it1 = [
-            (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', True, pbar)
+            (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', user, True, pbar)
             for i, files in enumerate(chunks)
         ]
         it2 = [(output_directory, f'spectroscopy_download{i+1}.tar.gz') for i in range(len(chunks))]
arvi/extra_data.py

@@ -10,7 +10,8 @@ refs = {
     'HD86226': 'Teske et al. 2020 (AJ, 160, 2)'
 }

-def get_extra_data(star, instrument=None, path=None, verbose=True):
+def get_extra_data(star, instrument=None, path=None, verbose=True,
+                   check_for_kms=True):
     if path is None:
         path = os.path.dirname(__file__)
     path = os.path.join(path, 'data', 'extra')
@@ -18,7 +19,7 @@ def get_extra_data(star, instrument=None, path=None, verbose=True):
     metadata = json.load(open(os.path.join(path, 'metadata.json'), 'r'))
     # print(metadata)

-    files = glob(os.path.join(path, star + '*.rdb'))
+    files = glob(os.path.join(path, star.replace(' ', '') + '*.rdb'))
     files = [f for f in files if os.path.isfile(f)]
     files = [f for f in files if not os.path.basename(f).endswith('.zip')]

@@ -57,9 +58,24 @@ def get_extra_data(star, instrument=None, path=None, verbose=True):
         if 'corrected_for_secular_acceleration' in metadata[file_basename]:
             did_sa[i] = metadata[file_basename]['corrected_for_secular_acceleration']

-
-
-
+    with logger.contextualize(indent=' '):
+        s = timeseries.RV.from_rdb(files[0], star=star, instrument=instruments[0], units=units[0])
+        if check_for_kms and s.svrad.min() < 0.01:
+            units[0] = 'kms'
+            s = timeseries.RV.from_rdb(files[0], star=star, instrument=instruments[0], units=units[0])
+        if verbose:
+            logger.info(f'{instruments[0]:>12s} ├ ({s.N} observations)')
+
+        for file, instrument, unit in zip(files[1:], instruments[1:], units[1:]):
+            _s = timeseries.RV.from_rdb(file, star=star, instrument=instrument, units=unit)
+            if check_for_kms and _s.svrad.min() < 0.01:
+                unit = 'kms'
+                _s = timeseries.RV.from_rdb(file, star=star, instrument=instrument, units=unit)
+            if verbose:
+                logger.info(f'{instrument:>12s} ├ ({_s.N} observations)')
+
+            s = s + _s
+
     for i, (inst, ref, inst_did_sa) in enumerate(zip(s.instruments, reference, did_sa)):
         _s = getattr(s, inst)
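The new `check_for_kms` heuristic re-reads a file whenever the smallest RV uncertainty looks implausibly small for m/s, the tell-tale sign of a file stored in km/s. The test in isolation (numbers invented):

    import numpy as np

    svrad = np.array([0.0008, 0.0012])   # uncertainties as read, assuming m/s
    if svrad.min() < 0.01:               # sub-cm/s errors are unrealistic -> units were km/s
        print('re-reading file with units=kms')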
arvi/gaia_wrapper.py

@@ -9,7 +9,11 @@ DATA_PATH = os.path.dirname(__file__)
 DATA_PATH = os.path.join(DATA_PATH, 'data')

 QUERY = """
-SELECT TOP 20 gaia_source.designation,gaia_source.source_id,
+SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
+gaia_source.ra, gaia_source.dec,
+gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
+gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
+gaia_source.radial_velocity, gaia_source.radial_velocity_error
 FROM gaiadr3.gaia_source
 WHERE
 CONTAINS(
@@ -23,7 +27,11 @@ CONTAINS(
 """

 QUERY_ID = """
-SELECT TOP 20 gaia_source.designation,gaia_source.source_id,
+SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
+gaia_source.ra, gaia_source.dec,
+gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
+gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
+gaia_source.radial_velocity, gaia_source.radial_velocity_error
 FROM gaiadr3.gaia_source
 WHERE
 gaia_source.source_id = {id}
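The expanded ADQL now pulls astrometry, RUWE, photometry, and the radial velocity with its error in one pass. arvi submits these queries itself; as an independent sanity check, the same ADQL can be run against the Gaia archive with astroquery (not an arvi dependency):

    from astroquery.gaia import Gaia   # pip install astroquery

    adql = """
    SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
    gaia_source.ra, gaia_source.dec,
    gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
    gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
    gaia_source.radial_velocity, gaia_source.radial_velocity_error
    FROM gaiadr3.gaia_source
    WHERE gaia_source.source_id = 4472832130942575872
    """
    results = Gaia.launch_job(adql).get_results()   # an astropy Table with one row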
@@ -31,6 +39,7 @@ gaia_source.source_id = {id}

 translate = {
     'Proxima': '5853498713190525696',
+    'GJ699': '4472832130942575872',
     'LS II +14 13': '4318465066420528000',
 }

@@ -81,7 +90,7 @@ class gaia:
         pmra = simbad.pmra
         pmdec = simbad.pmdec
         rv = simbad.rvz_radvel
-        args = dict(ra=ra, dec=dec, plx=plx, pmra=pmra, pmdec=pmdec, rv=rv)
+        args = dict(ra=ra, dec=dec, plx=plx, pmra=pmra, pmdec=pmdec, rv=rv)

         try:
             if star in translate:
@@ -109,6 +118,10 @@ class gaia:
             self.radial_velocity = float(results['radial_velocity'])
         except ValueError:
             self.radial_velocity = None
+        try:
+            self.radial_velocity_error = float(results['radial_velocity_error'])
+        except ValueError:
+            self.radial_velocity_error = None

         return

arvi/plots.py

@@ -6,7 +6,7 @@ import numpy as np
 from astropy.timeseries import LombScargle

 from .setup_logger import logger
-from . import config
+from .config import config
 from .stats import wmean

 from .utils import lazy_import
@@ -123,8 +123,12 @@ def clickable_legend(fig, ax, leg):
             h = handles[labels.index(artist.get_text())]
             alpha_text = {None:0.2, 1.0: 0.2, 0.2:1.0}[artist.get_alpha()]
             alpha_point = {None: 0.0, 1.0: 0.0, 0.2: 1.0}[artist.get_alpha()]
-
-
+            try:
+                h[0].set_alpha(alpha_point)
+                h[2][0].set_alpha(alpha_point)
+            except TypeError:
+                h.set_alpha(alpha_point)
+
             artist.set_alpha(alpha_text)
             fig.canvas.draw()
         except ValueError:
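The new try/except handles the two kinds of legend handles that can show up here: ax.errorbar returns an ErrorbarContainer whose parts must be dimmed individually, while a plain ax.plot handle is a single Line2D. A standalone illustration of the two cases (matplotlib behaviour, not arvi code):

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    h = ax.errorbar([0, 1], [0, 1], yerr=[0.1, 0.1])
    h[0].set_alpha(0.2)       # the data Line2D
    h[2][0].set_alpha(0.2)    # the error-bar LineCollection
    line, = ax.plot([0, 1], [1, 0])
    line.set_alpha(0.2)       # a plain Line2D handle: set_alpha directly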
@@ -488,14 +492,15 @@ def plot_quantity(self, quantity, ax=None, show_masked=False, instrument=None,


 plot_fwhm = partialmethod(plot_quantity, quantity='fwhm')
-
+plot_bispan = partialmethod(plot_quantity, quantity='bispan')
 plot_contrast = partialmethod(plot_quantity, quantity='contrast')
 plot_rhk = partialmethod(plot_quantity, quantity='rhk')
 plot_berv = partialmethod(plot_quantity, quantity='berv')


 @plot_fast
-def gls(self, ax=None, label=None, fap=True, instrument=None,
+def gls(self, ax=None, label=None, fap=True, instrument=None,
+        adjust_means=config.adjust_means_gls,
         picker=True, **kwargs):
     """
     Calculate and plot the Generalised Lomb-Scargle periodogram of the radial
@@ -711,7 +716,7 @@ def gls_quantity(self, quantity, ax=None, fap=True, instrument=None,


 gls_fwhm = partialmethod(gls_quantity, quantity='fwhm')
-
+gls_bispan = partialmethod(gls_quantity, quantity='bispan')
 gls_rhk = partialmethod(gls_quantity, quantity='rhk')

arvi/simbad_wrapper.py

@@ -1,8 +1,15 @@
 import os
+import numpy as np
 import requests
+from dataclasses import dataclass

 import pysweetcat

+try:
+    from uncertainties import ufloat
+except ImportError:
+    ufloat = lambda x, y: x
+
 from .translations import translate

 DATA_PATH = os.path.dirname(__file__)
@@ -15,19 +22,35 @@ SELECT basic.OID,
 main_id,
 pmra,
 pmdec,
-plx_value,
+plx_value, plx_err,
 rvz_radvel,
 sp_type
 FROM basic JOIN ident ON oidref = oid
 WHERE id = '{star}';
 """

+# SELECT filter, flux, flux_err
+# FROM basic JOIN ident ON oid = ident.oidref JOIN flux ON oid = flux.oidref
+# WHERE id = 'HD23079';
+
 BV_QUERY = """
 SELECT B, V FROM allfluxes
 JOIN ident USING(oidref)
 WHERE id = '{star}';
 """

+FILTERS_QUERY = """
+SELECT filter, flux, flux_err, bibcode FROM flux
+JOIN ident USING(oidref)
+WHERE id = '{star}';
+"""
+
+MEAS_QUERY = """
+SELECT teff, log_g, log_g_prec, fe_h, fe_h_prec, bibcode FROM mesFe_H
+JOIN ident USING(oidref)
+WHERE id = '{star}';
+"""
+
 IDS_QUERY = """
 SELECT ids FROM ids
 JOIN ident USING(oidref)
@@ -40,6 +63,13 @@ JOIN ident ON oidref = oid
 WHERE id = '{star}';
 """

+@dataclass
+class Measurements:
+    teff: list
+    logg: list
+    feh: list
+    bibcode: list
+

 def run_query(query):
     url = 'http://simbad.u-strasbg.fr/simbad/sim-tap/sync'
@@ -52,7 +82,7 @@ def run_query(query):
         raise IndexError(err)
     return response.content.decode()

-def parse_table(table, cols=None, values=None):
+def parse_table1(table, cols=None, values=None):
     header = table.splitlines()[0].split('|')
     if cols is None:
         cols = list(map(str.strip, header))
@@ -66,6 +96,29 @@ def parse_table(table, cols=None, values=None):
     values = [value.replace('"', '') for value in values]
     return cols, values

+def parse_tablen(table, cols=None, values=None):
+    header = table.splitlines()[0].split('|')
+    cols = list(map(str.strip, header))
+    values = [list(map(str.strip, row.split('|'))) for row in table.splitlines()[2:]]
+    return cols, values
+
+def parse_value(value, err=None, prec=None):
+    try:
+        v = float(value)
+        if err:
+            try:
+                v = ufloat(float(value), float(err))
+            except ValueError:
+                pass
+        if prec:
+            try:
+                v = ufloat(float(value), 10**-int(prec))
+            except ValueError:
+                pass
+    except ValueError:
+        v = np.nan
+    return v
+

 effective_temperatures = {
     'F0': 7350, 'F2': 7050, 'F3': 6850, 'F5': 6700, 'F6': 6550, 'F7': 6400, 'F8': 6300,
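`parse_value` turns SIMBAD's string fields into floats, attaching an uncertainty when either an explicit error or a decimal precision is available, and falling back to NaN for unparsable entries. Expected behaviour, assuming the `uncertainties` package is installed (values invented):

    parse_value('5777', err='50')    # -> 5777.0+/-50.0
    parse_value('4.44', prec='2')    # -> 4.44+/-0.01 (precision of 2 decimal places)
    parse_value('~')                 # -> nan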
@@ -120,14 +173,37 @@ class simbad:

         try:
             table1 = run_query(query=QUERY.format(star=self.star))
-            cols, values = parse_table(table1)
+            cols, values = parse_table1(table1)

             table2 = run_query(query=BV_QUERY.format(star=self.star))
-            cols, values = parse_table(table2, cols, values)
+            cols, values = parse_table1(table2, cols, values)

             table3 = run_query(query=IDS_QUERY.format(star=self.star))
             line = table3.splitlines()[2]
             self.ids = line.replace('"', '').replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').split('|')
+
+            table4 = run_query(query=FILTERS_QUERY.format(star=self.star))
+            for row in table4.splitlines()[2:]:
+                filter_name, mag, mag_err, bibcode = row.replace('"', '').split('|')
+                filter_name = filter_name.strip()
+                try:
+                    setattr(self, '_' + filter_name, ufloat(float(mag), float(mag_err)))
+                except ValueError:
+                    setattr(self, '_' + filter_name, float(mag))
+
+            # measurements table
+            table5 = run_query(query=MEAS_QUERY.format(star=self.star))
+            _teff, _logg, _feh, _bibcode = [], [], [], []
+            for row in table5.splitlines()[2:]:
+                teff, log_g, log_g_prec, fe_h, fe_h_prec, bibcode = row.replace('"', '').split('|')
+                _bibcode.append(bibcode)
+                _teff.append(parse_value(teff))
+                _logg.append(parse_value(log_g, prec=log_g_prec))
+                _feh.append(parse_value(fe_h, prec=fe_h_prec))
+
+            self.measurements = Measurements(_teff, _logg, _feh, _bibcode)
+
+
         except IndexError:
             raise ValueError(f'simbad query for {star} failed')

@@ -176,6 +252,9 @@ class simbad:
         sp_type = self.sp_type
         return f'{self.star} ({V=}, {sp_type=})'

+    @property
+    def bmv(self):
+        return self.B - self.V


 def argsort_by_spectral_type(sptypes):
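With the new photometry and measurements attributes, a simbad object exposes the B-V colour and the literature stellar parameters directly. A short usage sketch (star name illustrative; network access to SIMBAD required):

    from arvi.simbad_wrapper import simbad

    s = simbad('HD10700')
    print(s.bmv)                  # B - V, from the allfluxes query
    print(s.measurements.teff)    # per-reference Teff values from the mesFe_H table
    print(s.measurements.bibcode)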