arvi 0.1.23__py3-none-any.whl → 0.1.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of arvi has been flagged as potentially problematic.

arvi/dace_wrapper.py CHANGED
@@ -1,7 +1,9 @@
  import os
+ import sys
  import tarfile
  import collections
  from functools import lru_cache
+ from itertools import islice
  import numpy as np
  from dace_query import DaceClass
  from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
@@ -22,14 +24,15 @@ def load_spectroscopy() -> SpectroscopyClass:
      return default_Spectroscopy
  
  @lru_cache()
- def get_dace_id(star):
+ def get_dace_id(star, verbose=True):
      filters = {"obj_id_catname": {"equal": [star]}}
      try:
-         with stdout_disabled(), all_logging_disabled():
+         with all_logging_disabled():
              r = load_spectroscopy().query_database(filters=filters, limit=1)
          return r['obj_id_daceid'][0]
      except KeyError:
-         logger.error(f"Could not find DACE ID for {star}")
+         if verbose:
+             logger.error(f"Could not find DACE ID for {star}")
          raise ValueError from None
  
  def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
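Review note on the hunk above: `get_dace_id` keeps its `@lru_cache()` decorator while gaining a `verbose` argument, and `functools.lru_cache` keys on every argument. A minimal sketch of the consequence (the `lookup` function below is an illustrative stand-in, not arvi code):

```python
# Sketch: lru_cache treats calls that differ only in `verbose` as
# distinct cache entries, so each variant can trigger its own remote query.
from functools import lru_cache

@lru_cache()
def lookup(star, verbose=True):
    print(f'querying DACE for {star}')   # stands in for query_database
    return star.upper()

lookup('HD1')                  # queries
lookup('HD1')                  # cached, no query
lookup('HD1', verbose=False)   # different cache key: queries again
```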
@@ -55,15 +58,19 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
          pipelines = [pipelines[i]]
  
      if latest_pipeline:
+         npipe = len(pipelines)
          if 'NIRPS' in inst and any(['LBL' in p for p in pipelines]):
              # TODO: correctly load both CCF and LBL
              pipelines = [pipelines[1]]
+         if 'HARPS' in inst and npipe > 1 and pipelines[1] == pipelines[0] + '-EGGS':
+             pipelines = pipelines[:2]
          else:
              pipelines = [pipelines[0]]
  
-         if verbose and len(pipelines) > 1:
+         if verbose and npipe > 1:
              logger.info(f'selecting latest pipeline ({pipelines[0]}) for {inst}')
  
+ 
      for pipe in pipelines:
          modes = [m for m in result[inst][pipe].keys()]
  
@@ -80,15 +87,25 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
  
          # HARPS15 observations should not be separated by 'mode' if some are
          # done together with NIRPS
-         if 'HARPS15' in inst and 'HARPS+NIRPS' in modes:
-             m0 = modes[0]
-             data = {
-                 k: np.concatenate([result[inst][pipe][m][k] for m in modes])
-                 for k in result[inst][pipe][m0].keys()
-             }
-             arrays.append(
-                 ((inst, pipe, m0), data)
-             )
+         if 'HARPS15' in inst:
+             if 'HARPS+NIRPS' in modes:
+                 m0 = modes[0]
+                 data = {
+                     k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                     for k in result[inst][pipe][m0].keys()
+                 }
+                 arrays.append(
+                     ((str(inst), str(pipe), str(m0)), data)
+                 )
+             if 'EGGS+NIRPS' in modes:
+                 m0 = modes[0]
+                 data = {
+                     k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                     for k in result[inst][pipe][m0].keys()
+                 }
+                 arrays.append(
+                     ((str(inst + '_EGGS'), str(pipe), str(m0)), data)
+                 )
              continue
  
          for mode in modes:
@@ -97,39 +114,61 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
                  raise ValueError
  
              arrays.append(
-                 ((inst, pipe, mode), result[inst][pipe][mode])
+                 ((str(inst), str(pipe), str(mode)), result[inst][pipe][mode])
              )
  
      return arrays
  
- def get_observations_from_instrument(star, instrument, main_id=None):
+ def get_observations_from_instrument(star, instrument, main_id=None, verbose=True):
      """ Query DACE for all observations of a given star and instrument
  
      Args:
-         star (str): name of the star
-         instrument (str): instrument name
-         main_id (str, optional): Simbad main id of target to query DACE id. Defaults to None.
+         star (str):
+             name of the star
+         instrument (str):
+             instrument name
+         main_id (str, optional):
+             Simbad main id of target to query DACE id. Defaults to None.
+         verbose (bool, optional):
+             whether to print warnings. Defaults to True.
  
      Raises:
-         ValueError: If query for DACE id fails
+         ValueError:
+             If query for DACE id fails
  
      Returns:
-         dict: dictionary with data from DACE
+         dict:
+             dictionary with data from DACE
      """
+     Spectroscopy = load_spectroscopy()
+     found_dace_id = False
      try:
-         dace_id = get_dace_id(star)
+         dace_id = get_dace_id(star, verbose=verbose)
+         found_dace_id = True
      except ValueError as e:
          if main_id is not None:
-             dace_id = get_dace_id(main_id)
-         else:
-             raise e
+             try:
+                 dace_id = get_dace_id(main_id, verbose=verbose)
+                 found_dace_id = True
+             except ValueError:
+                 pass
+ 
+     if not found_dace_id:
+         try:
+             with all_logging_disabled():
+                 result = Spectroscopy.get_timeseries(target=star,
+                                                      sorted_by_instrument=True,
+                                                      output_format='numpy')
+             return result
+         except TypeError:
+             msg = f'no {instrument} observations for {star}'
+             raise ValueError(msg) from None
  
-     Spectroscopy = load_spectroscopy()
      filters = {
          "ins_name": {"contains": [instrument]},
          "obj_id_daceid": {"contains": [dace_id]}
      }
-     with stdout_disabled(), all_logging_disabled():
+     with all_logging_disabled():
          result = Spectroscopy.query_database(filters=filters)
  
      if len(result) == 0:
@@ -218,7 +257,7 @@ def get_observations(star, instrument=None, main_id=None, verbose=True):
              raise ValueError(msg) from None
      else:
          try:
-             result = get_observations_from_instrument(star, instrument, main_id)
+             result = get_observations_from_instrument(star, instrument, main_id, verbose)
          except ValueError:
              msg = f'no {instrument} observations for {star}'
              raise ValueError(msg) from None
@@ -323,16 +362,29 @@ def check_existing(output_directory, files, type):
  
      return np.array(missing)
  
- def download(files, type, output_directory):
+ def download(files, type, output_directory, output_filename=None, quiet=True, pbar=None):
      """ Download files from DACE """
      Spectroscopy = load_spectroscopy()
-     with stdout_disabled(), all_logging_disabled():
+     if isinstance(files, str):
+         files = [files]
+     if quiet:
+         with all_logging_disabled():
+             Spectroscopy.download_files(files, file_type=type.lower(),
+                                         output_directory=output_directory,
+                                         output_filename=output_filename)
+     else:
          Spectroscopy.download_files(files, file_type=type.lower(),
-                                     output_directory=output_directory)
+                                     output_directory=output_directory,
+                                     output_filename=output_filename)
+     if pbar is not None:
+         pbar.update()
  
- def extract_fits(output_directory):
+ 
+ def extract_fits(output_directory, filename=None):
      """ Extract fits files from tar.gz file """
-     file = os.path.join(output_directory, 'spectroscopy_download.tar.gz')
+     if filename is None:
+         filename = 'spectroscopy_download.tar.gz'
+     file = os.path.join(output_directory, filename)
      with tarfile.open(file, "r") as tar:
          files = []
          for member in tar.getmembers():
@@ -390,7 +442,7 @@ def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_le
  
  
  def do_download_filetype(type, raw_files, output_directory, clobber=False,
-                          verbose=True, chunk_size=20):
+                          verbose=True, chunk_size=20, parallel_limit=30):
      """ Download CCFs / S1Ds / S2Ds from DACE """
      raw_files = np.atleast_1d(raw_files)
  
@@ -410,7 +462,7 @@
  
      # avoid an empty chunk
      if chunk_size > n:
-        chunk_size = n
+         chunk_size = n
  
      if verbose:
          if chunk_size < n:
@@ -422,11 +474,36 @@
          msg = f"downloading {n} {type}s into '{output_directory}'..."
          logger.info(msg)
  
-     iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
-     for files in tqdm(iterator, total=len(iterator)):
-         download(files, type, output_directory)
-     extract_fits(output_directory)
+     if n < parallel_limit:
+         iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
+         for files in tqdm(iterator, total=len(iterator)):
+             download(files, type, output_directory, quiet=False)
+         extract_fits(output_directory)
  
+     else:
+         def chunker(it, size):
+             iterator = iter(it)
+             while chunk := list(islice(iterator, size)):
+                 yield chunk
+ 
+         chunks = list(chunker(raw_files, chunk_size))
+         pbar = tqdm(total=len(chunks))
+         it1 = [
+             (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', True, pbar)
+             for i, files in enumerate(chunks)
+         ]
+         it2 = [(output_directory, f'spectroscopy_download{i+1}.tar.gz') for i in range(len(chunks))]
+ 
+         # import multiprocessing as mp
+         # with mp.Pool(4) as pool:
+         from multiprocessing.pool import ThreadPool
+ 
+         with ThreadPool(4) as pool:
+             pool.starmap(download, it1)
+             pool.starmap(extract_fits, it2)
+         print('')
+ 
+     sys.stdout.flush()
      logger.info('extracted .fits files')
  
  
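For review context, the new parallel branch above combines three pieces: an `islice`-based chunker, one tar.gz name per chunk so concurrent downloads do not overwrite each other, and a `ThreadPool` (threads suffice because the work is network-bound). A self-contained sketch of the same pattern, with a dummy `fetch` standing in for `Spectroscopy.download_files`:

```python
# Sketch of the chunk-and-thread download pattern (dummy fetch, not DACE).
from itertools import islice
from multiprocessing.pool import ThreadPool

def chunker(it, size):
    it = iter(it)
    while chunk := list(islice(it, size)):   # stops on an empty chunk
        yield chunk

def fetch(files, filename):
    print(f'downloading {len(files)} files into {filename}')

files = [f'r.{i:04d}.fits' for i in range(95)]
chunks = list(chunker(files, 20))
args = [(chunk, f'download{i+1}.tar.gz') for i, chunk in enumerate(chunks)]

with ThreadPool(4) as pool:       # at most 4 chunks in flight at once
    pool.starmap(fetch, args)
```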
arvi/kima_wrapper.py CHANGED
@@ -25,10 +25,11 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
      if not kima_available:
          raise ImportError('kima not available, please install with `pip install kima`')
  
-     time = [getattr(self, inst).mtime for inst in self.instruments]
-     vrad = [getattr(self, inst).mvrad for inst in self.instruments]
-     err = [getattr(self, inst).msvrad for inst in self.instruments]
-     data = RVData(time, vrad, err, instruments=self.instruments)
+     instruments = [inst for inst in self.instruments if self.NN[inst] > 1]
+     time = [getattr(self, inst).mtime for inst in instruments]
+     vrad = [getattr(self, inst).mvrad for inst in instruments]
+     err = [getattr(self, inst).msvrad for inst in instruments]
+     data = RVData(time, vrad, err, instruments=instruments)
  
      fix = kwargs.pop('fix', False)
      npmax = kwargs.pop('npmax', 1)
@@ -41,6 +42,13 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
      model.enforce_stability = kwargs.pop('enforce_stability', False)
      model.star_mass = kwargs.pop('star_mass', 1.0)
  
+     if kwargs.pop('gaussian_priors_individual_offsets', False):
+         from kima.pykima.utils import get_gaussian_priors_individual_offsets
+         model.individual_offset_prior = get_gaussian_priors_individual_offsets(data, use_std=True)
+ 
+     if kwargs.pop('kuma', False):
+         model.conditional.eprior = distributions.Kumaraswamy(0.867, 3.03)
+ 
      for k, v in priors.items():
          try:
              if 'conditional' in k:
@@ -55,9 +63,10 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
              logger.warning(msg)
              return
  
+     if run_directory is None:
+         run_directory = os.getcwd()
+ 
      if run:
-         if run_directory is None:
-             run_directory = os.getcwd()
  
          # TODO: use signature of kima.run to pop the correct kwargs
          # model_name = model.__class__.__name__
@@ -67,8 +76,9 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
          with chdir(run_directory):
              kima.run(model, **kwargs)
  
-     if load:
-         res = kima.load_results(model)
-         return data, model, res
+     if load:
+         with chdir(run_directory):
+             res = kima.load_results(model)
+         return data, model, res
  
      return data, model
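Moving the `run_directory` default out of `if run:` matters because `load` now also changes into that directory, and the `chdir` context manager restores the working directory on exit. A minimal sketch of the pattern, assuming `contextlib.chdir` (Python 3.11 and later; the wrapper may import its own equivalent helper):

```python
# Sketch of the run/load directory handling; contextlib.chdir is 3.11+.
import os
from contextlib import chdir

run_directory = os.getcwd()      # the same fallback the wrapper now applies
with chdir(run_directory):
    ...                          # kima.run / kima.load_results would go here
# the previous working directory is restored here
```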
arvi/programs.py CHANGED
@@ -1,8 +1,9 @@
  import os
  import multiprocessing
- from functools import partial
+ from functools import partial, lru_cache
  from itertools import chain
  from collections import namedtuple
+ from multiprocessing.pool import ThreadPool
  from tqdm import tqdm
  # import numpy as np
  
@@ -14,9 +15,9 @@ __all__ = ['ESPRESSO_GTO']
  path = os.path.join(os.path.dirname(__file__), 'data')
  
  
- def get_star(star, instrument=None):
+ def get_star(star, instrument=None, verbose=False, **kwargs):
      return RV(star, instrument=instrument,
-               _raise_on_error=False, verbose=False, load_extra_data=False)
+               _raise_on_error=False, verbose=verbose, **kwargs)
  
  
  class LazyRV:
@@ -36,21 +37,22 @@ class LazyRV:
      def __repr__(self):
          return f"RV({self.N} stars)"
  
-     def _get(self):
+     def _get(self, **kwargs):
          if self.N > self._parallel_limit:
              # logger.info('Querying DACE...')
-             _get_star = partial(get_star, instrument=self.instrument)
-             with multiprocessing.Pool() as pool:
+             _get_star = partial(get_star, instrument=self.instrument, **kwargs)
+             with ThreadPool(8) as pool:
                  result = list(tqdm(pool.imap(_get_star, self.stars),
-                                    total=self.N, unit='star', desc='Querying DACE'))
-             # result = pool.map(get_star, self.stars)
+                                    total=self.N, unit='star',
+                                    desc='Querying DACE (can take a while)'))
+             print('')
          else:
              result = []
-             logger.info('Querying DACE...')
+             logger.info('querying DACE...')
              pbar = tqdm(self.stars, total=self.N, unit='star')
              for star in pbar:
                  pbar.set_description(star)
-                 result.append(get_star(star, self.instrument))
+                 result.append(get_star(star, self.instrument, **kwargs))
  
          return result
  
@@ -73,14 +75,25 @@ class LazyRV:
      #         result.append(None)
      #     return result
  
+     def reload(self, **kwargs):
+         self._saved = self._get(**kwargs)
+         return self._saved
+ 
      def __iter__(self):
          return self._get()
  
-     def __call__(self):
+     def __call__(self, **kwargs):
          if not self._saved:
-             self._saved = self._get()
+             self._saved = self._get(**kwargs)
          return self._saved
  
+     @lru_cache(maxsize=10)
+     def __getitem__(self, index):
+         star = self.stars[index]
+         if self._saved is not None:
+             return self._saved[index]
+         return get_star(star, self.instrument, verbose=True)
+ 
  
  # sorted by spectral type
  WG1_stars = [
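The new `__getitem__` in the hunk above makes `LazyRV` indexable without loading the whole target list, and `@lru_cache(maxsize=10)` memoizes per `(instance, index)` pair. A minimal sketch of that behavior (illustrative, not arvi code); note that caching a method this way also keeps the cached instances referenced until they are evicted:

```python
# Sketch: lru_cache on __getitem__ caches per (self, index) pair.
from functools import lru_cache

class Lazy:
    def __init__(self, stars):
        self.stars = stars

    @lru_cache(maxsize=10)
    def __getitem__(self, index):
        print(f'expensive query for {self.stars[index]}')
        return self.stars[index].upper()

lazy = Lazy(('GJ699', 'Proxima'))
lazy[0]   # runs the query
lazy[0]   # served from the cache, no print
```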
@@ -157,3 +170,78 @@ ESPRESSO_GTO = ESPRESSO_GTO_nt(
  ESPRESSO_GTO.WG1.__doc__ = 'RV observations for all WG1 targets. Call ESPRESSO_GTO.WG1() to load them.'
  ESPRESSO_GTO.WG2.__doc__ = 'RV observations for all WG2 targets. Call ESPRESSO_GTO.WG2() to load them.'
  ESPRESSO_GTO.WG3.__doc__ = 'RV observations for all WG3 targets. Call ESPRESSO_GTO.WG3() to load them.'
+ 
+ 
+ import requests
+ 
+ def _get_NIRPS_GTO_stars(WP=1):
+     from io import StringIO
+     import numpy as np
+ 
+     url = 'https://www.eso.org/sci/observing/teles-alloc/gto/113/NIRPS/P113_NIRPS-consortium.csv'
+     file = StringIO(requests.get(url).content.decode())
+     stars_P113 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+ 
+     url = 'https://www.eso.org/sci/observing/teles-alloc/gto/114/NIRPS/P114_NIRPS-consortium.csv'
+     file = StringIO(requests.get(url).content.decode())
+     stars_P114 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+ 
+     url = 'https://www.eso.org/sci/observing/teles-alloc/gto/115/NIRPS/P115_NIRPS-consortium.csv'
+     file = StringIO(requests.get(url).content.decode())
+     stars_P115 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+ 
+     def _get_stars_period(stars, WP):
+         stars = np.delete(stars, stars == '')
+ 
+         stars = np.char.replace(stars, '_', ' ')
+         stars = np.char.replace(stars, "Proxima Centauri", "Proxima")
+         stars = np.char.replace(stars, "Barnard's star", "GJ699")
+         stars = np.char.replace(stars, "Teegarden's Star", 'Teegarden')
+ 
+         if WP in (1, 'WP1'):
+             wp1_indices = slice(np.where(stars == 'WP1')[0][0] + 1, np.where(stars == 'WP2')[0][0])
+             return stars[wp1_indices]
+         elif WP == 2:
+             wp2_indices = slice(np.where(stars == 'WP2')[0][0] + 1, np.where(stars == 'WP3')[0][0])
+             return stars[wp2_indices]
+         elif WP == 3:
+             wp3_indices = slice(np.where(stars == 'WP3')[0][0] + 1, np.where(stars == 'Other Science 1')[0][0])
+             return stars[wp3_indices]
+         elif WP == 'OS1':
+             os1_indices = slice(np.where(stars == 'Other Science 1')[0][0] + 1, np.where(stars == 'Other Science 2')[0][0])
+             return stars[os1_indices]
+         elif WP == 'OS2':
+             os2_indices = slice(np.where(stars == 'Other Science 2')[0][0] + 1, None)
+             stars = np.char.replace(stars, 'MMU', 'No')
+             stars = np.char.replace(stars, 'Cl*', '')
+             return stars[os2_indices]
+ 
+     stars_P113 = _get_stars_period(stars_P113, WP)
+     stars_P114 = _get_stars_period(stars_P114, WP)
+     stars_P115 = _get_stars_period(stars_P115, WP)
+     return np.union1d(np.union1d(stars_P113, stars_P114), stars_P115)
+ 
+ try:
+     NIRPS_GTO_WP1_stars = _get_NIRPS_GTO_stars(WP=1)
+     NIRPS_GTO_WP2_stars = _get_NIRPS_GTO_stars(WP=2)
+     NIRPS_GTO_WP3_stars = _get_NIRPS_GTO_stars(WP=3)
+     NIRPS_GTO_OS1_stars = _get_NIRPS_GTO_stars(WP='OS1')
+     NIRPS_GTO_OS2_stars = _get_NIRPS_GTO_stars(WP='OS2')
+ except requests.ConnectionError:
+     from .setup_logger import logger
+     logger.error('Cannot download NIRPS GTO protected target list')
+ else:
+     NIRPS_GTO_nt = namedtuple('NIRPS_GTO', ['WP1', 'WP2', 'WP3', 'OS1', 'OS2'])
+     NIRPS_GTO_nt.__doc__ = 'RV observations for all NIRPS GTO targets. See NIRPS_GTO.WP1, NIRPS_GTO.WP2, ...'
+     NIRPS_GTO = NIRPS_GTO_nt(
+         WP1=LazyRV(NIRPS_GTO_WP1_stars, instrument='NIRPS'),
+         WP2=LazyRV(NIRPS_GTO_WP2_stars, instrument='NIRPS'),
+         WP3=LazyRV(NIRPS_GTO_WP3_stars, instrument='NIRPS'),
+         OS1=LazyRV(NIRPS_GTO_OS1_stars, instrument='NIRPS'),
+         OS2=LazyRV(NIRPS_GTO_OS2_stars, instrument='NIRPS'),
+     )
+     NIRPS_GTO.WP1.__doc__ = 'RV observations for all WP1 targets. Call NIRPS_GTO.WP1() to load them.'
+     NIRPS_GTO.WP2.__doc__ = 'RV observations for all WP2 targets. Call NIRPS_GTO.WP2() to load them.'
+     NIRPS_GTO.WP3.__doc__ = 'RV observations for all WP3 targets. Call NIRPS_GTO.WP3() to load them.'
+     NIRPS_GTO.OS1.__doc__ = 'RV observations for all OS1 targets. Call NIRPS_GTO.OS1() to load them.'
+     NIRPS_GTO.OS2.__doc__ = 'RV observations for all OS2 targets. Call NIRPS_GTO.OS2() to load them.'
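The `_get_NIRPS_GTO_stars` helper added above downloads each period's CSV and reads its first column with `np.loadtxt` over an in-memory buffer. A stripped-down sketch of that fetch-and-parse step, with a placeholder URL rather than the ESO ones:

```python
# Sketch: fetch a CSV over HTTP and load its first column as strings.
from io import StringIO
import numpy as np
import requests

url = 'https://example.org/targets.csv'       # placeholder URL
resp = requests.get(url)
resp.raise_for_status()                       # fail loudly on HTTP errors
stars = np.loadtxt(StringIO(resp.text), delimiter=',',
                   usecols=(0,), dtype=str, skiprows=3)
```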
arvi/spectra.py CHANGED
@@ -72,7 +72,7 @@ def fit_gaussian_to_line(wave, flux, center_wavelength, around=0.15 * u.angstrom
      ]).T
  
      try:
-         popt, pcov = curve_fit(gaussian, w, f, p0=[-f.ptp(), center_wavelength.value, 0.1, f.max()],
+         popt, pcov = curve_fit(gaussian, w, f, p0=[-np.ptp(f), center_wavelength.value, 0.1, f.max()],
                                 bounds=(lower, upper))
      except RuntimeError as e:
          logger.warning(f'fit_gaussian_to_line: {e}')
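The one-token change above is a NumPy 2.0 compatibility fix: the `ndarray.ptp()` method was removed in NumPy 2.0, while the `np.ptp()` function remains. A quick illustration:

```python
# np.ptp (peak-to-peak, max - min) works on NumPy 1.x and 2.x;
# the f.ptp() method form raises AttributeError on NumPy >= 2.0.
import numpy as np

f = np.array([1.0, 4.0, 2.5])
print(np.ptp(f))   # 3.0
```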
@@ -115,7 +115,7 @@ def detrend(w, f):
  def build_master(self, limit=None, plot=True):
      files = sorted(glob(f'{self.star}_downloads/*S1D_A.fits'))
      if self.verbose:
-         logger.info(f'Found {len(files)} S1D files')
+         logger.info(f'found {len(files)} S1D files')
  
      files = files[:limit]
  
@@ -168,8 +168,8 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
      ]
  
      if self.verbose:
-         logger.info(f'Found {len(lines)} lines in linelist')
-         logger.info('Measuring EWs...')
+         logger.info(f'found {len(lines)} lines in linelist')
+         logger.info('measuring EWs...')
  
      EW = []
      pbar = tqdm(linelist)
@@ -183,14 +183,14 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
      EW = np.array(EW)[~np.isnan(EW)]
  
      if self.verbose:
-         logger.info('Determining stellar parameters (can take a few minutes)...')
+         logger.info('determining stellar parameters (can take a few minutes)...')
  
      callback = lambda p, r, A: print('current parameters:', p)
      result = Korg.Fit.ews_to_stellar_parameters(lines, EW, callback=callback)
      par, stat_err, sys_err = result
  
      if self.verbose:
-         logger.info(f'Best fit stellar parameters:')
+         logger.info(f'best-fit stellar parameters:')
          logger.info(f'  Teff: {par[0]:.0f} ± {sys_err[0]:.0f} K')
          logger.info(f'  logg: {par[1]:.2f} ± {sys_err[1]:.2f} dex')
          logger.info(f'  m/H : {par[3]:.2f} ± {sys_err[3]:.2f} dex')
arvi/timeseries.py CHANGED
@@ -193,7 +193,7 @@ class RV:
          time_stamp = datetime.now(timezone.utc)  #.isoformat().split('.')[0]
          self._last_dace_query = time_stamp
  
-         _replacements = (('-', '_'), ('.', '_'), ('__', '_'))
+         _replacements = (('-', '_'), ('.', '_'), (' ', '_'), ('__', '_'))
          def do_replacements(s):
              for a, b in _replacements:
                  s = s.replace(a, b)
@@ -444,7 +444,7 @@ class RV:
  
          s.instruments = [inst]
          s.pipelines = [pipe]
-         s.modes = [mode]
+         s.modes = [str(mode)]
  
          return s
  
@@ -598,29 +598,47 @@ class RV:
              _quantities.append('rhk')
              _quantities.append('rhk_err')
  
-             _s.bispan = np.zeros_like(time)
-             _s.bispan_err = np.full_like(time, np.nan)
+             # try to find BISPAN and uncertainty
+             if (v := find_column(data, ['bispan'])) is not False:
+                 _s.bispan = v
+                 _s.bispan_err = np.full_like(time, np.nan)
+                 if (sv := find_column(data, ['sbispan'])) is not False:
+                     _s.bispan_err = sv
+             else:
+                 _s.bispan = np.full_like(time, np.nan)
+                 _s.bispan_err = np.full_like(time, np.nan)
+ 
+             _quantities.append('bispan')
+             _quantities.append('bispan_err')
+ 
+             # try to find BERV
+             if (v := find_column(data, ['berv', 'HIERARCH ESO QC BERV'])) is not False:
+                 _s.berv = v
+             else:
+                 _s.berv = np.full_like(time, np.nan)
+             _quantities.append('berv')
  
              # other quantities, but all NaNs
-             for q in ['bispan', 'caindex', 'ccf_asym', 'contrast', 'haindex', 'naindex', 'sindex']:
+             for q in ['caindex', 'ccf_asym', 'contrast', 'haindex', 'naindex', 'sindex']:
                  setattr(_s, q, np.full_like(time, np.nan))
                  setattr(_s, q + '_err', np.full_like(time, np.nan))
                  _quantities.append(q)
                  _quantities.append(q + '_err')
-             for q in ['berv', 'texp']:
+             for q in ['texp', ]:
                  setattr(_s, q, np.full_like(time, np.nan))
                  _quantities.append(q)
              for q in ['ccf_mask', 'date_night', 'prog_id', 'raw_file', 'pub_reference']:
                  setattr(_s, q, np.full(time.size, ''))
                  _quantities.append(q)
-             for q in ['drs_qc']:
+             for q in ['drs_qc', ]:
                  setattr(_s, q, np.full(time.size, True))
                  _quantities.append(q)
  
              _s.extra_fields = ExtraFields()
-             for field in data.dtype.names:
-                 if field not in _quantities:
-                     setattr(_s.extra_fields, field, data[field])
+             for name in data.dtype.names:
+                 if name not in _quantities:
+                     name_ = name.replace(' ', '_')
+                     setattr(_s.extra_fields, name_, data[name])
              # _quantities.append(field)
  
              #! end hack
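The new column handling above leans on the walrus operator so a single expression both fetches and tests an optional column. `find_column` is an arvi helper defined elsewhere in the package; the sketch below uses a simplified stand-in that returns the column or `False`:

```python
# Sketch of the (v := find_column(...)) is not False pattern.
import numpy as np

def find_column(data, names):          # simplified stand-in, not arvi's helper
    for name in names:
        if name in data.dtype.names:
            return data[name]
    return False

data = np.zeros(3, dtype=[('rjd', float), ('berv', float)])
if (v := find_column(data, ['berv', 'HIERARCH ESO QC BERV'])) is not False:
    berv = v                            # column found
else:
    berv = np.full(data.size, np.nan)   # fall back to NaNs
```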
@@ -628,12 +646,12 @@
              _s.mask = np.ones_like(time, dtype=bool)
              _s.obs = np.full_like(time, i + 1)
  
-             _s.instruments = [instrument]
+             _s.instruments = [str(instrument)]
              _s._quantities = np.array(_quantities)
              setattr(s, instrument, _s)
  
          s._child = False
-         s.instruments = list(instruments)
+         s.instruments = list(map(str, instruments))
          s._build_arrays()
  
          if kwargs.get('do_adjust_means', False):
@@ -726,15 +744,17 @@ class RV:
          return s
  
      @classmethod
-     @lru_cache(maxsize=10)
+     @lru_cache(maxsize=60)
      def from_KOBE_file(cls, star, **kwargs):
          assert 'KOBE' in star, f'{star} is not a KOBE star?'
          import requests
          from requests.auth import HTTPBasicAuth
          from io import BytesIO
          import tarfile
+         from time import time as pytime
          from astropy.io import fits
          from .config import config
+         from .utils import get_data_path
  
          try:
              config.kobe_password
@@ -743,23 +763,11 @@
              return
  
          tar = None
+         local_targz_file = os.path.join(get_data_path(), 'KOBE_fitsfiles.tar.gz')
          fits_file = f'{star}_RVs.fits'
-         resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
-                             auth=HTTPBasicAuth('kobeteam', config.kobe_password))
- 
-         if resp.status_code != 200:
-             # something went wrong, try to extract the file by downloading the
-             # full tar.gz archive
- 
-             logger.warning(f'could not find "{fits_file}" on server, trying to download full archive')
-             resp = requests.get('https://kobe.caha.es/internal/fitsfiles.tar.gz',
-                                 auth=HTTPBasicAuth('kobeteam', config.kobe_password))
- 
-             if resp.status_code != 200:
-                 logger.error(f'KOBE file not found for {star}')
-                 return
  
-         tar = tarfile.open(fileobj=BytesIO(resp.content))
+         if os.path.exists(local_targz_file) and os.path.getmtime(local_targz_file) > pytime() - 60*60*2:
+             tar = tarfile.open(local_targz_file)
  
              if fits_file not in tar.getnames():
                  logger.error(f'KOBE file not found for {star}')
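Between this hunk and the next, the new control flow is: reuse the locally saved `KOBE_fitsfiles.tar.gz` when its modification time is within the last two hours (`60*60*2` seconds), and only fall back to the server download otherwise. A small sketch of just that freshness test:

```python
# Sketch of the two-hour cache freshness check used above.
import os
from time import time

def is_fresh(path, max_age=2 * 60 * 60):
    """True if `path` exists and was modified within `max_age` seconds."""
    return os.path.exists(path) and os.path.getmtime(path) > time() - max_age

archive = 'KOBE_fitsfiles.tar.gz'
source = archive if is_fresh(archive) else 'server'
print(f'reading from {source}')
```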
@@ -768,8 +776,36 @@
              hdul = fits.open(tar.extractfile(fits_file))
  
          else:
-             # found the file on the server, read it directly
-             hdul = fits.open(BytesIO(resp.content))
+             resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
+                                 auth=HTTPBasicAuth('kobeteam', config.kobe_password))
+ 
+             if resp.status_code != 200:
+                 # something went wrong, try to extract the file by downloading the
+                 # full tar.gz archive
+ 
+                 logger.warning(f'could not find "{fits_file}" on server, trying to download full archive')
+                 resp = requests.get('https://kobe.caha.es/internal/fitsfiles.tar.gz',
+                                     auth=HTTPBasicAuth('kobeteam', config.kobe_password))
+ 
+                 if resp.status_code != 200:
+                     logger.error(f'KOBE file not found for {star}')
+                     return
+ 
+                 # save tar.gz file for later
+                 with open(local_targz_file, 'wb') as tg:
+                     tg.write(resp.content)
+ 
+                 tar = tarfile.open(fileobj=BytesIO(resp.content))
+ 
+                 if fits_file not in tar.getnames():
+                     logger.error(f'KOBE file not found for {star}')
+                     return
+ 
+                 hdul = fits.open(tar.extractfile(fits_file))
+ 
+             else:
+                 # found the file on the server, read it directly
+                 hdul = fits.open(BytesIO(resp.content))
  
          s = cls(star, _child=True)
  
@@ -780,26 +816,47 @@
          s.vrad_preNZP = hdul[1].data['RVd']
          s.vrad_preNZP_err = hdul[1].data['eRVd']
  
+         s.fwhm = hdul[1].data['FWHM']
+         s.fwhm_err = hdul[1].data['eFWHM']
+ 
+         s.crx = hdul[1].data['CRX']
+         s.crx_err = hdul[1].data['eCRX']
+         s.dlw = hdul[1].data['DLW']
+         s.dlw_err = hdul[1].data['eDLW']
+         s.contrast = hdul[1].data['CONTRAST']
+         s.contrast_err = hdul[1].data['eCONTRAST']
+         s.bispan = hdul[1].data['BIS']
+         s.bispan_err = hdul[1].data['eBIS']
+ 
+ 
          s.drift = hdul[1].data['drift']
          s.drift_err = hdul[1].data['e_drift']
  
          s.nzp = hdul[1].data['NZP']
          s.nzp_err = hdul[1].data['eNZP']
  
+         s.texp = hdul[1].data['ExpTime']
          s.berv = hdul[1].data['BERV']
+         s.units = 'km/s'
  
+         s.obs = np.ones_like(s.time, dtype=int)
          s.mask = np.full_like(s.time, True, dtype=bool)
          s.instruments = ['CARMENES']
+         s._quantities = np.array(['berv', ])
  
          # so meta!
          setattr(s, 'CARMENES', s)
  
          s._kobe_result = hdul[1].data
  
+         s.mask = s._kobe_result['rvflag']
+         s._propagate_mask_changes()
+ 
          if tar is not None:
              tar.close()
          hdul.close()
  
+         s._child = False
          return s
  
  
@@ -1190,6 +1247,13 @@
          if (self.time < bjd).any():
              ind = np.where(self.time < bjd)[0]
              self.remove_point(ind)
+ 
+     def remove_between_bjds(self, bjd1, bjd2):
+         """ Remove observations between two BJDs """
+         to_remove = (self.time > bjd1) & (self.time < bjd2)
+         if to_remove.any():
+             ind = np.where(to_remove)[0]
+             self.remove_point(ind)
  
      def choose_n_points(self, n, seed=None, instrument=None):
          """ Randomly choose `n` observations and mask out the remaining ones
@@ -1700,8 +1764,21 @@
              s = getattr(self, inst)
              s.vrad *= factor
              s.svrad *= factor
-             s.fwhm *= factor
-             s.fwhm_err *= factor
+             try:
+                 s.fwhm *= factor
+                 s.fwhm_err *= factor
+             except AttributeError:
+                 pass
+ 
+             for q in (
+                 'bispan',
+                 'nzp', 'vrad_preNZP',
+             ):
+                 try:
+                     setattr(s, q, getattr(s, q) * factor)
+                     setattr(s, f'{q}_err', getattr(s, f'{q}_err') * factor)
+                 except AttributeError:
+                     pass
  
          self._build_arrays()
          self.units = new_units
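The loop above scales optional quantities defensively: `getattr`/`setattr` wrapped in an `AttributeError` fallback, so objects lacking e.g. `nzp` (a KOBE-only field) pass through untouched. A minimal sketch of the pattern on a toy object:

```python
# Sketch: scale a quantity and its uncertainty only when both exist.
class Series:
    bispan, bispan_err = 0.05, 0.01     # present
    # 'nzp' intentionally missing

s, factor = Series(), 1000.0
for q in ('bispan', 'nzp'):
    try:
        setattr(s, q, getattr(s, q) * factor)
        setattr(s, f'{q}_err', getattr(s, f'{q}_err') * factor)
    except AttributeError:
        pass                            # quantity not present; skip it
print(s.bispan, s.bispan_err)           # 50.0 10.0
```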
arvi/translations.py CHANGED
@@ -6,6 +6,11 @@ STARS = {
      "Barnard's": 'GJ699',
      'Ross128': 'Ross 128',
      'Ross 128': 'Ross 128',
+     #
+     'Teegarden': 'GAT1370',
+     "Teegarden's Star": 'GAT1370',
+     #
+     "Smethells 20": 'TIC464410508',
  }
  
  
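For context, entries in the `STARS` table map user-facing names to identifiers that the archives resolve. A tiny sketch of how such a lookup is typically applied (the `translate` helper below is hypothetical, not arvi's actual function):

```python
# Illustrative lookup against a translation table like STARS.
STARS = {
    'Teegarden': 'GAT1370',
    "Smethells 20": 'TIC464410508',
}

def translate(name):                # hypothetical helper
    return STARS.get(name, name)    # unknown names pass through unchanged

print(translate('Teegarden'))       # GAT1370
print(translate('HD10180'))         # HD10180
```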
arvi-0.1.24.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: arvi
- Version: 0.1.23
+ Version: 0.1.24
  Summary: The Automated RV Inspector
  Author-email: João Faria <joao.faria@unige.ch>
  License: MIT
arvi-0.1.24.dist-info/RECORD CHANGED
@@ -4,24 +4,24 @@ arvi/ariadne_wrapper.py,sha256=YvilopJa9T4NwPcj3Nah_U8smSeSAU5-HYZMb_GJ-BQ,2232
  arvi/berv.py,sha256=eKnpuPC1w45UrUEyFRbs9F9j3bXz3kxYzNXbnRgvFQM,17596
  arvi/binning.py,sha256=jbemJ-bM3aqoOsqMo_OhWt_co-JAQ0nhdG_GpTsrRsw,15403
  arvi/config.py,sha256=W-v8NNhRd_PROu0wCMilXmOhYcju4xbUalugd5u7SRU,1881
- arvi/dace_wrapper.py,sha256=U3N5iO_nRHU3FlxrjsMxFzl9O32UKpKuAjhkAM1dQfs,18432
+ arvi/dace_wrapper.py,sha256=CUHKN5m7KULM1sES5x0GXx8HQgJE7XdwwWv6_zAYhb4,21324
  arvi/extra_data.py,sha256=WEEaYeLh52Zdv0uyHO72Ys5MWS3naTAP4wJV2BJ1mbk,2551
  arvi/gaia_wrapper.py,sha256=icm3LJjG9pjP47_bM30NFyocUQO3X3SHS5yQ-Dwcr5w,4653
  arvi/headers.py,sha256=uvdJebw1M5YkGjE3vJJwYBOnLikib75uuZE9FXB5JJM,1673
  arvi/instrument_specific.py,sha256=-pbm2Vk3iK_1K7nDa1avlJOKHBcXllwILI4lQn-Ze-A,7761
- arvi/kima_wrapper.py,sha256=y_Z0Hl2ECbs2-B6ZR9retrjId7-QxcRylG7b5aDsiFk,2306
+ arvi/kima_wrapper.py,sha256=BvNTVqzM4lMNhLCyBFVh3T84hHfGKAFpgiYiOi4lh0g,2731
  arvi/lbl_wrapper.py,sha256=_ViGVkpakvuBR_xhu9XJRV5EKHpj5Go6jBZGJZMIS2Y,11850
  arvi/nasaexo_wrapper.py,sha256=mWt7eHgSZe4MBKCmUvMPTyUPGuiwGTqKugNBvmjOg9s,7306
  arvi/plots.py,sha256=WUm-sqN0aZTNXvE1kYpvmHTW9QPWqSCpKhNjwaqxjEk,29628
- arvi/programs.py,sha256=C0Fbldjf-QEZYYJp5wBKP3h7zraD0O2mJC7Su967STg,4607
+ arvi/programs.py,sha256=BW7xBNKLei7NVLLW3_lsVskwzkaIoNRiHK2jn9Tn2ZM,8879
  arvi/reports.py,sha256=ayPdZ4HZO9iCDdnADQ18gQPJh79o-1UYG7TYkvm9Lrc,4051
  arvi/setup_logger.py,sha256=pBzaRTn0hntozjbaRVx0JIbWGuENkvYUApa6uB-FsRo,279
  arvi/simbad_wrapper.py,sha256=iAAwEMcr1Hgu6lnDctmaCC1TLPCB8yAfHG0wxh9K9C8,5791
- arvi/spectra.py,sha256=pTAWSW4vk96DWRQ-6l5mNJHUhiAyaPR-QDjZdOT6Ak0,7489
+ arvi/spectra.py,sha256=ebF1ocodTastLx0CyqLSpE8EZNDXBF8riyfxMr3L6H0,7491
  arvi/stats.py,sha256=ilzzGL9ew-SyVa9eEdrYCpD3DliOAwhoNUg9LIlHjzU,2583
  arvi/stellar.py,sha256=veuL_y9kJvvApU_jqYQqP3EkcRnQffTc8Us6iT5UrFI,3790
- arvi/timeseries.py,sha256=qkBE6-NPBTXZyGLVHXRVUt5atrdcH3fEuaZ4SaHF7Bk,74443
- arvi/translations.py,sha256=FBF_2OrpMQBG4GtV4_UOspiaxetiGCY7TQFcwZMMVuQ,838
+ arvi/timeseries.py,sha256=NdmSSYeDdS-cYBXPt8NCeKS1jLdv8LP6Zh561KRGfZc,77328
+ arvi/translations.py,sha256=PUSrn4zvYO2MqGzUxlFGwev_tBkgJaJrIYs6NKHzbWo,951
  arvi/utils.py,sha256=LImV8iPjG8ZKjPCT9lp25_pDb-51ZZk42Hc8bzZt7M0,6568
  arvi/data/info.svg,sha256=0IMI6W-eFoTD8acnury79WJJakpBwLa4qKS4JWpsXiI,489
  arvi/data/obs_affected_ADC_issues.dat,sha256=tn93uOL0eCTYhireqp1wG-_c3CbxPA7C-Rf-pejVY8M,10853
@@ -29,8 +29,8 @@ arvi/data/obs_affected_blue_cryostat_issues.dat,sha256=z4AK17xfz8tGTDv1FjRvQFnio
  arvi/data/extra/HD86226_PFS1.rdb,sha256=vfAozbrKHM_j8dYkCBJsuHyD01KEM1asghe2KInwVao,3475
  arvi/data/extra/HD86226_PFS2.rdb,sha256=F2P7dB6gVyzCglUjNheB0hIHVClC5RmARrGwbrY1cfo,4114
  arvi/data/extra/metadata.json,sha256=C69hIw6CohyES6BI9vDWjxwSz7N4VOYX0PCgjXtYFmU,178
- arvi-0.1.23.dist-info/LICENSE,sha256=6JfQgl7SpM55t0EHMFNMnNh-AdkpGW25MwMiTnhdWQg,1068
- arvi-0.1.23.dist-info/METADATA,sha256=hI6gLlWx_4i0WSnXTaV6XGowzOOkWDfPW13t8Jx92Os,1852
- arvi-0.1.23.dist-info/WHEEL,sha256=a7TGlA-5DaHMRrarXjVbQagU3Man_dCnGIWMJr5kRWo,91
- arvi-0.1.23.dist-info/top_level.txt,sha256=4EeiKDVLD45ztuflTGfQ3TU8GVjJg5Y95xS5XjI-utU,5
- arvi-0.1.23.dist-info/RECORD,,
+ arvi-0.1.24.dist-info/LICENSE,sha256=6JfQgl7SpM55t0EHMFNMnNh-AdkpGW25MwMiTnhdWQg,1068
+ arvi-0.1.24.dist-info/METADATA,sha256=ijGk4XpvyZZ1IxV0_6iFuJkjZI4wtWW6A4kuCZE-gSA,1852
+ arvi-0.1.24.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+ arvi-0.1.24.dist-info/top_level.txt,sha256=4EeiKDVLD45ztuflTGfQ3TU8GVjJg5Y95xS5XjI-utU,5
+ arvi-0.1.24.dist-info/RECORD,,
arvi-0.1.24.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.4.0)
+ Generator: setuptools (75.5.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
  