arvi 0.1.22__py3-none-any.whl → 0.1.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of arvi might be problematic.

arvi/dace_wrapper.py CHANGED
@@ -1,7 +1,9 @@
 import os
+import sys
 import tarfile
 import collections
 from functools import lru_cache
+from itertools import islice
 import numpy as np
 from dace_query import DaceClass
 from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
@@ -22,22 +24,23 @@ def load_spectroscopy() -> SpectroscopyClass:
     return default_Spectroscopy

 @lru_cache()
-def get_dace_id(star):
+def get_dace_id(star, verbose=True):
     filters = {"obj_id_catname": {"equal": [star]}}
     try:
-        with stdout_disabled(), all_logging_disabled():
+        with all_logging_disabled():
             r = load_spectroscopy().query_database(filters=filters, limit=1)
         return r['obj_id_daceid'][0]
     except KeyError:
-        logger.error(f"Could not find DACE ID for {star}")
+        if verbose:
+            logger.error(f"Could not find DACE ID for {star}")
         raise ValueError from None

 def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
     arrays = []
-    instruments = list(result.keys())
+    instruments = [str(i) for i in result.keys()]

     for inst in instruments:
-        pipelines = list(result[inst].keys())
+        pipelines = [str(p) for p in result[inst].keys()]

         # select ESPRESSO mode, which is defined at the level of the pipeline
         if 'ESPRESSO' in inst:
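One caveat worth noting in the hunk above: `get_dace_id` keeps its `@lru_cache()` decorator while gaining a `verbose` argument, and `lru_cache` keys on the exact arguments passed, so calls that differ only in `verbose` (or that pass it explicitly rather than relying on the default) hit different cache entries. A quick, self-contained illustration, where `lookup` is a hypothetical stand-in for `get_dace_id`:

    from functools import lru_cache

    calls = []

    @lru_cache()
    def lookup(star, verbose=True):
        calls.append((star, verbose))  # record the calls that actually execute
        return f'id-{star}'

    lookup('HD10180')                  # executes
    lookup('HD10180')                  # cache hit, no execution
    lookup('HD10180', verbose=False)   # different cache key, executes again
    print(calls)                       # [('HD10180', True), ('HD10180', False)]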
@@ -55,20 +58,27 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
                 pipelines = [pipelines[i]]

         if latest_pipeline:
-            if verbose and len(pipelines) > 1:
+            npipe = len(pipelines)
+            if 'NIRPS' in inst and any(['LBL' in p for p in pipelines]):
+                # TODO: correctly load both CCF and LBL
+                pipelines = [pipelines[1]]
+            if 'HARPS' in inst and npipe > 1 and pipelines[1] == pipelines[0] + '-EGGS':
+                pipelines = pipelines[:2]
+            else:
+                pipelines = [pipelines[0]]
+
+            if verbose and npipe > 1:
                 logger.info(f'selecting latest pipeline ({pipelines[0]}) for {inst}')

-            pipelines = [pipelines[0]]

         for pipe in pipelines:
-            modes = list(result[inst][pipe].keys())
+            modes = [m for m in result[inst][pipe].keys()]

-
             # select NIRPS mode, which is defined at the level of the mode
-            if 'NIRPS' in inst:
+            if 'NIRPS' in inst and len(modes) > 1:
                 if NIRPS_mode in modes:
                     if verbose:
-                        logger.info(f'selecting mode {NIRPS_mode} for NIRPS')
+                        logger.info(f'selecting mode {NIRPS_mode} for NIRPS - {pipe}')
                     i = modes.index(NIRPS_mode)
                     modes = [modes[i]]
                 else:
@@ -77,15 +87,25 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H

             # HARPS15 observations should not be separated by 'mode' if some are
             # done together with NIRPS
-            if 'HARPS15' in inst and 'HARPS+NIRPS' in modes:
-                m0 = modes[0]
-                data = {
-                    k: np.concatenate([result[inst][pipe][m][k] for m in modes])
-                    for k in result[inst][pipe][m0].keys()
-                }
-                arrays.append(
-                    ((inst, pipe, m0), data)
-                )
+            if 'HARPS15' in inst:
+                if 'HARPS+NIRPS' in modes:
+                    m0 = modes[0]
+                    data = {
+                        k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                        for k in result[inst][pipe][m0].keys()
+                    }
+                    arrays.append(
+                        ((str(inst), str(pipe), str(m0)), data)
+                    )
+                if 'EGGS+NIRPS' in modes:
+                    m0 = modes[0]
+                    data = {
+                        k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                        for k in result[inst][pipe][m0].keys()
+                    }
+                    arrays.append(
+                        ((str(inst + '_EGGS'), str(pipe), str(m0)), data)
+                    )
                 continue

             for mode in modes:
@@ -94,39 +114,61 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
                     raise ValueError

                 arrays.append(
-                    ((inst, pipe, mode), result[inst][pipe][mode])
+                    ((str(inst), str(pipe), str(mode)), result[inst][pipe][mode])
                 )

     return arrays

-def get_observations_from_instrument(star, instrument, main_id=None):
+def get_observations_from_instrument(star, instrument, main_id=None, verbose=True):
     """ Query DACE for all observations of a given star and instrument

     Args:
-        star (str): name of the star
-        instrument (str): instrument name
-        main_id (str, optional): Simbad main id of target to query DACE id. Defaults to None.
+        star (str):
+            name of the star
+        instrument (str):
+            instrument name
+        main_id (str, optional):
+            Simbad main id of target to query DACE id. Defaults to None.
+        verbose (bool, optional):
+            whether to print warnings. Defaults to True.

     Raises:
-        ValueError: If query for DACE id fails
+        ValueError:
+            If query for DACE id fails

     Returns:
-        dict: dictionary with data from DACE
+        dict:
+            dictionary with data from DACE
     """
+    Spectroscopy = load_spectroscopy()
+    found_dace_id = False
     try:
-        dace_id = get_dace_id(star)
+        dace_id = get_dace_id(star, verbose=verbose)
+        found_dace_id = True
     except ValueError as e:
         if main_id is not None:
-            dace_id = get_dace_id(main_id)
-        else:
-            raise e
+            try:
+                dace_id = get_dace_id(main_id, verbose=verbose)
+                found_dace_id = True
+            except ValueError:
+                pass
+
+    if not found_dace_id:
+        try:
+            with all_logging_disabled():
+                result = Spectroscopy.get_timeseries(target=star,
+                                                     sorted_by_instrument=True,
+                                                     output_format='numpy')
+            return result
+        except TypeError:
+            msg = f'no {instrument} observations for {star}'
+            raise ValueError(msg) from None

-    Spectroscopy = load_spectroscopy()
     filters = {
         "ins_name": {"contains": [instrument]},
         "obj_id_daceid": {"contains": [dace_id]}
     }
-    with stdout_disabled(), all_logging_disabled():
+    with all_logging_disabled():
         result = Spectroscopy.query_database(filters=filters)

     if len(result) == 0:
@@ -215,7 +257,7 @@ def get_observations(star, instrument=None, main_id=None, verbose=True):
             raise ValueError(msg) from None
     else:
         try:
-            result = get_observations_from_instrument(star, instrument, main_id)
+            result = get_observations_from_instrument(star, instrument, main_id, verbose)
         except ValueError:
             msg = f'no {instrument} observations for {star}'
             raise ValueError(msg) from None
@@ -320,16 +362,29 @@ def check_existing(output_directory, files, type):

     return np.array(missing)

-def download(files, type, output_directory):
+def download(files, type, output_directory, output_filename=None, quiet=True, pbar=None):
     """ Download files from DACE """
     Spectroscopy = load_spectroscopy()
-    with stdout_disabled(), all_logging_disabled():
+    if isinstance(files, str):
+        files = [files]
+    if quiet:
+        with all_logging_disabled():
+            Spectroscopy.download_files(files, file_type=type.lower(),
+                                        output_directory=output_directory,
+                                        output_filename=output_filename)
+    else:
         Spectroscopy.download_files(files, file_type=type.lower(),
-                                    output_directory=output_directory)
+                                    output_directory=output_directory,
+                                    output_filename=output_filename)
+    if pbar is not None:
+        pbar.update()

-def extract_fits(output_directory):
+
+def extract_fits(output_directory, filename=None):
     """ Extract fits files from tar.gz file """
-    file = os.path.join(output_directory, 'spectroscopy_download.tar.gz')
+    if filename is None:
+        filename = 'spectroscopy_download.tar.gz'
+    file = os.path.join(output_directory, filename)
     with tarfile.open(file, "r") as tar:
         files = []
         for member in tar.getmembers():
@@ -387,7 +442,7 @@ def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_le


 def do_download_filetype(type, raw_files, output_directory, clobber=False,
-                         verbose=True, chunk_size=20):
+                         verbose=True, chunk_size=20, parallel_limit=30):
     """ Download CCFs / S1Ds / S2Ds from DACE """
     raw_files = np.atleast_1d(raw_files)

@@ -407,7 +462,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,

     # avoid an empty chunk
     if chunk_size > n:
-      chunk_size = n
+        chunk_size = n

     if verbose:
         if chunk_size < n:
@@ -419,11 +474,36 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
         msg = f"downloading {n} {type}s into '{output_directory}'..."
         logger.info(msg)

-    iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
-    for files in tqdm(iterator, total=len(iterator)):
-        download(files, type, output_directory)
-    extract_fits(output_directory)
+    if n < parallel_limit:
+        iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
+        for files in tqdm(iterator, total=len(iterator)):
+            download(files, type, output_directory, quiet=False)
+        extract_fits(output_directory)

+    else:
+        def chunker(it, size):
+            iterator = iter(it)
+            while chunk := list(islice(iterator, size)):
+                yield chunk
+
+        chunks = list(chunker(raw_files, chunk_size))
+        pbar = tqdm(total=len(chunks))
+        it1 = [
+            (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', True, pbar)
+            for i, files in enumerate(chunks)
+        ]
+        it2 = [(output_directory, f'spectroscopy_download{i+1}.tar.gz') for i in range(len(chunks))]
+
+        # import multiprocessing as mp
+        # with mp.Pool(4) as pool:
+        from multiprocessing.pool import ThreadPool
+
+        with ThreadPool(4) as pool:
+            pool.starmap(download, it1)
+            pool.starmap(extract_fits, it2)
+        print('')
+
+        sys.stdout.flush()
     logger.info('extracted .fits files')
arvi/kima_wrapper.py CHANGED
@@ -25,10 +25,11 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
     if not kima_available:
         raise ImportError('kima not available, please install with `pip install kima`')

-    time = [getattr(self, inst).mtime for inst in self.instruments]
-    vrad = [getattr(self, inst).mvrad for inst in self.instruments]
-    err = [getattr(self, inst).msvrad for inst in self.instruments]
-    data = RVData(time, vrad, err, instruments=self.instruments)
+    instruments = [inst for inst in self.instruments if self.NN[inst] > 1]
+    time = [getattr(self, inst).mtime for inst in instruments]
+    vrad = [getattr(self, inst).mvrad for inst in instruments]
+    err = [getattr(self, inst).msvrad for inst in instruments]
+    data = RVData(time, vrad, err, instruments=instruments)

     fix = kwargs.pop('fix', False)
     npmax = kwargs.pop('npmax', 1)
@@ -41,6 +42,13 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
     model.enforce_stability = kwargs.pop('enforce_stability', False)
     model.star_mass = kwargs.pop('star_mass', 1.0)

+    if kwargs.pop('gaussian_priors_individual_offsets', False):
+        from kima.pykima.utils import get_gaussian_priors_individual_offsets
+        model.individual_offset_prior = get_gaussian_priors_individual_offsets(data, use_std=True)
+
+    if kwargs.pop('kuma', False):
+        model.conditional.eprior = distributions.Kumaraswamy(0.867, 3.03)
+
     for k, v in priors.items():
         try:
             if 'conditional' in k:
@@ -55,9 +63,10 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
             logger.warning(msg)
             return

+    if run_directory is None:
+        run_directory = os.getcwd()
+
     if run:
-        if run_directory is None:
-            run_directory = os.getcwd()

         # TODO: use signature of kima.run to pop the correct kwargs
         # model_name = model.__class__.__name__
@@ -67,8 +76,9 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
         with chdir(run_directory):
             kima.run(model, **kwargs)

-    if load:
+    if load:
+        with chdir(run_directory):
             res = kima.load_results(model)
-        return data, model, res
+        return data, model, res

     return data, model
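A note on the `kwargs.pop` pattern that the new options follow: `run_kima` consumes the options it understands and forwards whatever remains to `kima.run`, so flags like `kuma` can be added without changing the signature. A toy illustration with made-up option names:

    def run_model(**kwargs):
        # consume wrapper-level options first...
        fix = kwargs.pop('fix', False)
        npmax = kwargs.pop('npmax', 1)
        # ...then forward everything that is left to the underlying runner
        print(f'fix={fix}, npmax={npmax}, forwarded={kwargs}')

    run_model(fix=True, steps=5000)
    # fix=True, npmax=1, forwarded={'steps': 5000}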
arvi/programs.py CHANGED
@@ -1,8 +1,9 @@
 import os
 import multiprocessing
-from functools import partial
+from functools import partial, lru_cache
 from itertools import chain
 from collections import namedtuple
+from multiprocessing.pool import ThreadPool
 from tqdm import tqdm
 # import numpy as np

@@ -14,9 +15,9 @@ __all__ = ['ESPRESSO_GTO']
 path = os.path.join(os.path.dirname(__file__), 'data')


-def get_star(star, instrument=None):
+def get_star(star, instrument=None, verbose=False, **kwargs):
     return RV(star, instrument=instrument,
-              _raise_on_error=False, verbose=False, load_extra_data=False)
+              _raise_on_error=False, verbose=verbose, **kwargs)


 class LazyRV:
@@ -36,21 +37,22 @@ class LazyRV:
     def __repr__(self):
         return f"RV({self.N} stars)"

-    def _get(self):
+    def _get(self, **kwargs):
         if self.N > self._parallel_limit:
             # logger.info('Querying DACE...')
-            _get_star = partial(get_star, instrument=self.instrument)
-            with multiprocessing.Pool() as pool:
+            _get_star = partial(get_star, instrument=self.instrument, **kwargs)
+            with ThreadPool(8) as pool:
                 result = list(tqdm(pool.imap(_get_star, self.stars),
-                                   total=self.N, unit='star', desc='Querying DACE'))
-            # result = pool.map(get_star, self.stars)
+                                   total=self.N, unit='star',
+                                   desc='Querying DACE (can take a while)'))
+            print('')
         else:
             result = []
-            logger.info('Querying DACE...')
+            logger.info('querying DACE...')
             pbar = tqdm(self.stars, total=self.N, unit='star')
             for star in pbar:
                 pbar.set_description(star)
-                result.append(get_star(star, self.instrument))
+                result.append(get_star(star, self.instrument, **kwargs))

         return result

@@ -73,14 +75,25 @@ class LazyRV:
     #         result.append(None)
     #     return result

+    def reload(self, **kwargs):
+        self._saved = self._get(**kwargs)
+        return self._saved
+
     def __iter__(self):
         return self._get()

-    def __call__(self):
+    def __call__(self, **kwargs):
         if not self._saved:
-            self._saved = self._get()
+            self._saved = self._get(**kwargs)
         return self._saved

+    @lru_cache(maxsize=10)
+    def __getitem__(self, index):
+        star = self.stars[index]
+        if self._saved is not None:
+            return self._saved[index]
+        return get_star(star, self.instrument, verbose=True)
+

 # sorted by spectral type
 WG1_stars = [
@@ -157,3 +170,78 @@ ESPRESSO_GTO = ESPRESSO_GTO_nt(
 ESPRESSO_GTO.WG1.__doc__ = 'RV observations for all WG1 targets. Call ESPRESSO_GTO.WG1() to load them.'
 ESPRESSO_GTO.WG2.__doc__ = 'RV observations for all WG2 targets. Call ESPRESSO_GTO.WG2() to load them.'
 ESPRESSO_GTO.WG3.__doc__ = 'RV observations for all WG3 targets. Call ESPRESSO_GTO.WG3() to load them.'
+
+
+import requests
+
+def _get_NIRPS_GTO_stars(WP=1):
+    from io import StringIO
+    import numpy as np
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/113/NIRPS/P113_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P113 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/114/NIRPS/P114_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P114 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    url = 'https://www.eso.org/sci/observing/teles-alloc/gto/115/NIRPS/P115_NIRPS-consortium.csv'
+    file = StringIO(requests.get(url).content.decode())
+    stars_P115 = np.loadtxt(file, delimiter=',', usecols=(0,), dtype=str, skiprows=3)
+
+    def _get_stars_period(stars, WP):
+        stars = np.delete(stars, stars=='')
+
+        stars = np.char.replace(stars, '_', ' ')
+        stars = np.char.replace(stars, "Proxima Centauri", "Proxima")
+        stars = np.char.replace(stars, "Barnard's star", "GJ699")
+        stars = np.char.replace(stars, "Teegarden's Star", 'Teegarden')
+
+        if WP in (1, 'WP1'):
+            wp1_indices = slice(np.where(stars == 'WP1')[0][0] + 1, np.where(stars == 'WP2')[0][0])
+            return stars[wp1_indices]
+        elif WP == 2:
+            wp2_indices = slice(np.where(stars == 'WP2')[0][0] + 1, np.where(stars == 'WP3')[0][0])
+            return stars[wp2_indices]
+        elif WP == 3:
+            wp3_indices = slice(np.where(stars == 'WP3')[0][0] + 1, np.where(stars == 'Other Science 1')[0][0])
+            return stars[wp3_indices]
+        elif WP == 'OS1':
+            os1_indices = slice(np.where(stars == 'Other Science 1')[0][0] + 1, np.where(stars == 'Other Science 2')[0][0])
+            return stars[os1_indices]
+        elif WP == 'OS2':
+            os2_indices = slice(np.where(stars == 'Other Science 2')[0][0] + 1, None)
+            stars = np.char.replace(stars, 'MMU', 'No')
+            stars = np.char.replace(stars, 'Cl*', '')
+            return stars[os2_indices]
+
+    stars_P113 = _get_stars_period(stars_P113, WP)
+    stars_P114 = _get_stars_period(stars_P114, WP)
+    stars_P115 = _get_stars_period(stars_P115, WP)
+    return np.union1d(np.union1d(stars_P113, stars_P114), stars_P115)
+
+try:
+    NIRPS_GTO_WP1_stars = _get_NIRPS_GTO_stars(WP=1)
+    NIRPS_GTO_WP2_stars = _get_NIRPS_GTO_stars(WP=2)
+    NIRPS_GTO_WP3_stars = _get_NIRPS_GTO_stars(WP=3)
+    NIRPS_GTO_OS1_stars = _get_NIRPS_GTO_stars(WP='OS1')
+    NIRPS_GTO_OS2_stars = _get_NIRPS_GTO_stars(WP='OS2')
+except requests.ConnectionError:
+    from .setup_logger import logger
+    logger.error('Cannot download NIRPS GTO protected target list')
+else:
+    NIRPS_GTO_nt = namedtuple('NIRPS_GTO', ['WP1', 'WP2', 'WP3', 'OS1', 'OS2'])
+    NIRPS_GTO_nt.__doc__ = 'RV observations for all NIRPS GTO targets. See NIRPS_GTO.WP1, NIRPS_GTO.WP2, ...'
+    NIRPS_GTO = NIRPS_GTO_nt(
+        WP1=LazyRV(NIRPS_GTO_WP1_stars, instrument='NIRPS'),
+        WP2=LazyRV(NIRPS_GTO_WP2_stars, instrument='NIRPS'),
+        WP3=LazyRV(NIRPS_GTO_WP3_stars, instrument='NIRPS'),
+        OS1=LazyRV(NIRPS_GTO_OS1_stars, instrument='NIRPS'),
+        OS2=LazyRV(NIRPS_GTO_OS2_stars, instrument='NIRPS'),
+    )
+    NIRPS_GTO.WP1.__doc__ = 'RV observations for all WP1 targets. Call NIRPS_GTO.WP1() to load them.'
+    NIRPS_GTO.WP2.__doc__ = 'RV observations for all WP2 targets. Call NIRPS_GTO.WP2() to load them.'
+    NIRPS_GTO.WP3.__doc__ = 'RV observations for all WP3 targets. Call NIRPS_GTO.WP3() to load them.'
+    NIRPS_GTO.OS1.__doc__ = 'RV observations for all OS1 targets. Call NIRPS_GTO.OS1() to load them.'
+    NIRPS_GTO.OS2.__doc__ = 'RV observations for all OS2 targets. Call NIRPS_GTO.OS2() to load them.'
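The `LazyRV` additions complete a lazy-container pattern: nothing is queried at import time (important now that the module also builds the NIRPS GTO lists), calling the object loads and caches the whole batch, `reload()` forces a refresh, and the new `__getitem__` fetches a single star without triggering the batch. The batch path also switches from `multiprocessing.Pool` to `ThreadPool(8)`, a reasonable fit for network-bound DACE queries. A minimal sketch of the pattern, with a stand-in `loader` in place of the real query:

    class LazyList:
        """ Defer an expensive per-key load until the object is actually used. """
        def __init__(self, keys, loader):
            self.keys = keys
            self.loader = loader
            self._saved = None

        def __call__(self, **kwargs):
            if not self._saved:  # load and cache the full batch once
                self._saved = [self.loader(k, **kwargs) for k in self.keys]
            return self._saved

        def __getitem__(self, index):
            if self._saved is not None:  # reuse the batch if already loaded
                return self._saved[index]
            return self.loader(self.keys[index])  # otherwise load just one

    stars = LazyList(['GJ699', 'Proxima'], loader=lambda s, **kw: f'RV({s})')
    print(stars[0])  # loads a single item, no batch query
    print(stars())   # loads and caches the full list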
arvi/spectra.py CHANGED
@@ -72,7 +72,7 @@ def fit_gaussian_to_line(wave, flux, center_wavelength, around=0.15 * u.angstrom
     ]).T

     try:
-        popt, pcov = curve_fit(gaussian, w, f, p0=[-f.ptp(), center_wavelength.value, 0.1, f.max()],
+        popt, pcov = curve_fit(gaussian, w, f, p0=[-np.ptp(f), center_wavelength.value, 0.1, f.max()],
                                bounds=(lower, upper))
     except RuntimeError as e:
         logger.warning(f'fit_gaussian_to_line: {e}')
@@ -115,7 +115,7 @@ def detrend(w, f):
 def build_master(self, limit=None, plot=True):
     files = sorted(glob(f'{self.star}_downloads/*S1D_A.fits'))
     if self.verbose:
-        logger.info(f'Found {len(files)} S1D files')
+        logger.info(f'found {len(files)} S1D files')

     files = files[:limit]

@@ -168,8 +168,8 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
     ]

     if self.verbose:
-        logger.info(f'Found {len(lines)} lines in linelist')
-        logger.info('Measuring EWs...')
+        logger.info(f'found {len(lines)} lines in linelist')
+        logger.info('measuring EWs...')

     EW = []
     pbar = tqdm(linelist)
@@ -183,14 +183,14 @@ def determine_stellar_parameters(self, linelist: str, plot=True, **kwargs):
     EW = np.array(EW)[~np.isnan(EW)]

     if self.verbose:
-        logger.info('Determining stellar parameters (can take a few minutes)...')
+        logger.info('determining stellar parameters (can take a few minutes)...')

     callback = lambda p, r, A: print('current parameters:', p)
     result = Korg.Fit.ews_to_stellar_parameters(lines, EW, callback=callback)
     par, stat_err, sys_err = result

     if self.verbose:
-        logger.info(f'Best fit stellar parameters:')
+        logger.info(f'best-fit stellar parameters:')
         logger.info(f' Teff: {par[0]:.0f} ± {sys_err[0]:.0f} K')
         logger.info(f' logg: {par[1]:.2f} ± {sys_err[1]:.2f} dex')
         logger.info(f' m/H : {par[3]:.2f} ± {sys_err[3]:.2f} dex')
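The `f.ptp()` → `np.ptp(f)` edit here (mirrored in `timeseries.py` below) tracks NumPy 2.0, which removed the `ndarray.ptp` method; the function form works on both old and new releases:

    import numpy as np

    f = np.array([1.0, 4.0, 2.5])
    print(np.ptp(f))  # 3.0, the peak-to-peak range, on any NumPy version
    # f.ptp() raises AttributeError on NumPy >= 2.0, hence the change above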
arvi/timeseries.py CHANGED
@@ -18,7 +18,7 @@ from .extra_data import get_extra_data
 from .stats import wmean, wrms
 from .binning import bin_ccf_mask, binRV
 from .HZ import getHZ_period
-from .utils import strtobool, there_is_internet, timer, chdir
+from .utils import sanitize_path, strtobool, there_is_internet, timer, chdir
 from .utils import lazy_import

 units = lazy_import('astropy.units')
@@ -193,7 +193,7 @@ class RV:
         time_stamp = datetime.now(timezone.utc) #.isoformat().split('.')[0]
         self._last_dace_query = time_stamp

-        _replacements = (('-', '_'), ('.', '_'), ('__', '_'))
+        _replacements = (('-', '_'), ('.', '_'), (' ', '_'), ('__', '_'))
         def do_replacements(s):
             for a, b in _replacements:
                 s = s.replace(a, b)
@@ -240,7 +240,8 @@ class RV:
             else:
                 path = None
             try:
-                self.__add__(get_extra_data(self.star, instrument=self.instrument, path=path),
+                self.__add__(get_extra_data(self.star, instrument=self.instrument,
+                                            path=path, verbose=self.verbose),
                              inplace=True)

             except FileNotFoundError:
@@ -263,7 +264,9 @@ class RV:
         if self.do_adjust_means:
             self.adjust_means()

-        self._download_directory = f'{self.star.replace(" ", "")}_downloads'
+        _star_no_space = self.star.replace(' ', '')
+        _directory = sanitize_path(_star_no_space)
+        self._download_directory = f'{_directory}_downloads'

     def __add__(self, other, inplace=False):
         # if not isinstance(other, self.__class__):
@@ -441,7 +444,7 @@ class RV:

             s.instruments = [inst]
             s.pipelines = [pipe]
-            s.modes = [mode]
+            s.modes = [str(mode)]

         return s

@@ -595,29 +598,47 @@ class RV:
             _quantities.append('rhk')
             _quantities.append('rhk_err')

-            _s.bispan = np.zeros_like(time)
-            _s.bispan_err = np.full_like(time, np.nan)
+            # try to find BISPAN and uncertainty
+            if (v := find_column(data, ['bispan'])) is not False:
+                _s.bispan = v
+                _s.bispan_err = np.full_like(time, np.nan)
+                if (sv := find_column(data, ['sbispan'])) is not False:
+                    _s.bispan_err = sv
+            else:
+                _s.bispan = np.full_like(time, np.nan)
+                _s.bispan_err = np.full_like(time, np.nan)
+
+            _quantities.append('bispan')
+            _quantities.append('bispan_err')
+
+            # try to find BERV
+            if (v := find_column(data, ['berv', 'HIERARCH ESO QC BERV'])) is not False:
+                _s.berv = v
+            else:
+                _s.berv = np.full_like(time, np.nan)
+            _quantities.append('berv')

             # other quantities, but all NaNs
-            for q in ['bispan', 'caindex', 'ccf_asym', 'contrast', 'haindex', 'naindex', 'sindex']:
+            for q in ['caindex', 'ccf_asym', 'contrast', 'haindex', 'naindex', 'sindex']:
                 setattr(_s, q, np.full_like(time, np.nan))
                 setattr(_s, q + '_err', np.full_like(time, np.nan))
                 _quantities.append(q)
                 _quantities.append(q + '_err')
-            for q in ['berv', 'texp']:
+            for q in ['texp', ]:
                 setattr(_s, q, np.full_like(time, np.nan))
                 _quantities.append(q)
             for q in ['ccf_mask', 'date_night', 'prog_id', 'raw_file', 'pub_reference']:
                 setattr(_s, q, np.full(time.size, ''))
                 _quantities.append(q)
-            for q in ['drs_qc']:
+            for q in ['drs_qc', ]:
                 setattr(_s, q, np.full(time.size, True))
                 _quantities.append(q)

             _s.extra_fields = ExtraFields()
-            for field in data.dtype.names:
-                if field not in _quantities:
-                    setattr(_s.extra_fields, field, data[field])
+            for name in data.dtype.names:
+                if name not in _quantities:
+                    name_ = name.replace(' ', '_')
+                    setattr(_s.extra_fields, name_, data[name])
             # _quantities.append(field)

             #! end hack
@@ -625,12 +646,12 @@ class RV:
             _s.mask = np.ones_like(time, dtype=bool)
             _s.obs = np.full_like(time, i + 1)

-            _s.instruments = [instrument]
+            _s.instruments = [str(instrument)]
             _s._quantities = np.array(_quantities)
             setattr(s, instrument, _s)

         s._child = False
-        s.instruments = list(instruments)
+        s.instruments = list(map(str, instruments))
         s._build_arrays()

         if kwargs.get('do_adjust_means', False):
@@ -723,15 +744,17 @@ class RV:
         return s

     @classmethod
-    @lru_cache(maxsize=10)
+    @lru_cache(maxsize=60)
     def from_KOBE_file(cls, star, **kwargs):
         assert 'KOBE' in star, f'{star} is not a KOBE star?'
         import requests
         from requests.auth import HTTPBasicAuth
         from io import BytesIO
         import tarfile
+        from time import time as pytime
         from astropy.io import fits
         from .config import config
+        from .utils import get_data_path

         try:
             config.kobe_password
@@ -740,23 +763,11 @@ class RV:
             return

         tar = None
+        local_targz_file = os.path.join(get_data_path(), 'KOBE_fitsfiles.tar.gz')
         fits_file = f'{star}_RVs.fits'
-        resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
-                            auth=HTTPBasicAuth('kobeteam', config.kobe_password))
-
-        if resp.status_code != 200:
-            # something went wrong, try to extract the file by downloading the
-            # full tar.gz archive
-
-            logger.warning(f'could not find "{fits_file}" on server, trying to download full archive')
-            resp = requests.get('https://kobe.caha.es/internal/fitsfiles.tar.gz',
-                                auth=HTTPBasicAuth('kobeteam', config.kobe_password))

-            if resp.status_code != 200:
-                logger.error(f'KOBE file not found for {star}')
-                return
-
-            tar = tarfile.open(fileobj=BytesIO(resp.content))
+        if os.path.exists(local_targz_file) and os.path.getmtime(local_targz_file) > pytime() - 60*60*2:
+            tar = tarfile.open(local_targz_file)

             if fits_file not in tar.getnames():
                 logger.error(f'KOBE file not found for {star}')
@@ -765,8 +776,36 @@ class RV:
             hdul = fits.open(tar.extractfile(fits_file))

         else:
-            # found the file on the server, read it directly
-            hdul = fits.open(BytesIO(resp.content))
+            resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
+                                auth=HTTPBasicAuth('kobeteam', config.kobe_password))
+
+            if resp.status_code != 200:
+                # something went wrong, try to extract the file by downloading the
+                # full tar.gz archive
+
+                logger.warning(f'could not find "{fits_file}" on server, trying to download full archive')
+                resp = requests.get('https://kobe.caha.es/internal/fitsfiles.tar.gz',
+                                    auth=HTTPBasicAuth('kobeteam', config.kobe_password))
+
+                if resp.status_code != 200:
+                    logger.error(f'KOBE file not found for {star}')
+                    return
+
+                # save tar.gz file for later
+                with open(local_targz_file, 'wb') as tg:
+                    tg.write(resp.content)
+
+                tar = tarfile.open(fileobj=BytesIO(resp.content))
+
+                if fits_file not in tar.getnames():
+                    logger.error(f'KOBE file not found for {star}')
+                    return
+
+                hdul = fits.open(tar.extractfile(fits_file))
+
+            else:
+                # found the file on the server, read it directly
+                hdul = fits.open(BytesIO(resp.content))

         s = cls(star, _child=True)

@@ -777,26 +816,47 @@ class RV:
         s.vrad_preNZP = hdul[1].data['RVd']
         s.vrad_preNZP_err = hdul[1].data['eRVd']

+        s.fwhm = hdul[1].data['FWHM']
+        s.fwhm_err = hdul[1].data['eFWHM']
+
+        s.crx = hdul[1].data['CRX']
+        s.crx_err = hdul[1].data['eCRX']
+        s.dlw = hdul[1].data['DLW']
+        s.dlw_err = hdul[1].data['eDLW']
+        s.contrast = hdul[1].data['CONTRAST']
+        s.contrast_err = hdul[1].data['eCONTRAST']
+        s.bispan = hdul[1].data['BIS']
+        s.bispan_err = hdul[1].data['eBIS']
+
+
         s.drift = hdul[1].data['drift']
         s.drift_err = hdul[1].data['e_drift']

         s.nzp = hdul[1].data['NZP']
         s.nzp_err = hdul[1].data['eNZP']

+        s.texp = hdul[1].data['ExpTime']
         s.berv = hdul[1].data['BERV']
+        s.units = 'km/s'

+        s.obs = np.ones_like(s.time, dtype=int)
         s.mask = np.full_like(s.time, True, dtype=bool)
         s.instruments = ['CARMENES']
+        s._quantities = np.array(['berv', ])

         # so meta!
         setattr(s, 'CARMENES', s)

         s._kobe_result = hdul[1].data

+        s.mask = s._kobe_result['rvflag']
+        s._propagate_mask_changes()
+
         if tar is not None:
             tar.close()
         hdul.close()

+        s._child = False
         return s
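The rewritten `from_KOBE_file` now caches the full `fitsfiles.tar.gz` archive locally and reuses it while it is less than two hours old (`pytime() - 60*60*2`), only falling back to the per-star download and then the full-archive download when needed. The freshness test is a plain mtime comparison; a minimal sketch:

    import os
    from time import time

    def is_fresh(path, max_age_seconds=2 * 60 * 60):
        """ True if `path` exists and was modified within the last two hours. """
        return os.path.exists(path) and os.path.getmtime(path) > time() - max_age_seconds

    print(is_fresh('KOBE_fitsfiles.tar.gz'))  # False unless a recent local copy exists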

@@ -1187,6 +1247,13 @@
         if (self.time < bjd).any():
             ind = np.where(self.time < bjd)[0]
             self.remove_point(ind)
+
+    def remove_between_bjds(self, bjd1, bjd2):
+        """ Remove observations between two BJDs """
+        to_remove = (self.time > bjd1) & (self.time < bjd2)
+        if to_remove.any():
+            ind = np.where(to_remove)[0]
+            self.remove_point(ind)

     def choose_n_points(self, n, seed=None, instrument=None):
         """ Randomly choose `n` observations and mask out the remaining ones
@@ -1243,6 +1310,9 @@
         mas_yr = units.milliarcsecond / units.year
         mas = units.milliarcsecond

+        # store the source of coordinates and parallax, either Gaia or Simbad
+        using = ''
+
         try:
             if force_simbad:
                 raise AttributeError
@@ -1255,8 +1325,7 @@
                 logger.error('negative Gaia parallax, falling back to Simbad')
                 raise AttributeError

-            if self.verbose:
-                logger.info('using Gaia information to remove secular acceleration')
+            using = 'Gaia'

             if epoch is None:
                 # Gaia DR3 epoch (astropy.time.Time('J2016.0', format='jyear_str').jd)
@@ -1284,8 +1353,7 @@
                 logger.error('no parallax from simbad, cannot remove secular acceleration')
                 return

-            if self.verbose:
-                logger.info('using Simbad information to remove secular acceleration')
+            using = 'Simbad'

             if epoch is None:
                 epoch = 55500
@@ -1303,14 +1371,14 @@

         sa = sa.value

-        if self.verbose:
-            logger.info('removing secular acceleration from RVs')
-
         if self.units == 'km/s':
             sa /= 1000

+        actually_removed_sa = False
+
         if self._child:
             self.vrad = self.vrad - sa * (self.time - epoch) / 365.25
+            actually_removed_sa = True
         else:
             for inst in self.instruments:
                 s = getattr(self, inst)
@@ -1320,9 +1388,7 @@
                 if np.all(s.pub_reference != ''):
                     continue

-                if 'HIRES' in inst: # never remove it from HIRES...
-                    continue
-                if 'NIRPS' in inst: # never remove it from NIRPS...
+                if 'HIRES' in inst or 'HAMILTON' in inst:
                     continue

                 if hasattr(s, '_did_secular_acceleration') and s._did_secular_acceleration:
@@ -1330,8 +1396,14 @@

                 s.vrad = s.vrad - sa * (s.time - epoch) / 365.25

+                actually_removed_sa = True
+
         self._build_arrays()

+        if actually_removed_sa and self.verbose:
+            logger.info(f'using {using} information to remove secular acceleration')
+            logger.info('removing secular acceleration from RVs')
+
         self._did_secular_acceleration = True
         self._did_secular_acceleration_epoch = epoch
         self._did_secular_acceleration_simbad = force_simbad
@@ -1692,8 +1764,21 @@
             s = getattr(self, inst)
             s.vrad *= factor
             s.svrad *= factor
-            s.fwhm *= factor
-            s.fwhm_err *= factor
+            try:
+                s.fwhm *= factor
+                s.fwhm_err *= factor
+            except AttributeError:
+                pass
+
+            for q in (
+                'bispan',
+                'nzp', 'vrad_preNZP',
+            ):
+                try:
+                    setattr(s, q, getattr(s, q) * factor)
+                    setattr(s, f'{q}_err', getattr(s, f'{q}_err') * factor)
+                except AttributeError:
+                    pass

         self._build_arrays()
         self.units = new_units
@@ -1710,11 +1795,11 @@
         for inst in self.instruments:
             s = getattr(self, inst)
             if s.mask.any():
-                if np.abs(s.mvrad.mean()) < s.mvrad.ptp():
+                if np.abs(s.mvrad.mean()) < np.ptp(s.mvrad):
                     s.vrad += self.simbad.rvz_radvel * 1e3
                     changed = True
             else: # all observations are masked, use non-masked arrays
-                if np.abs(s.vrad.mean()) < s.vrad.ptp():
+                if np.abs(s.vrad.mean()) < np.ptp(s.vrad):
                     s.vrad += self.simbad.rvz_radvel * 1e3
                     changed = True
         if changed:
@@ -1991,15 +2076,14 @@ def fit_sine(t, y, yerr=None, period='gls', fix_period=False):
     if fix_period:
         def sine(t, p):
             return p[0] * np.sin(2 * np.pi * t / period + p[1]) + p[2]
-        f = lambda p, t, y, ye: (sine(t, p) - y) / ye
-        p0 = [y.ptp(), 0.0, 0.0]
+        p0 = [np.ptp(y), 0.0, 0.0]
     else:
         def sine(t, p):
             return p[0] * np.sin(2 * np.pi * t / p[1] + p[2]) + p[3]
-        f = lambda p, t, y, ye: (sine(t, p) - y) / ye
-        p0 = [y.ptp(), period, 0.0, 0.0]
+        p0 = [np.ptp(y), period, 0.0, 0.0]

-    xbest, _ = leastsq(f, p0, args=(t, y, yerr))
+    xbest, _ = leastsq(lambda p, t, y, ye: (sine(t, p) - y) / ye, p0,
+                       args=(t, y, yerr))
     return xbest, partial(sine, p=xbest)

arvi/translations.py CHANGED
@@ -6,6 +6,11 @@ STARS = {
     "Barnard's": 'GJ699',
     'Ross128': 'Ross 128',
     'Ross 128': 'Ross 128',
+    #
+    'Teegarden': 'GAT1370',
+    "Teegarden's Star": 'GAT1370',
+    #
+    "Smethells 20": 'TIC464410508',
 }

arvi/utils.py CHANGED
@@ -90,6 +90,7 @@ def timer(name=None):
 def sanitize_path(path):
     if os.name == 'nt': # on Windows, be careful with ':' in filename
         path = path.replace(':', '_')
+        path = path.replace('*', '_')
     return path

 def pretty_print_table(rows, line_between_rows=True, logger=None):
arvi-0.1.22.dist-info/METADATA → arvi-0.1.24.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: arvi
-Version: 0.1.22
+Version: 0.1.24
 Summary: The Automated RV Inspector
 Author-email: João Faria <joao.faria@unige.ch>
 License: MIT
arvi-0.1.22.dist-info/RECORD → arvi-0.1.24.dist-info/RECORD RENAMED
@@ -4,33 +4,33 @@ arvi/ariadne_wrapper.py,sha256=YvilopJa9T4NwPcj3Nah_U8smSeSAU5-HYZMb_GJ-BQ,2232
 arvi/berv.py,sha256=eKnpuPC1w45UrUEyFRbs9F9j3bXz3kxYzNXbnRgvFQM,17596
 arvi/binning.py,sha256=jbemJ-bM3aqoOsqMo_OhWt_co-JAQ0nhdG_GpTsrRsw,15403
 arvi/config.py,sha256=W-v8NNhRd_PROu0wCMilXmOhYcju4xbUalugd5u7SRU,1881
-arvi/dace_wrapper.py,sha256=ml0xC_KR-oXwEo6ysODzfpx-eP7VNbxURZSns31WbK4,18196
+arvi/dace_wrapper.py,sha256=CUHKN5m7KULM1sES5x0GXx8HQgJE7XdwwWv6_zAYhb4,21324
 arvi/extra_data.py,sha256=WEEaYeLh52Zdv0uyHO72Ys5MWS3naTAP4wJV2BJ1mbk,2551
 arvi/gaia_wrapper.py,sha256=icm3LJjG9pjP47_bM30NFyocUQO3X3SHS5yQ-Dwcr5w,4653
 arvi/headers.py,sha256=uvdJebw1M5YkGjE3vJJwYBOnLikib75uuZE9FXB5JJM,1673
 arvi/instrument_specific.py,sha256=-pbm2Vk3iK_1K7nDa1avlJOKHBcXllwILI4lQn-Ze-A,7761
-arvi/kima_wrapper.py,sha256=y_Z0Hl2ECbs2-B6ZR9retrjId7-QxcRylG7b5aDsiFk,2306
+arvi/kima_wrapper.py,sha256=BvNTVqzM4lMNhLCyBFVh3T84hHfGKAFpgiYiOi4lh0g,2731
 arvi/lbl_wrapper.py,sha256=_ViGVkpakvuBR_xhu9XJRV5EKHpj5Go6jBZGJZMIS2Y,11850
 arvi/nasaexo_wrapper.py,sha256=mWt7eHgSZe4MBKCmUvMPTyUPGuiwGTqKugNBvmjOg9s,7306
 arvi/plots.py,sha256=WUm-sqN0aZTNXvE1kYpvmHTW9QPWqSCpKhNjwaqxjEk,29628
-arvi/programs.py,sha256=C0Fbldjf-QEZYYJp5wBKP3h7zraD0O2mJC7Su967STg,4607
+arvi/programs.py,sha256=BW7xBNKLei7NVLLW3_lsVskwzkaIoNRiHK2jn9Tn2ZM,8879
 arvi/reports.py,sha256=ayPdZ4HZO9iCDdnADQ18gQPJh79o-1UYG7TYkvm9Lrc,4051
 arvi/setup_logger.py,sha256=pBzaRTn0hntozjbaRVx0JIbWGuENkvYUApa6uB-FsRo,279
 arvi/simbad_wrapper.py,sha256=iAAwEMcr1Hgu6lnDctmaCC1TLPCB8yAfHG0wxh9K9C8,5791
-arvi/spectra.py,sha256=pTAWSW4vk96DWRQ-6l5mNJHUhiAyaPR-QDjZdOT6Ak0,7489
+arvi/spectra.py,sha256=ebF1ocodTastLx0CyqLSpE8EZNDXBF8riyfxMr3L6H0,7491
 arvi/stats.py,sha256=ilzzGL9ew-SyVa9eEdrYCpD3DliOAwhoNUg9LIlHjzU,2583
 arvi/stellar.py,sha256=veuL_y9kJvvApU_jqYQqP3EkcRnQffTc8Us6iT5UrFI,3790
-arvi/timeseries.py,sha256=IYv_9qTuvxMQWK8As1xRM1_X-ZAxisz1FRCydG2-900,74264
-arvi/translations.py,sha256=FBF_2OrpMQBG4GtV4_UOspiaxetiGCY7TQFcwZMMVuQ,838
-arvi/utils.py,sha256=ihQcYvk_ix52CW8fVP16CKTVEamGU5gPNRneVcgDrWc,6534
+arvi/timeseries.py,sha256=NdmSSYeDdS-cYBXPt8NCeKS1jLdv8LP6Zh561KRGfZc,77328
+arvi/translations.py,sha256=PUSrn4zvYO2MqGzUxlFGwev_tBkgJaJrIYs6NKHzbWo,951
+arvi/utils.py,sha256=LImV8iPjG8ZKjPCT9lp25_pDb-51ZZk42Hc8bzZt7M0,6568
 arvi/data/info.svg,sha256=0IMI6W-eFoTD8acnury79WJJakpBwLa4qKS4JWpsXiI,489
 arvi/data/obs_affected_ADC_issues.dat,sha256=tn93uOL0eCTYhireqp1wG-_c3CbxPA7C-Rf-pejVY8M,10853
 arvi/data/obs_affected_blue_cryostat_issues.dat,sha256=z4AK17xfz8tGTDv1FjRvQFnio4XA6PNNfDXuicewHk4,1771
 arvi/data/extra/HD86226_PFS1.rdb,sha256=vfAozbrKHM_j8dYkCBJsuHyD01KEM1asghe2KInwVao,3475
 arvi/data/extra/HD86226_PFS2.rdb,sha256=F2P7dB6gVyzCglUjNheB0hIHVClC5RmARrGwbrY1cfo,4114
 arvi/data/extra/metadata.json,sha256=C69hIw6CohyES6BI9vDWjxwSz7N4VOYX0PCgjXtYFmU,178
-arvi-0.1.22.dist-info/LICENSE,sha256=6JfQgl7SpM55t0EHMFNMnNh-AdkpGW25MwMiTnhdWQg,1068
-arvi-0.1.22.dist-info/METADATA,sha256=J__w4ZE5d8POUPhA7vd_zoLkyxY-62ddCGZ0r3bJXf8,1852
-arvi-0.1.22.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-arvi-0.1.22.dist-info/top_level.txt,sha256=4EeiKDVLD45ztuflTGfQ3TU8GVjJg5Y95xS5XjI-utU,5
-arvi-0.1.22.dist-info/RECORD,,
+arvi-0.1.24.dist-info/LICENSE,sha256=6JfQgl7SpM55t0EHMFNMnNh-AdkpGW25MwMiTnhdWQg,1068
+arvi-0.1.24.dist-info/METADATA,sha256=ijGk4XpvyZZ1IxV0_6iFuJkjZI4wtWW6A4kuCZE-gSA,1852
+arvi-0.1.24.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+arvi-0.1.24.dist-info/top_level.txt,sha256=4EeiKDVLD45ztuflTGfQ3TU8GVjJg5Y95xS5XjI-utU,5
+arvi-0.1.24.dist-info/RECORD,,
arvi-0.1.22.dist-info/WHEEL → arvi-0.1.24.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (75.3.0)
+Generator: setuptools (75.5.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
