arvi-0.1.25-py3-none-any.whl → arvi-0.1.27-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arvi might be problematic; see the registry's advisory page for more details.

arvi/dace_wrapper.py CHANGED
@@ -11,16 +11,33 @@ from .setup_logger import logger
11
11
  from .utils import create_directory, all_logging_disabled, stdout_disabled, tqdm
12
12
 
13
13
 
14
- def load_spectroscopy() -> SpectroscopyClass:
14
+ def load_spectroscopy(user=None) -> SpectroscopyClass:
15
15
  from .config import config
16
+ # requesting as public
16
17
  if config.request_as_public:
17
18
  with all_logging_disabled():
18
19
  dace = DaceClass(dace_rc_config_path='none')
19
20
  return SpectroscopyClass(dace_instance=dace)
21
+ # DACERC environment variable is set, should point to a dacerc file with credentials
20
22
  if 'DACERC' in os.environ:
21
23
  dace = DaceClass(dace_rc_config_path=os.environ['DACERC'])
22
24
  return SpectroscopyClass(dace_instance=dace)
23
- # elif os.path.exists(os.path.expanduser('~/.dacerc')):
25
+ # user provided, should be a section in ~/.dacerc
26
+ if user is not None:
27
+ import configparser
28
+ import tempfile
29
+ config = configparser.ConfigParser()
30
+ config.read(os.path.expanduser('~/.dacerc'))
31
+ if user not in config.sections():
32
+ raise ValueError(f'Section for user "{user}" not found in ~/.dacerc')
33
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
34
+ new_config = configparser.ConfigParser()
35
+ new_config['user'] = config[user]
36
+ new_config.write(f)
37
+ dace = DaceClass(dace_rc_config_path=f.name)
38
+ logger.info(f'using credentials for user {user} in ~/.dacerc')
39
+ return SpectroscopyClass(dace_instance=dace)
40
+ # default
24
41
  return default_Spectroscopy
25
42
 
26
43
  @lru_cache()
@@ -70,7 +87,6 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
70
87
  if verbose and npipe > 1:
71
88
  logger.info(f'selecting latest pipeline ({pipelines[0]}) for {inst}')
72
89
 
73
-
74
90
  for pipe in pipelines:
75
91
  modes = [m for m in result[inst][pipe].keys()]
76
92
 
@@ -85,24 +101,19 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
85
101
  if verbose:
86
102
  logger.warning(f'no observations for requested NIRPS mode ({NIRPS_mode})')
87
103
 
88
- # HARPS15 observations should not be separated by 'mode' if some are
89
- # done together with NIRPS
90
- if 'HARPS15' in inst:
104
+ # HARPS observations should not be separated by 'mode' if some are
105
+ # done together with NIRPS, but should be separated by 'EGGS' mode
106
+ if 'HARPS' in inst:
107
+ m0 = modes[0]
108
+ data = {
109
+ k: np.concatenate([result[inst][pipe][m][k] for m in modes])
110
+ for k in result[inst][pipe][m0].keys()
111
+ }
91
112
  if 'HARPS+NIRPS' in modes:
92
- m0 = modes[0]
93
- data = {
94
- k: np.concatenate([result[inst][pipe][m][k] for m in modes])
95
- for k in result[inst][pipe][m0].keys()
96
- }
97
113
  arrays.append( ((str(inst), str(pipe), str(m0)), data) )
98
114
  continue
99
115
 
100
- if 'EGGS+NIRPS' in modes:
101
- m0 = modes[0]
102
- data = {
103
- k: np.concatenate([result[inst][pipe][m][k] for m in modes])
104
- for k in result[inst][pipe][m0].keys()
105
- }
116
+ if 'EGGS+NIRPS' in modes or 'EGGS' in modes:
106
117
  arrays.append( ((str(inst + '_EGGS'), str(pipe), str(m0)), data) )
107
118
  continue
108
119
 
@@ -117,7 +128,7 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
117
128
 
118
129
  return arrays
119
130
 
120
- def get_observations_from_instrument(star, instrument, main_id=None, verbose=True):
131
+ def get_observations_from_instrument(star, instrument, user=None, main_id=None, verbose=True):
121
132
  """ Query DACE for all observations of a given star and instrument
122
133
 
123
134
  Args:
@@ -125,6 +136,8 @@ def get_observations_from_instrument(star, instrument, main_id=None, verbose=Tru
125
136
  name of the star
126
137
  instrument (str):
127
138
  instrument name
139
+ user (str, optional):
140
+ DACERC user name. Defaults to None.
128
141
  main_id (str, optional):
129
142
  Simbad main id of target to query DACE id. Defaults to None.
130
143
  verbose (bool, optional):
@@ -138,7 +151,7 @@ def get_observations_from_instrument(star, instrument, main_id=None, verbose=Tru
138
151
  dict:
139
152
  dictionary with data from DACE
140
153
  """
141
- Spectroscopy = load_spectroscopy()
154
+ Spectroscopy = load_spectroscopy(user)
142
155
  found_dace_id = False
143
156
  try:
144
157
  dace_id = get_dace_id(star, verbose=verbose)
@@ -239,9 +252,9 @@ def get_observations_from_instrument(star, instrument, main_id=None, verbose=Tru
239
252
  # print([r[k1][k2].keys() for k1 in r.keys() for k2 in r[k1].keys()])
240
253
  return r
241
254
 
242
- def get_observations(star, instrument=None, main_id=None, verbose=True):
255
+ def get_observations(star, instrument=None, user=None, main_id=None, verbose=True):
243
256
  if instrument is None:
244
- Spectroscopy = load_spectroscopy()
257
+ Spectroscopy = load_spectroscopy(user)
245
258
  try:
246
259
  with stdout_disabled(), all_logging_disabled():
247
260
  result = Spectroscopy.get_timeseries(target=star,
@@ -255,7 +268,7 @@ def get_observations(star, instrument=None, main_id=None, verbose=True):
255
268
  raise ValueError(msg) from None
256
269
  else:
257
270
  try:
258
- result = get_observations_from_instrument(star, instrument, main_id, verbose)
271
+ result = get_observations_from_instrument(star, instrument, user, main_id, verbose)
259
272
  except ValueError:
260
273
  msg = f'no {instrument} observations for {star}'
261
274
  raise ValueError(msg) from None
@@ -339,6 +352,12 @@ def check_existing(output_directory, files, type):
339
352
  if type in f
340
353
  ]
341
354
 
355
+ if type == 'S2D':
356
+ existing = [
357
+ f.partition('.fits')[0] for f in os.listdir(output_directory)
358
+ if 'e2ds' in f
359
+ ]
360
+
342
361
  # also check for lowercase type
343
362
  existing += [
344
363
  f.partition('.fits')[0] for f in os.listdir(output_directory)
@@ -351,7 +370,8 @@ def check_existing(output_directory, files, type):
351
370
 
352
371
  # remove type of file (e.g. _CCF_A)
353
372
  existing = [f.partition('_')[0] for f in existing]
354
-
373
+ existing = np.unique(existing)
374
+
355
375
  missing = []
356
376
  for file in files:
357
377
  if any(other in file for other in existing):
@@ -360,9 +380,9 @@ def check_existing(output_directory, files, type):
360
380
 
361
381
  return np.array(missing)
362
382
 
363
- def download(files, type, output_directory, output_filename=None, quiet=True, pbar=None):
383
+ def download(files, type, output_directory, output_filename=None, user=None, quiet=True, pbar=None):
364
384
  """ Download files from DACE """
365
- Spectroscopy = load_spectroscopy()
385
+ Spectroscopy = load_spectroscopy(user)
366
386
  if isinstance(files, str):
367
387
  files = [files]
368
388
  if quiet:
@@ -439,7 +459,7 @@ def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_le
439
459
  logger.warning(f'file not found: {file}')
440
460
 
441
461
 
442
- def do_download_filetype(type, raw_files, output_directory, clobber=False,
462
+ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=None,
443
463
  verbose=True, chunk_size=20, parallel_limit=30):
444
464
  """ Download CCFs / S1Ds / S2Ds from DACE """
445
465
  raw_files = np.atleast_1d(raw_files)
@@ -475,7 +495,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
475
495
  if n < parallel_limit:
476
496
  iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
477
497
  for files in tqdm(iterator, total=len(iterator)):
478
- download(files, type, output_directory, quiet=False)
498
+ download(files, type, output_directory, quiet=False, user=user)
479
499
  extract_fits(output_directory)
480
500
 
481
501
  else:
@@ -487,7 +507,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
487
507
  chunks = list(chunker(raw_files, chunk_size))
488
508
  pbar = tqdm(total=len(chunks))
489
509
  it1 = [
490
- (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', True, pbar)
510
+ (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', user, True, pbar)
491
511
  for i, files in enumerate(chunks)
492
512
  ]
493
513
  it2 = [(output_directory, f'spectroscopy_download{i+1}.tar.gz') for i in range(len(chunks))]
arvi/extra_data.py CHANGED
@@ -10,7 +10,8 @@ refs = {
10
10
  'HD86226': 'Teske et al. 2020 (AJ, 160, 2)'
11
11
  }
12
12
 
13
- def get_extra_data(star, instrument=None, path=None, verbose=True):
13
+ def get_extra_data(star, instrument=None, path=None, verbose=True,
14
+ check_for_kms=True):
14
15
  if path is None:
15
16
  path = os.path.dirname(__file__)
16
17
  path = os.path.join(path, 'data', 'extra')
@@ -18,7 +19,7 @@ def get_extra_data(star, instrument=None, path=None, verbose=True):
18
19
  metadata = json.load(open(os.path.join(path, 'metadata.json'), 'r'))
19
20
  # print(metadata)
20
21
 
21
- files = glob(os.path.join(path, star + '*'))
22
+ files = glob(os.path.join(path, star.replace(' ', '') + '*.rdb'))
22
23
  files = [f for f in files if os.path.isfile(f)]
23
24
  files = [f for f in files if not os.path.basename(f).endswith('.zip')]
24
25
 
@@ -57,9 +58,24 @@ def get_extra_data(star, instrument=None, path=None, verbose=True):
57
58
  if 'corrected_for_secular_acceleration' in metadata[file_basename]:
58
59
  did_sa[i] = metadata[file_basename]['corrected_for_secular_acceleration']
59
60
 
60
- s = timeseries.RV.from_rdb(files[0], star=star, instrument=instruments[0], units=units[0])
61
- for file, instrument, unit in zip(files[1:], instruments[1:], units[1:]):
62
- s = s + timeseries.RV.from_rdb(file, star=star, instrument=instrument, units=unit)
61
+ with logger.contextualize(indent=' '):
62
+ s = timeseries.RV.from_rdb(files[0], star=star, instrument=instruments[0], units=units[0])
63
+ if check_for_kms and s.svrad.min() < 0.01:
64
+ units[0] = 'kms'
65
+ s = timeseries.RV.from_rdb(files[0], star=star, instrument=instruments[0], units=units[0])
66
+ if verbose:
67
+ logger.info(f'{instruments[0]:>12s} ├ ({s.N} observations)')
68
+
69
+ for file, instrument, unit in zip(files[1:], instruments[1:], units[1:]):
70
+ _s = timeseries.RV.from_rdb(file, star=star, instrument=instrument, units=unit)
71
+ if check_for_kms and _s.svrad.min() < 0.01:
72
+ unit = 'kms'
73
+ _s = timeseries.RV.from_rdb(file, star=star, instrument=instrument, units=unit)
74
+ if verbose:
75
+ logger.info(f'{instrument:>12s} ├ ({_s.N} observations)')
76
+
77
+ s = s + _s
78
+
63
79
 
64
80
  for i, (inst, ref, inst_did_sa) in enumerate(zip(s.instruments, reference, did_sa)):
65
81
  _s = getattr(s, inst)
arvi/gaia_wrapper.py CHANGED
@@ -9,7 +9,11 @@ DATA_PATH = os.path.dirname(__file__)
9
9
  DATA_PATH = os.path.join(DATA_PATH, 'data')
10
10
 
11
11
  QUERY = """
12
- SELECT TOP 20 gaia_source.designation,gaia_source.source_id,gaia_source.ra,gaia_source.dec,gaia_source.parallax,gaia_source.pmra,gaia_source.pmdec,gaia_source.ruwe,gaia_source.phot_g_mean_mag,gaia_source.bp_rp,gaia_source.radial_velocity,gaia_source.phot_variable_flag,gaia_source.non_single_star,gaia_source.has_xp_continuous,gaia_source.has_xp_sampled,gaia_source.has_rvs,gaia_source.has_epoch_photometry,gaia_source.has_epoch_rv,gaia_source.has_mcmc_gspphot,gaia_source.has_mcmc_msc,gaia_source.teff_gspphot,gaia_source.logg_gspphot,gaia_source.mh_gspphot,gaia_source.distance_gspphot,gaia_source.azero_gspphot,gaia_source.ag_gspphot,gaia_source.ebpminrp_gspphot
12
+ SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
13
+ gaia_source.ra, gaia_source.dec,
14
+ gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
15
+ gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
16
+ gaia_source.radial_velocity, gaia_source.radial_velocity_error
13
17
  FROM gaiadr3.gaia_source
14
18
  WHERE
15
19
  CONTAINS(
@@ -23,7 +27,11 @@ CONTAINS(
23
27
  """
24
28
 
25
29
  QUERY_ID = """
26
- SELECT TOP 20 gaia_source.designation,gaia_source.source_id,gaia_source.ra,gaia_source.dec,gaia_source.parallax,gaia_source.pmra,gaia_source.pmdec,gaia_source.ruwe,gaia_source.phot_g_mean_mag,gaia_source.bp_rp,gaia_source.radial_velocity,gaia_source.phot_variable_flag,gaia_source.non_single_star,gaia_source.has_xp_continuous,gaia_source.has_xp_sampled,gaia_source.has_rvs,gaia_source.has_epoch_photometry,gaia_source.has_epoch_rv,gaia_source.has_mcmc_gspphot,gaia_source.has_mcmc_msc,gaia_source.teff_gspphot,gaia_source.logg_gspphot,gaia_source.mh_gspphot,gaia_source.distance_gspphot,gaia_source.azero_gspphot,gaia_source.ag_gspphot,gaia_source.ebpminrp_gspphot
30
+ SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
31
+ gaia_source.ra, gaia_source.dec,
32
+ gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
33
+ gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
34
+ gaia_source.radial_velocity, gaia_source.radial_velocity_error
27
35
  FROM gaiadr3.gaia_source
28
36
  WHERE
29
37
  gaia_source.source_id = {id}
@@ -31,6 +39,7 @@ gaia_source.source_id = {id}
31
39
 
32
40
  translate = {
33
41
  'Proxima': '5853498713190525696',
42
+ 'GJ699': '4472832130942575872',
34
43
  'LS II +14 13': '4318465066420528000',
35
44
  }
36
45
 
@@ -81,7 +90,7 @@ class gaia:
81
90
  pmra = simbad.pmra
82
91
  pmdec = simbad.pmdec
83
92
  rv = simbad.rvz_radvel
84
- args = dict(ra=ra, dec=dec, plx=plx, pmra=pmra, pmdec=pmdec, rv=rv)
93
+ args = dict(ra=ra, dec=dec, plx=plx, pmra=pmra, pmdec=pmdec, rv=rv)
85
94
 
86
95
  try:
87
96
  if star in translate:
@@ -109,6 +118,10 @@ class gaia:
109
118
  self.radial_velocity = float(results['radial_velocity'])
110
119
  except ValueError:
111
120
  self.radial_velocity = None
121
+ try:
122
+ self.radial_velocity_error = float(results['radial_velocity_error'])
123
+ except ValueError:
124
+ self.radial_velocity_error = None
112
125
 
113
126
  return
114
127
 
arvi/plots.py CHANGED
@@ -6,7 +6,7 @@ import numpy as np
6
6
  from astropy.timeseries import LombScargle
7
7
 
8
8
  from .setup_logger import logger
9
- from . import config
9
+ from .config import config
10
10
  from .stats import wmean
11
11
 
12
12
  from .utils import lazy_import
@@ -123,8 +123,12 @@ def clickable_legend(fig, ax, leg):
123
123
  h = handles[labels.index(artist.get_text())]
124
124
  alpha_text = {None:0.2, 1.0: 0.2, 0.2:1.0}[artist.get_alpha()]
125
125
  alpha_point = {None: 0.0, 1.0: 0.0, 0.2: 1.0}[artist.get_alpha()]
126
- h[0].set_alpha(alpha_point)
127
- h[2][0].set_alpha(alpha_point)
126
+ try:
127
+ h[0].set_alpha(alpha_point)
128
+ h[2][0].set_alpha(alpha_point)
129
+ except TypeError:
130
+ h.set_alpha(alpha_point)
131
+
128
132
  artist.set_alpha(alpha_text)
129
133
  fig.canvas.draw()
130
134
  except ValueError:
@@ -488,14 +492,15 @@ def plot_quantity(self, quantity, ax=None, show_masked=False, instrument=None,
488
492
 
489
493
 
490
494
  plot_fwhm = partialmethod(plot_quantity, quantity='fwhm')
491
- plot_bis = partialmethod(plot_quantity, quantity='bispan')
495
+ plot_bispan = partialmethod(plot_quantity, quantity='bispan')
492
496
  plot_contrast = partialmethod(plot_quantity, quantity='contrast')
493
497
  plot_rhk = partialmethod(plot_quantity, quantity='rhk')
494
498
  plot_berv = partialmethod(plot_quantity, quantity='berv')
495
499
 
496
500
 
497
501
  @plot_fast
498
- def gls(self, ax=None, label=None, fap=True, instrument=None, adjust_means=config.adjust_means_gls,
502
+ def gls(self, ax=None, label=None, fap=True, instrument=None,
503
+ adjust_means=config.adjust_means_gls,
499
504
  picker=True, **kwargs):
500
505
  """
501
506
  Calculate and plot the Generalised Lomb-Scargle periodogram of the radial
@@ -711,7 +716,7 @@ def gls_quantity(self, quantity, ax=None, fap=True, instrument=None,
711
716
 
712
717
 
713
718
  gls_fwhm = partialmethod(gls_quantity, quantity='fwhm')
714
- gls_bis = partialmethod(gls_quantity, quantity='bispan')
719
+ gls_bispan = partialmethod(gls_quantity, quantity='bispan')
715
720
  gls_rhk = partialmethod(gls_quantity, quantity='rhk')
716
721
 
717
722
 
arvi/simbad_wrapper.py CHANGED
@@ -1,8 +1,15 @@
1
1
  import os
2
+ import numpy as np
2
3
  import requests
4
+ from dataclasses import dataclass
3
5
 
4
6
  import pysweetcat
5
7
 
8
+ try:
9
+ from uncertainties import ufloat
10
+ except ImportError:
11
+ ufloat = lambda x, y: x
12
+
6
13
  from .translations import translate
7
14
 
8
15
  DATA_PATH = os.path.dirname(__file__)
@@ -15,19 +22,35 @@ SELECT basic.OID,
15
22
  main_id,
16
23
  pmra,
17
24
  pmdec,
18
- plx_value,
25
+ plx_value, plx_err,
19
26
  rvz_radvel,
20
27
  sp_type
21
28
  FROM basic JOIN ident ON oidref = oid
22
29
  WHERE id = '{star}';
23
30
  """
24
31
 
32
+ # SELECT filter, flux, flux_err
33
+ # FROM basic JOIN ident ON oid = ident.oidref JOIN flux ON oid = flux.oidref
34
+ # WHERE id = 'HD23079';
35
+
25
36
  BV_QUERY = """
26
37
  SELECT B, V FROM allfluxes
27
38
  JOIN ident USING(oidref)
28
39
  WHERE id = '{star}';
29
40
  """
30
41
 
42
+ FILTERS_QUERY = """
43
+ SELECT filter, flux, flux_err, bibcode FROM flux
44
+ JOIN ident USING(oidref)
45
+ WHERE id = '{star}';
46
+ """
47
+
48
+ MEAS_QUERY = """
49
+ SELECT teff, log_g, log_g_prec, fe_h, fe_h_prec, bibcode FROM mesFe_H
50
+ JOIN ident USING(oidref)
51
+ WHERE id = '{star}';
52
+ """
53
+
31
54
  IDS_QUERY = """
32
55
  SELECT ids FROM ids
33
56
  JOIN ident USING(oidref)
@@ -40,6 +63,13 @@ JOIN ident ON oidref = oid
40
63
  WHERE id = '{star}';
41
64
  """
42
65
 
66
+ @dataclass
67
+ class Measurements:
68
+ teff: list
69
+ logg: list
70
+ feh: list
71
+ bibcode: list
72
+
43
73
 
44
74
  def run_query(query):
45
75
  url = 'http://simbad.u-strasbg.fr/simbad/sim-tap/sync'
@@ -52,7 +82,7 @@ def run_query(query):
52
82
  raise IndexError(err)
53
83
  return response.content.decode()
54
84
 
55
- def parse_table(table, cols=None, values=None):
85
+ def parse_table1(table, cols=None, values=None):
56
86
  header = table.splitlines()[0].split('|')
57
87
  if cols is None:
58
88
  cols = list(map(str.strip, header))
@@ -66,6 +96,29 @@ def parse_table(table, cols=None, values=None):
66
96
  values = [value.replace('"', '') for value in values]
67
97
  return cols, values
68
98
 
99
+ def parse_tablen(table, cols=None, values=None):
100
+ header = table.splitlines()[0].split('|')
101
+ cols = list(map(str.strip, header))
102
+ values = [list(map(str.strip, row.split('|'))) for row in table.splitlines()[2:]]
103
+ return cols, values
104
+
105
+ def parse_value(value, err=None, prec=None):
106
+ try:
107
+ v = float(value)
108
+ if err:
109
+ try:
110
+ v = ufloat(float(value), float(err))
111
+ except ValueError:
112
+ pass
113
+ if prec:
114
+ try:
115
+ v = ufloat(float(value), 10**-int(prec))
116
+ except ValueError:
117
+ pass
118
+ except ValueError:
119
+ v = np.nan
120
+ return v
121
+
69
122
 
70
123
  effective_temperatures = {
71
124
  'F0': 7350, 'F2': 7050, 'F3': 6850, 'F5': 6700, 'F6': 6550, 'F7': 6400, 'F8': 6300,
@@ -120,14 +173,37 @@ class simbad:
120
173
 
121
174
  try:
122
175
  table1 = run_query(query=QUERY.format(star=self.star))
123
- cols, values = parse_table(table1)
176
+ cols, values = parse_table1(table1)
124
177
 
125
178
  table2 = run_query(query=BV_QUERY.format(star=self.star))
126
- cols, values = parse_table(table2, cols, values)
179
+ cols, values = parse_table1(table2, cols, values)
127
180
 
128
181
  table3 = run_query(query=IDS_QUERY.format(star=self.star))
129
182
  line = table3.splitlines()[2]
130
183
  self.ids = line.replace('"', '').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').split('|')
184
+
185
+ table4 = run_query(query=FILTERS_QUERY.format(star=self.star))
186
+ for row in table4.splitlines()[2:]:
187
+ filter_name, mag, mag_err, bibcode = row.replace('"', '').split('|')
188
+ filter_name = filter_name.strip()
189
+ try:
190
+ setattr(self, '_' + filter_name, ufloat(float(mag), float(mag_err)))
191
+ except ValueError:
192
+ setattr(self, '_' + filter_name, float(mag))
193
+
194
+ # measurements table
195
+ table5 = run_query(query=MEAS_QUERY.format(star=self.star))
196
+ _teff, _logg, _feh, _bibcode = [], [], [], []
197
+ for row in table5.splitlines()[2:]:
198
+ teff, log_g, log_g_prec, fe_h, fe_h_prec, bibcode = row.replace('"', '').split('|')
199
+ _bibcode.append(bibcode)
200
+ _teff.append(parse_value(teff))
201
+ _logg.append(parse_value(log_g, prec=log_g_prec))
202
+ _feh.append(parse_value(fe_h, prec=fe_h_prec))
203
+
204
+ self.measurements = Measurements(_teff, _logg, _feh, _bibcode)
205
+
206
+
131
207
  except IndexError:
132
208
  raise ValueError(f'simbad query for {star} failed')
133
209
 
@@ -176,6 +252,9 @@ class simbad:
176
252
  sp_type = self.sp_type
177
253
  return f'{self.star} ({V=}, {sp_type=})'
178
254
 
255
+ @property
256
+ def bmv(self):
257
+ return self.B - self.V
179
258
 
180
259
 
181
260
  def argsort_by_spectral_type(sptypes):
arvi/timeseries.py CHANGED
@@ -25,7 +25,10 @@ units = lazy_import('astropy.units')
25
25
  # from astropy import units
26
26
 
27
27
  class ExtraFields:
28
- pass
28
+ @property
29
+ def fields(self):
30
+ return list(self.__dict__.keys())
31
+
29
32
 
30
33
  @dataclass
31
34
  class RV:
@@ -56,6 +59,7 @@ class RV:
56
59
  only_latest_pipeline: bool = field(init=True, repr=False, default=True)
57
60
  load_extra_data: Union[bool, str] = field(init=True, repr=False, default=False)
58
61
  check_drs_qc: bool = field(init=True, repr=False, default=True)
62
+ user: bool = field(init=True, repr=False, default=None)
59
63
  #
60
64
  units = 'm/s'
61
65
  _child: bool = field(init=True, repr=False, default=False)
@@ -70,13 +74,17 @@ class RV:
70
74
  _gaia = None
71
75
 
72
76
  def __repr__(self):
77
+ ni = len(self.instruments)
73
78
  if self.N == 0:
74
79
  return f"RV(star='{self.star}', N=0)"
80
+
81
+ i = f'{ni} instrument' + ('s' if ni > 1 else '')
82
+
75
83
  if self.time.size == self.mtime.size:
76
- return f"RV(star='{self.star}', N={self.N})"
84
+ return f"RV(star='{self.star}', N={self.N}, {i})"
77
85
  else:
78
86
  nmasked = self.N - self.mtime.size
79
- return f"RV(star='{self.star}', N={self.N}, masked={nmasked})"
87
+ return f"RV(star='{self.star}', N={self.N}, masked={nmasked}, {i})"
80
88
 
81
89
  @property
82
90
  def simbad(self):
@@ -178,7 +186,7 @@ class RV:
178
186
 
179
187
  with timer():
180
188
  self.dace_result = get_observations(self.__star__, self.instrument,
181
- main_id=mid, verbose=self.verbose)
189
+ user=self.user, main_id=mid, verbose=self.verbose)
182
190
  except ValueError as e:
183
191
  # querying DACE failed, should we raise an error?
184
192
  if self._raise_on_error:
@@ -272,11 +280,20 @@ class RV:
272
280
  # if not isinstance(other, self.__class__):
273
281
  # raise TypeError('unsupported operand type(s) for +: '
274
282
  # f"'{self.__class__.__name__}' and '{other.__class__.__name__}'")
283
+ if other is None:
284
+ if inplace:
285
+ return
286
+ else:
287
+ return deepcopy(self)
275
288
 
276
289
  if np.isin(self.instruments, other.instruments).any():
277
290
  logger.error('the two objects share instrument(s), cannot add them')
278
291
  return
279
292
 
293
+ if self._did_adjust_means or other._did_adjust_means:
294
+ self.adjust_means()
295
+ other.adjust_means()
296
+
280
297
  if inplace:
281
298
  #? could it be as simple as this?
282
299
  for i in other.instruments:
@@ -511,18 +528,33 @@ class RV:
511
528
  Examples:
512
529
  s = RV.from_rdb('star_HARPS.rdb')
513
530
  """
531
+ from glob import glob
532
+ from os.path import splitext, basename
533
+
534
+ verbose = kwargs.pop('verbose', True)
535
+
514
536
  if isinstance(files, str):
515
- files = [files]
537
+ if '*' in files:
538
+ files = glob(files)
539
+ else:
540
+ files = [files]
541
+
542
+ if len(files) == 0:
543
+ if verbose:
544
+ logger.error('no files found')
545
+ return
516
546
 
517
547
  if star is None:
518
- star_ = np.unique([os.path.splitext(os.path.basename(f))[0].split('_')[0] for f in files])
548
+ star_ = np.unique([splitext(basename(f))[0].split('_')[0] for f in files])
519
549
  if star_.size == 1:
520
- logger.info(f'assuming star is {star_[0]}')
521
- star = star_[0]
550
+ star = star_[0].replace('-', '_')
551
+ if verbose:
552
+ logger.info(f'assuming star is {star}')
522
553
 
523
554
  if instrument is None:
524
- instruments = np.array([os.path.splitext(f)[0].split('_')[1] for f in files])
525
- logger.info(f'assuming instruments: {instruments}')
555
+ instruments = np.array([splitext(basename(f))[0].split('_')[1] for f in files])
556
+ if verbose:
557
+ logger.info(f'assuming instruments: {instruments}')
526
558
  else:
527
559
  instruments = np.atleast_1d(instrument)
528
560
 
@@ -537,11 +569,14 @@ class RV:
537
569
  has_col = np.array([name in data.dtype.fields for name in names])
538
570
  if any(has_col):
539
571
  col = np.where(has_col)[0][0]
540
- return data[names[col]]
572
+ return np.atleast_1d(data[names[col]])
541
573
  return False
542
574
 
543
575
  for i, (f, instrument) in enumerate(zip(files, instruments)):
544
576
  data = np.loadtxt(f, skiprows=2, usecols=range(3), unpack=True)
577
+ if data.ndim == 1:
578
+ data = data.reshape(-1, 1)
579
+
545
580
  _s = cls(star, _child=True, **kwargs)
546
581
  time = data[0]
547
582
  _s.time = time
@@ -559,14 +594,20 @@ class RV:
559
594
  names = header.split()
560
595
 
561
596
  if len(names) > 3:
562
- if f.endswith('.rdb'):
563
- kw = dict(skip_header=2, dtype=None, encoding=None)
564
- else:
565
- kw = dict(skip_header=0, comments='--', names=True, dtype=None, encoding=None)
597
+ # if f.endswith('.rdb'):
598
+ # kw = dict(skip_header=2, dtype=None, encoding=None)
599
+ # else:
600
+ comments = '#'
601
+ kw = dict(skip_header=2, comments=comments,
602
+ names=names, dtype=None, encoding=None)
566
603
  if '\t' in header:
567
604
  data = np.genfromtxt(f, **kw, delimiter='\t')
568
605
  else:
569
606
  data = np.genfromtxt(f, **kw)
607
+
608
+ # if data.ndim in (0, 1):
609
+ # data = data.reshape(-1, 1)
610
+
570
611
  if len(names) == len(data.dtype.names):
571
612
  data.dtype.names = names
572
613
  else:
@@ -580,19 +621,20 @@ class RV:
580
621
  else:
581
622
  _s.fwhm_err = 2 * _s.svrad
582
623
  else:
583
- _s.fwhm = np.zeros_like(time)
624
+ _s.fwhm = np.full_like(time, np.nan)
584
625
  _s.fwhm_err = np.full_like(time, np.nan)
585
626
 
586
627
  _quantities.append('fwhm')
587
628
  _quantities.append('fwhm_err')
588
629
 
630
+ # try to find R'HK and uncertainty
589
631
  if (v := find_column(data, ['rhk'])) is not False:
590
632
  _s.rhk = v
591
633
  _s.rhk_err = np.full_like(time, np.nan)
592
634
  if (sv := find_column(data, ['srhk', 'rhk_err', 'sig_rhk'])) is not False:
593
635
  _s.rhk_err = sv
594
636
  else:
595
- _s.rhk = np.zeros_like(time)
637
+ _s.rhk = np.full_like(time, np.nan)
596
638
  _s.rhk_err = np.full_like(time, np.nan)
597
639
 
598
640
  _quantities.append('rhk')
@@ -636,8 +678,9 @@ class RV:
636
678
 
637
679
  _s.extra_fields = ExtraFields()
638
680
  for name in data.dtype.names:
639
- if name not in _quantities:
640
- name_ = name.replace(' ', '_')
681
+ # don't repeat some quantities
682
+ if name not in _quantities + ['bjd', 'rjd', 'vrad', 'svrad']:
683
+ name_ = name.replace(' ', '_').replace('-', '_')
641
684
  setattr(_s.extra_fields, name_, data[name])
642
685
  # _quantities.append(field)
643
686
 
@@ -744,8 +787,8 @@ class RV:
744
787
  return s
745
788
 
746
789
  @classmethod
747
- @lru_cache(maxsize=60)
748
- def from_KOBE_file(cls, star, **kwargs):
790
+ # @lru_cache(maxsize=60)
791
+ def from_KOBE_file(cls, star, directory='.', force_download=False, **kwargs):
749
792
  assert 'KOBE' in star, f'{star} is not a KOBE star?'
750
793
  import requests
751
794
  from requests.auth import HTTPBasicAuth
@@ -766,7 +809,14 @@ class RV:
766
809
  local_targz_file = os.path.join(get_data_path(), 'KOBE_fitsfiles.tar.gz')
767
810
  fits_file = f'{star}_RVs.fits'
768
811
 
769
- if os.path.exists(local_targz_file) and os.path.getmtime(local_targz_file) > pytime() - 60*60*2:
812
+ local_exists = os.path.exists(local_targz_file)
813
+ local_recent = os.path.getmtime(local_targz_file) > pytime() - 60*60*2
814
+
815
+ if os.path.exists(os.path.join(directory, fits_file)):
816
+ logger.info(f'found file "{fits_file}" in "{directory}"')
817
+ hdul = fits.open(fits_file)
818
+
819
+ elif local_exists and local_recent and not force_download:
770
820
  tar = tarfile.open(local_targz_file)
771
821
 
772
822
  if fits_file not in tar.getnames():
@@ -778,6 +828,7 @@ class RV:
778
828
  else:
779
829
  resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
780
830
  auth=HTTPBasicAuth('kobeteam', config.kobe_password))
831
+ logger.info(f'found file "{fits_file}" on server')
781
832
 
782
833
  if resp.status_code != 200:
783
834
  # something went wrong, try to extract the file by downloading the
@@ -979,17 +1030,23 @@ class RV:
979
1030
  logger.warning('may need to provide `top_level` in kwargs to find file')
980
1031
  do_symlink_filetype('CCF', files[:limit], directory, **kwargs)
981
1032
  else:
982
- do_download_filetype('CCF', files[:limit], directory, verbose=self.verbose, **kwargs)
1033
+ do_download_filetype('CCF', files[:limit], directory,
1034
+ verbose=self.verbose, user=self.user, **kwargs)
983
1035
 
984
1036
  if load:
985
1037
  try:
986
- from os.path import basename, join
1038
+ from os.path import basename, join, exists
987
1039
  from .utils import sanitize_path
988
1040
  import iCCF
989
1041
  downloaded = [
990
1042
  sanitize_path(join(directory, basename(f).replace('.fits', '_CCF_A.fits')))
991
1043
  for f in files[:limit]
992
1044
  ]
1045
+ downloaded = [
1046
+ skysub
1047
+ if exists(skysub := f.replace('CCF_A.fits', 'CCF_SKYSUB_A.fits')) else f
1048
+ for f in downloaded
1049
+ ]
993
1050
  if self.verbose:
994
1051
  logger.info('loading the CCF(s) into `.CCF` attribute')
995
1052
 
@@ -1028,7 +1085,8 @@ class RV:
1028
1085
  logger.warning('may need to provide `top_level` in kwargs to find file')
1029
1086
  do_symlink_filetype('S1D', files[:limit], directory, **kwargs)
1030
1087
  else:
1031
- do_download_filetype('S1D', files[:limit], directory, verbose=self.verbose, **kwargs)
1088
+ do_download_filetype('S1D', files[:limit], directory,
1089
+ verbose=self.verbose, user=self.user, **kwargs)
1032
1090
 
1033
1091
  def download_s2d(self, instrument=None, index=None, limit=None,
1034
1092
  directory=None, symlink=False, **kwargs):
@@ -1060,15 +1118,50 @@ class RV:
1060
1118
  logger.warning('may need to provide `top_level` in kwargs to find file')
1061
1119
  do_symlink_filetype('S2D', files[:limit], directory, **kwargs)
1062
1120
  else:
1063
- do_download_filetype('S2D', files[:limit], directory, verbose=self.verbose, **kwargs)
1121
+ do_download_filetype('S2D', files[:limit], directory,
1122
+ verbose=self.verbose, user=self.user, **kwargs)
1123
+
1064
1124
 
1065
1125
 
1066
- from .plots import plot, plot_fwhm, plot_bis, plot_rhk, plot_berv, plot_quantity
1067
- from .plots import gls, gls_fwhm, gls_bis, gls_rhk, window_function
1126
+ from .plots import plot, plot_fwhm, plot_bispan, plot_contrast, plot_rhk, plot_berv, plot_quantity
1127
+ from .plots import gls, gls_fwhm, gls_bispan, gls_rhk, gls_quantity, window_function
1068
1128
  from .reports import report
1069
1129
 
1070
1130
  from .instrument_specific import known_issues
1071
1131
 
1132
+ def change_instrument_name(self, old_name, new_name, strict=False):
1133
+ """ Change the name of an instrument
1134
+
1135
+ Args:
1136
+ old_name (str):
1137
+ The old name of the instrument
1138
+ new_name (str):
1139
+ The new name of the instrument, or postfix if `strict` is False
1140
+ strict (bool):
1141
+ Whether to match (each) `instrument` exactly
1142
+ """
1143
+ if new_name == '':
1144
+ if self.verbose:
1145
+ logger.error('new name cannot be empty string')
1146
+ return
1147
+
1148
+ instruments = self._check_instrument(old_name, strict, log=True)
1149
+ if instruments is not None:
1150
+ several = len(instruments) >= 2
1151
+ for instrument in instruments:
1152
+ if several:
1153
+ new_name_instrument = f'{instrument}_{new_name}'
1154
+ else:
1155
+ new_name_instrument = new_name
1156
+ if self.verbose:
1157
+ logger.info(f'Renaming {instrument} to {new_name_instrument}')
1158
+
1159
+ setattr(self, new_name_instrument, getattr(self, instrument))
1160
+ delattr(self, instrument)
1161
+ self.instruments[self.instruments.index(instrument)] = new_name_instrument
1162
+
1163
+ self._build_arrays()
1164
+
1072
1165
 
1073
1166
  def remove_instrument(self, instrument, strict=False):
1074
1167
  """ Remove all observations from one instrument
@@ -1212,10 +1305,9 @@ class RV:
1212
1305
 
1213
1306
  def remove_single_observations(self):
1214
1307
  """ Remove instruments for which there is a single observation """
1215
- instruments = deepcopy(self.instruments)
1216
- for inst in instruments:
1217
- if getattr(self, inst).mtime.size == 1:
1218
- self.remove_instrument(inst)
1308
+ singles = [i for i in self.instruments if getattr(self, i).mtime.size == 1]
1309
+ for inst in singles:
1310
+ self.remove_instrument(inst, strict=True)
1219
1311
 
1220
1312
  def remove_prog_id(self, prog_id):
1221
1313
  """ Remove observations from a given program ID """
@@ -1697,8 +1789,52 @@ class RV:
1697
1789
  if config.return_self:
1698
1790
  return self
1699
1791
 
1792
+ def detrend(self, degree=1):
1793
+ """ Detrend the RVs of all instruments """
1794
+ instrument_indices = np.unique_inverse(self.instrument_array).inverse_indices
1795
+ def fun(p, t, degree, ninstruments, just_model=False, index=None):
1796
+ polyp, offsets = p[:degree], p[-ninstruments:]
1797
+ polyp = np.r_[polyp, 0.0]
1798
+ if index is None:
1799
+ model = offsets[instrument_indices] + np.polyval(polyp, t)
1800
+ else:
1801
+ model = offsets[index] + np.polyval(polyp, t)
1802
+ if just_model:
1803
+ return model
1804
+ return self.mvrad - model
1805
+ coef = np.polyfit(self.mtime, self.mvrad, degree)
1806
+ x0 = np.append(coef, [0.0] * (len(self.instruments) - 1))
1807
+ print(x0)
1808
+ fun(x0, self.mtime, degree, len(self.instruments))
1809
+ from scipy.optimize import leastsq
1810
+ xbest, _ = leastsq(fun, x0, args=(self.mtime, degree, len(self.instruments)))
1811
+
1812
+ fig, ax = self.plot()
1813
+ ax.remove()
1814
+ ax = fig.add_subplot(2, 1, 1)
1815
+ self.plot(ax=ax)
1816
+ for i, inst in enumerate(self.instruments):
1817
+ s = getattr(self, inst)
1818
+ ax.plot(s.time, fun(xbest, s.time, degree, len(self.instruments), just_model=True, index=i),
1819
+ color=f'C{i}')
1820
+ ax.set_title('original', loc='left', fontsize=10)
1821
+ ax.set_title(f'coefficients: {xbest[:degree]}', loc='right', fontsize=10)
1822
+
1823
+ self.add_to_vrad(-fun(xbest, self.time, degree, len(self.instruments), just_model=True))
1824
+ ax = fig.add_subplot(2, 1, 2)
1825
+ self.plot(ax=ax)
1826
+ ax.set_title('detrended', loc='left', fontsize=10)
1827
+
1828
+ # axs[0].plot(self.time, fun(xbest, self.time, degree, len(self.instruments), just_model=True))
1829
+ # axs[1].errorbar(self.mtime, fun(xbest, self.mtime, degree, len(self.instruments)), self.msvrad, fmt='o')
1830
+
1831
+ return
1832
+
1833
+
1834
+
1835
+
1700
1836
  def add_to_vrad(self, values):
1701
- """ Add a value of array of values to the RVs of all instruments """
1837
+ """ Add a value or array of values to the RVs of all instruments """
1702
1838
  values = np.atleast_1d(values)
1703
1839
  if values.size == 1:
1704
1840
  values = np.full_like(self.vrad, values)
@@ -1723,7 +1859,7 @@ class RV:
1723
1859
 
1724
1860
  def add_to_quantity(self, quantity, values):
1725
1861
  """
1726
- Add a value of array of values to the given quantity of all instruments
1862
+ Add a value or array of values to the given quantity of all instruments
1727
1863
  """
1728
1864
  if not hasattr(self, quantity):
1729
1865
  logger.error(f"cannot find '{quantity}' attribute")
@@ -1742,6 +1878,75 @@ class RV:
1742
1878
  setattr(s, quantity, getattr(s, quantity) + values[mask])
1743
1879
  self._build_arrays()
1744
1880
 
1881
+ def replace_vrad(self, values):
1882
+ """ Replace the RVs of all instruments with a value or array of values """
1883
+ values = np.atleast_1d(values)
1884
+ if values.size == 1:
1885
+ values = np.full_like(self.vrad, values)
1886
+
1887
+ masked = False
1888
+ if values.size != self.vrad.size:
1889
+ if values.size == self.mvrad.size:
1890
+ logger.warning('adding to masked RVs only')
1891
+ masked = True
1892
+ else:
1893
+ raise ValueError(f"incompatible sizes: len(values) must equal self.N, got {values.size} != {self.vrad.size}")
1894
+
1895
+ for inst in self.instruments:
1896
+ s = getattr(self, inst)
1897
+ if masked:
1898
+ mask = self.instrument_array[self.mask] == inst
1899
+ s.vrad[s.mask] = values[mask]
1900
+ else:
1901
+ mask = self.instrument_array == inst
1902
+ s.vrad = values[mask]
1903
+ self._build_arrays()
1904
+
1905
+ def replace_svrad(self, values):
1906
+ """ Replace the RV uncertainties of all instruments with a value or array of values """
1907
+ values = np.atleast_1d(values)
1908
+ if values.size == 1:
1909
+ values = np.full_like(self.svrad, values)
1910
+
1911
+ masked = False
1912
+ if values.size != self.svrad.size:
1913
+ if values.size == self.msvrad.size:
1914
+ logger.warning('adding to masked RV uncertainties only')
1915
+ masked = True
1916
+ else:
1917
+ raise ValueError(f"incompatible sizes: len(values) must equal self.N, got {values.size} != {self.svrad.size}")
1918
+
1919
+ for inst in self.instruments:
1920
+ s = getattr(self, inst)
1921
+ if masked:
1922
+ mask = self.instrument_array[self.mask] == inst
1923
+ s.svrad[s.mask] = values[mask]
1924
+ else:
1925
+ mask = self.instrument_array == inst
1926
+ s.svrad = values[mask]
1927
+ self._build_arrays()
1928
+
1929
+ def replace_quantity(self, quantity, values):
1930
+ """ Replace the given quantity of all instruments by a value or array of values """
1931
+ if not hasattr(self, quantity):
1932
+ logger.error(f"cannot find '{quantity}' attribute")
1933
+ return
1934
+ q = getattr(self, quantity)
1935
+
1936
+ values = np.atleast_1d(values)
1937
+ if values.size == 1:
1938
+ values = np.full_like(q, values)
1939
+ if values.size != q.size:
1940
+ raise ValueError(f"incompatible sizes: len(values) must equal self.N, got {values.size} != {q.size}")
1941
+
1942
+ for inst in self.instruments:
1943
+ s = getattr(self, inst)
1944
+ mask = self.instrument_array == inst
1945
+ setattr(s, quantity, values[mask])
1946
+ self._build_arrays()
1947
+
1948
+
1949
+
1745
1950
  def change_units(self, new_units):
1746
1951
  possible = {'m/s': 'm/s', 'km/s': 'km/s', 'ms': 'm/s', 'kms': 'km/s'}
1747
1952
  if new_units not in possible:
@@ -1809,20 +2014,18 @@ class RV:
1809
2014
  """ Sort instruments by first or last observation date.
1810
2015
 
1811
2016
  Args:
1812
- by_first_observation (bool, optional):
2017
+ by_first_observation (bool, optional, default=True):
1813
2018
  Sort by first observation date.
1814
- by_last_observation (bool, optional):
1815
- Sort by last observation data.
2019
+ by_last_observation (bool, optional, default=False):
2020
+ Sort by last observation date.
1816
2021
  """
1817
2022
  if by_last_observation:
1818
2023
  by_first_observation = False
1819
2024
  if by_first_observation:
1820
- fun = lambda i: getattr(self, i).time.min()
1821
- self.instruments = sorted(self.instruments, key=fun)
2025
+ self.instruments = sorted(self.instruments, key=lambda i: getattr(self, i).time.min())
1822
2026
  self._build_arrays()
1823
2027
  if by_last_observation:
1824
- fun = lambda i: getattr(self, i).time.max()
1825
- self.instruments = sorted(self.instruments, key=fun)
2028
+ self.instruments = sorted(self.instruments, key=lambda i: getattr(self, i).time.max())
1826
2029
  self._build_arrays()
1827
2030
 
1828
2031
 
@@ -1841,7 +2044,7 @@ class RV:
1841
2044
  Postfix to add to the filenames ([star]_[instrument]_[postfix].rdb).
1842
2045
  save_nans (bool, optional)
1843
2046
  Whether to save NaN values in the indicators, if they exist. If
1844
- False, the full observation is not saved.
2047
+ False, the full observation which contains NaN values is not saved.
1845
2048
  """
1846
2049
  star_name = self.star.replace(' ', '')
1847
2050
 
@@ -1864,7 +2067,7 @@ class RV:
1864
2067
 
1865
2068
  if full:
1866
2069
  if save_masked:
1867
- d = np.c_[
2070
+ arrays = [
1868
2071
  _s.time, _s.vrad, _s.svrad,
1869
2072
  _s.fwhm, _s.fwhm_err,
1870
2073
  _s.bispan, _s.bispan_err,
@@ -1873,7 +2076,7 @@ class RV:
1873
2076
  _s.berv,
1874
2077
  ]
1875
2078
  else:
1876
- d = np.c_[
2079
+ arrays = [
1877
2080
  _s.mtime, _s.mvrad, _s.msvrad,
1878
2081
  _s.fwhm[_s.mask], _s.fwhm_err[_s.mask],
1879
2082
  _s.bispan[_s.mask], _s.bispan_err[_s.mask],
@@ -1882,12 +2085,13 @@ class RV:
1882
2085
  _s.berv[_s.mask],
1883
2086
  ]
1884
2087
  if not save_nans:
1885
- if np.isnan(d).any():
1886
- # remove observations where any of the indicators are # NaN
1887
- nan_mask = np.isnan(d[:, 3:]).any(axis=1)
1888
- d = d[~nan_mask]
1889
- if self.verbose:
1890
- logger.warning(f'masking {nan_mask.sum()} observations with NaN in indicators')
2088
+ raise NotImplementedError
2089
+ # if np.isnan(d).any():
2090
+ # # remove observations where any of the indicators are # NaN
2091
+ # nan_mask = np.isnan(d[:, 3:]).any(axis=1)
2092
+ # d = d[~nan_mask]
2093
+ # if self.verbose:
2094
+ # logger.warning(f'masking {nan_mask.sum()} observations with NaN in indicators')
1891
2095
 
1892
2096
  header = '\t'.join(['bjd', 'vrad', 'svrad',
1893
2097
  'fwhm', 'sig_fwhm',
@@ -1901,9 +2105,11 @@ class RV:
1901
2105
 
1902
2106
  else:
1903
2107
  if save_masked:
1904
- d = np.c_[_s.time, _s.vrad, _s.svrad]
2108
+ arrays = [_s.time, _s.vrad, _s.svrad]
1905
2109
  else:
1906
- d = np.c_[_s.mtime, _s.mvrad, _s.msvrad]
2110
+ arrays = [_s.mtime, _s.mvrad, _s.msvrad]
2111
+
2112
+ # d = np.stack(arrays, axis=1)
1907
2113
  header = 'bjd\tvrad\tsvrad\n---\t----\t-----'
1908
2114
 
1909
2115
  file = f'{star_name}_{inst}.rdb'
@@ -1913,7 +2119,17 @@ class RV:
1913
2119
  files.append(file)
1914
2120
  file = os.path.join(directory, file)
1915
2121
 
1916
- np.savetxt(file, d, fmt='%9.5f', header=header, delimiter='\t', comments='')
2122
+ N = len(arrays[0])
2123
+ with open(file, 'w') as f:
2124
+ f.write(header + '\n')
2125
+ for i in range(N):
2126
+ for j, a in enumerate(arrays):
2127
+ f.write(str(a[i]))
2128
+ if j < len(arrays) - 1:
2129
+ f.write('\t')
2130
+ f.write('\n')
2131
+
2132
+ # np.savetxt(file, d, header=header, delimiter='\t', comments='', fmt='%f')
1917
2133
 
1918
2134
  if self.verbose:
1919
2135
  logger.info(f'saving to {file}')
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.2
2
2
  Name: arvi
3
- Version: 0.1.25
3
+ Version: 0.1.27
4
4
  Summary: The Automated RV Inspector
5
5
  Author-email: João Faria <joao.faria@unige.ch>
6
6
  License: MIT
@@ -4,23 +4,23 @@ arvi/ariadne_wrapper.py,sha256=YvilopJa9T4NwPcj3Nah_U8smSeSAU5-HYZMb_GJ-BQ,2232
4
4
  arvi/berv.py,sha256=eKnpuPC1w45UrUEyFRbs9F9j3bXz3kxYzNXbnRgvFQM,17596
5
5
  arvi/binning.py,sha256=jbemJ-bM3aqoOsqMo_OhWt_co-JAQ0nhdG_GpTsrRsw,15403
6
6
  arvi/config.py,sha256=W-v8NNhRd_PROu0wCMilXmOhYcju4xbUalugd5u7SRU,1881
7
- arvi/dace_wrapper.py,sha256=cH7pQgKZDdK1Buahyl_D__H9WqVzBNMJuV6Ebw48amo,21270
8
- arvi/extra_data.py,sha256=WEEaYeLh52Zdv0uyHO72Ys5MWS3naTAP4wJV2BJ1mbk,2551
9
- arvi/gaia_wrapper.py,sha256=icm3LJjG9pjP47_bM30NFyocUQO3X3SHS5yQ-Dwcr5w,4653
7
+ arvi/dace_wrapper.py,sha256=dwGj_XuN8J5An9I8ioeK7saj2TNLwwcobOu6oRo_HmM,22228
8
+ arvi/extra_data.py,sha256=cpJGMle0ZqY_dtrmbbMQcyU48PkNjfzUgQ-qY-2XTj8,3249
9
+ arvi/gaia_wrapper.py,sha256=2q_7bm6MGvTLlegfNUCY_EhnMKYv1CZmcbanOm_ot-k,4197
10
10
  arvi/headers.py,sha256=uvdJebw1M5YkGjE3vJJwYBOnLikib75uuZE9FXB5JJM,1673
11
11
  arvi/instrument_specific.py,sha256=-pbm2Vk3iK_1K7nDa1avlJOKHBcXllwILI4lQn-Ze-A,7761
12
12
  arvi/kima_wrapper.py,sha256=BvNTVqzM4lMNhLCyBFVh3T84hHfGKAFpgiYiOi4lh0g,2731
13
13
  arvi/lbl_wrapper.py,sha256=_ViGVkpakvuBR_xhu9XJRV5EKHpj5Go6jBZGJZMIS2Y,11850
14
14
  arvi/nasaexo_wrapper.py,sha256=mWt7eHgSZe4MBKCmUvMPTyUPGuiwGTqKugNBvmjOg9s,7306
15
- arvi/plots.py,sha256=WUm-sqN0aZTNXvE1kYpvmHTW9QPWqSCpKhNjwaqxjEk,29628
15
+ arvi/plots.py,sha256=ys3M_zU701ubz-hEcE8Jpp4fG9D4yoZE7o7q1qW_-nA,29758
16
16
  arvi/programs.py,sha256=BW7xBNKLei7NVLLW3_lsVskwzkaIoNRiHK2jn9Tn2ZM,8879
17
17
  arvi/reports.py,sha256=ayPdZ4HZO9iCDdnADQ18gQPJh79o-1UYG7TYkvm9Lrc,4051
18
18
  arvi/setup_logger.py,sha256=pBzaRTn0hntozjbaRVx0JIbWGuENkvYUApa6uB-FsRo,279
19
- arvi/simbad_wrapper.py,sha256=iAAwEMcr1Hgu6lnDctmaCC1TLPCB8yAfHG0wxh9K9C8,5791
19
+ arvi/simbad_wrapper.py,sha256=hyMnTeZ4DpnTzyEopkdUfNtJ_roSgdvYPXwYcmXVX2U,8238
20
20
  arvi/spectra.py,sha256=ebF1ocodTastLx0CyqLSpE8EZNDXBF8riyfxMr3L6H0,7491
21
21
  arvi/stats.py,sha256=ilzzGL9ew-SyVa9eEdrYCpD3DliOAwhoNUg9LIlHjzU,2583
22
22
  arvi/stellar.py,sha256=veuL_y9kJvvApU_jqYQqP3EkcRnQffTc8Us6iT5UrFI,3790
23
- arvi/timeseries.py,sha256=NdmSSYeDdS-cYBXPt8NCeKS1jLdv8LP6Zh561KRGfZc,77328
23
+ arvi/timeseries.py,sha256=s_1bKO9sl09mGFXCp3iyUezFzCdiEwO7IU68mq8LH-Q,85746
24
24
  arvi/translations.py,sha256=PUSrn4zvYO2MqGzUxlFGwev_tBkgJaJrIYs6NKHzbWo,951
25
25
  arvi/utils.py,sha256=LImV8iPjG8ZKjPCT9lp25_pDb-51ZZk42Hc8bzZt7M0,6568
26
26
  arvi/data/info.svg,sha256=0IMI6W-eFoTD8acnury79WJJakpBwLa4qKS4JWpsXiI,489
@@ -29,8 +29,8 @@ arvi/data/obs_affected_blue_cryostat_issues.dat,sha256=z4AK17xfz8tGTDv1FjRvQFnio
29
29
  arvi/data/extra/HD86226_PFS1.rdb,sha256=vfAozbrKHM_j8dYkCBJsuHyD01KEM1asghe2KInwVao,3475
30
30
  arvi/data/extra/HD86226_PFS2.rdb,sha256=F2P7dB6gVyzCglUjNheB0hIHVClC5RmARrGwbrY1cfo,4114
31
31
  arvi/data/extra/metadata.json,sha256=C69hIw6CohyES6BI9vDWjxwSz7N4VOYX0PCgjXtYFmU,178
32
- arvi-0.1.25.dist-info/LICENSE,sha256=6JfQgl7SpM55t0EHMFNMnNh-AdkpGW25MwMiTnhdWQg,1068
33
- arvi-0.1.25.dist-info/METADATA,sha256=kB1PpEDCZNesQG7KRzONb36uILEryuieL3QK8DbJAkU,1852
34
- arvi-0.1.25.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
35
- arvi-0.1.25.dist-info/top_level.txt,sha256=4EeiKDVLD45ztuflTGfQ3TU8GVjJg5Y95xS5XjI-utU,5
36
- arvi-0.1.25.dist-info/RECORD,,
32
+ arvi-0.1.27.dist-info/LICENSE,sha256=6JfQgl7SpM55t0EHMFNMnNh-AdkpGW25MwMiTnhdWQg,1068
33
+ arvi-0.1.27.dist-info/METADATA,sha256=-x1faGqEzZcmUovGR1PApN_7fDp9PqmcWinEZYNV-KE,1852
34
+ arvi-0.1.27.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
35
+ arvi-0.1.27.dist-info/top_level.txt,sha256=4EeiKDVLD45ztuflTGfQ3TU8GVjJg5Y95xS5XjI-utU,5
36
+ arvi-0.1.27.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (75.6.0)
2
+ Generator: setuptools (75.8.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
File without changes