arvi 0.2.6__tar.gz → 0.2.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (64)
  1. {arvi-0.2.6/arvi.egg-info → arvi-0.2.8}/PKG-INFO +1 -1
  2. {arvi-0.2.6 → arvi-0.2.8}/arvi/dace_wrapper.py +25 -12
  3. {arvi-0.2.6 → arvi-0.2.8}/arvi/instrument_specific.py +19 -18
  4. {arvi-0.2.6 → arvi-0.2.8}/arvi/kima_wrapper.py +42 -7
  5. {arvi-0.2.6 → arvi-0.2.8}/arvi/programs.py +8 -4
  6. {arvi-0.2.6 → arvi-0.2.8}/arvi/simbad_wrapper.py +20 -0
  7. {arvi-0.2.6 → arvi-0.2.8}/arvi/timeseries.py +243 -77
  8. {arvi-0.2.6 → arvi-0.2.8/arvi.egg-info}/PKG-INFO +1 -1
  9. {arvi-0.2.6 → arvi-0.2.8}/.github/dependabot.yml +0 -0
  10. {arvi-0.2.6 → arvi-0.2.8}/.github/workflows/docs-gh-pages.yml +0 -0
  11. {arvi-0.2.6 → arvi-0.2.8}/.github/workflows/install.yml +0 -0
  12. {arvi-0.2.6 → arvi-0.2.8}/.github/workflows/python-publish.yml +0 -0
  13. {arvi-0.2.6 → arvi-0.2.8}/.gitignore +0 -0
  14. {arvi-0.2.6 → arvi-0.2.8}/LICENSE +0 -0
  15. {arvi-0.2.6 → arvi-0.2.8}/README.md +0 -0
  16. {arvi-0.2.6 → arvi-0.2.8}/arvi/HZ.py +0 -0
  17. {arvi-0.2.6 → arvi-0.2.8}/arvi/__init__.py +0 -0
  18. {arvi-0.2.6 → arvi-0.2.8}/arvi/ariadne_wrapper.py +0 -0
  19. {arvi-0.2.6 → arvi-0.2.8}/arvi/berv.py +0 -0
  20. {arvi-0.2.6 → arvi-0.2.8}/arvi/binning.py +0 -0
  21. {arvi-0.2.6 → arvi-0.2.8}/arvi/config.py +0 -0
  22. {arvi-0.2.6 → arvi-0.2.8}/arvi/data/extra/HD86226_PFS1.rdb +0 -0
  23. {arvi-0.2.6 → arvi-0.2.8}/arvi/data/extra/HD86226_PFS2.rdb +0 -0
  24. {arvi-0.2.6 → arvi-0.2.8}/arvi/data/extra/metadata.json +0 -0
  25. {arvi-0.2.6 → arvi-0.2.8}/arvi/data/info.svg +0 -0
  26. {arvi-0.2.6 → arvi-0.2.8}/arvi/data/obs_affected_ADC_issues.dat +0 -0
  27. {arvi-0.2.6 → arvi-0.2.8}/arvi/data/obs_affected_blue_cryostat_issues.dat +0 -0
  28. {arvi-0.2.6 → arvi-0.2.8}/arvi/exofop_wrapper.py +0 -0
  29. {arvi-0.2.6 → arvi-0.2.8}/arvi/extra_data.py +0 -0
  30. {arvi-0.2.6 → arvi-0.2.8}/arvi/gaia_wrapper.py +0 -0
  31. {arvi-0.2.6 → arvi-0.2.8}/arvi/headers.py +0 -0
  32. {arvi-0.2.6 → arvi-0.2.8}/arvi/lbl_wrapper.py +0 -0
  33. {arvi-0.2.6 → arvi-0.2.8}/arvi/nasaexo_wrapper.py +0 -0
  34. {arvi-0.2.6 → arvi-0.2.8}/arvi/plots.py +0 -0
  35. {arvi-0.2.6 → arvi-0.2.8}/arvi/reports.py +0 -0
  36. {arvi-0.2.6 → arvi-0.2.8}/arvi/setup_logger.py +0 -0
  37. {arvi-0.2.6 → arvi-0.2.8}/arvi/sophie_wrapper.py +0 -0
  38. {arvi-0.2.6 → arvi-0.2.8}/arvi/spectra.py +0 -0
  39. {arvi-0.2.6 → arvi-0.2.8}/arvi/stats.py +0 -0
  40. {arvi-0.2.6 → arvi-0.2.8}/arvi/stellar.py +0 -0
  41. {arvi-0.2.6 → arvi-0.2.8}/arvi/translations.py +0 -0
  42. {arvi-0.2.6 → arvi-0.2.8}/arvi/utils.py +0 -0
  43. {arvi-0.2.6 → arvi-0.2.8}/arvi.egg-info/SOURCES.txt +0 -0
  44. {arvi-0.2.6 → arvi-0.2.8}/arvi.egg-info/dependency_links.txt +0 -0
  45. {arvi-0.2.6 → arvi-0.2.8}/arvi.egg-info/requires.txt +0 -0
  46. {arvi-0.2.6 → arvi-0.2.8}/arvi.egg-info/top_level.txt +0 -0
  47. {arvi-0.2.6 → arvi-0.2.8}/docs/API.md +0 -0
  48. {arvi-0.2.6 → arvi-0.2.8}/docs/detailed.ipynb +0 -0
  49. {arvi-0.2.6 → arvi-0.2.8}/docs/downloading_data.md +0 -0
  50. {arvi-0.2.6 → arvi-0.2.8}/docs/index.md +0 -0
  51. {arvi-0.2.6 → arvi-0.2.8}/docs/logo/detective.png +0 -0
  52. {arvi-0.2.6 → arvi-0.2.8}/docs/logo/logo.png +0 -0
  53. {arvi-0.2.6 → arvi-0.2.8}/docs/stylesheets/extra.css +0 -0
  54. {arvi-0.2.6 → arvi-0.2.8}/mkdocs.yml +0 -0
  55. {arvi-0.2.6 → arvi-0.2.8}/pyproject.toml +0 -0
  56. {arvi-0.2.6 → arvi-0.2.8}/setup.cfg +0 -0
  57. {arvi-0.2.6 → arvi-0.2.8}/setup.py +0 -0
  58. {arvi-0.2.6 → arvi-0.2.8}/tests/HD10700-Bcor_ESPRESSO18.rdb +0 -0
  59. {arvi-0.2.6 → arvi-0.2.8}/tests/test_binning.py +0 -0
  60. {arvi-0.2.6 → arvi-0.2.8}/tests/test_config.py +0 -0
  61. {arvi-0.2.6 → arvi-0.2.8}/tests/test_create_RV.py +0 -0
  62. {arvi-0.2.6 → arvi-0.2.8}/tests/test_import_object.py +0 -0
  63. {arvi-0.2.6 → arvi-0.2.8}/tests/test_simbad.py +0 -0
  64. {arvi-0.2.6 → arvi-0.2.8}/tests/test_stats.py +0 -0
{arvi-0.2.6/arvi.egg-info → arvi-0.2.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: arvi
-Version: 0.2.6
+Version: 0.2.8
 Summary: The Automated RV Inspector
 Author-email: João Faria <joao.faria@unige.ch>
 License: MIT
{arvi-0.2.6 → arvi-0.2.8}/arvi/dace_wrapper.py

@@ -2,7 +2,7 @@ import os
 import sys
 import tarfile
 import collections
-from functools import lru_cache
+from functools import lru_cache, partial
 from itertools import islice
 import numpy as np
 
@@ -95,7 +95,7 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
     npipe = len(pipelines)
     if 'NIRPS' in inst and any(['LBL' in p for p in pipelines]):
         # TODO: correctly load both CCF and LBL
-        pipelines = [pipelines[1]]
+        pipelines = [pipelines[0]]
     if 'HARPS' in inst and npipe > 1 and pipelines[1] == pipelines[0] + '-EGGS':
         pipelines = pipelines[:2]
     else:
@@ -170,7 +170,7 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None,
     """
     Spectroscopy = load_spectroscopy(user)
     found_dace_id = False
-    with timer('simbad query'):
+    with timer('dace_id query'):
         try:
             dace_id = get_dace_id(star, verbose=verbose, raise_error=True)
             found_dace_id = True
@@ -212,17 +212,22 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None,
 
     for inst in np.unique(result['ins_name']):
         mask1 = result['ins_name'] == inst
-        r[inst] = {}
+        r[str(inst)] = {}
+
+        key2 = 'ins_drs_version'
+        n_key2 = len(np.unique(result[key2][mask1]))
+        if len(np.unique(result['pub_bibcode'][mask1])) >= n_key2:
+            key2 = 'pub_bibcode'
 
-        for pipe in np.unique(result['ins_drs_version'][mask1]):
-            mask2 = mask1 & (result['ins_drs_version'] == pipe)
-            r[inst][pipe] = {}
+        for pipe in np.unique(result[key2][mask1]):
+            mask2 = mask1 & (result[key2] == pipe)
+            r[str(inst)][str(pipe)] = {}
 
             for ins_mode in np.unique(result['ins_mode'][mask2]):
                 mask3 = mask2 & (result['ins_mode'] == ins_mode)
                 _nan = np.full(mask3.sum(), np.nan)
 
-                r[inst][pipe][ins_mode] = {
+                r[str(inst)][str(pipe)][str(ins_mode)] = {
                     'texp': result['texp'][mask3],
                     'bispan': result['spectro_ccf_bispan'][mask3],
                     'bispan_err': result['spectro_ccf_bispan_err'][mask3],
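The `str(...)` wrappers are worth a note: `np.unique` yields `numpy.str_` scalars, so without the cast the nested result dict ends up keyed by NumPy string types rather than plain Python strings. A minimal sketch of the difference (not arvi code):

```python
# Minimal sketch (not arvi code): np.unique returns numpy.str_ scalars, so
# the diff casts them with str() to keep the nested dict keyed by plain str.
import numpy as np

ins_names = np.array(['HARPS', 'ESPRESSO', 'HARPS'])

r = {}
for inst in np.unique(ins_names):
    r[str(inst)] = {}
    print(type(inst).__name__, '->', type(str(inst)).__name__)  # str_ -> str

print(list(r))  # ['ESPRESSO', 'HARPS']
```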
@@ -372,9 +377,11 @@ def get_observations(star, instrument=None, user=None, main_id=None, verbose=Tru
     # # For all other strings, sort alphabetically
     # return (2, s)
 
-    def custom_key(val):
+    def custom_key(val, strip_EGGS=False):
+        if strip_EGGS:
+            val = val.replace('-EGGS', '').replace(' EGGS', '')
         key = 0
-        key -= 2 if val == '3.5' else 0
+        key -= 1 if '3.5' in val else 0
         key -= 1 if 'EGGS' in val else 0
         key -= 1 if ('UHR' in val or 'MR' in val) else 0
         key -= 1 if 'LBL' in val else 0
@@ -385,6 +392,8 @@ def get_observations(star, instrument=None, user=None, main_id=None, verbose=Tru
         # new_result[inst] = dict(
         #     sorted(result[inst].items(), key=custom_sort_key, reverse=True)
         # )
+        if all(['EGGS' in k for k in result[inst].keys()]):
+            custom_key = partial(custom_key, strip_EGGS=True)
         # WARNING: not the same as reverse=True (not sure why)
         sorted_keys = sorted(result[inst].keys(), key=custom_key)[::-1]
         new_result[inst] = {}
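The `partial` import added at the top of the file serves this sorting tweak: when every pipeline key contains 'EGGS', the suffix no longer discriminates, so it is stripped before ranking. A minimal sketch of the pattern (not arvi code):

```python
# Minimal sketch (not arvi code) of freezing a keyword argument of a sort
# key with functools.partial when a suffix is common to all keys.
from functools import partial

def custom_key(val, strip_EGGS=False):
    if strip_EGGS:
        val = val.replace('-EGGS', '').replace(' EGGS', '')
    key = 0
    key -= 1 if '3.5' in val else 0
    key -= 1 if 'EGGS' in val else 0
    return key

pipelines = ['3.0.0-EGGS', '3.5-EGGS']
if all('EGGS' in p for p in pipelines):
    custom_key = partial(custom_key, strip_EGGS=True)

# the shared '-EGGS' suffix no longer affects the ordering
print(sorted(pipelines, key=custom_key)[::-1])
```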
@@ -535,6 +544,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=
     """ Download CCFs / S1Ds / S2Ds from DACE """
     logger = setup_logger()
     raw_files = np.atleast_1d(raw_files)
+    raw_files_original = raw_files.copy()
 
     create_directory(output_directory)
 
@@ -548,7 +558,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=
     if n == 0:
         if verbose:
             logger.info('no files to download')
-        return
+        return list(map(os.path.basename, raw_files_original))
 
     # avoid an empty chunk
     if chunk_size > n:
@@ -566,7 +576,9 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=
 
     if n < parallel_limit:
         iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
-        for files in tqdm(iterator, total=len(iterator)):
+        if len(iterator) > 1:
+            iterator = tqdm(iterator, total=len(iterator))
+        for files in iterator:
             download(files, type, output_directory, quiet=False, user=user)
             extract_fits(output_directory)
 
@@ -595,6 +607,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=
 
     sys.stdout.flush()
     logger.info('extracted .fits files')
+    return list(map(os.path.basename, raw_files_original))
 
 
 # def do_download_s1d(raw_files, output_directory, clobber=False, verbose=True):
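Two behaviours change in `do_download_filetype`: the progress bar now only appears when there is more than one chunk, and the function returns the basenames of the requested files (even when nothing needs downloading) so callers can locate them afterwards. A minimal sketch of the chunking logic (not the DACE downloader):

```python
# Minimal sketch (not the DACE downloader): tqdm only wraps the iterator
# when there is more than one chunk, and the basenames are always returned.
import os
from tqdm import tqdm

def download_in_chunks(files, chunk_size=5):
    n = len(files)
    if n == 0:
        return [os.path.basename(f) for f in files]

    iterator = [files[i:i + chunk_size] for i in range(0, n, chunk_size)]
    if len(iterator) > 1:              # no progress bar for a single chunk
        iterator = tqdm(iterator, total=len(iterator))
    for chunk in iterator:
        pass                           # the actual download would happen here
    return [os.path.basename(f) for f in files]

print(download_in_chunks([f'/data/ccf_{i}.fits' for i in range(12)]))
```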
{arvi-0.2.6 → arvi-0.2.8}/arvi/instrument_specific.py

@@ -127,9 +127,9 @@ def HARPS_commissioning(self, mask=True, plot=True):
     total_affected = affected.sum()
 
     if self.verbose:
-        n = total_affected
-        logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                    "during HARPS commissioning")
+        n, i = total_affected, int(total_affected != 1)
+        logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                    "during HARPS commissioning")
 
     if mask:
         self.mask[affected] = False
@@ -159,9 +159,9 @@ def HARPS_fiber_commissioning(self, mask=True, plot=True):
     total_affected = affected.sum()
 
     if self.verbose:
-        n = total_affected
-        logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                    "during the HARPS fiber commissioning period")
+        n, i = total_affected, int(total_affected != 1)
+        logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                    "during the HARPS fiber commissioning period")
 
     if mask:
         self.mask[affected] = False
@@ -191,15 +191,15 @@ def ESPRESSO_commissioning(self, mask=True, plot=True):
     total_affected = affected.sum()
 
     if self.verbose:
-        n = total_affected
-        logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                    "during ESPRESSO commissioning")
+        n, i = total_affected, int(total_affected != 1)
+        logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                    "during ESPRESSO commissioning")
 
     if mask:
         self.mask[affected] = False
         self._propagate_mask_changes()
 
-    if plot:
+    if plot and total_affected > 0:
         self.plot(show_masked=True)
 
     return affected
@@ -240,9 +240,9 @@ def ADC_issues(self, mask=True, plot=True, check_headers=False):
     total_affected = intersect.sum()
 
     if self.verbose:
-        n = total_affected
-        logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                    "affected by ADC issues")
+        n, i = total_affected, int(total_affected != 1)
+        logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                    "affected by ADC issues")
 
     if mask:
         self.mask[intersect] = False
@@ -276,9 +276,9 @@ def blue_cryostat_issues(self, mask=True, plot=True):
     total_affected = intersect.sum()
 
     if self.verbose:
-        n = total_affected
-        logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                    "affected by blue cryostat issues")
+        n, i = total_affected, int(total_affected != 1)
+        logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                    "affected by blue cryostat issues")
 
     if mask:
         self.mask[intersect] = False
@@ -322,8 +322,9 @@ def qc_scired_issues(self, plot=False, **kwargs):
     n = affected.sum()
 
     if self.verbose:
-        logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                    "where QC SCIRED CHECK is 0")
+        i = int(n != 1)
+        logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                    "where QC SCIRED CHECK is 0")
 
     if n == 0:
         return
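All of these logging changes replace the same slice-XOR pluralization hack. The old `'are'[:n^1]`/`'is'[n^1:]` trick only works for n == 1 and n == 2: XOR-ing with 1 flips the low bit, so n == 0 yields "as" and n == 3 yields "ar". The explicit list indexing is correct for any count:

```python
# The replaced pluralization hack vs. the new form, for a few counts.
def old(n):
    return f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]}"

def new(n):
    i = int(n != 1)
    return f"there {['is', 'are'][i]} {n} frame{['', 's'][i]}"

for n in (0, 1, 2, 3):
    print(f'{old(n)!r:28} {new(n)!r}')
# old(0) -> 'there as 0 frames' and old(3) -> 'there ar 3 frames' are wrong;
# new() is correct for every n.
```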
{arvi-0.2.6 → arvi-0.2.8}/arvi/kima_wrapper.py

@@ -1,13 +1,14 @@
 import os
 import numpy as np
 
-from .setup_logger import logger
+from .setup_logger import setup_logger
 
 try:
     import kima
     from kima.pykima.utils import chdir
     from kima import distributions
-    from kima import RVData, RVmodel
+    from kima import RVData, HGPMdata
+    from kima import RVmodel, GPmodel, RVHGPMmodel
     kima_available = True
 except ImportError:
     kima_available = False
@@ -21,9 +22,12 @@ def try_to_guess_prior(model, prior):
     return None
 
 
-def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
+def run_kima(self, run=False, load=False, run_directory=None,
+             model=RVmodel, priors={}, **kwargs):
     if not kima_available:
         raise ImportError('kima not available, please install with `pip install kima`')
+
+    logger = setup_logger()
 
     instruments = [inst for inst in self.instruments if self.NN[inst] > 1]
     time = [getattr(self, inst).mtime for inst in instruments]
@@ -33,12 +37,34 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
 
     fix = kwargs.pop('fix', False)
     npmax = kwargs.pop('npmax', 1)
-    model = RVmodel(fix=fix, npmax=npmax, data=data)
+
+    if isinstance(model, str):
+        try:
+            model = {
+                'RVmodel': RVmodel,
+                'GPmodel': GPmodel,
+                'RVHGPMmodel': RVHGPMmodel
+            }[model]
+        except KeyError:
+            raise ValueError(f'unknown model: {model}')
+
+    if model is RVHGPMmodel:
+        pm_data = HGPMdata(self.simbad.gaia_id)
+        model = model(fix=fix, npmax=npmax, data=data, pm_data=pm_data)
+    else:
+        model = model(fix=fix, npmax=npmax, data=data)
 
     model.trend = kwargs.pop('trend', False)
     model.degree = kwargs.pop('degree', 0)
 
-    model.studentt = kwargs.pop('studentt', False)
+    if isinstance(model, RVmodel):
+        model.studentt = kwargs.pop('studentt', False)
+
+    if isinstance(model, GPmodel):
+        if 'kernel' in kwargs:
+            model.kernel = kwargs.pop('kernel')
+
+
     model.enforce_stability = kwargs.pop('enforce_stability', False)
     model.star_mass = kwargs.pop('star_mass', 1.0)
 
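The string-or-class dispatch above is a small pattern worth isolating: strings are resolved through a dict, with a clear error for unknown names, while classes pass through untouched. A minimal sketch with stub classes (not kima itself):

```python
# Minimal sketch of string-or-class model dispatch (stub classes, not kima).
class RVmodel: pass
class GPmodel: pass

MODELS = {'RVmodel': RVmodel, 'GPmodel': GPmodel}

def resolve_model(model):
    if isinstance(model, str):
        try:
            model = MODELS[model]
        except KeyError:
            raise ValueError(f'unknown model: {model}')
    return model

print(resolve_model('GPmodel').__name__)  # GPmodel
print(resolve_model(RVmodel).__name__)    # classes pass through unchanged
```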
@@ -49,6 +75,13 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
 
     if kwargs.pop('kuma', False):
         model.conditional.eprior = distributions.Kumaraswamy(0.867, 3.03)
+
+    if isinstance(model, RVHGPMmodel):
+        model.pm_ra_bary_prior = priors.pop('pm_ra_bary_prior',
+            distributions.Gaussian(pm_data.pm_ra_hg, pm_data.sig_hg_ra))
+        model.pm_dec_bary_prior = priors.pop('pm_dec_bary_prior',
+            distributions.Gaussian(pm_data.pm_dec_hg, pm_data.sig_hg_dec))
+
+
     for k, v in priors.items():
         try:
             if 'conditional' in k:
@@ -67,7 +100,6 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
         run_directory = os.getcwd()
 
     if run:
-
         # TODO: use signature of kima.run to pop the correct kwargs
         # model_name = model.__class__.__name__
         # model_name = f'kima.{model_name}.{model_name}'
@@ -75,7 +107,10 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
 
         with chdir(run_directory):
             kima.run(model, **kwargs)
-
+
+    if isinstance(model, RVHGPMmodel):
+        data = (data, pm_data)
+
     if load:
         with chdir(run_directory):
             res = kima.load_results(model)
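Putting the kima changes together, a hypothetical call of the extended `run_kima` could look like the following (argument values are illustrative, not from the source; `s` is an `RV` object):

```python
# Hypothetical usage of the extended run_kima (values illustrative).
# `model` may be a class or a name; for 'RVHGPMmodel', Hipparcos-Gaia
# proper-motion data is fetched via HGPMdata(self.simbad.gaia_id) and
# load_results later works with a (data, pm_data) pair.
res = s.run_kima(model='GPmodel', npmax=2, run=True, load=True,
                 run_directory='kima_runs')
```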
{arvi-0.2.6 → arvi-0.2.8}/arvi/programs.py

@@ -3,11 +3,13 @@ import multiprocessing
 from functools import partial, lru_cache
 from itertools import chain
 from collections import namedtuple
-from multiprocessing.pool import ThreadPool
+from multiprocessing.pool import ThreadPool, Pool
+import concurrent.futures
+import sys
 from tqdm import tqdm
 # import numpy as np
 
-from .setup_logger import logger
+from .setup_logger import setup_logger
 from .timeseries import RV
 
 __all__ = ['ESPRESSO_GTO']
@@ -22,13 +24,14 @@ def get_star(star, instrument=None, verbose=False, **kwargs):
 
 class LazyRV:
     def __init__(self, stars: list, instrument: str = None,
-                 _parallel_limit=10):
+                 _parallel_limit=10, _parallel_workers=8):
         self.stars = stars
         if isinstance(self.stars, str):
             self.stars = [self.stars]
         self.instrument = instrument
         self._saved = None
         self._parallel_limit = _parallel_limit
+        self._parallel_workers = _parallel_workers
 
     @property
     def N(self):
@@ -38,10 +41,11 @@ class LazyRV:
         return f"RV({self.N} stars)"
 
     def _get(self, **kwargs):
+        logger = setup_logger()
         if self.N > self._parallel_limit:
             # logger.info('Querying DACE...')
             _get_star = partial(get_star, instrument=self.instrument, **kwargs)
-            with ThreadPool(8) as pool:
+            with Pool(self._parallel_workers) as pool:
                 result = list(tqdm(pool.imap(_get_star, self.stars),
                                    total=self.N, unit='star',
                                    desc='Querying DACE (can take a while)'))
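The fan-out moves from a fixed `ThreadPool(8)` to a process `Pool` whose size is configurable via `_parallel_workers`. A minimal sketch of the `Pool.imap` + `tqdm` pattern (not arvi code):

```python
# Minimal sketch (not arvi code): Pool.imap yields results in submission
# order, so tqdm can report per-item progress; the worker count is a
# parameter instead of a hard-coded 8.
from functools import partial
from multiprocessing.pool import Pool
from tqdm import tqdm

def get_star(star, instrument=None):
    return f'{star} ({instrument})'   # stands in for the DACE query

if __name__ == '__main__':
    stars = ['HD10180', 'HD69830', 'Proxima']
    worker = partial(get_star, instrument='ESPRESSO')
    with Pool(2) as pool:
        result = list(tqdm(pool.imap(worker, stars),
                           total=len(stars), unit='star'))
    print(result)
```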
{arvi-0.2.6 → arvi-0.2.8}/arvi/simbad_wrapper.py

@@ -70,6 +70,17 @@ FROM ident AS id1 JOIN ident AS id2 USING(oidref)
 WHERE id1.id = '{star}' AND id2.id LIKE '{name}%';
 """
 
+RA_DEC_QUERY = """
+SELECT DISTINCT basic.OID,
+       RA,
+       DEC,
+       main_id,
+       DISTANCE(POINT('ICRS', RA, DEC), POINT('ICRS', {ra}, {dec})) as "dist"
+FROM basic JOIN flux ON oidref = oid
+WHERE CONTAINS(POINT('ICRS', RA, DEC), CIRCLE('ICRS', {ra}, {dec}, 0.02)) = 1
+ORDER BY "dist";
+"""
+
 def find_identifier(identifier, star):
     response = run_query(HD_GJ_HIP_QUERY.format(name=identifier, star=star))
     if identifier in response:
@@ -288,6 +299,15 @@ class simbad:
     def bmv(self):
         return self.B - self.V
 
+    @classmethod
+    def from_ra_dec(cls, ra, dec, **kwargs):
+        table1 = run_query(query=RA_DEC_QUERY.format(ra=ra, dec=dec))
+        cols, values = parse_tablen(table1)
+        if len(values) == 0:
+            raise ValueError(f'no Simbad results for ra={ra}, dec={dec}')
+        assert cols == ['oid', 'ra', 'dec', 'main_id', 'dist']
+        star = values[0][cols.index('main_id')].replace('"', '')
+        return cls(star, **kwargs)
 
 def argsort_by_spectral_type(sptypes):
     STs = [f'{letter}{n}' for letter in ('F', 'G', 'K', 'M') for n in range(10)]
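The new `from_ra_dec` constructor performs a TAP cone search (radius 0.02 deg, ordered by angular distance) and instantiates the class with the closest `main_id`; it raises `ValueError` when the search comes back empty. A hypothetical call (coordinates illustrative, and it requires network access to the Simbad TAP service):

```python
# Hypothetical usage (coordinates illustrative, roughly Proxima Cen).
target = simbad.from_ra_dec(ra=217.42895, dec=-62.67948)
# raises ValueError if no Simbad object lies within 0.02 deg of (ra, dec)
```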
{arvi-0.2.6 → arvi-0.2.8}/arvi/timeseries.py

@@ -40,24 +40,53 @@ class RV(ISSUES, REPORTS):
     """
     A class holding RV observations
 
+    Args:
+        star (str):
+            Name of the star
+        instrument (str, list):
+            Name of the instrument or list of instruments
+        verbose (bool):
+            Print logging messages
+        do_maxerror:
+            Mask points based on a maximum RV uncertainty
+        do_secular_acceleration:
+            Apply secular acceleration correction. This only applies
+            to certain instruments.
+        do_sigma_clip (bool):
+            Apply sigma clipping on the RVs
+        do_adjust_means (bool):
+            Subtract individual weighted mean RV from each instrument
+        only_latest_pipeline (bool):
+            Select only the latest pipeline from each instrument
+        load_extra_data (bool):
+        check_drs_qc (bool):
+            Mask points based on DRS quality control flags
+        user (str):
+            User name for DACE queries (should be a section in `~/.dacerc` file)
+
     Examples:
         >>> s = RV('Proxima')
+        >>> s = RV('HD10180', instrument='HARPS')
 
-    Attributes:
-        star (str):
-            The name of the star
-        N (int):
-            Total number of observations
-        instruments (list):
-            List of instruments for which there are RVs. Each instrument is also
-            stored as an attribute (e.g. `self.CORALIE98` or `self.HARPS`)
-        simbad (simbad):
-            Information on the target from Simbad
     """
+    # Attributes:
+    #   star (str):
+    #       The name of the star
+    #   N (int):
+    #       Total number of observations
+    #   NN (dict):
+    #       Number of observations per instrument
+    #   instruments (list):
+    #       List of instruments for which there are RVs. Each instrument is also
+    #       stored as an attribute (e.g. `self.CORALIE98` or `self.HARPS`)
+    #   simbad (simbad):
+    #       Information on the target from Simbad
+    #   gaia (gaia):
+    #       Information on the target from Gaia DR3
     star: str
     instrument: Union[str, list] = field(init=True, repr=False, default=None)
     verbose: bool = field(init=True, repr=False, default=True)
-    do_maxerror: Union[bool, float] = field(init=True, repr=False, default=False)
+    do_maxerror: float = field(init=True, repr=False, default=None)
     do_secular_acceleration: bool = field(init=True, repr=False, default=True)
     do_sigma_clip: bool = field(init=True, repr=False, default=False)
     do_adjust_means: bool = field(init=True, repr=False, default=True)
@@ -217,7 +246,7 @@ class RV(ISSUES, REPORTS):
             else:
                 mid = None
 
-            with timer():
+            with timer('dace query'):
                 self.dace_result = get_observations(self.__star__, self.instrument,
                                                     user=self.user, main_id=mid, verbose=self.verbose)
         except ValueError as e:
@@ -318,7 +347,7 @@ class RV(ISSUES, REPORTS):
 
         if self.do_adjust_means:
             self.adjust_means()
-
+
         _star_no_space = self.star.replace(' ', '')
         _directory = sanitize_path(_star_no_space)
         self._download_directory = f'{_directory}_downloads'
@@ -379,16 +408,41 @@ class RV(ISSUES, REPORTS):
         self._did_correct_berv = False
         self.__post_init__()
 
-    def snapshot(self):
+    def snapshot(self, directory=None, delete_others=False):
         import pickle
         from datetime import datetime
         ts = datetime.now().timestamp()
         star_name = self.star.replace(' ', '')
         file = f'{star_name}_{ts}.pkl'
-        pickle.dump(self, open(file, 'wb'), protocol=0)
+
+        if directory is None:
+            directory = '.'
+        else:
+            os.makedirs(directory, exist_ok=True)
+
+        file = os.path.join(directory, file)
+
+        if delete_others:
+            import re
+            other_pkls = [
+                f for f in os.listdir(directory)
+                if re.search(fr'{star_name}_\d+.\d+.pkl', f)
+            ]
+            for pkl in other_pkls:
+                os.remove(os.path.join(directory, pkl))
+
+        metadata = {
+            'star': self.star,
+            'timestamp': ts,
+            'description': 'arvi snapshot'
+        }
+        pickle.dump((self, metadata), open(file, 'wb'), protocol=0)
+
         if self.verbose:
             logger.info(f'saved snapshot to {file}')
 
+        return file
+
     @property
     def N(self) -> int:
         """Total number of observations"""
@@ -543,7 +597,7 @@ class RV(ISSUES, REPORTS):
         return s
 
     @classmethod
-    def from_arrays(cls, star, time, vrad, svrad, inst, *args, **kwargs):
+    def from_arrays(cls, star, time, vrad, svrad, inst, **kwargs):
         s = cls(star, _child=True)
         time, vrad, svrad = map(np.atleast_1d, (time, vrad, svrad))
 
@@ -558,11 +612,15 @@ class RV(ISSUES, REPORTS):
         s.time = time
         s.vrad = vrad
         s.svrad = svrad
-        # mask
-        s.mask = kwargs.get('mask', np.full_like(s.time, True, dtype=bool))
+
+        s.mask = kwargs.pop('mask', np.full_like(s.time, True, dtype=bool))
+        s.units = kwargs.pop('units', 'm/s')
+
+        for k, v in kwargs.items():
+            setattr(s, k, np.atleast_1d(v))
 
         s.instruments = [inst]
-        s._quantities = np.array([])
+        s._quantities = np.array(list(kwargs.keys()))
 
         return s
 
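`from_arrays` previously ignored extra arguments; now every leftover keyword becomes a per-point array on the new object and is recorded in `_quantities` (while `mask` and `units` are popped first). A hypothetical call (quantity names are illustrative):

```python
# Hypothetical call; 'units' and 'fwhm' exercise the new kwargs handling.
import numpy as np

s = RV.from_arrays('mystar',
                   time=np.array([57000.1, 57001.2, 57002.3]),
                   vrad=np.array([10.0, 12.0, 11.0]),
                   svrad=np.array([1.0, 1.1, 0.9]),
                   inst='HARPS',
                   units='km/s',
                   fwhm=np.array([7.1, 7.2, 7.0]))
print(s._quantities)  # ['fwhm'] -- popped keys ('mask', 'units') are excluded
```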
@@ -583,8 +641,10 @@ class RV(ISSUES, REPORTS):
         dt = datetime.fromtimestamp(float(timestamp))
         if verbose:
             logger.info(f'reading snapshot of {star} from {dt}')
-
+
         s = pickle.load(open(file, 'rb'))
+        if isinstance(s, tuple) and len(s) == 2:
+            s, _metadata = s
         s._snapshot = file
         return s
 
@@ -595,7 +655,7 @@ class RV(ISSUES, REPORTS):
 
         Args:
             files (str, list):
-                File name or list of file names
+                File name, file object, or list of file names
             star (str, optional):
                 Name of the star. If None, try to infer it from file name
             instrument (str, list, optional):
@@ -606,37 +666,69 @@ class RV(ISSUES, REPORTS):
                 Number of lines to skip in the header. Defaults to 2.
 
         Examples:
-            s = RV.from_rdb('star_HARPS.rdb')
+            >>> s = RV.from_rdb('star_HARPS.rdb')
         """
         from glob import glob
         from os.path import splitext, basename
 
         verbose = kwargs.pop('verbose', True)
 
+        file_object = False
+
         if isinstance(files, str):
             if '*' in files:
                 files = glob(files)
             else:
                 files = [files]
+        elif isinstance(files, list):
+            pass
+        else:
+            file_object = hasattr(files, 'read')
+            files = [files]
 
         if len(files) == 0:
             if verbose:
-                logger.error('no files found')
+                logger.error('from_rdb: no files found')
             return
 
-        if star is None:
-            star_ = np.unique([splitext(basename(f))[0].split('_')[0] for f in files])
-            if star_.size == 1:
-                star = star_[0].replace('-', '_')
+        def get_star_name(file):
+            return splitext(basename(file))[0].split('_')[0].replace('-', '_')
+
+        def get_instrument(file):
+            return splitext(basename(file))[0].split('_')[1]
+
+        if file_object:
+            if star is None:
+                try:
+                    star = get_star_name(files[0].name)
+                except Exception:
+                    star ='unknown'
                 if verbose:
                     logger.info(f'assuming star is {star}')
 
-        if instrument is None:
-            instruments = np.array([splitext(basename(f))[0].split('_')[1] for f in files])
-            if verbose:
-                logger.info(f'assuming instruments: {instruments}')
+            if instrument is None:
+                try:
+                    instrument = get_instrument(files[0].name)
+                except Exception:
+                    instrument = 'unknown'
+                if verbose:
+                    logger.info(f'assuming instrument is {instrument}')
+
+            instruments = np.array([instrument])
         else:
-            instruments = np.atleast_1d(instrument)
+            if star is None:
+                star = np.unique([get_star_name(f) for f in files])[0]
+                if verbose:
+                    logger.info(f'assuming star is {star}')
+            else:
+                star = 'unknown'
+
+            if instrument is None:
+                instruments = np.array([splitext(basename(f))[0].split('_')[1] for f in files])
+                if verbose:
+                    logger.info(f'assuming instruments: {instruments}')
+            else:
+                instruments = np.atleast_1d(instrument)
 
         if instruments.size == 1 and len(files) > 1:
             instruments = np.repeat(instruments, len(files))
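`from_rdb` now also accepts an open file object (anything with a `.read` attribute). Since an in-memory buffer has no `.name`, the star and instrument fall back to 'unknown' unless given explicitly. A hypothetical call (data values are illustrative):

```python
# Hypothetical usage of the new file-object path (data illustrative).
import io

buf = io.StringIO('rjd\tvrad\tsvrad\n---\t----\t-----\n'
                  '57000.5\t10.0\t1.0\n57001.5\t12.0\t1.1\n')
s = RV.from_rdb(buf, star='HD10180', instrument='HARPS')
```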
@@ -669,12 +761,16 @@ class RV(ISSUES, REPORTS):
             _quantities = []
 
             #! hack
-            with open(f) as ff:
-                header = ff.readline().strip()
-                if '\t' in header:
-                    names = header.split('\t')
-                else:
-                    names = header.split()
+            if file_object:
+                header = f.readline().strip()
+            else:
+                with open(f) as ff:
+                    header = ff.readline().strip()
+
+            if '\t' in header:
+                names = header.split('\t')
+            else:
+                names = header.split()
 
             if len(names) > 3:
                 # if f.endswith('.rdb'):
@@ -687,7 +783,7 @@ class RV(ISSUES, REPORTS):
                 data = np.genfromtxt(f, **kw, delimiter='\t')
             else:
                 data = np.genfromtxt(f, **kw)
-
+
             # if data.ndim in (0, 1):
             #     data = data.reshape(-1, 1)
 
@@ -831,7 +927,22 @@ class RV(ISSUES, REPORTS):
 
     @classmethod
     def from_ccf(cls, files, star=None, instrument=None, **kwargs):
-        """ Create an RV object from a CCF file or a list of CCF files """
+        """ Create an RV object from a CCF file or a list of CCF files
+
+        !!! Note
+            This function relies on the `iCCF` package
+
+        Args:
+            files (str or list):
+                CCF file or list of CCF files
+            star (str):
+                Star name. If not provided, it will be inferred from the header
+                of the CCF file
+            instrument (str):
+                Instrument name. If not provided, it will be inferred from the
+                header of the CCF file
+
+        """
         try:
             import iCCF
         except ImportError:
@@ -856,7 +967,7 @@ class RV(ISSUES, REPORTS):
         objects = np.unique([i.HDU[0].header['OBJECT'].replace(' ', '') for i in CCFs])
 
         if len(objects) != 1:
-            logger.warning(f'found {objects.size} different stars in the CCF files, '
+            logger.warning(f'found {objects.size} different stars in the CCF files ({objects}), '
                            'choosing the first one')
             star = objects[0]
 
@@ -882,9 +993,22 @@ class RV(ISSUES, REPORTS):
         _quantities.append('contrast')
         _quantities.append('contrast_err')
 
+        _s.bispan = np.array([i.BIS*1e3 for i in CCFs])
+        _s.bispan_err = np.array([i.BISerror*1e3 for i in CCFs])
+        _quantities.append('bispan')
+        _quantities.append('bispan_err')
+
+        _s.rhk = np.full_like(time, np.nan)
+        _s.rhk_err = np.full_like(time, np.nan)
+        _quantities.append('rhk')
+        _quantities.append('rhk_err')
+
         _s.texp = np.array([i.HDU[0].header['EXPTIME'] for i in CCFs])
         _quantities.append('texp')
 
+        _s.berv = np.array([i.HDU[0].header['HIERARCH ESO QC BERV'] for i in CCFs])
+        _quantities.append('berv')
+
         _s.date_night = np.array([
             i.HDU[0].header['DATE-OBS'].split('T')[0] for i in CCFs
         ])
@@ -955,17 +1079,17 @@ class RV(ISSUES, REPORTS):
                 if fits_file not in tar.getnames():
                     logger.error(f'KOBE file not found for {star}')
                     return
-
+
                 hdul = fits.open(tar.extractfile(fits_file))
 
             else:
                 resp = requests.get(f'https://kobe.caha.es/internal/fitsfiles/{fits_file}',
                                     auth=HTTPBasicAuth('kobeteam', config.kobe_password))
-
+
                 if resp.status_code != 200:
                     # something went wrong, try to extract the file by downloading the
                     # full tar.gz archive
-
+
                     logger.warning(f'could not find "{fits_file}" on server, trying to download full archive')
                     resp = requests.get('https://kobe.caha.es/internal/fitsfiles.tar.gz',
                                         auth=HTTPBasicAuth('kobeteam', config.kobe_password))
@@ -983,7 +1107,7 @@ class RV(ISSUES, REPORTS):
                 if fits_file not in tar.getnames():
                     logger.error(f'KOBE file not found for {star}')
                     return
-
+
                 hdul = fits.open(tar.extractfile(fits_file))
 
             else:
@@ -1164,8 +1288,9 @@ class RV(ISSUES, REPORTS):
                 logger.warning('may need to provide `top_level` in kwargs to find file')
                 do_symlink_filetype('CCF', files[:limit], directory, **kwargs)
             else:
-                do_download_filetype('CCF', files[:limit], directory, clobber=clobber,
-                                     verbose=self.verbose, user=self.user, **kwargs)
+                downloaded = do_download_filetype('CCF', files[:limit], directory,
+                                                  clobber=clobber, verbose=self.verbose,
+                                                  user=self.user, **kwargs)
 
         if load:
             try:
@@ -1177,17 +1302,26 @@ class RV(ISSUES, REPORTS):
                     for f in files[:limit]
                 ]
                 downloaded = [
-                    skysub 
+                    skysub
                     if exists(skysub := f.replace('CCF_A.fits', 'CCF_SKYSUB_A.fits')) else f
                     for f in downloaded
                 ]
                 if self.verbose:
                     logger.info('loading the CCF(s) into `.CCF` attribute')
 
-                self.CCF = iCCF.from_file(downloaded)
+                self.CCF = iCCF.from_file(downloaded, verbose=False)
+                if len(self.CCF) == 1:
+                    self.CCF = [self.CCF]
 
-            except (ImportError, ValueError):
-                pass
+                if self.simbad is None:
+                    if self.verbose:
+                        logger.info('querying Simbad with RA/DEC from CCF header')
+                    ra = self.CCF[0].HDU[0].header['RA']
+                    dec = self.CCF[0].HDU[0].header['DEC']
+                    self._simbad = simbad.from_ra_dec(ra, dec)
+
+            except (ImportError, ValueError, FileNotFoundError):
+                logger.error('could not load CCF(s) into `.CCF` attribute')
 
     def download_s1d(self, instrument=None, index=None, limit=None,
                      directory=None, clobber=False, apply_mask=True, symlink=False, **kwargs):
@@ -1382,7 +1516,9 @@ class RV(ISSUES, REPORTS):
     def remove_point(self, index):
         """
         Remove individual observations at a given index (or indices).
-        NOTE: Like Python, the index is 0-based.
+
+        !!! Note
+            Like Python, the index is 0-based.
 
         Args:
             index (int, list, ndarray):
@@ -1414,11 +1550,14 @@ class RV(ISSUES, REPORTS):
     def restore_point(self, index):
         """
         Restore previously deleted individual observations at a given index (or
-        indices). NOTE: Like Python, the index is 0-based.
+        indices).
+
+        !!! Note
+            Like Python, the index is 0-based
 
         Args:
             index (int, list, ndarray):
-                Single index, list, or array of indices to restore.
+                Single index, list, or array of indices to restore
         """
         index = np.atleast_1d(index)
         try:
@@ -1444,6 +1583,14 @@ class RV(ISSUES, REPORTS):
         self.mask = self.mask & self.public
         self._propagate_mask_changes()
 
+    def remove_public(self):
+        """ Remove public observations """
+        if self.verbose:
+            n = self.public.sum()
+            logger.info(f'masking public observations ({n})')
+        self.mask = self.mask & (~self.public)
+        self._propagate_mask_changes()
+
     def remove_single_observations(self):
         """ Remove instruments for which there is a single observation """
         singles = [i for i in self.instruments if getattr(self, i).mtime.size == 1]
@@ -1469,26 +1616,26 @@ class RV(ISSUES, REPORTS):
             if self.verbose:
                 logger.warning(f'no observations for prog_id "{prog_id}"')
 
-    def remove_after_bjd(self, bjd):
+    def remove_after_bjd(self, bjd: float):
         """ Remove observations after a given BJD """
         if (self.time > bjd).any():
             ind = np.where(self.time > bjd)[0]
             self.remove_point(ind)
 
-    def remove_before_bjd(self, bjd):
+    def remove_before_bjd(self, bjd: float):
         """ Remove observations before a given BJD """
         if (self.time < bjd).any():
             ind = np.where(self.time < bjd)[0]
             self.remove_point(ind)
-
-    def remove_between_bjds(self, bjd1, bjd2):
+
+    def remove_between_bjds(self, bjd1: float, bjd2: float):
         """ Remove observations between two BJDs """
         to_remove = (self.time > bjd1) & (self.time < bjd2)
         if to_remove.any():
             ind = np.where(to_remove)[0]
             self.remove_point(ind)
 
-    def choose_n_points(self, n, seed=None, instrument=None):
+    def choose_n_points(self, n: int, seed=None, instrument=None):
         """ Randomly choose `n` observations and mask out the remaining ones
 
         Args:
@@ -1524,17 +1671,25 @@ class RV(ISSUES, REPORTS):
             inst = self.instruments[self.obs[m] - 1]
             n_before = (self.obs < self.obs[m]).sum()
             getattr(self, inst).mask[m - n_before] = False
+        for inst in self.instruments:
+            if getattr(self, inst).mtime.size == 0:
+                self.remove_instrument(inst, strict=True)
 
     def secular_acceleration(self, epoch=None, just_compute=False, force_simbad=False):
-        """
-        Remove secular acceleration from RVs
+        """
+        Remove secular acceleration from RVs. This uses the proper motions from
+        Gaia (in `self.gaia`) if available, otherwise from Simbad (in
+        `self.simbad`), unless `force_simbad=True`.
+
 
         Args:
            epoch (float, optional):
                The reference epoch (DACE uses 55500, 31/10/2010)
-           instruments (bool or collection of str):
-               Only remove secular acceleration for some instruments, or for all
-               if `instruments=True`
+           just_compute (bool, optional):
+               Just compute the secular acceleration and return, without
+               changing the RVs
+           force_simbad (bool, optional):
+               Use Simbad proper motions even if Gaia is available
        """
        if self._did_secular_acceleration and not just_compute:  # don't do it twice
            return
@@ -1643,7 +1798,7 @@ class RV(ISSUES, REPORTS):
 
         if config.return_self:
             return self
-
+
     def _undo_secular_acceleration(self):
         if self._did_secular_acceleration:
             _old_verbose = self.verbose
@@ -1671,7 +1826,15 @@ class RV(ISSUES, REPORTS):
         self._did_secular_acceleration = False
 
     def sigmaclip(self, sigma=5, instrument=None, strict=True):
-        """ Sigma-clip RVs (per instrument!) """
+        """
+        Sigma-clip RVs (per instrument!), by MAD away from the median.
+
+        Args:
+            sigma (float):
+                Number of MADs to clip
+            instrument (str, list):
+                Instrument(s) to sigma-clip
+        """
         #from scipy.stats import sigmaclip as dosigmaclip
         from .stats import sigmaclip_median as dosigmaclip
 
@@ -1684,9 +1847,11 @@ class RV(ISSUES, REPORTS):
         for inst in instruments:
             m = self.instrument_array == inst
             result = dosigmaclip(self.vrad[m], low=sigma, high=sigma)
-            n = self.vrad[m].size - result.clipped.size
+            # n = self.vrad[m].size - result.clipped.size
 
-            ind = m & ((self.vrad < result.lower) | (self.vrad > result.upper))
+            ind = m & self.mask & \
+                  ((self.vrad < result.lower) | (self.vrad > result.upper))
+            n = ind.sum()
 
             if self.verbose and n > 0:
                 s = 's' if (n == 0 or n > 1) else ''
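The clipping fix matters: the old mask could re-flag already-masked points, and the count `n` came from the clipped array rather than from the points actually being masked now; intersecting with `self.mask` and counting `ind.sum()` fixes both. A minimal sketch of median/MAD clipping in the style of `sigmaclip_median` (not arvi's stats module):

```python
# Minimal sketch (not arvi's stats module) of median/MAD sigma clipping:
# points more than `sigma` scaled MADs from the median fall outside
# [lower, upper] and get masked.
import numpy as np

def sigmaclip_median(x, low=5, high=5):
    med = np.median(x)
    mad = 1.4826 * np.median(np.abs(x - med))  # scaled MAD ~ std for Gaussian data
    return med - low * mad, med + high * mad

vrad = np.array([0.10, -0.20, 0.05, 9.90, 0.00])
mask = np.array([True, True, True, True, False])   # last point already masked

lower, upper = sigmaclip_median(vrad)
ind = mask & ((vrad < lower) | (vrad > upper))     # only clip unmasked points
print(ind.sum(), ind)   # 1 [False False False  True False]
```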
@@ -1708,7 +1873,7 @@ class RV(ISSUES, REPORTS):
 
         self._propagate_mask_changes()
 
-        if self._did_adjust_means:
+        if len(changed_instruments) > 0 and self._did_adjust_means:
             self._did_adjust_means = False
             self.adjust_means(instrument=changed_instruments)
 
@@ -1741,7 +1906,8 @@ class RV(ISSUES, REPORTS):
         """
         Nightly bin the observations.
 
-        WARNING: This creates and returns a new object and does not modify self.
+        !!! Warning
+            This creates and returns a new object and does not modify self.
         """
 
         # create copy of self to be returned
@@ -1845,7 +2011,7 @@ class RV(ISSUES, REPORTS):
         return np.nanmean(z, axis=0)
 
     def subtract_mean(self):
-        """ Subtract (single) mean RV from all instruments """
+        """ Subtract (a single) non-weighted mean RV from all instruments """
         self._meanRV = meanRV = self.mvrad.mean()
         for inst in self.instruments:
             s = getattr(self, inst)
@@ -1881,7 +2047,7 @@ class RV(ISSUES, REPORTS):
         # row = []
         # if print_as_table:
         #     logger.info('subtracted weighted average from each instrument:')
-
+
         others = ('fwhm', 'bispan', )
 
         instruments = self._check_instrument(instrument, strict=kwargs.get('strict', False))
@@ -2160,10 +2326,10 @@ class RV(ISSUES, REPORTS):
         """ Sort instruments by first or last observation date.
 
         Args:
-            by_first_observation (bool, optional, default=True):
-                Sort by first observation date.
-            by_last_observation (bool, optional, default=False):
-                Sort by last observation date.
+            by_first_observation (bool, optional):
+                Sort by first observation date
+            by_last_observation (bool, optional):
+                Sort by last observation date
         """
         if by_last_observation:
             by_first_observation = False
@@ -2239,7 +2405,7 @@ class RV(ISSUES, REPORTS):
         # if self.verbose:
         #     logger.warning(f'masking {nan_mask.sum()} observations with NaN in indicators')
 
-        header = '\t'.join(['rjd', 'vrad', 'svrad', 
+        header = '\t'.join(['rjd', 'vrad', 'svrad',
                             'fwhm', 'sig_fwhm',
                             'bispan', 'sig_bispan',
                             'contrast', 'sig_contrast',
{arvi-0.2.6 → arvi-0.2.8/arvi.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: arvi
-Version: 0.2.6
+Version: 0.2.8
 Summary: The Automated RV Inspector
 Author-email: João Faria <joao.faria@unige.ch>
 License: MIT
All other files are unchanged between 0.2.6 and 0.2.8.