arvi 0.2.7__tar.gz → 0.2.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of arvi might be problematic.

Files changed (64)
  1. {arvi-0.2.7/arvi.egg-info → arvi-0.2.9}/PKG-INFO +1 -1
  2. {arvi-0.2.7 → arvi-0.2.9}/arvi/dace_wrapper.py +12 -7
  3. {arvi-0.2.7 → arvi-0.2.9}/arvi/instrument_specific.py +19 -18
  4. {arvi-0.2.7 → arvi-0.2.9}/arvi/kima_wrapper.py +42 -7
  5. {arvi-0.2.7 → arvi-0.2.9}/arvi/plots.py +1 -3
  6. {arvi-0.2.7 → arvi-0.2.9}/arvi/programs.py +8 -4
  7. {arvi-0.2.7 → arvi-0.2.9}/arvi/simbad_wrapper.py +20 -0
  8. {arvi-0.2.7 → arvi-0.2.9}/arvi/timeseries.py +211 -95
  9. {arvi-0.2.7 → arvi-0.2.9}/arvi/utils.py +86 -8
  10. {arvi-0.2.7 → arvi-0.2.9/arvi.egg-info}/PKG-INFO +1 -1
  11. {arvi-0.2.7 → arvi-0.2.9}/.github/dependabot.yml +0 -0
  12. {arvi-0.2.7 → arvi-0.2.9}/.github/workflows/docs-gh-pages.yml +0 -0
  13. {arvi-0.2.7 → arvi-0.2.9}/.github/workflows/install.yml +0 -0
  14. {arvi-0.2.7 → arvi-0.2.9}/.github/workflows/python-publish.yml +0 -0
  15. {arvi-0.2.7 → arvi-0.2.9}/.gitignore +0 -0
  16. {arvi-0.2.7 → arvi-0.2.9}/LICENSE +0 -0
  17. {arvi-0.2.7 → arvi-0.2.9}/README.md +0 -0
  18. {arvi-0.2.7 → arvi-0.2.9}/arvi/HZ.py +0 -0
  19. {arvi-0.2.7 → arvi-0.2.9}/arvi/__init__.py +0 -0
  20. {arvi-0.2.7 → arvi-0.2.9}/arvi/ariadne_wrapper.py +0 -0
  21. {arvi-0.2.7 → arvi-0.2.9}/arvi/berv.py +0 -0
  22. {arvi-0.2.7 → arvi-0.2.9}/arvi/binning.py +0 -0
  23. {arvi-0.2.7 → arvi-0.2.9}/arvi/config.py +0 -0
  24. {arvi-0.2.7 → arvi-0.2.9}/arvi/data/extra/HD86226_PFS1.rdb +0 -0
  25. {arvi-0.2.7 → arvi-0.2.9}/arvi/data/extra/HD86226_PFS2.rdb +0 -0
  26. {arvi-0.2.7 → arvi-0.2.9}/arvi/data/extra/metadata.json +0 -0
  27. {arvi-0.2.7 → arvi-0.2.9}/arvi/data/info.svg +0 -0
  28. {arvi-0.2.7 → arvi-0.2.9}/arvi/data/obs_affected_ADC_issues.dat +0 -0
  29. {arvi-0.2.7 → arvi-0.2.9}/arvi/data/obs_affected_blue_cryostat_issues.dat +0 -0
  30. {arvi-0.2.7 → arvi-0.2.9}/arvi/exofop_wrapper.py +0 -0
  31. {arvi-0.2.7 → arvi-0.2.9}/arvi/extra_data.py +0 -0
  32. {arvi-0.2.7 → arvi-0.2.9}/arvi/gaia_wrapper.py +0 -0
  33. {arvi-0.2.7 → arvi-0.2.9}/arvi/headers.py +0 -0
  34. {arvi-0.2.7 → arvi-0.2.9}/arvi/lbl_wrapper.py +0 -0
  35. {arvi-0.2.7 → arvi-0.2.9}/arvi/nasaexo_wrapper.py +0 -0
  36. {arvi-0.2.7 → arvi-0.2.9}/arvi/reports.py +0 -0
  37. {arvi-0.2.7 → arvi-0.2.9}/arvi/setup_logger.py +0 -0
  38. {arvi-0.2.7 → arvi-0.2.9}/arvi/sophie_wrapper.py +0 -0
  39. {arvi-0.2.7 → arvi-0.2.9}/arvi/spectra.py +0 -0
  40. {arvi-0.2.7 → arvi-0.2.9}/arvi/stats.py +0 -0
  41. {arvi-0.2.7 → arvi-0.2.9}/arvi/stellar.py +0 -0
  42. {arvi-0.2.7 → arvi-0.2.9}/arvi/translations.py +0 -0
  43. {arvi-0.2.7 → arvi-0.2.9}/arvi.egg-info/SOURCES.txt +0 -0
  44. {arvi-0.2.7 → arvi-0.2.9}/arvi.egg-info/dependency_links.txt +0 -0
  45. {arvi-0.2.7 → arvi-0.2.9}/arvi.egg-info/requires.txt +0 -0
  46. {arvi-0.2.7 → arvi-0.2.9}/arvi.egg-info/top_level.txt +0 -0
  47. {arvi-0.2.7 → arvi-0.2.9}/docs/API.md +0 -0
  48. {arvi-0.2.7 → arvi-0.2.9}/docs/detailed.ipynb +0 -0
  49. {arvi-0.2.7 → arvi-0.2.9}/docs/downloading_data.md +0 -0
  50. {arvi-0.2.7 → arvi-0.2.9}/docs/index.md +0 -0
  51. {arvi-0.2.7 → arvi-0.2.9}/docs/logo/detective.png +0 -0
  52. {arvi-0.2.7 → arvi-0.2.9}/docs/logo/logo.png +0 -0
  53. {arvi-0.2.7 → arvi-0.2.9}/docs/stylesheets/extra.css +0 -0
  54. {arvi-0.2.7 → arvi-0.2.9}/mkdocs.yml +0 -0
  55. {arvi-0.2.7 → arvi-0.2.9}/pyproject.toml +0 -0
  56. {arvi-0.2.7 → arvi-0.2.9}/setup.cfg +0 -0
  57. {arvi-0.2.7 → arvi-0.2.9}/setup.py +0 -0
  58. {arvi-0.2.7 → arvi-0.2.9}/tests/HD10700-Bcor_ESPRESSO18.rdb +0 -0
  59. {arvi-0.2.7 → arvi-0.2.9}/tests/test_binning.py +0 -0
  60. {arvi-0.2.7 → arvi-0.2.9}/tests/test_config.py +0 -0
  61. {arvi-0.2.7 → arvi-0.2.9}/tests/test_create_RV.py +0 -0
  62. {arvi-0.2.7 → arvi-0.2.9}/tests/test_import_object.py +0 -0
  63. {arvi-0.2.7 → arvi-0.2.9}/tests/test_simbad.py +0 -0
  64. {arvi-0.2.7 → arvi-0.2.9}/tests/test_stats.py +0 -0
{arvi-0.2.7/arvi.egg-info → arvi-0.2.9}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: arvi
- Version: 0.2.7
+ Version: 0.2.9
  Summary: The Automated RV Inspector
  Author-email: João Faria <joao.faria@unige.ch>
  License: MIT
{arvi-0.2.7 → arvi-0.2.9}/arvi/dace_wrapper.py
@@ -10,7 +10,7 @@ from .setup_logger import setup_logger
  from .utils import create_directory, all_logging_disabled, stdout_disabled, timer, tqdm


- def load_spectroscopy(user=None):
+ def load_spectroscopy(user=None, verbose=True):
      logger = setup_logger()
      with all_logging_disabled():
          from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
@@ -19,7 +19,8 @@ def load_spectroscopy(user=None):
      from .config import config
      # requesting as public
      if config.request_as_public:
-         logger.warning('requesting DACE data as public')
+         if verbose:
+             logger.warning('requesting DACE data as public')
      with all_logging_disabled():
          dace = DaceClass(dace_rc_config_path='none')
      return SpectroscopyClass(dace_instance=dace)
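
The new `verbose` flag makes it possible to silence the "requesting DACE data as public" warning. A minimal sketch of how a caller might use it (assuming a working dace_query installation):

    from arvi.dace_wrapper import load_spectroscopy

    # verbose=False suppresses the public-request warning added above
    Spectroscopy = load_spectroscopy(user=None, verbose=False)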
@@ -95,7 +96,7 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
  npipe = len(pipelines)
  if 'NIRPS' in inst and any(['LBL' in p for p in pipelines]):
      # TODO: correctly load both CCF and LBL
-     pipelines = [pipelines[1]]
+     pipelines = [pipelines[0]]
  if 'HARPS' in inst and npipe > 1 and pipelines[1] == pipelines[0] + '-EGGS':
      pipelines = pipelines[:2]
  else:
@@ -168,7 +169,7 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None,
      dict:
          dictionary with data from DACE
      """
-     Spectroscopy = load_spectroscopy(user)
+     Spectroscopy = load_spectroscopy(user, verbose)
      found_dace_id = False
      with timer('dace_id query'):
          try:
@@ -283,7 +284,7 @@
  def get_observations(star, instrument=None, user=None, main_id=None, verbose=True):
      logger = setup_logger()
      if instrument is None:
-         Spectroscopy = load_spectroscopy(user)
+         Spectroscopy = load_spectroscopy(user, verbose)

      try:
          with stdout_disabled(), all_logging_disabled():
@@ -544,6 +545,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=
  """ Download CCFs / S1Ds / S2Ds from DACE """
  logger = setup_logger()
  raw_files = np.atleast_1d(raw_files)
+ raw_files_original = raw_files.copy()

  create_directory(output_directory)

@@ -557,7 +559,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=
  if n == 0:
      if verbose:
          logger.info('no files to download')
-     return
+     return list(map(os.path.basename, raw_files_original))

  # avoid an empty chunk
  if chunk_size > n:
@@ -575,7 +577,9 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=

  if n < parallel_limit:
      iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
-     for files in tqdm(iterator, total=len(iterator)):
+     if len(iterator) > 1:
+         iterator = tqdm(iterator, total=len(iterator))
+     for files in iterator:
          download(files, type, output_directory, quiet=False, user=user)
          extract_fits(output_directory)

@@ -604,6 +608,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=

  sys.stdout.flush()
  logger.info('extracted .fits files')
+ return list(map(os.path.basename, raw_files_original))


  # def do_download_s1d(raw_files, output_directory, clobber=False, verbose=True):
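
With these changes `do_download_filetype` returns the basenames of the requested files on both exits: the early return when there is nothing left to download, and the normal path after extraction. A sketch of the calling pattern this enables (file names are illustrative):

    downloaded = do_download_filetype('CCF', raw_files, 'downloads', clobber=False)
    # e.g. ['r.ESPRE.2021-01-01T00:00:00.000_CCF_A.fits', ...]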
{arvi-0.2.7 → arvi-0.2.9}/arvi/instrument_specific.py
@@ -127,9 +127,9 @@ def HARPS_commissioning(self, mask=True, plot=True):
  total_affected = affected.sum()

  if self.verbose:
-     n = total_affected
-     logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                 "during HARPS commissioning")
+     n, i = total_affected, int(total_affected != 1)
+     logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                 "during HARPS commissioning")

  if mask:
      self.mask[affected] = False
@@ -159,9 +159,9 @@ def HARPS_fiber_commissioning(self, mask=True, plot=True):
  total_affected = affected.sum()

  if self.verbose:
-     n = total_affected
-     logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                 "during the HARPS fiber commissioning period")
+     n, i = total_affected, int(total_affected != 1)
+     logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                 "during the HARPS fiber commissioning period")

  if mask:
      self.mask[affected] = False
@@ -191,15 +191,15 @@ def ESPRESSO_commissioning(self, mask=True, plot=True):
  total_affected = affected.sum()

  if self.verbose:
-     n = total_affected
-     logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                 "during ESPRESSO commissioning")
+     n, i = total_affected, int(total_affected != 1)
+     logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                 "during ESPRESSO commissioning")

  if mask:
      self.mask[affected] = False
      self._propagate_mask_changes()

- if plot:
+ if plot and total_affected > 0:
      self.plot(show_masked=True)

  return affected
@@ -240,9 +240,9 @@ def ADC_issues(self, mask=True, plot=True, check_headers=False):
  total_affected = intersect.sum()

  if self.verbose:
-     n = total_affected
-     logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                 "affected by ADC issues")
+     n, i = total_affected, int(total_affected != 1)
+     logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                 "affected by ADC issues")

  if mask:
      self.mask[intersect] = False
@@ -276,9 +276,9 @@ def blue_cryostat_issues(self, mask=True, plot=True):
  total_affected = intersect.sum()

  if self.verbose:
-     n = total_affected
-     logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                 "affected by blue cryostat issues")
+     n, i = total_affected, int(total_affected != 1)
+     logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                 "affected by blue cryostat issues")

  if mask:
      self.mask[intersect] = False
@@ -322,8 +322,9 @@ def qc_scired_issues(self, plot=False, **kwargs):
  n = affected.sum()

  if self.verbose:
-     logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
-                 "where QC SCIRED CHECK is 0")
+     i = int(n != 1)
+     logger.info(f"there {['is', 'are'][i]} {n} frame{['', 's'][i]} "
+                 "where QC SCIRED CHECK is 0")

  if n == 0:
      return
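
The change repeated across these hunks replaces a string-slicing pluralization trick with explicit list indexing. The old `'are'[:n^1]` / `'is'[n^1:]` idiom only works for n = 1 and n = 2 by accident of the XOR; it produces "as" for n = 0 and "ar" for n = 3. A quick standalone check:

    for n in (0, 1, 2, 3):
        i = int(n != 1)
        old = f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]}"
        new = f"there {['is', 'are'][i]} {n} frame{['', 's'][i]}"
        print(old, '|', new)
    # old is garbled for n=0 ("there as 0 frames") and n=3 ("there ar 3 frames");
    # new is grammatical for any n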
{arvi-0.2.7 → arvi-0.2.9}/arvi/kima_wrapper.py
@@ -1,13 +1,14 @@
  import os
  import numpy as np

- from .setup_logger import logger
+ from .setup_logger import setup_logger

  try:
      import kima
      from kima.pykima.utils import chdir
      from kima import distributions
-     from kima import RVData, RVmodel
+     from kima import RVData, HGPMdata
+     from kima import RVmodel, GPmodel, RVHGPMmodel
      kima_available = True
  except ImportError:
      kima_available = False
@@ -21,9 +22,12 @@ def try_to_guess_prior(model, prior):
      return None


- def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwargs):
+ def run_kima(self, run=False, load=False, run_directory=None,
+              model=RVmodel, priors={}, **kwargs):
      if not kima_available:
          raise ImportError('kima not available, please install with `pip install kima`')
+
+     logger = setup_logger()

      instruments = [inst for inst in self.instruments if self.NN[inst] > 1]
      time = [getattr(self, inst).mtime for inst in instruments]
@@ -33,12 +37,34 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
  fix = kwargs.pop('fix', False)
  npmax = kwargs.pop('npmax', 1)
- model = RVmodel(fix=fix, npmax=npmax, data=data)
+
+ if isinstance(model, str):
+     try:
+         model = {
+             'RVmodel': RVmodel,
+             'GPmodel': GPmodel,
+             'RVHGPMmodel': RVHGPMmodel
+         }[model]
+     except KeyError:
+         raise ValueError(f'unknown model: {model}')
+
+ if model is RVHGPMmodel:
+     pm_data = HGPMdata(self.simbad.gaia_id)
+     model = model(fix=fix, npmax=npmax, data=data, pm_data=pm_data)
+ else:
+     model = model(fix=fix, npmax=npmax, data=data)

  model.trend = kwargs.pop('trend', False)
  model.degree = kwargs.pop('degree', 0)

- model.studentt = kwargs.pop('studentt', False)
+ if isinstance(model, RVmodel):
+     model.studentt = kwargs.pop('studentt', False)
+
+ if isinstance(model, GPmodel):
+     if 'kernel' in kwargs:
+         model.kernel = kwargs.pop('kernel')
+
+
  model.enforce_stability = kwargs.pop('enforce_stability', False)
  model.star_mass = kwargs.pop('star_mass', 1.0)
@@ -49,6 +75,13 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
  if kwargs.pop('kuma', False):
      model.conditional.eprior = distributions.Kumaraswamy(0.867, 3.03)

+ if isinstance(model, RVHGPMmodel):
+     model.pm_ra_bary_prior = priors.pop('pm_ra_bary_prior',
+         distributions.Gaussian(pm_data.pm_ra_hg, pm_data.sig_hg_ra))
+     model.pm_dec_bary_prior = priors.pop('pm_dec_bary_prior',
+         distributions.Gaussian(pm_data.pm_dec_hg, pm_data.sig_hg_dec))
+
+
  for k, v in priors.items():
      try:
          if 'conditional' in k:
@@ -67,7 +100,6 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
      run_directory = os.getcwd()

  if run:
-
      # TODO: use signature of kima.run to pop the correct kwargs
      # model_name = model.__class__.__name__
      # model_name = f'kima.{model_name}.{model_name}'
@@ -75,7 +107,10 @@ def run_kima(self, run=False, load=False, run_directory=None, priors={}, **kwarg
  with chdir(run_directory):
      kima.run(model, **kwargs)
-
+
+ if isinstance(model, RVHGPMmodel):
+     data = (data, pm_data)
+
  if load:
      with chdir(run_directory):
          res = kima.load_results(model)
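
`run_kima` can now build three different kima models, selected either by class or by name. A minimal sketch of the new entry point (assuming kima is installed and `s` is an arvi RV object, as in previous releases):

    # equivalent ways to pick the model: pass the class or one of the mapped strings
    res = s.run_kima(run=True, load=True, model='GPmodel', npmax=1)
    res = s.run_kima(run=True, load=True, model='RVHGPMmodel')

For `RVHGPMmodel`, the Gaia ID from `self.simbad` is used to fetch the Hipparcos-Gaia proper-motion data, and Gaussian priors centered on the measured proper motions are set unless overridden through `priors`.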
{arvi-0.2.7 → arvi-0.2.9}/arvi/plots.py
@@ -1,6 +1,7 @@
  from functools import partialmethod, wraps
  from itertools import cycle

+ import matplotlib.pyplot as plt
  import numpy as np

  from astropy.timeseries import LombScargle
@@ -9,9 +10,6 @@ from .setup_logger import setup_logger
  from .config import config
  from .stats import wmean

- from .utils import lazy_import
- plt = lazy_import('matplotlib.pyplot')
-

  def plot_settings(func):
      @wraps(func)
{arvi-0.2.7 → arvi-0.2.9}/arvi/programs.py
@@ -3,11 +3,13 @@ import multiprocessing
  from functools import partial, lru_cache
  from itertools import chain
  from collections import namedtuple
- from multiprocessing.pool import ThreadPool
+ from multiprocessing.pool import ThreadPool, Pool
+ import concurrent.futures
+ import sys
  from tqdm import tqdm
  # import numpy as np

- from .setup_logger import logger
+ from .setup_logger import setup_logger
  from .timeseries import RV

  __all__ = ['ESPRESSO_GTO']
@@ -22,13 +24,14 @@ def get_star(star, instrument=None, verbose=False, **kwargs):

  class LazyRV:
      def __init__(self, stars: list, instrument: str = None,
-                  _parallel_limit=10):
+                  _parallel_limit=10, _parallel_workers=8):
          self.stars = stars
          if isinstance(self.stars, str):
              self.stars = [self.stars]
          self.instrument = instrument
          self._saved = None
          self._parallel_limit = _parallel_limit
+         self._parallel_workers = _parallel_workers

      @property
      def N(self):
@@ -38,10 +41,11 @@ class LazyRV:
      return f"RV({self.N} stars)"

  def _get(self, **kwargs):
+     logger = setup_logger()
      if self.N > self._parallel_limit:
          # logger.info('Querying DACE...')
          _get_star = partial(get_star, instrument=self.instrument, **kwargs)
-         with ThreadPool(8) as pool:
+         with Pool(self._parallel_workers) as pool:
              result = list(tqdm(pool.imap(_get_star, self.stars),
                                 total=self.N, unit='star',
                                 desc='Querying DACE (can take a while)'))
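
Star lists longer than `_parallel_limit` are now fetched with a process pool of configurable size instead of a hard-coded `ThreadPool(8)`. A sketch of the new knob (star names are illustrative):

    from arvi.programs import LazyRV

    lazy = LazyRV(['HD10700', 'HD20794'], instrument='ESPRESSO',
                  _parallel_limit=1, _parallel_workers=4)

Note that `multiprocessing.pool.Pool` requires the mapped function and its arguments to be picklable, which `ThreadPool` did not.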
{arvi-0.2.7 → arvi-0.2.9}/arvi/simbad_wrapper.py
@@ -70,6 +70,17 @@ FROM ident AS id1 JOIN ident AS id2 USING(oidref)
  WHERE id1.id = '{star}' AND id2.id LIKE '{name}%';
  """

+ RA_DEC_QUERY = """
+ SELECT DISTINCT basic.OID,
+        RA,
+        DEC,
+        main_id,
+        DISTANCE(POINT('ICRS', RA, DEC), POINT('ICRS', {ra}, {dec})) as "dist"
+ FROM basic JOIN flux ON oidref = oid
+ WHERE CONTAINS(POINT('ICRS', RA, DEC), CIRCLE('ICRS', {ra}, {dec}, 0.02)) = 1
+ ORDER BY "dist";
+ """
+
  def find_identifier(identifier, star):
      response = run_query(HD_GJ_HIP_QUERY.format(name=identifier, star=star))
      if identifier in response:
@@ -288,6 +299,15 @@ class simbad:
      def bmv(self):
          return self.B - self.V

+     @classmethod
+     def from_ra_dec(cls, ra, dec, **kwargs):
+         table1 = run_query(query=RA_DEC_QUERY.format(ra=ra, dec=dec))
+         cols, values = parse_tablen(table1)
+         if len(values) == 0:
+             raise ValueError(f'no Simbad results for ra={ra}, dec={dec}')
+         assert cols == ['oid', 'ra', 'dec', 'main_id', 'dist']
+         star = values[0][cols.index('main_id')].replace('"', '')
+         return cls(star, **kwargs)

  def argsort_by_spectral_type(sptypes):
      STs = [f'{letter}{n}' for letter in ('F', 'G', 'K', 'M') for n in range(10)]
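
The new classmethod resolves a star by coordinates: it runs an ADQL cone search (`CIRCLE` with a 0.02-degree radius) around the given ICRS position, sorts by distance, and builds the usual `simbad` object from the closest match's main identifier. A sketch with illustrative coordinates, in degrees:

    from arvi.simbad_wrapper import simbad

    s = simbad.from_ra_dec(44.0, -16.2)
    # s is a regular simbad object for the closest match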
{arvi-0.2.7 → arvi-0.2.9}/arvi/timeseries.py
@@ -6,6 +6,8 @@ from glob import glob
  import warnings
  from copy import deepcopy
  from datetime import datetime, timezone
+
+ # import lazy_loader as lazy
  import numpy as np

  from .setup_logger import setup_logger

@@ -24,10 +26,11 @@ from .HZ import getHZ_period
  from .instrument_specific import ISSUES
  from .reports import REPORTS
  from .utils import sanitize_path, strtobool, there_is_internet, timer, chdir
- from .utils import lazy_import
+ # from .utils import lazy_import

- units = lazy_import('astropy.units')
- # from astropy import units
+ # units = lazy_import('astropy.units')
+ # units = lazy.load('astropy.units')
+ from astropy import units

  class ExtraFields:
      @property
@@ -408,16 +411,41 @@ class RV(ISSUES, REPORTS):
      self._did_correct_berv = False
      self.__post_init__()

- def snapshot(self):
+ def snapshot(self, directory=None, delete_others=False):
      import pickle
      from datetime import datetime
      ts = datetime.now().timestamp()
      star_name = self.star.replace(' ', '')
      file = f'{star_name}_{ts}.pkl'
-     pickle.dump(self, open(file, 'wb'), protocol=0)
+
+     if directory is None:
+         directory = '.'
+     else:
+         os.makedirs(directory, exist_ok=True)
+
+     file = os.path.join(directory, file)
+
+     if delete_others:
+         import re
+         other_pkls = [
+             f for f in os.listdir(directory)
+             if re.search(fr'{star_name}_\d+.\d+.pkl', f)
+         ]
+         for pkl in other_pkls:
+             os.remove(os.path.join(directory, pkl))
+
+     metadata = {
+         'star': self.star,
+         'timestamp': ts,
+         'description': 'arvi snapshot'
+     }
+     pickle.dump((self, metadata), open(file, 'wb'), protocol=0)
+
      if self.verbose:
          logger.info(f'saved snapshot to {file}')

+     return file
+
  @property
  def N(self) -> int:
      """Total number of observations"""
@@ -552,7 +580,8 @@ class RV(ISSUES, REPORTS):
  # --> not just in rhk and rhk_err...
  if data[arr].dtype == float and (bad := data[arr] == -99999).any():
      data[arr][bad] = np.nan
-
+ if data[arr].dtype == float and (bad := data[arr] == -99).any():
+     data[arr][bad] = np.nan
  setattr(s, arr, data[arr][ind])
  s._quantities.append(arr)
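
Both -99999 and -99 are now treated as missing-value sentinels in float columns coming from DACE. The pattern, in isolation:

    import numpy as np

    arr = np.array([5.1, -99999.0, -99.0, 4.8])
    for sentinel in (-99999, -99):
        arr[arr == sentinel] = np.nan   # same effect as the walrus checks above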
@@ -618,6 +647,8 @@ class RV(ISSUES, REPORTS):
  logger.info(f'reading snapshot of {star} from {dt}')
  s = pickle.load(open(file, 'rb'))
+ if isinstance(s, tuple) and len(s) == 2:
+     s, _metadata = s
  s._snapshot = file
  return s
623
654
 
@@ -659,10 +690,11 @@ class RV(ISSUES, REPORTS):
659
690
  file_object = hasattr(files, 'read')
660
691
  files = [files]
661
692
 
662
- # if len(files) == 0:
663
- # if verbose:
664
- # logger.error('no files found')
665
- # return
693
+ if len(files) == 0:
694
+ if verbose:
695
+ logger.error('from_rdb: no files found')
696
+ return
697
+
666
698
  def get_star_name(file):
667
699
  return splitext(basename(file))[0].split('_')[0].replace('-', '_')
668
700
 
@@ -1260,8 +1292,9 @@ class RV(ISSUES, REPORTS):
      logger.warning('may need to provide `top_level` in kwargs to find file')
      do_symlink_filetype('CCF', files[:limit], directory, **kwargs)
  else:
-     do_download_filetype('CCF', files[:limit], directory, clobber=clobber,
-                          verbose=self.verbose, user=self.user, **kwargs)
+     downloaded = do_download_filetype('CCF', files[:limit], directory,
+                                       clobber=clobber, verbose=self.verbose,
+                                       user=self.user, **kwargs)

  if load:
      try:
@@ -1273,17 +1306,26 @@ class RV(ISSUES, REPORTS):
              for f in files[:limit]
          ]
          downloaded = [
-             skysub
+             skysub
              if exists(skysub := f.replace('CCF_A.fits', 'CCF_SKYSUB_A.fits')) else f
              for f in downloaded
          ]
          if self.verbose:
              logger.info('loading the CCF(s) into `.CCF` attribute')

-         self.CCF = iCCF.from_file(downloaded)
+         self.CCF = iCCF.from_file(downloaded, verbose=False)
+         if len(self.CCF) == 1:
+             self.CCF = [self.CCF]

-     except (ImportError, ValueError):
-         pass
+         if self.simbad is None:
+             if self.verbose:
+                 logger.info('querying Simbad with RA/DEC from CCF header')
+             ra = self.CCF[0].HDU[0].header['RA']
+             dec = self.CCF[0].HDU[0].header['DEC']
+             self._simbad = simbad.from_ra_dec(ra, dec)
+
+     except (ImportError, ValueError, FileNotFoundError):
+         logger.error('could not load CCF(s) into `.CCF` attribute')

  def download_s1d(self, instrument=None, index=None, limit=None,
                   directory=None, clobber=False, apply_mask=True, symlink=False, **kwargs):
@@ -1633,6 +1675,9 @@ class RV(ISSUES, REPORTS):
      inst = self.instruments[self.obs[m] - 1]
      n_before = (self.obs < self.obs[m]).sum()
      getattr(self, inst).mask[m - n_before] = False
+ for inst in self.instruments:
+     if getattr(self, inst).mtime.size == 0:
+         self.remove_instrument(inst, strict=True)

  def secular_acceleration(self, epoch=None, just_compute=False, force_simbad=False):
      """
@@ -1806,9 +1851,11 @@ class RV(ISSUES, REPORTS):
  for inst in instruments:
      m = self.instrument_array == inst
      result = dosigmaclip(self.vrad[m], low=sigma, high=sigma)
-     n = self.vrad[m].size - result.clipped.size
+     # n = self.vrad[m].size - result.clipped.size

-     ind = m & ((self.vrad < result.lower) | (self.vrad > result.upper))
+     ind = m & self.mask & \
+         ((self.vrad < result.lower) | (self.vrad > result.upper))
+     n = ind.sum()

      if self.verbose and n > 0:
          s = 's' if (n == 0 or n > 1) else ''
@@ -1869,6 +1916,8 @@ class RV(ISSUES, REPORTS):

  # create copy of self to be returned
  snew = deepcopy(self)
+ # store original object
+ snew._unbinned = deepcopy(self)

  all_bad_quantities = []
@@ -2058,23 +2107,32 @@ class RV(ISSUES, REPORTS):
  if config.return_self:
      return self

- def detrend(self, degree=1):
-     """ Detrend the RVs of all instruments """
+ def detrend(self, degree: int=1):
+     """
+     Detrend the RVs of all instruments using a polynomial of degree `degree`
+     """
      instrument_indices = np.unique_inverse(self.instrument_array).inverse_indices
-     def fun(p, t, degree, ninstruments, just_model=False, index=None):
+     instrument_indices_masked = np.unique_inverse(self.instrument_array[self.mask]).inverse_indices
+
+     def fun(p, t, degree, ninstruments, just_model=False, index=None, masked=True):
          polyp, offsets = p[:degree], p[-ninstruments:]
          polyp = np.r_[polyp, 0.0]
          if index is None:
-             model = offsets[instrument_indices] + np.polyval(polyp, t)
+             if masked:
+                 model = offsets[instrument_indices_masked] + np.polyval(polyp, t)
+             else:
+                 model = offsets[instrument_indices] + np.polyval(polyp, t)
          else:
              model = offsets[index] + np.polyval(polyp, t)
          if just_model:
              return model
          return self.mvrad - model
+
      coef = np.polyfit(self.mtime, self.mvrad, degree)
      x0 = np.append(coef, [0.0] * (len(self.instruments) - 1))
-     print(x0)
+     # print(x0)
      fun(x0, self.mtime, degree, len(self.instruments))
+
      from scipy.optimize import leastsq
      xbest, _ = leastsq(fun, x0, args=(self.mtime, degree, len(self.instruments)))

@@ -2084,12 +2142,13 @@ class RV(ISSUES, REPORTS):
      self.plot(ax=ax)
      for i, inst in enumerate(self.instruments):
          s = getattr(self, inst)
-         ax.plot(s.time, fun(xbest, s.time, degree, len(self.instruments), just_model=True, index=i),
+         ax.plot(s.time,
+                 fun(xbest, s.time, degree, len(self.instruments), just_model=True, index=i, masked=False),
                  color=f'C{i}')
      ax.set_title('original', loc='left', fontsize=10)
      ax.set_title(f'coefficients: {xbest[:degree]}', loc='right', fontsize=10)

-     self.add_to_vrad(-fun(xbest, self.time, degree, len(self.instruments), just_model=True))
+     self.add_to_vrad(-fun(xbest, self.time, degree, len(self.instruments), just_model=True, masked=False))
      ax = fig.add_subplot(2, 1, 2)
      self.plot(ax=ax)
      ax.set_title('detrended', loc='left', fontsize=10)
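
`detrend` now distinguishes masked from unmasked evaluations: the fit uses only the masked (valid) points, while plotting and the final correction are evaluated on all observations with `masked=False`. Usage is unchanged apart from the annotated signature:

    s.detrend(degree=2)   # fit and subtract a quadratic trend plus per-instrument offsets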
@@ -2098,7 +2157,7 @@ class RV(ISSUES, REPORTS):
  # axs[1].errorbar(self.mtime, fun(xbest, self.mtime, degree, len(self.instruments)), self.msvrad, fmt='o')

  return
-
+
@@ -2258,24 +2317,28 @@ class RV(ISSUES, REPORTS):
      self.units = new_units


- def put_at_systemic_velocity(self):
+ def put_at_systemic_velocity(self, factor=1.0):
      """
-     For instruments in which mean(RV) < ptp(RV), "move" RVs to the systemic
-     velocity from simbad. This is useful if some instruments are centered
-     at zero while others are not, and instead of calling `.adjust_means()`,
-     but it only works when the systemic velocity is smaller than ptp(RV).
+     For instruments in which mean(RV) < `factor` * ptp(RV), "move" RVs to
+     the systemic velocity from simbad. This is useful if some instruments
+     are centered at zero while others are not, and instead of calling
+     `.adjust_means()`, but it only works when the systemic velocity is
+     smaller than `factor` * ptp(RV).
      """
      changed = False
      for inst in self.instruments:
+         changed_inst = False
          s = getattr(self, inst)
          if s.mask.any():
-             if np.abs(s.mvrad.mean()) < np.ptp(s.mvrad):
+             if np.abs(s.mvrad.mean()) < factor * np.ptp(s.mvrad):
                  s.vrad += self.simbad.rvz_radvel * 1e3
-                 changed = True
+                 changed = changed_inst = True
          else: # all observations are masked, use non-masked arrays
-             if np.abs(s.vrad.mean()) < factor * np.ptp(s.vrad):
+             if np.abs(s.vrad.mean()) < factor * np.ptp(s.vrad):
                  s.vrad += self.simbad.rvz_radvel * 1e3
-                 changed = True
+                 changed = changed_inst = True
+         if changed_inst and self.verbose:
+             logger.info(f"putting {inst} RVs at systemic velocity")
      if changed:
          self._build_arrays()
@@ -2298,33 +2361,66 @@ class RV(ISSUES, REPORTS):
      self._build_arrays()


- def save(self, directory=None, instrument=None, full=False, postfix=None,
-          save_masked=False, save_nans=True):
-     """ Save the observations in .rdb files.
+ def save(self, directory=None, instrument=None, format='rdb',
+          indicators=False, join_instruments=False, postfix=None,
+          save_masked=False, save_nans=True, **kwargs):
+     """ Save the observations in .rdb or .csv files.

      Args:
          directory (str, optional):
              Directory where to save the .rdb files.
          instrument (str, optional):
              Instrument for which to save observations.
-         full (bool, optional):
-             Save just RVs and errors (False) or more indicators (True).
+         format (str, optional):
+             Format to use ('rdb' or 'csv').
+         indicators (bool, str, list[str], optional):
+             Save only RVs and errors (False) or more indicators. If True,
+             use a default list, if `str`, use an existing list, if list[str]
+             provide a sequence of specific indicators.
+         join_instruments (bool, optional):
+             Join all instruments in a single file.
          postfix (str, optional):
              Postfix to add to the filenames ([star]_[instrument]_[postfix].rdb).
+         save_masked (bool, optional)
+             If True, also save masked observations (those for which
+             self.mask == True)
          save_nans (bool, optional)
              Whether to save NaN values in the indicators, if they exist. If
              False, the full observation which contains NaN values is not saved.
      """
+     if format not in ('rdb', 'csv'):
+         logger.error(f"format must be 'rdb' or 'csv', got '{format}'")
+         return
+
      star_name = self.star.replace(' ', '')

-     if directory is None:
-         directory = '.'
-     else:
+     if directory is not None:
          os.makedirs(directory, exist_ok=True)

+     indicator_sets = {
+         "default": [
+             "fwhm", "fwhm_err",
+             "bispan", "bispan_err",
+             "contrast", "contrast_err",
+             "rhk", "rhk_err",
+             "berv",
+         ],
+         "CORALIE": [
+             "fwhm", "fwhm_err",
+             "bispan", "bispan_err",
+             "contrast", "contrast_err",
+             "haindex", "haindex_err",
+             "berv",
+         ],
+     }
+
+     if 'full' in kwargs:
+         logger.warning('argument `full` is deprecated, use `indicators` instead')
+         indicators = kwargs['full']
+
      files = []

-     for inst in self.instruments:
+     for _i, inst in enumerate(self.instruments):
          if instrument is not None:
              if instrument not in inst:
                  continue
@@ -2334,75 +2430,95 @@ class RV(ISSUES, REPORTS):
          if not _s.mask.any(): # all observations are masked, don't save
              continue

-         if full:
-             if save_masked:
-                 arrays = [
-                     _s.time, _s.vrad, _s.svrad,
-                     _s.fwhm, _s.fwhm_err,
-                     _s.bispan, _s.bispan_err,
-                     _s.contrast, _s.contrast_err,
-                     _s.rhk, _s.rhk_err,
-                     _s.berv,
-                 ]
-             else:
-                 arrays = [
-                     _s.mtime, _s.mvrad, _s.msvrad,
-                     _s.fwhm[_s.mask], _s.fwhm_err[_s.mask],
-                     _s.bispan[_s.mask], _s.bispan_err[_s.mask],
-                     _s.contrast[_s.mask], _s.contrast_err[_s.mask],
-                     _s.rhk[_s.mask], _s.rhk_err[_s.mask],
-                     _s.berv[_s.mask],
-                 ]
-             if not save_nans:
-                 raise NotImplementedError
-                 # if np.isnan(d).any():
-                 #     # remove observations where any of the indicators are # NaN
-                 #     nan_mask = np.isnan(d[:, 3:]).any(axis=1)
-                 #     d = d[~nan_mask]
-                 #     if self.verbose:
-                 #         logger.warning(f'masking {nan_mask.sum()} observations with NaN in indicators')
-
-             header = '\t'.join(['rjd', 'vrad', 'svrad',
-                                 'fwhm', 'sig_fwhm',
-                                 'bispan', 'sig_bispan',
-                                 'contrast', 'sig_contrast',
-                                 'rhk', 'sig_rhk',
-                                 'berv',
-                                 ])
+         if save_masked:
+             arrays = [_s.time, _s.vrad, _s.svrad]
+             if join_instruments:
+                 arrays += [_s.instrument_array]
+         else:
+             arrays = [_s.mtime, _s.mvrad, _s.msvrad]
+             if join_instruments:
+                 arrays += [_s.instrument_array[_s.mask]]
+
+         if indicators in (False, None):
+             indicator_names = []
+         else:
+             if indicators is True:
+                 indicator_names = indicator_sets["default"]
+             elif isinstance(indicators, str):
+                 try:
+                     indicator_names = indicator_sets[indicators]
+                 except KeyError:
+                     logger.error(f"unknown indicator set '{indicators}'")
+                     logger.error(f"available: {list(indicator_sets.keys())}")
+                     return
+             elif isinstance(indicators, list) and all(isinstance(i, str) for i in indicators):
+                 indicator_names = indicators
+
+         if save_masked:
+             arrays += [getattr(_s, ind) for ind in indicator_names]
+         else:
+             arrays += [getattr(_s, ind)[_s.mask] for ind in indicator_names]
+
+         d = np.stack(arrays, axis=1)
+         if not save_nans:
+             # raise NotImplementedError
+             if np.isnan(d).any():
+                 # remove observations where any of the indicators are # NaN
+                 nan_mask = np.isnan(d[:, 3:]).any(axis=1)
+                 d = d[~nan_mask]
+                 if self.verbose:
+                     msg = f'{inst}: masking {nan_mask.sum()} observations with NaN in indicators'
+                     logger.warning(msg)
+
+         cols = ['rjd', 'vrad', 'svrad']
+         cols += ['inst'] if join_instruments else []
+         cols += indicator_names
+
+         if format == 'rdb':
+             header = '\t'.join(cols)
              header += '\n'
              header += '\t'.join(['-' * len(c) for c in header.strip().split('\t')])
-
          else:
-             if save_masked:
-                 arrays = [_s.time, _s.vrad, _s.svrad]
-             else:
-                 arrays = [_s.mtime, _s.mvrad, _s.msvrad]
-
-             # d = np.stack(arrays, axis=1)
-             header = 'rjd\tvrad\tsvrad\n---\t----\t-----'
+             header = ','.join(cols)

-         file = f'{star_name}_{inst}.rdb'
-         if postfix is not None:
-             file = f'{star_name}_{inst}_{postfix}.rdb'
+         if join_instruments:
+             file = f'{star_name}.{format}'
+             if postfix is not None:
+                 file = f'{star_name}_{postfix}.{format}'
+         else:
+             file = f'{star_name}_{inst}.{format}'
+             if postfix is not None:
+                 file = f'{star_name}_{inst}_{postfix}.{format}'

+         if directory is not None:
+             file = os.path.join(directory, file)
          files.append(file)
-         file = os.path.join(directory, file)

          N = len(arrays[0])
-         with open(file, 'w') as f:
-             f.write(header + '\n')
+         with open(file, 'a' if join_instruments and _i != 0 else 'w') as f:
+             if join_instruments and _i != 0:
+                 pass
+             else:
+                 f.write(header + '\n')
+
              for i in range(N):
                  for j, a in enumerate(arrays):
                      f.write(str(a[i]))
                      if j < len(arrays) - 1:
-                         f.write('\t')
+                         f.write('\t' if format == 'rdb' else ',')
                  f.write('\n')

          # np.savetxt(file, d, header=header, delimiter='\t', comments='', fmt='%f')

-     if self.verbose:
+     if self.verbose and not join_instruments:
          logger.info(f'saving to {file}')

+     if self.verbose and join_instruments:
+         logger.info(f'saving to {files[0]}')
+
+     if join_instruments:
+         files = [files[0]]
+
      return files

  def checksum(self, write_to=None):
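
The reworked `save` subsumes the old `full` flag, which is now deprecated but still accepted through `**kwargs`. A sketch of the new surface (assuming `s` is an RV object):

    # one CSV per instrument, with the default indicator columns
    files = s.save(format='csv', indicators=True)

    # a single RDB file with an extra 'inst' column and a named indicator set
    files = s.save(indicators='CORALIE', join_instruments=True, postfix='v1')

    # explicit indicator columns
    files = s.save(indicators=['fwhm', 'fwhm_err', 'berv'])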
{arvi-0.2.7 → arvi-0.2.9}/arvi/utils.py
@@ -2,6 +2,7 @@ import os
  import sys
  import time
  from contextlib import contextmanager
+
  try:
      from unittest.mock import patch
  except ImportError:
@@ -98,8 +99,9 @@ def sanitize_path(path):
      path = path.replace('*', '_')
      return path

- def pretty_print_table(rows, line_between_rows=True, logger=None):
-     """
+ def pretty_print_table(rows, line_between_rows=True, string=False,
+                        markdown=False, latex=False, logger=None):
+     r"""
      Example Output
      ┌──────┬─────────────┬────┬───────┐
      │ True │ short       │ 77 │ catty │
@@ -110,25 +112,76 @@ def pretty_print_table(rows, line_between_rows=True, logger=None):
      └──────┴─────────────┴────┴───────┘
      """
-     _print = logger.info if logger else print
+     if string:
+         def _print(x, s):
+             s += x + '\n'
+             return s
+     else:
+         if logger:
+             def _print(x, _):
+                 logger.info(x)
+         else:
+             def _print(x, _):
+                 print(x)
+
+     if latex or markdown:
+         line_between_rows = False
+
+     s = ''

      # find the max length of each column
      max_col_lens = list(map(max, zip(*[(len(str(cell)) for cell in row) for row in rows])))

+     if markdown:
+         bar_char = '|'
+     else:
+         bar_char = r'│'
+
      # print the table's top border
-     _print('┌' + '┬'.join('─' * (n + 2) for n in max_col_lens) + '┐')
+     if markdown:
+         pass
+     elif latex:
+         s = _print(r'\begin{table*}', s)
+         # s = _print(r'\centering', s)
+         s = _print(r'\begin{tabular}' + '{' + ' c ' * len(rows[0]) + '}', s)
+     else:
+         s = _print(r'┌' + r'┬'.join(r'─' * (n + 2) for n in max_col_lens) + r'┐', s)

-     rows_separator = '├' + '┼'.join('─' * (n + 2) for n in max_col_lens) + '┤'
+     if markdown:
+         header_separator = bar_char + bar_char.join('-' * (n + 2) for n in max_col_lens) + bar_char

-     row_fstring = ' '.join("{: <%s}" % n for n in max_col_lens)
+     rows_separator = r'├' + r'┼'.join(r'─' * (n + 2) for n in max_col_lens) + r'┤'
+
+     if latex:
+         row_fstring = ' & '.join("{: <%s}" % n for n in max_col_lens)
+     else:
+         row_fstring = bar_char.center(3).join("{: <%s}" % n for n in max_col_lens)

      for i, row in enumerate(rows):
-         _print('│ ' + row_fstring.format(*map(str, row)) + ' │')
+         if markdown and i == 1:
+             s = _print(header_separator, s)
+
+         if latex:
+             s = _print(row_fstring.format(*map(str, row)) + r' \\', s)
+         else:
+             s = _print(bar_char + ' ' + row_fstring.format(*map(str, row)) + ' ' + bar_char, s)

          if line_between_rows and i < len(rows) - 1:
-             _print(rows_separator)
+             s = _print(rows_separator, s)

      # print the table's bottom border
-     _print('└' + '┴'.join('─' * (n + 2) for n in max_col_lens) + '┘')
+     if markdown:
+         pass
+     elif latex:
+         s = _print(r'\end{tabular}', s)
+         s = _print(r'\end{table*}', s)
+     else:
+         s = _print(r'└' + r'┴'.join(r'─' * (n + 2) for n in max_col_lens) + r'┘', s)
+
+     if string:
+         return s

  def strtobool(val):
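
`pretty_print_table` can now render the same rows as a Unicode box table (the default), a Markdown table, or a LaTeX tabular, and optionally return the result as a string instead of printing it. A sketch:

    from arvi.utils import pretty_print_table

    rows = [['col1', 'col2'], [1, 'abc'], [2, 'def']]
    pretty_print_table(rows)                                   # box-drawing table to stdout
    md = pretty_print_table(rows, markdown=True, string=True)  # Markdown source as a str
    tex = pretty_print_table(rows, latex=True, string=True)    # LaTeX table* environment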
@@ -238,3 +291,28 @@ def get_object_fast(file):
      value = f.read(20)
      return value.decode().split("'")[1].strip()

+
+ def get_simbad_oid(self):
+     import requests
+     if isinstance(self, str):
+         star = self
+     else:
+         star = self.star
+     oid = requests.post('https://simbad.cds.unistra.fr/simbad/sim-tap/sync',
+                         data=dict(format='text', request='doQuery', lang='adql', phase='run',
+                                   query=f"SELECT basic.OID FROM basic JOIN ident ON oidref = oid WHERE id = '{star}';"))
+     oid = oid.text.split()[-1]
+     return oid
+
+
+
+ # from https://stackoverflow.com/questions/37765197/darken-or-lighten-a-color-in-matplotlib
+ def adjust_lightness(color, amount=0.5):
+     import matplotlib.colors as mc
+     import colorsys
+     try:
+         c = mc.cnames[color]
+     except KeyError:
+         c = color
+     c = colorsys.rgb_to_hls(*mc.to_rgb(c))
+     return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
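
The new `adjust_lightness` helper scales the HLS lightness channel of any matplotlib-recognized color, so amounts below 1 darken and amounts above 1 lighten. For example:

    from arvi.utils import adjust_lightness

    darker = adjust_lightness('tab:blue', 0.6)   # RGB tuple, darker shade
    lighter = adjust_lightness('C0', 1.4)        # lighter shade of the first cycle color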
{arvi-0.2.7 → arvi-0.2.9/arvi.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: arvi
- Version: 0.2.7
+ Version: 0.2.9
  Summary: The Automated RV Inspector
  Author-email: João Faria <joao.faria@unige.ch>
  License: MIT
All other files (those listed above with +0 -0) are unchanged between 0.2.7 and 0.2.9.