arvi 0.1.26__tar.gz → 0.1.28__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of arvi has been flagged as possibly problematic.

Files changed (59)
  1. arvi-0.1.28/.github/dependabot.yml +6 -0
  2. {arvi-0.1.26 → arvi-0.1.28}/.github/workflows/docs-gh-pages.yml +5 -5
  3. {arvi-0.1.26 → arvi-0.1.28}/.github/workflows/install.yml +0 -3
  4. {arvi-0.1.26 → arvi-0.1.28}/PKG-INFO +2 -2
  5. {arvi-0.1.26 → arvi-0.1.28}/arvi/dace_wrapper.py +39 -13
  6. {arvi-0.1.26 → arvi-0.1.28}/arvi/gaia_wrapper.py +14 -2
  7. {arvi-0.1.26 → arvi-0.1.28}/arvi/plots.py +18 -6
  8. {arvi-0.1.26 → arvi-0.1.28}/arvi/simbad_wrapper.py +83 -4
  9. {arvi-0.1.26 → arvi-0.1.28}/arvi/timeseries.py +76 -23
  10. {arvi-0.1.26 → arvi-0.1.28}/arvi.egg-info/PKG-INFO +2 -2
  11. {arvi-0.1.26 → arvi-0.1.28}/arvi.egg-info/SOURCES.txt +1 -0
  12. {arvi-0.1.26 → arvi-0.1.28}/.github/workflows/python-publish.yml +0 -0
  13. {arvi-0.1.26 → arvi-0.1.28}/.gitignore +0 -0
  14. {arvi-0.1.26 → arvi-0.1.28}/LICENSE +0 -0
  15. {arvi-0.1.26 → arvi-0.1.28}/README.md +0 -0
  16. {arvi-0.1.26 → arvi-0.1.28}/arvi/HZ.py +0 -0
  17. {arvi-0.1.26 → arvi-0.1.28}/arvi/__init__.py +0 -0
  18. {arvi-0.1.26 → arvi-0.1.28}/arvi/ariadne_wrapper.py +0 -0
  19. {arvi-0.1.26 → arvi-0.1.28}/arvi/berv.py +0 -0
  20. {arvi-0.1.26 → arvi-0.1.28}/arvi/binning.py +0 -0
  21. {arvi-0.1.26 → arvi-0.1.28}/arvi/config.py +0 -0
  22. {arvi-0.1.26 → arvi-0.1.28}/arvi/data/extra/HD86226_PFS1.rdb +0 -0
  23. {arvi-0.1.26 → arvi-0.1.28}/arvi/data/extra/HD86226_PFS2.rdb +0 -0
  24. {arvi-0.1.26 → arvi-0.1.28}/arvi/data/extra/metadata.json +0 -0
  25. {arvi-0.1.26 → arvi-0.1.28}/arvi/data/info.svg +0 -0
  26. {arvi-0.1.26 → arvi-0.1.28}/arvi/data/obs_affected_ADC_issues.dat +0 -0
  27. {arvi-0.1.26 → arvi-0.1.28}/arvi/data/obs_affected_blue_cryostat_issues.dat +0 -0
  28. {arvi-0.1.26 → arvi-0.1.28}/arvi/extra_data.py +0 -0
  29. {arvi-0.1.26 → arvi-0.1.28}/arvi/headers.py +0 -0
  30. {arvi-0.1.26 → arvi-0.1.28}/arvi/instrument_specific.py +0 -0
  31. {arvi-0.1.26 → arvi-0.1.28}/arvi/kima_wrapper.py +0 -0
  32. {arvi-0.1.26 → arvi-0.1.28}/arvi/lbl_wrapper.py +0 -0
  33. {arvi-0.1.26 → arvi-0.1.28}/arvi/nasaexo_wrapper.py +0 -0
  34. {arvi-0.1.26 → arvi-0.1.28}/arvi/programs.py +0 -0
  35. {arvi-0.1.26 → arvi-0.1.28}/arvi/reports.py +0 -0
  36. {arvi-0.1.26 → arvi-0.1.28}/arvi/setup_logger.py +0 -0
  37. {arvi-0.1.26 → arvi-0.1.28}/arvi/spectra.py +0 -0
  38. {arvi-0.1.26 → arvi-0.1.28}/arvi/stats.py +0 -0
  39. {arvi-0.1.26 → arvi-0.1.28}/arvi/stellar.py +0 -0
  40. {arvi-0.1.26 → arvi-0.1.28}/arvi/translations.py +0 -0
  41. {arvi-0.1.26 → arvi-0.1.28}/arvi/utils.py +0 -0
  42. {arvi-0.1.26 → arvi-0.1.28}/arvi.egg-info/dependency_links.txt +0 -0
  43. {arvi-0.1.26 → arvi-0.1.28}/arvi.egg-info/requires.txt +0 -0
  44. {arvi-0.1.26 → arvi-0.1.28}/arvi.egg-info/top_level.txt +0 -0
  45. {arvi-0.1.26 → arvi-0.1.28}/docs/API.md +0 -0
  46. {arvi-0.1.26 → arvi-0.1.28}/docs/detailed.md +0 -0
  47. {arvi-0.1.26 → arvi-0.1.28}/docs/index.md +0 -0
  48. {arvi-0.1.26 → arvi-0.1.28}/docs/logo/detective.png +0 -0
  49. {arvi-0.1.26 → arvi-0.1.28}/docs/logo/logo.png +0 -0
  50. {arvi-0.1.26 → arvi-0.1.28}/mkdocs.yml +0 -0
  51. {arvi-0.1.26 → arvi-0.1.28}/pyproject.toml +0 -0
  52. {arvi-0.1.26 → arvi-0.1.28}/setup.cfg +0 -0
  53. {arvi-0.1.26 → arvi-0.1.28}/setup.py +0 -0
  54. {arvi-0.1.26 → arvi-0.1.28}/tests/HD10700-Bcor_ESPRESSO18.rdb +0 -0
  55. {arvi-0.1.26 → arvi-0.1.28}/tests/test_binning.py +0 -0
  56. {arvi-0.1.26 → arvi-0.1.28}/tests/test_create_RV.py +0 -0
  57. {arvi-0.1.26 → arvi-0.1.28}/tests/test_import_object.py +0 -0
  58. {arvi-0.1.26 → arvi-0.1.28}/tests/test_simbad.py +0 -0
  59. {arvi-0.1.26 → arvi-0.1.28}/tests/test_stats.py +0 -0
@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: ".github/workflows/"  # Location of the directory containing the GitHub Actions workflows
+    schedule:
+      interval: "weekly"  # Check for updates weekly
@@ -26,13 +26,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Setup Pages
-        uses: actions/configure-pages@v3
+        uses: actions/configure-pages@v5
 
       - name: Set up Python
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v5
         with:
           python-version: "3.10"
 
@@ -48,7 +48,7 @@ jobs:
           mkdocs build
 
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v2
+        uses: actions/upload-pages-artifact@v3
 
   # Deployment job
   deploy:
@@ -60,4 +60,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
        id: deployment
-       uses: actions/deploy-pages@v2
+       uses: actions/deploy-pages@v4
@@ -5,9 +5,6 @@ name: Install-Test
 
 on:
   push:
-    #branches: [ "main" ]
-  pull_request:
-    branches: [ "main" ]
 
 jobs:
   build:
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: arvi
-Version: 0.1.26
+Version: 0.1.28
 Summary: The Automated RV Inspector
 Author-email: João Faria <joao.faria@unige.ch>
 License: MIT
@@ -11,16 +11,33 @@ from .setup_logger import logger
 from .utils import create_directory, all_logging_disabled, stdout_disabled, tqdm
 
 
-def load_spectroscopy() -> SpectroscopyClass:
+def load_spectroscopy(user=None) -> SpectroscopyClass:
     from .config import config
+    # requesting as public
     if config.request_as_public:
         with all_logging_disabled():
             dace = DaceClass(dace_rc_config_path='none')
         return SpectroscopyClass(dace_instance=dace)
+    # DACERC environment variable is set, should point to a dacerc file with credentials
     if 'DACERC' in os.environ:
         dace = DaceClass(dace_rc_config_path=os.environ['DACERC'])
         return SpectroscopyClass(dace_instance=dace)
-    # elif os.path.exists(os.path.expanduser('~/.dacerc')):
+    # user provided, should be a section in ~/.dacerc
+    if user is not None:
+        import configparser
+        import tempfile
+        config = configparser.ConfigParser()
+        config.read(os.path.expanduser('~/.dacerc'))
+        if user not in config.sections():
+            raise ValueError(f'Section for user "{user}" not found in ~/.dacerc')
+        with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
+            new_config = configparser.ConfigParser()
+            new_config['user'] = config[user]
+            new_config.write(f)
+        dace = DaceClass(dace_rc_config_path=f.name)
+        logger.info(f'using credentials for user {user} in ~/.dacerc')
+        return SpectroscopyClass(dace_instance=dace)
+    # default
     return default_Spectroscopy
 
 @lru_cache()
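The new user argument selects a named section of ~/.dacerc and hands it to the DACE client as a temporary one-section file. A minimal sketch of the assumed usage; the section name "alice" and the key/value layout shown are illustrative, not taken from this diff:

    # assumed ~/.dacerc layout, one section per account:
    #
    #   [user]
    #   key = apikey:<default-key>
    #
    #   [alice]
    #   key = apikey:<alice-key>

    from arvi.dace_wrapper import load_spectroscopy

    spec = load_spectroscopy()               # default client (or DACERC env var)
    spec_alice = load_spectroscopy('alice')  # credentials from the [alice] section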
@@ -111,7 +128,7 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
 
     return arrays
 
-def get_observations_from_instrument(star, instrument, main_id=None, verbose=True):
+def get_observations_from_instrument(star, instrument, user=None, main_id=None, verbose=True):
     """ Query DACE for all observations of a given star and instrument
 
     Args:
@@ -119,6 +136,8 @@ def get_observations_from_instrument(star, instrument, main_id=None, verbose=Tru
            name of the star
        instrument (str):
            instrument name
+       user (str, optional):
+           DACERC user name. Defaults to None.
        main_id (str, optional):
            Simbad main id of target to query DACE id. Defaults to None.
        verbose (bool, optional):
@@ -132,7 +151,7 @@ def get_observations_from_instrument(star, instrument, main_id=None, verbose=Tru
        dict:
            dictionary with data from DACE
    """
-    Spectroscopy = load_spectroscopy()
+    Spectroscopy = load_spectroscopy(user)
    found_dace_id = False
    try:
        dace_id = get_dace_id(star, verbose=verbose)
@@ -233,9 +252,9 @@ def get_observations_from_instrument(star, instrument, main_id=None, verbose=Tru
    # print([r[k1][k2].keys() for k1 in r.keys() for k2 in r[k1].keys()])
    return r
 
-def get_observations(star, instrument=None, main_id=None, verbose=True):
+def get_observations(star, instrument=None, user=None, main_id=None, verbose=True):
    if instrument is None:
-        Spectroscopy = load_spectroscopy()
+        Spectroscopy = load_spectroscopy(user)
        try:
            with stdout_disabled(), all_logging_disabled():
                result = Spectroscopy.get_timeseries(target=star,
@@ -249,7 +268,7 @@ def get_observations(star, instrument=None, main_id=None, verbose=True):
            raise ValueError(msg) from None
    else:
        try:
-            result = get_observations_from_instrument(star, instrument, main_id, verbose)
+            result = get_observations_from_instrument(star, instrument, user, main_id, verbose)
        except ValueError:
            msg = f'no {instrument} observations for {star}'
            raise ValueError(msg) from None
@@ -333,6 +352,12 @@ def check_existing(output_directory, files, type):
        if type in f
    ]
 
+    if type == 'S2D':
+        existing = [
+            f.partition('.fits')[0] for f in os.listdir(output_directory)
+            if 'e2ds' in f
+        ]
+
    # also check for lowercase type
    existing += [
        f.partition('.fits')[0] for f in os.listdir(output_directory)
@@ -345,7 +370,8 @@ def check_existing(output_directory, files, type):
 
    # remove type of file (e.g. _CCF_A)
    existing = [f.partition('_')[0] for f in existing]
-
+    existing = np.unique(existing)
+
    missing = []
    for file in files:
        if any(other in file for other in existing):
@@ -354,9 +380,9 @@ def check_existing(output_directory, files, type):
 
    return np.array(missing)
 
-def download(files, type, output_directory, output_filename=None, quiet=True, pbar=None):
+def download(files, type, output_directory, output_filename=None, user=None, quiet=True, pbar=None):
    """ Download files from DACE """
-    Spectroscopy = load_spectroscopy()
+    Spectroscopy = load_spectroscopy(user)
    if isinstance(files, str):
        files = [files]
    if quiet:
@@ -433,7 +459,7 @@ def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_le
            logger.warning(f'file not found: {file}')
 
 
-def do_download_filetype(type, raw_files, output_directory, clobber=False,
+def do_download_filetype(type, raw_files, output_directory, clobber=False, user=None,
                          verbose=True, chunk_size=20, parallel_limit=30):
    """ Download CCFs / S1Ds / S2Ds from DACE """
    raw_files = np.atleast_1d(raw_files)
@@ -469,7 +495,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
    if n < parallel_limit:
        iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
        for files in tqdm(iterator, total=len(iterator)):
-            download(files, type, output_directory, quiet=False)
+            download(files, type, output_directory, quiet=False, user=user)
        extract_fits(output_directory)
 
    else:
@@ -481,7 +507,7 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
        chunks = list(chunker(raw_files, chunk_size))
        pbar = tqdm(total=len(chunks))
        it1 = [
-            (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', True, pbar)
+            (files, type, output_directory, f'spectroscopy_download{i+1}.tar.gz', user, True, pbar)
            for i, files in enumerate(chunks)
        ]
        it2 = [(output_directory, f'spectroscopy_download{i+1}.tar.gz') for i in range(len(chunks))]
@@ -9,7 +9,11 @@ DATA_PATH = os.path.dirname(__file__)
 DATA_PATH = os.path.join(DATA_PATH, 'data')
 
 QUERY = """
-SELECT TOP 20 gaia_source.designation,gaia_source.source_id,gaia_source.ra,gaia_source.dec,gaia_source.parallax,gaia_source.pmra,gaia_source.pmdec,gaia_source.ruwe,gaia_source.phot_g_mean_mag,gaia_source.bp_rp,gaia_source.radial_velocity,gaia_source.phot_variable_flag,gaia_source.non_single_star,gaia_source.has_xp_continuous,gaia_source.has_xp_sampled,gaia_source.has_rvs,gaia_source.has_epoch_photometry,gaia_source.has_epoch_rv,gaia_source.has_mcmc_gspphot,gaia_source.has_mcmc_msc,gaia_source.teff_gspphot,gaia_source.logg_gspphot,gaia_source.mh_gspphot,gaia_source.distance_gspphot,gaia_source.azero_gspphot,gaia_source.ag_gspphot,gaia_source.ebpminrp_gspphot
+SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
+gaia_source.ra, gaia_source.dec,
+gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
+gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
+gaia_source.radial_velocity, gaia_source.radial_velocity_error
 FROM gaiadr3.gaia_source
 WHERE
 CONTAINS(
@@ -23,7 +27,11 @@ CONTAINS(
 """
 
 QUERY_ID = """
-SELECT TOP 20 gaia_source.designation,gaia_source.source_id,gaia_source.ra,gaia_source.dec,gaia_source.parallax,gaia_source.pmra,gaia_source.pmdec,gaia_source.ruwe,gaia_source.phot_g_mean_mag,gaia_source.bp_rp,gaia_source.radial_velocity,gaia_source.phot_variable_flag,gaia_source.non_single_star,gaia_source.has_xp_continuous,gaia_source.has_xp_sampled,gaia_source.has_rvs,gaia_source.has_epoch_photometry,gaia_source.has_epoch_rv,gaia_source.has_mcmc_gspphot,gaia_source.has_mcmc_msc,gaia_source.teff_gspphot,gaia_source.logg_gspphot,gaia_source.mh_gspphot,gaia_source.distance_gspphot,gaia_source.azero_gspphot,gaia_source.ag_gspphot,gaia_source.ebpminrp_gspphot
+SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
+gaia_source.ra, gaia_source.dec,
+gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
+gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
+gaia_source.radial_velocity, gaia_source.radial_velocity_error
 FROM gaiadr3.gaia_source
 WHERE
 gaia_source.source_id = {id}
@@ -110,6 +118,10 @@ class gaia:
            self.radial_velocity = float(results['radial_velocity'])
        except ValueError:
            self.radial_velocity = None
+        try:
+            self.radial_velocity_error = float(results['radial_velocity_error'])
+        except ValueError:
+            self.radial_velocity_error = None
 
        return
 
@@ -471,13 +471,16 @@ def plot_quantity(self, quantity, ax=None, show_masked=False, instrument=None,
 
    ax.minorticks_on()
 
+    delta = 'Δ' if self._did_adjust_means else ''
+
    ylabel = {
        quantity.lower(): quantity,
-        'fwhm': f'FWHM [{self.units}]',
-        'bispan': f'BIS [{self.units}]',
+        'fwhm': f'{delta}FWHM [{self.units}]',
+        'bispan': f'{delta}BIS [{self.units}]',
        'rhk': r"$\log$ R'$_{HK}$",
        'berv': 'BERV [km/s]',
    }
+
    ax.set_ylabel(ylabel[quantity.lower()])
 
    if remove_50000:
@@ -591,7 +594,10 @@ def gls(self, ax=None, label=None, fap=True, instrument=None,
    ax.semilogx(1/freq, power, picker=picker, label=label, **kwargs)
 
    if fap:
-        ax.axhline(gls.false_alarm_level(0.01),
+        fap_level = 0.01
+        if isinstance(fap, float):
+            fap_level = fap
+        ax.axhline(gls.false_alarm_level(fap_level),
                   color='k', alpha=0.2, zorder=-1)
 
    ax.set(xlabel='Period [days]', ylabel='Normalized power', ylim=(0, None))
@@ -698,12 +704,18 @@ def gls_quantity(self, quantity, ax=None, fap=True, instrument=None,
    else:
        fig = ax.figure
 
+    spp = kwargs.get('samples_per_peak', 5)
+
    gls = LombScargle(t, y, ye)
-    freq, power = gls.autopower(maximum_frequency=1.0)
-    ax.semilogx(1/freq, power, picker=picker)
+    freq, power = gls.autopower(maximum_frequency=1.0, samples_per_peak=spp)
+
+    ax.semilogx(1/freq, power, picker=picker, **kwargs)
 
    if fap:
-        ax.axhline(gls.false_alarm_level(0.01),
+        fap_level = 0.01
+        if isinstance(fap, float):
+            fap_level = fap
+        ax.axhline(gls.false_alarm_level(fap_level),
                   color='k', alpha=0.2, zorder=-1)
 
    ax.set(xlabel='Period [days]', ylabel='Normalized power', ylim=(0, None))
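With these two changes, fap may now be a float giving the false-alarm probability to draw, and gls_quantity forwards samples_per_peak and other kwargs to the periodogram and plot. A sketch of the assumed calls; the target name is illustrative, and these helpers are assumed to be bound as RV methods, as their self argument suggests:

    s = RV('HD10700')
    s.gls(fap=0.001)                                         # draw the 0.1% false-alarm level
    s.gls_quantity('fwhm', fap=0.001, samples_per_peak=10)   # denser frequency grid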
@@ -1,8 +1,15 @@
 import os
+import numpy as np
 import requests
+from dataclasses import dataclass
 
 import pysweetcat
 
+try:
+    from uncertainties import ufloat
+except ImportError:
+    ufloat = lambda x, y: x
+
 from .translations import translate
 
 DATA_PATH = os.path.dirname(__file__)
@@ -15,19 +22,35 @@ SELECT basic.OID,
        main_id,
        pmra,
        pmdec,
-       plx_value,
+       plx_value, plx_err,
        rvz_radvel,
        sp_type
 FROM basic JOIN ident ON oidref = oid
 WHERE id = '{star}';
 """
 
+# SELECT filter, flux, flux_err
+# FROM basic JOIN ident ON oid = ident.oidref JOIN flux ON oid = flux.oidref
+# WHERE id = 'HD23079';
+
 BV_QUERY = """
 SELECT B, V FROM allfluxes
 JOIN ident USING(oidref)
 WHERE id = '{star}';
 """
 
+FILTERS_QUERY = """
+SELECT filter, flux, flux_err, bibcode FROM flux
+JOIN ident USING(oidref)
+WHERE id = '{star}';
+"""
+
+MEAS_QUERY = """
+SELECT teff, log_g, log_g_prec, fe_h, fe_h_prec, bibcode FROM mesFe_H
+JOIN ident USING(oidref)
+WHERE id = '{star}';
+"""
+
 IDS_QUERY = """
 SELECT ids FROM ids
 JOIN ident USING(oidref)
@@ -40,6 +63,13 @@ JOIN ident ON oidref = oid
 WHERE id = '{star}';
 """
 
+@dataclass
+class Measurements:
+    teff: list
+    logg: list
+    feh: list
+    bibcode: list
+
 
 def run_query(query):
    url = 'http://simbad.u-strasbg.fr/simbad/sim-tap/sync'
@@ -52,7 +82,7 @@ def run_query(query):
        raise IndexError(err)
    return response.content.decode()
 
-def parse_table(table, cols=None, values=None):
+def parse_table1(table, cols=None, values=None):
    header = table.splitlines()[0].split('|')
    if cols is None:
        cols = list(map(str.strip, header))
@@ -66,6 +96,29 @@ def parse_table(table, cols=None, values=None):
    values = [value.replace('"', '') for value in values]
    return cols, values
 
+def parse_tablen(table, cols=None, values=None):
+    header = table.splitlines()[0].split('|')
+    cols = list(map(str.strip, header))
+    values = [list(map(str.strip, row.split('|'))) for row in table.splitlines()[2:]]
+    return cols, values
+
+def parse_value(value, err=None, prec=None):
+    try:
+        v = float(value)
+        if err:
+            try:
+                v = ufloat(float(value), float(err))
+            except ValueError:
+                pass
+        if prec:
+            try:
+                v = ufloat(float(value), 10**-int(prec))
+            except ValueError:
+                pass
+    except ValueError:
+        v = np.nan
+    return v
+
 
 effective_temperatures = {
    'F0': 7350, 'F2': 7050, 'F3': 6850, 'F5': 6700, 'F6': 6550, 'F7': 6400, 'F8': 6300,
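parse_value turns a SIMBAD string field into a float and, when the uncertainties package is available, attaches an error taken either from an explicit err field or from a precision prec (interpreted as an error of 10**-prec). A quick sketch of the expected behaviour, with illustrative inputs:

    parse_value('4.44')                # -> 4.44
    parse_value('4.44', err='0.02')    # -> 4.44+/-0.02 (ufloat)
    parse_value('-0.52', prec='2')     # -> -0.52+/-0.01, since 10**-2 = 0.01
    parse_value('~')                   # -> nan (not parseable as a number)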
@@ -120,14 +173,37 @@ class simbad:
 
        try:
            table1 = run_query(query=QUERY.format(star=self.star))
-            cols, values = parse_table(table1)
+            cols, values = parse_table1(table1)
 
            table2 = run_query(query=BV_QUERY.format(star=self.star))
-            cols, values = parse_table(table2, cols, values)
+            cols, values = parse_table1(table2, cols, values)
 
            table3 = run_query(query=IDS_QUERY.format(star=self.star))
            line = table3.splitlines()[2]
            self.ids = line.replace('"', '').replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').split('|')
+
+            table4 = run_query(query=FILTERS_QUERY.format(star=self.star))
+            for row in table4.splitlines()[2:]:
+                filter_name, mag, mag_err, bibcode = row.replace('"', '').split('|')
+                filter_name = filter_name.strip()
+                try:
+                    setattr(self, '_' + filter_name, ufloat(float(mag), float(mag_err)))
+                except ValueError:
+                    setattr(self, '_' + filter_name, float(mag))
+
+            # measurements table
+            table5 = run_query(query=MEAS_QUERY.format(star=self.star))
+            _teff, _logg, _feh, _bibcode = [], [], [], []
+            for row in table5.splitlines()[2:]:
+                teff, log_g, log_g_prec, fe_h, fe_h_prec, bibcode = row.replace('"', '').split('|')
+                _bibcode.append(bibcode)
+                _teff.append(parse_value(teff))
+                _logg.append(parse_value(log_g, prec=log_g_prec))
+                _feh.append(parse_value(fe_h, prec=fe_h_prec))
+
+            self.measurements = Measurements(_teff, _logg, _feh, _bibcode)
+
+
        except IndexError:
            raise ValueError(f'simbad query for {star} failed')
 
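The per-bibcode stellar parameters and per-filter fluxes gathered above end up as attributes of the simbad object. A sketch of how they might be read back; the star name and the availability of each value are illustrative:

    s = simbad('HD10700')
    s.measurements.teff       # list of Teff values, one per catalogued measurement
    s.measurements.bibcode    # matching bibcodes
    s._V                      # V-band flux with uncertainty, from the flux table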
@@ -176,6 +252,9 @@ class simbad:
        sp_type = self.sp_type
        return f'{self.star} ({V=}, {sp_type=})'
 
+    @property
+    def bmv(self):
+        return self.B - self.V
 
 
 def argsort_by_spectral_type(sptypes):
@@ -59,6 +59,7 @@ class RV:
    only_latest_pipeline: bool = field(init=True, repr=False, default=True)
    load_extra_data: Union[bool, str] = field(init=True, repr=False, default=False)
    check_drs_qc: bool = field(init=True, repr=False, default=True)
+    user: bool = field(init=True, repr=False, default=None)
    #
    units = 'm/s'
    _child: bool = field(init=True, repr=False, default=False)
@@ -185,7 +186,7 @@ class RV:
 
            with timer():
                self.dace_result = get_observations(self.__star__, self.instrument,
-                                                    main_id=mid, verbose=self.verbose)
+                                                    user=self.user, main_id=mid, verbose=self.verbose)
        except ValueError as e:
            # querying DACE failed, should we raise an error?
            if self._raise_on_error:
@@ -309,6 +310,10 @@ class RV:
        new_self._build_arrays()
        return new_self
 
+    def __iter__(self):
+        for inst in self.instruments:
+            yield getattr(self, inst)
+
 
    def reload(self):
        self._did_secular_acceleration = False
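The new __iter__ makes an RV object directly iterable over its per-instrument children, so loops no longer need an explicit getattr. A sketch, with the star name illustrative:

    s = RV('HD10700')
    for inst in s:               # equivalent to (getattr(s, name) for name in s.instruments)
        print(inst.mtime.size)   # e.g. number of observations per instrument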
@@ -511,7 +516,8 @@ class RV:
        return s
 
    @classmethod
-    def from_rdb(cls, files, star=None, instrument=None, units='ms', **kwargs):
+    def from_rdb(cls, files, star=None, instrument=None, units='ms',
+                 header_skip=2, **kwargs):
        """ Create an RV object from an rdb file or a list of rdb files
 
        Args:
@@ -523,6 +529,8 @@ class RV:
                Name of the instrument(s). If None, try to infer it from file name
            units (str, optional):
                Units of the radial velocities. Defaults to 'ms'.
+            header_skip (int, optional):
+                Number of lines to skip in the header. Defaults to 2.
 
        Examples:
            s = RV.from_rdb('star_HARPS.rdb')
@@ -565,14 +573,14 @@ class RV:
        s = cls(star, _child=True, **kwargs)
 
        def find_column(data, names):
-            has_col = np.array([name in data.dtype.fields for name in names])
+            has_col = np.array([name.casefold() in data.dtype.fields for name in names])
            if any(has_col):
                col = np.where(has_col)[0][0]
                return np.atleast_1d(data[names[col]])
            return False
 
        for i, (f, instrument) in enumerate(zip(files, instruments)):
-            data = np.loadtxt(f, skiprows=2, usecols=range(3), unpack=True)
+            data = np.loadtxt(f, skiprows=header_skip, usecols=range(3), unpack=True)
            if data.ndim == 1:
                data = data.reshape(-1, 1)
@@ -614,11 +622,13 @@ class RV:
 
            # try to find FWHM and uncertainty
            if (v := find_column(data, ['fwhm'])) is not False:  # walrus !!
-                _s.fwhm = v
+                _s.fwhm = v * factor
                if (sv := find_column(data, ['sfwhm', 'fwhm_err', 'sig_fwhm'])) is not False:
-                    _s.fwhm_err = sv
+                    _s.fwhm_err = sv * factor
+                    logger.debug('found columns for FWHM and uncertainty') if verbose else None
                else:
                    _s.fwhm_err = 2 * _s.svrad
+                    logger.debug('found column for FWHM') if verbose else None
            else:
                _s.fwhm = np.full_like(time, np.nan)
                _s.fwhm_err = np.full_like(time, np.nan)
@@ -632,6 +642,7 @@ class RV:
                _s.rhk_err = np.full_like(time, np.nan)
                if (sv := find_column(data, ['srhk', 'rhk_err', 'sig_rhk'])) is not False:
                    _s.rhk_err = sv
+                    logger.debug('found columns for logRhk and uncertainty') if verbose else None
            else:
                _s.rhk = np.full_like(time, np.nan)
                _s.rhk_err = np.full_like(time, np.nan)
@@ -640,11 +651,11 @@ class RV:
                _quantities.append('rhk_err')
 
            # try to find BISPAN and uncertainty
-            if (v := find_column(data, ['bispan'])) is not False:
-                _s.bispan = v
+            if (v := find_column(data, ['bis', 'bispan'])) is not False:
+                _s.bispan = v * factor
                _s.bispan_err = np.full_like(time, np.nan)
-                if (sv := find_column(data, ['sbispan'])) is not False:
-                    _s.bispan_err = sv
+                if (sv := find_column(data, ['sbispan', 'sig_bispan', 'bispan_err'])) is not False:
+                    _s.bispan_err = sv * factor
            else:
                _s.bispan = np.full_like(time, np.nan)
                _s.bispan_err = np.full_like(time, np.nan)
@@ -659,18 +670,57 @@ class RV:
                _s.berv = np.full_like(time, np.nan)
            _quantities.append('berv')
 
-            # other quantities, but all NaNs
-            for q in ['caindex', 'ccf_asym', 'contrast', 'haindex', 'naindex', 'sindex']:
-                setattr(_s, q, np.full_like(time, np.nan))
-                setattr(_s, q + '_err', np.full_like(time, np.nan))
+            # other quantities
+            msg = ''
+
+            for q, possible in {
+                'caindex': ['caindex', 'ca', 'caII'],
+                'ccf_asym': ['ccf_asym'],
+                'contrast': ['contrast'],
+                'haindex': ['haindex', 'ha', 'halpha'],
+                'heindex': ['heindex', 'he', 'heII'],
+                'naindex': ['naindex', 'na'],
+                'sindex': ['sindex', 's_mw'],
+            }.items():
+                # try to find columns for each quantity
+                if (v := find_column(data, possible)) is not False:
+                    msg += f'{q}, '
+                    setattr(_s, q, v)
+                    # try to find uncertainty column for each quantity
+                    possible_errors = ['s' + p for p in possible] + ['sig_' + p for p in possible] + [p + '_err' for p in possible]
+                    if (sv := find_column(data, possible_errors)) is not False:
+                        setattr(_s, q + '_err', sv)
+                    else:
+                        setattr(_s, q + '_err', np.full_like(time, np.nan))
+                else:
+                    setattr(_s, q, np.full_like(time, np.nan))
+                    setattr(_s, q + '_err', np.full_like(time, np.nan))
                _quantities.append(q)
                _quantities.append(q + '_err')
+
+            if verbose and msg != '':
+                if msg.endswith(', '):
+                    msg = msg[:-2]
+                logger.debug('found columns for ' + msg)
+
+
+            # more values
            for q in ['texp', ]:
-                setattr(_s, q, np.full_like(time, np.nan))
+                if (v := find_column(data, q)) is not False:
+                    setattr(_s, q, v)
+                else:
+                    setattr(_s, q, np.full_like(time, np.nan))
                _quantities.append(q)
+
+            # strings
            for q in ['ccf_mask', 'date_night', 'prog_id', 'raw_file', 'pub_reference']:
-                setattr(_s, q, np.full(time.size, ''))
+                if (v := find_column(data, q)) is not False:
+                    setattr(_s, q, v)
+                else:
+                    setattr(_s, q, np.full(time.size, ''))
                _quantities.append(q)
+
+            # booleans
            for q in ['drs_qc', ]:
                setattr(_s, q, np.full(time.size, True))
                _quantities.append(q)
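Column matching casefolds the requested names, and each quantity now has a list of accepted aliases, with error columns searched as s<name>, sig_<name>, and <name>_err. A sketch of an rdb header that this loop would now pick up; the column names come from the alias lists above, but the exact layout is illustrative:

    rjd    vrad    svrad    fwhm    sfwhm    bis    sbispan    halpha    shalpha
    ---    ----    -----    ----    -----    ---    -------    ------    -------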
@@ -1029,7 +1079,8 @@ class RV:
            logger.warning('may need to provide `top_level` in kwargs to find file')
            do_symlink_filetype('CCF', files[:limit], directory, **kwargs)
        else:
-            do_download_filetype('CCF', files[:limit], directory, verbose=self.verbose, **kwargs)
+            do_download_filetype('CCF', files[:limit], directory,
+                                 verbose=self.verbose, user=self.user, **kwargs)
 
        if load:
            try:
1083
1134
  logger.warning('may need to provide `top_level` in kwargs to find file')
1084
1135
  do_symlink_filetype('S1D', files[:limit], directory, **kwargs)
1085
1136
  else:
1086
- do_download_filetype('S1D', files[:limit], directory, verbose=self.verbose, **kwargs)
1137
+ do_download_filetype('S1D', files[:limit], directory,
1138
+ verbose=self.verbose, user=self.user, **kwargs)
1087
1139
 
1088
1140
  def download_s2d(self, instrument=None, index=None, limit=None,
1089
1141
  directory=None, symlink=False, **kwargs):
@@ -1115,7 +1167,9 @@ class RV:
            logger.warning('may need to provide `top_level` in kwargs to find file')
            do_symlink_filetype('S2D', files[:limit], directory, **kwargs)
        else:
-            do_download_filetype('S2D', files[:limit], directory, verbose=self.verbose, **kwargs)
+            do_download_filetype('S2D', files[:limit], directory,
+                                 verbose=self.verbose, user=self.user, **kwargs)
+
 
 
 from .plots import plot, plot_fwhm, plot_bispan, plot_contrast, plot_rhk, plot_berv, plot_quantity
@@ -1300,10 +1354,9 @@ class RV:
 
    def remove_single_observations(self):
        """ Remove instruments for which there is a single observation """
-        instruments = deepcopy(self.instruments)
-        for inst in instruments:
-            if getattr(self, inst).mtime.size == 1:
-                self.remove_instrument(inst)
+        singles = [i for i in self.instruments if getattr(self, i).mtime.size == 1]
+        for inst in singles:
+            self.remove_instrument(inst, strict=True)
 
    def remove_prog_id(self, prog_id):
        """ Remove observations from a given program ID """
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: arvi
-Version: 0.1.26
+Version: 0.1.28
 Summary: The Automated RV Inspector
 Author-email: João Faria <joao.faria@unige.ch>
 License: MIT
@@ -4,6 +4,7 @@ README.md
 mkdocs.yml
 pyproject.toml
 setup.py
+.github/dependabot.yml
 .github/workflows/docs-gh-pages.yml
 .github/workflows/install.yml
 .github/workflows/python-publish.yml