arvi 0.2.11__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. {arvi-0.2.11 → arvi-0.3.0}/.github/workflows/docs-gh-pages.yml +1 -1
  2. {arvi-0.2.11 → arvi-0.3.0}/.github/workflows/install.yml +1 -1
  3. {arvi-0.2.11 → arvi-0.3.0}/.github/workflows/python-publish.yml +1 -1
  4. {arvi-0.2.11/arvi.egg-info → arvi-0.3.0}/PKG-INFO +1 -1
  5. {arvi-0.2.11 → arvi-0.3.0}/arvi/__init__.py +1 -0
  6. {arvi-0.2.11 → arvi-0.3.0}/arvi/dace_wrapper.py +74 -48
  7. {arvi-0.2.11 → arvi-0.3.0}/arvi/gaia_wrapper.py +12 -2
  8. {arvi-0.2.11 → arvi-0.3.0}/arvi/kima_wrapper.py +33 -22
  9. {arvi-0.2.11 → arvi-0.3.0}/arvi/simbad_wrapper.py +15 -3
  10. {arvi-0.2.11 → arvi-0.3.0}/arvi/timeseries.py +86 -35
  11. {arvi-0.2.11 → arvi-0.3.0/arvi.egg-info}/PKG-INFO +1 -1
  12. {arvi-0.2.11 → arvi-0.3.0}/.github/dependabot.yml +0 -0
  13. {arvi-0.2.11 → arvi-0.3.0}/.gitignore +0 -0
  14. {arvi-0.2.11 → arvi-0.3.0}/LICENSE +0 -0
  15. {arvi-0.2.11 → arvi-0.3.0}/README.md +0 -0
  16. {arvi-0.2.11 → arvi-0.3.0}/arvi/HZ.py +0 -0
  17. {arvi-0.2.11 → arvi-0.3.0}/arvi/ariadne_wrapper.py +0 -0
  18. {arvi-0.2.11 → arvi-0.3.0}/arvi/berv.py +0 -0
  19. {arvi-0.2.11 → arvi-0.3.0}/arvi/binning.py +0 -0
  20. {arvi-0.2.11 → arvi-0.3.0}/arvi/config.py +0 -0
  21. {arvi-0.2.11 → arvi-0.3.0}/arvi/data/extra/HD86226_PFS1.rdb +0 -0
  22. {arvi-0.2.11 → arvi-0.3.0}/arvi/data/extra/HD86226_PFS2.rdb +0 -0
  23. {arvi-0.2.11 → arvi-0.3.0}/arvi/data/extra/metadata.json +0 -0
  24. {arvi-0.2.11 → arvi-0.3.0}/arvi/data/info.svg +0 -0
  25. {arvi-0.2.11 → arvi-0.3.0}/arvi/data/obs_affected_ADC_issues.dat +0 -0
  26. {arvi-0.2.11 → arvi-0.3.0}/arvi/data/obs_affected_blue_cryostat_issues.dat +0 -0
  27. {arvi-0.2.11 → arvi-0.3.0}/arvi/exofop_wrapper.py +0 -0
  28. {arvi-0.2.11 → arvi-0.3.0}/arvi/extra_data.py +0 -0
  29. {arvi-0.2.11 → arvi-0.3.0}/arvi/headers.py +0 -0
  30. {arvi-0.2.11 → arvi-0.3.0}/arvi/instrument_specific.py +0 -0
  31. {arvi-0.2.11 → arvi-0.3.0}/arvi/kepmodel_wrapper.py +0 -0
  32. {arvi-0.2.11 → arvi-0.3.0}/arvi/lbl_wrapper.py +0 -0
  33. {arvi-0.2.11 → arvi-0.3.0}/arvi/nasaexo_wrapper.py +0 -0
  34. {arvi-0.2.11 → arvi-0.3.0}/arvi/plots.py +0 -0
  35. {arvi-0.2.11 → arvi-0.3.0}/arvi/programs.py +0 -0
  36. {arvi-0.2.11 → arvi-0.3.0}/arvi/reports.py +0 -0
  37. {arvi-0.2.11 → arvi-0.3.0}/arvi/setup_logger.py +0 -0
  38. {arvi-0.2.11 → arvi-0.3.0}/arvi/sophie_wrapper.py +0 -0
  39. {arvi-0.2.11 → arvi-0.3.0}/arvi/spectra.py +0 -0
  40. {arvi-0.2.11 → arvi-0.3.0}/arvi/stats.py +0 -0
  41. {arvi-0.2.11 → arvi-0.3.0}/arvi/stellar.py +0 -0
  42. {arvi-0.2.11 → arvi-0.3.0}/arvi/translations.py +0 -0
  43. {arvi-0.2.11 → arvi-0.3.0}/arvi/utils.py +0 -0
  44. {arvi-0.2.11 → arvi-0.3.0}/arvi.egg-info/SOURCES.txt +0 -0
  45. {arvi-0.2.11 → arvi-0.3.0}/arvi.egg-info/dependency_links.txt +0 -0
  46. {arvi-0.2.11 → arvi-0.3.0}/arvi.egg-info/requires.txt +0 -0
  47. {arvi-0.2.11 → arvi-0.3.0}/arvi.egg-info/top_level.txt +0 -0
  48. {arvi-0.2.11 → arvi-0.3.0}/docs/API.md +0 -0
  49. {arvi-0.2.11 → arvi-0.3.0}/docs/detailed.ipynb +0 -0
  50. {arvi-0.2.11 → arvi-0.3.0}/docs/downloading_data.md +0 -0
  51. {arvi-0.2.11 → arvi-0.3.0}/docs/index.md +0 -0
  52. {arvi-0.2.11 → arvi-0.3.0}/docs/logo/detective.png +0 -0
  53. {arvi-0.2.11 → arvi-0.3.0}/docs/logo/logo.png +0 -0
  54. {arvi-0.2.11 → arvi-0.3.0}/docs/stylesheets/extra.css +0 -0
  55. {arvi-0.2.11 → arvi-0.3.0}/mkdocs.yml +0 -0
  56. {arvi-0.2.11 → arvi-0.3.0}/pyproject.toml +0 -0
  57. {arvi-0.2.11 → arvi-0.3.0}/setup.cfg +0 -0
  58. {arvi-0.2.11 → arvi-0.3.0}/setup.py +0 -0
  59. {arvi-0.2.11 → arvi-0.3.0}/tests/HD10700-Bcor_ESPRESSO18.rdb +0 -0
  60. {arvi-0.2.11 → arvi-0.3.0}/tests/test_binning.py +0 -0
  61. {arvi-0.2.11 → arvi-0.3.0}/tests/test_config.py +0 -0
  62. {arvi-0.2.11 → arvi-0.3.0}/tests/test_create_RV.py +0 -0
  63. {arvi-0.2.11 → arvi-0.3.0}/tests/test_import_object.py +0 -0
  64. {arvi-0.2.11 → arvi-0.3.0}/tests/test_simbad.py +0 -0
  65. {arvi-0.2.11 → arvi-0.3.0}/tests/test_stats.py +0 -0
@@ -26,7 +26,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
   - name: Checkout
- uses: actions/checkout@v5
+ uses: actions/checkout@v6
 
   - name: Setup Pages
   uses: actions/configure-pages@v5
@@ -16,7 +16,7 @@ jobs:
   python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
 
   steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
   - name: Set up Python ${{ matrix.python-version }}
   uses: actions/setup-python@v6
   with:
@@ -22,7 +22,7 @@ jobs:
   runs-on: ubuntu-latest
 
   steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@v6
   - name: Set up Python
   uses: actions/setup-python@v6
   with:
@@ -1,6 +1,6 @@
   Metadata-Version: 2.4
   Name: arvi
- Version: 0.2.11
+ Version: 0.3.0
   Summary: The Automated RV Inspector
   Author-email: João Faria <joao.faria@unige.ch>
   License: MIT
@@ -20,6 +20,7 @@ def __getattr__(name: str):
   if name in (
   '_ipython_canary_method_should_not_exist_',
   '_ipython_display_',
+ '__custom_documentations__',
   '_repr_mimebundle_',
   '__wrapped__'
   ):
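The guarded names above are attribute probes made by IPython and documentation tooling. A minimal sketch of the PEP 562 module-level __getattr__ pattern is shown below; the fallback behaviour is an assumption for illustration only, not arvi's actual implementation:

    # sketch of a module-level __getattr__ guard (PEP 562); illustrative only
    _PROBED_NAMES = (
        '_ipython_canary_method_should_not_exist_',
        '_ipython_display_',
        '__custom_documentations__',
        '_repr_mimebundle_',
        '__wrapped__',
    )

    def __getattr__(name: str):
        # reject tooling probes immediately instead of treating them as a
        # (hypothetical) target name to be resolved lazily
        if name in _PROBED_NAMES:
            raise AttributeError(name)
        raise AttributeError(f'module has no attribute {name!r}')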
@@ -229,55 +229,81 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None,
   mask3 = mask2 & (result['ins_mode'] == ins_mode)
   _nan = np.full(mask3.sum(), np.nan)
 
- r[str(inst)][str(pipe)][str(ins_mode)] = {
- 'texp': result['texp'][mask3],
- 'bispan': result['spectro_ccf_bispan'][mask3],
- 'bispan_err': result['spectro_ccf_bispan_err'][mask3],
- 'drift_noise': result['spectro_cal_drift_noise'][mask3],
- 'rjd': result['obj_date_bjd'][mask3],
- 'cal_therror': _nan,
- 'fwhm': result['spectro_ccf_fwhm'][mask3],
- 'fwhm_err': result['spectro_ccf_fwhm_err'][mask3],
- 'rv': result['spectro_ccf_rv'][mask3],
- 'rv_err': result['spectro_ccf_rv_err'][mask3],
- 'berv': result['spectro_cal_berv'][mask3],
- 'ccf_noise': np.sqrt(
- np.square(result['spectro_ccf_rv_err'][mask3]) - np.square(result['spectro_cal_drift_noise'][mask3])
- ),
- 'rhk': result['spectro_analysis_rhk'][mask3],
- 'rhk_err': result['spectro_analysis_rhk_err'][mask3],
- 'contrast': result['spectro_ccf_contrast'][mask3],
- 'contrast_err': result['spectro_ccf_contrast_err'][mask3],
- 'cal_thfile': result['spectro_cal_thfile'][mask3],
- 'spectroFluxSn50': result['spectro_flux_sn50'][mask3],
- 'protm08': result['spectro_analysis_protm08'][mask3],
- 'protm08_err': result['spectro_analysis_protm08_err'][mask3],
- 'caindex': result['spectro_analysis_ca'][mask3],
- 'caindex_err': result['spectro_analysis_ca_err'][mask3],
- 'pub_reference': result['pub_ref'][mask3],
- 'drs_qc': result['spectro_drs_qc'][mask3],
- 'haindex': result['spectro_analysis_halpha'][mask3],
- 'haindex_err': result['spectro_analysis_halpha_err'][mask3],
- 'protn84': result['spectro_analysis_protn84'][mask3],
- 'protn84_err': result['spectro_analysis_protn84_err'][mask3],
- 'naindex': result['spectro_analysis_na'][mask3],
- 'naindex_err': result['spectro_analysis_na_err'][mask3],
- 'snca2': _nan,
- 'mask': result['spectro_ccf_mask'][mask3],
- 'public': result['public'][mask3],
- 'spectroFluxSn20': result['spectro_flux_sn20'][mask3],
- 'sindex': result['spectro_analysis_smw'][mask3],
- 'sindex_err': result['spectro_analysis_smw_err'][mask3],
- 'drift_used': _nan,
- 'ccf_asym': result['spectro_ccf_asym'][mask3],
- 'ccf_asym_err': result['spectro_ccf_asym_err'][mask3],
- 'date_night': result['date_night'][mask3],
- 'raw_file': result['file_rootpath'][mask3],
- 'prog_id': result['prog_id'][mask3],
- 'th_ar': result['th_ar'][mask3],
- 'th_ar1': result['th_ar1'][mask3],
- 'th_ar2': result['th_ar2'][mask3],
+ translations = {
+ 'obj_date_bjd': 'rjd',
+ 'spectro_drs_qc': 'drs_qc',
+ 'spectro_cal_berv_mx': 'bervmax',
+ 'pub_ref': 'pub_reference',
+ 'file_rootpath': 'raw_file',
+ 'spectro_ccf_asym': 'ccf_asym',
+ 'spectro_ccf_asym_err': 'ccf_asym_err',
   }
+ new_result = {}
+ for key in result.keys():
+ if key in translations:
+ new_key = translations[key]
+ else:
+ new_key = key
+ new_key = new_key.replace('spectro_ccf_', '')
+ new_key = new_key.replace('spectro_cal_', '')
+ new_key = new_key.replace('spectro_analysis_', '')
+ new_result[new_key] = result[key][mask3]
+
+ new_result['ccf_noise'] = np.sqrt(
+ np.square(result['spectro_ccf_rv_err'][mask3]) - np.square(result['spectro_cal_drift_noise'][mask3])
+ )
+
+ r[str(inst)][str(pipe)][str(ins_mode)] = new_result
+
+ # r[str(inst)][str(pipe)][str(ins_mode)] = {
+ # 'texp': result['texp'][mask3],
+ # 'bispan': result['spectro_ccf_bispan'][mask3],
+ # 'bispan_err': result['spectro_ccf_bispan_err'][mask3],
+ # 'drift_noise': result['spectro_cal_drift_noise'][mask3],
+ # 'rjd': result['obj_date_bjd'][mask3],
+ # 'cal_therror': _nan,
+ # 'fwhm': result['spectro_ccf_fwhm'][mask3],
+ # 'fwhm_err': result['spectro_ccf_fwhm_err'][mask3],
+ # 'rv': result['spectro_ccf_rv'][mask3],
+ # 'rv_err': result['spectro_ccf_rv_err'][mask3],
+ # 'berv': result['spectro_cal_berv'][mask3],
+ # 'ccf_noise': np.sqrt(
+ # np.square(result['spectro_ccf_rv_err'][mask3]) - np.square(result['spectro_cal_drift_noise'][mask3])
+ # ),
+ # 'rhk': result['spectro_analysis_rhk'][mask3],
+ # 'rhk_err': result['spectro_analysis_rhk_err'][mask3],
+ # 'contrast': result['spectro_ccf_contrast'][mask3],
+ # 'contrast_err': result['spectro_ccf_contrast_err'][mask3],
+ # 'cal_thfile': result['spectro_cal_thfile'][mask3],
+ # 'spectroFluxSn50': result['spectro_flux_sn50'][mask3],
+ # 'protm08': result['spectro_analysis_protm08'][mask3],
+ # 'protm08_err': result['spectro_analysis_protm08_err'][mask3],
+ # 'caindex': result['spectro_analysis_ca'][mask3],
+ # 'caindex_err': result['spectro_analysis_ca_err'][mask3],
+ # 'pub_reference': result['pub_ref'][mask3],
+ # 'drs_qc': result['spectro_drs_qc'][mask3],
+ # 'haindex': result['spectro_analysis_halpha'][mask3],
+ # 'haindex_err': result['spectro_analysis_halpha_err'][mask3],
+ # 'protn84': result['spectro_analysis_protn84'][mask3],
+ # 'protn84_err': result['spectro_analysis_protn84_err'][mask3],
+ # 'naindex': result['spectro_analysis_na'][mask3],
+ # 'naindex_err': result['spectro_analysis_na_err'][mask3],
+ # 'snca2': _nan,
+ # 'mask': result['spectro_ccf_mask'][mask3],
+ # 'public': result['public'][mask3],
+ # 'spectroFluxSn20': result['spectro_flux_sn20'][mask3],
+ # 'sindex': result['spectro_analysis_smw'][mask3],
+ # 'sindex_err': result['spectro_analysis_smw_err'][mask3],
+ # 'drift_used': _nan,
+ # 'ccf_asym': result['spectro_ccf_asym'][mask3],
+ # 'ccf_asym_err': result['spectro_ccf_asym_err'][mask3],
+ # 'date_night': result['date_night'][mask3],
+ # 'raw_file': result['file_rootpath'][mask3],
+ # 'prog_id': result['prog_id'][mask3],
+ # 'th_ar': result['th_ar'][mask3],
+ # 'th_ar1': result['th_ar1'][mask3],
+ # 'th_ar2': result['th_ar2'][mask3],
+ # }
 
   # print(r.keys())
   # print([r[k].keys() for k in r.keys()])
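The refactor above replaces the long hand-written mapping with a small translations dict plus prefix stripping. A toy, self-contained illustration of what the renaming loop produces (the input dict here is invented; the real result comes from DACE):

    import numpy as np

    result = {
        'obj_date_bjd': np.array([60000.1, 60001.2]),
        'spectro_ccf_rv': np.array([10.0, 11.0]),
        'spectro_analysis_rhk_err': np.array([0.01, 0.02]),
    }
    mask3 = np.array([True, False])
    translations = {'obj_date_bjd': 'rjd'}

    new_result = {}
    for key in result:
        new_key = translations.get(key, key)
        # strip the DACE prefixes so e.g. 'spectro_ccf_rv' becomes 'rv'
        for prefix in ('spectro_ccf_', 'spectro_cal_', 'spectro_analysis_'):
            new_key = new_key.replace(prefix, '')
        new_result[new_key] = result[key][mask3]

    print(sorted(new_result))   # ['rhk_err', 'rjd', 'rv']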
@@ -9,7 +9,8 @@ DATA_PATH = os.path.join(DATA_PATH, 'data')
   QUERY = """
   SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
   gaia_source.ra, gaia_source.dec,
- gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
+ gaia_source.parallax, gaia_source.parallax_error,
+ gaia_source.pmra, gaia_source.pmdec,
   gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
   gaia_source.radial_velocity, gaia_source.radial_velocity_error
   FROM gaiadr3.gaia_source
@@ -27,7 +28,8 @@ CONTAINS(
   QUERY_ID = """
   SELECT TOP 20 gaia_source.designation, gaia_source.source_id,
   gaia_source.ra, gaia_source.dec,
- gaia_source.parallax, gaia_source.pmra, gaia_source.pmdec,
+ gaia_source.parallax, gaia_source.parallax_error,
+ gaia_source.pmra, gaia_source.pmdec,
   gaia_source.ruwe, gaia_source.phot_g_mean_mag, gaia_source.bp_rp,
   gaia_source.radial_velocity, gaia_source.radial_velocity_error
   FROM gaiadr3.gaia_source
@@ -120,6 +122,7 @@ class gaia:
   self.pmdec = float(results['pmdec'])
   self.coords = SkyCoord(self.ra, self.dec, unit='deg')
   self.plx = float(results['parallax'])
+ self.plx_err = float(results['parallax_error'])
   try:
   self.radial_velocity = float(results['radial_velocity'])
   except ValueError:
@@ -131,5 +134,12 @@ class gaia:
 
   return
 
+ def distance(self):
+ """ Calculate the distance to the star as 1 / parallax [pc] """
+ from astropy import units as u
+ d = (self.plx * u.mas).to(u.parsec,
+ equivalencies=u.equivalencies.parallax())
+ return d
+
   def __repr__(self):
   return f'{self.star} (DR3 id={self.dr3_id})'
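The new gaia.distance() method converts the Gaia parallax to a distance with astropy's parallax equivalency. The same conversion outside the class, with an invented parallax value:

    from astropy import units as u

    plx = 74.0  # parallax in mas (invented value)
    d = (plx * u.mas).to(u.parsec, equivalencies=u.equivalencies.parallax())
    print(d)    # ~13.5 pc, i.e. roughly 1000 / parallax[mas]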
@@ -1,19 +1,8 @@
   import os
- import numpy as np
+ import ast
 
   from .setup_logger import setup_logger
 
- try:
- import kima
- from kima.pykima.utils import chdir
- from kima import distributions
- from kima import RVData, HGPMdata
- from kima import RVmodel, GPmodel, RVHGPMmodel
- kima_available = True
- except ImportError:
- kima_available = False
-
-
   def try_to_guess_prior(model, prior):
   if 'jitter' in prior:
   return 'Jprior'
@@ -23,8 +12,14 @@ def try_to_guess_prior(model, prior):
 
 
   def run_kima(self, run=False, load=False, run_directory=None,
- model=RVmodel, priors={}, **kwargs):
- if not kima_available:
+ model='RVmodel', priors={}, **kwargs):
+ try:
+ import kima
+ from kima.pykima.utils import chdir
+ from kima import distributions
+ from kima import RVData, HGPMdata
+ from kima import RVmodel, GPmodel, RVHGPMmodel
+ except ImportError:
   raise ImportError('kima not available, please install with `pip install kima`')
 
   logger = setup_logger()
@@ -57,7 +52,7 @@ def run_kima(self, run=False, load=False, run_directory=None,
   model.trend = kwargs.pop('trend', False)
   model.degree = kwargs.pop('degree', 0)
 
- if isinstance(model, RVmodel):
+ if isinstance(model, (RVmodel, RVHGPMmodel)):
   model.studentt = kwargs.pop('studentt', False)
 
   if isinstance(model, GPmodel):
@@ -81,6 +76,16 @@ def run_kima(self, run=False, load=False, run_directory=None,
   model.pm_dec_bary_prior = priors.pop('pm_dec_bary_prior',
   distributions.Gaussian(pm_data.pm_dec_hg, pm_data.sig_hg_dec))
 
+ KO = kwargs.pop('known_object', False)
+ if KO:
+ if isinstance(KO, int) and KO is not True:
+ model.set_known_object(KO)
+ else:
+ try:
+ model.set_known_object(kwargs.pop('n_known_object'))
+ except KeyError:
+ msg = 'if `known_object` is True, specify `n_known_object` or pass `known_object` as an integer'
+ raise ValueError(msg) from None
 
   for k, v in priors.items():
   try:
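The new `known_object` handling accepts either an integer (the number of known objects) or True together with a separate `n_known_object` keyword. The extra `KO is not True` check is needed because bool is a subclass of int in Python:

    for KO in (True, 2):
        print(KO, isinstance(KO, int), isinstance(KO, int) and KO is not True)
    # True True False   -> falls through to the `n_known_object` branch
    # 2    True True    -> calls model.set_known_object(2)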
@@ -99,21 +104,27 @@ def run_kima(self, run=False, load=False, run_directory=None,
   if run_directory is None:
   run_directory = os.getcwd()
 
- if run:
- # TODO: use signature of kima.run to pop the correct kwargs
- # model_name = model.__class__.__name__
- # model_name = f'kima.{model_name}.{model_name}'
- # signature, defaults = [sig for sig in kima.run.__nb_signature__ if model_name in sig[0]]
+ diagnostic = kwargs.pop('diagnostic', False)
 
+ if run:
+ model_name = model.__class__.__name__
+ model_name = f'kima.{model_name}.{model_name}'
+ signature, _, defaults = [sig for sig in kima._run_really.__nb_signature__ if model_name in sig[0]][0]
+ signature = signature.replace('\\', '')
+ args = ast.parse(signature + ':\n pass').body[0].args
+ defaults = {arg.arg: d for arg, d in zip(args.args[1:], defaults)}
+ defaults.update(kwargs)
   with chdir(run_directory):
- kima.run(model, **kwargs)
+ kima.run(model, **defaults)
 
   if isinstance(model, RVHGPMmodel):
   data = (data, pm_data)
 
   if load:
   with chdir(run_directory):
- res = kima.load_results(model)
+ res = kima.load_results(model, diagnostic=diagnostic)
+
+ res.star = self.star
   return data, model, res
 
   return data, model
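Instead of forwarding **kwargs straight to kima.run, the new code reads the nanobind signature of kima._run_really, rebuilds the default arguments with ast, and lets the user's kwargs override them. A toy version of the ast step with an invented signature string (the diff pairs the parsed argument names with the defaults tuple that nanobind provides; here the defaults are read back from the parsed signature itself):

    import ast

    signature = 'def run(model, steps=100, num_threads=4, data_file="data.txt")'

    # turn the signature into a parsable function definition and grab its args;
    # only the first argument has no default, so args[1:] pairs up with defaults
    args = ast.parse(signature + ':\n pass').body[0].args
    defaults = {arg.arg: ast.literal_eval(d)
                for arg, d in zip(args.args[1:], args.defaults)}
    print(defaults)   # {'steps': 100, 'num_threads': 4, 'data_file': 'data.txt'}

    defaults.update({'steps': 5000})   # user kwargs win, as in defaults.update(kwargs)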
@@ -204,7 +204,7 @@ class simbad:
   try:
   table2 = run_query(query=BV_QUERY.format(star=self.star))
   if _debug:
- print('table2:', table2)
+ print('table2:\n', table2)
   cols, values = parse_table1(table2, cols, values)
   except IndexError:
   self.B = self.V = np.nan
@@ -228,6 +228,14 @@ class simbad:
   setattr(self, '_' + filter_name, ufloat(float(mag), float(mag_err)))
   except ValueError:
   setattr(self, '_' + filter_name, float(mag))
+
+ # substitute missing V magnitude
+ if filter_name == 'V' and values[cols.index('V')] == '':
+ values[cols.index('V')] = mag
+ # substitute missing B magnitude
+ if filter_name == 'B' and values[cols.index('B')] == '':
+ values[cols.index('B')] = mag
+
 
   # measurements table
   table5 = run_query(query=MEAS_QUERY.format(star=self.star))
@@ -285,8 +293,12 @@ class simbad:
   except IndexError:
   if self.sp_type == '':
   if len(self.measurements.teff) > 0:
- self.teff = int(np.mean(self.measurements.teff))
- self.sp_type = teff_to_sptype(self.teff)
+ try:
+ self.teff = int(np.mean(self.measurements.teff))
+ self.sp_type = teff_to_sptype(self.teff)
+ except ValueError:
+ self.teff = np.nan
+ self.sp_type = ''
   elif self.sp_type[:2] in EFFECTIVE_TEMPERATURES:
   self.teff = EFFECTIVE_TEMPERATURES[self.sp_type[:2]]
 
@@ -145,6 +145,7 @@ class RV(ISSUES, REPORTS):
   if self.verbose:
   logger.info('querying Simbad...')
 
+ # TODO: removing the 'A' might not be a good idea
   # complicated way to query Simbad with self.__star__ or, if that
   # fails, try after removing a trailing 'A'
   for target in set([self.__star__, self.__star__.replace('A', '')]):
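The retry loop above queries Simbad with the name as given and, if different, with 'A' removed; the new TODO flags that str.replace removes every 'A', not just a trailing component letter. A quick illustration (star names chosen for the example):

    for star in ('HD 128620A', 'HAT-P-1'):
        print(sorted({star, star.replace('A', '')}))
    # ['HD 128620', 'HD 128620A']  -- intended case: query with and without the trailing 'A'
    # ['HAT-P-1', 'HT-P-1']        -- why the TODO warns: the 'A' in 'HAT' is dropped too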
@@ -590,9 +591,12 @@ class RV(ISSUES, REPORTS):
   """ Total time span of the (masked) observations """
   return np.ptp(self.mtime)
 
- def _index_from_instrument_index(self, index, instrument):
+ def _index_from_instrument_index(self, index, instrument, masked=True):
   ind = np.where(self.instrument_array == instrument)[0]
- return ind[getattr(self, instrument).mask][index]
+ if masked:
+ return ind[getattr(self, instrument).mask][index]
+ else:
+ return ind[index]
 
   # @property
   def _tt(self, f=20) -> np.ndarray:
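_index_from_instrument_index translates indices that are local to one instrument into indices into the full, concatenated arrays; the new masked=False path skips the per-instrument mask (used by remove_more_than_n_per_night further down). A toy illustration of the translation (instrument names and data invented):

    import numpy as np

    instrument_array = np.array(['ESPRESSO', 'HARPS', 'ESPRESSO', 'ESPRESSO'])
    ind = np.where(instrument_array == 'ESPRESSO')[0]   # -> [0 2 3]

    local_index = np.array([1, 2])   # 2nd and 3rd ESPRESSO points
    print(ind[local_index])          # -> [2 3], positions in the full arrays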
@@ -651,7 +655,11 @@ class RV(ISSUES, REPORTS):
   return s
 
   @classmethod
- def from_arrays(cls, star, time, vrad, svrad, inst, **kwargs):
+ def from_arrays(cls, star, time, vrad, svrad, instrument:str, **kwargs):
+ if 'inst' in kwargs:
+ logger.warning('`inst` is deprecated. Use `instrument` instead.')
+ instrument = kwargs.pop('inst')
+
   s = cls(star, _child=True)
   time, vrad, svrad = map(np.atleast_1d, (time, vrad, svrad))
 
@@ -673,8 +681,12 @@ class RV(ISSUES, REPORTS):
   for k, v in kwargs.items():
   setattr(s, k, np.atleast_1d(v))
 
- s.instruments = [inst]
   s._quantities = np.array(list(kwargs.keys()))
+ _instrument = instrument.replace(' ', '_').replace('-', '_')
+ s.instruments = [_instrument]
+
+ setattr(s, _instrument, deepcopy(s))
+ s._child = False
 
   return s
 
@@ -1039,7 +1051,7 @@ class RV(ISSUES, REPORTS):
   time = np.array([i.bjd for i in CCFs])
   vrad = np.array([i.RV*1e3 for i in CCFs])
   svrad = np.array([i.RVerror*1e3 for i in CCFs])
- _s = RV.from_arrays(star, time, vrad, svrad, inst=instrument)
+ _s = RV.from_arrays(star, time, vrad, svrad, instrument=instrument)
 
   _quantities = []
 
@@ -1066,9 +1078,13 @@ class RV(ISSUES, REPORTS):
   _s.texp = np.array([i.HDU[0].header['EXPTIME'] for i in CCFs])
   _quantities.append('texp')
 
- _s.berv = np.array([i.HDU[0].header['HIERARCH ESO QC BERV'] for i in CCFs])
+ try:
+ _s.berv = np.array([i.HDU[0].header['HIERARCH ESO QC BERV'] for i in CCFs])
+ except KeyError:
+ _s.berv = np.full_like(time, np.nan)
   _quantities.append('berv')
 
+
   _s.date_night = np.array([
   i.HDU[0].header['DATE-OBS'].split('T')[0] for i in CCFs
   ])
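With the change to from_arrays, callers now pass the new instrument= keyword, as in the hunk above. A hedged usage sketch (the import path follows the file layout, arvi/timeseries.py; star name and values are invented):

    import numpy as np
    from arvi.timeseries import RV

    time = np.array([60000.1, 60001.3, 60002.2])
    vrad = np.array([10.2, 9.8, 10.5])    # invented values
    svrad = np.array([0.5, 0.5, 0.6])

    # new keyword; spaces and dashes in the name are normalised to underscores,
    # so the per-instrument copy created by from_arrays ends up as s.MY_SPEC
    s = RV.from_arrays('toy star', time, vrad, svrad, instrument='MY-SPEC')

    # the old inst= spelling still works but logs a deprecation warning
    s = RV.from_arrays('toy star', time, vrad, svrad, inst='MY-SPEC')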
@@ -1567,6 +1583,11 @@ class RV(ISSUES, REPORTS):
   condition (ndarray):
   Boolean array of the same length as the observations
   """
+ condition = np.asarray(condition, dtype=bool)
+ if not np.any(condition):
+ if self.verbose:
+ logger.info('no points to remove')
+ return
   if self.verbose:
   inst = np.unique(self.instrument_array[condition])
   logger.info(f"Removing {condition.sum()} points from instruments {inst}")
@@ -1657,6 +1678,20 @@ class RV(ISSUES, REPORTS):
   for inst in singles:
   self.remove_instrument(inst, strict=True)
 
+ def remove_more_than_n_per_night(self, n=2):
+ """ Remove whenever there are more than `n` observations per night """
+ ind = np.array([], dtype=int)
+ for s in self:
+ # how many observations per night
+ n_night = (np.abs(s.time[:, None] - s.time[None, :]) < 0.5).sum(axis=0)
+ # indices for this instrument
+ ind_s = np.where(n_night >= n)[0]
+ # translate to indices in self
+ ind_self = self._index_from_instrument_index(ind_s, s.instruments[0], masked=False)
+ ind = np.r_[ind, ind_self]
+ if len(ind) > 0:
+ self.remove_point(ind)
+
   def remove_prog_id(self, prog_id):
   """ Remove observations from a given program ID """
   from glob import has_magic
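The new remove_more_than_n_per_night counts, for every observation, how many observations fall within half a day of it. A standalone illustration of that counting step (times invented, in days):

    import numpy as np

    time = np.array([60000.10, 60000.15, 60000.30, 60005.20])

    # pairwise |dt| < 0.5 d marks points that share a night (each point counts itself)
    n_night = (np.abs(time[:, None] - time[None, :]) < 0.5).sum(axis=0)
    print(n_night)                      # [3 3 3 1]
    print(np.where(n_night >= 2)[0])    # [0 1 2] -> candidates for removal with n=2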
@@ -1695,7 +1730,7 @@ class RV(ISSUES, REPORTS):
   ind = np.where(to_remove)[0]
   self.remove_point(ind)
 
- def choose_n_points(self, n: int, seed=None, instrument=None):
+ def choose_n_points(self, n: int, instrument=None, seed=None):
   """ Randomly choose `n` observations and mask out the remaining ones
 
   Args:
@@ -1706,21 +1741,21 @@ class RV(ISSUES, REPORTS):
   instrument (str or list, optional):
   For which instrument to choose points (default is all).
   """
- instruments = self._check_instrument(instrument)
+ if not self._check_instrument(instrument):
+ return
+ # instruments = self._check_instrument(instrument)
+ mask_for_this_inst = self._instrument_mask(instrument)
   rng = np.random.default_rng(seed=seed)
- for inst in instruments:
- # s = getattr(self, inst)
- mask_for_this_inst = self.obs == self.instruments.index(inst) + 1
- # only choose if there are more than n points
- if self.mask[mask_for_this_inst].sum() > n:
- if self.verbose:
- logger.info(f'selecting {n} points from {inst}')
- # indices of points for this instrument which are not masked already
- available = np.where(self.mask & mask_for_this_inst)[0]
- # choose n randomly
- i = rng.choice(available, size=n, replace=False)
- # mask the others out
- self.mask[np.setdiff1d(available, i)] = False
+ # only choose if there are more than n points
+ if self.mask[mask_for_this_inst].sum() > n:
+ if self.verbose:
+ logger.info(f'selecting {n} points from {instrument}')
+ # indices of points for this instrument which are not masked already
+ available = np.where(self.mask & mask_for_this_inst)[0]
+ # choose n randomly
+ i = rng.choice(available, size=n, replace=False)
+ # mask the others out
+ self.mask[np.setdiff1d(available, i)] = False
   self._propagate_mask_changes()
 
 
@@ -1924,10 +1959,14 @@ class RV(ISSUES, REPORTS):
   if np.isnan(d[m]).all():
   continue
 
- result = dosigmaclip(d[m], low=sigma, high=sigma)
+ result = dosigmaclip(d[m & self.mask], low=sigma, high=sigma)
   # n = self.vrad[m].size - result.clipped.size
 
- ind = m & self.mask & ((d < result.lower) | (d > result.upper))
+ # NOTE: result.lower and result.upper are updated values, calculated
+ # *after* the last point has been removed. So the previous solution
+ # getting points outside the range [result.lower, result.upper] is
+ # not correct and we need to use result.clipped instead.
+ ind = m & self.mask & ~np.isin(d, result.clipped)
   n = ind.sum()
 
   if self.verbose and n > 0:
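The NOTE above concerns the return value of the sigma-clipping routine (dosigmaclip is presumably scipy.stats.sigmaclip, whose result exposes the same clipped/lower/upper fields): the surviving values are listed in result.clipped, so membership in that array, rather than a comparison against the final lower/upper bounds, identifies the removed points. A small illustration with invented values:

    import numpy as np
    from scipy.stats import sigmaclip

    d = np.array([0.1, 0.0, -0.2, 0.3, 0.1, 5.0])   # one obvious outlier
    result = sigmaclip(d, low=2, high=2)

    print(result.clipped)                 # the five surviving values
    removed = ~np.isin(d, result.clipped)
    print(np.where(removed)[0])           # [5]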
@@ -2020,9 +2059,9 @@ class RV(ISSUES, REPORTS):
 
 
 
- def bin(self):
+ def bin(self, daily=False):
   """
- Nightly bin the observations.
+ Bin the observations, nightly by default.
   !!! Warning
   This creates and returns a new object and does not modify self.
@@ -2033,6 +2072,8 @@ class RV(ISSUES, REPORTS):
   # store original object
   snew._unbinned = deepcopy(self)
 
+ time_offset = 0.5 if daily else 0.0
+
   all_bad_quantities = []
 
   for inst in snew.instruments:
@@ -2046,7 +2087,8 @@ class RV(ISSUES, REPORTS):
   if s.mtime.size == 0:
   continue
 
- tb, vb, svb = binRV(s.mtime, s.mvrad, s.msvrad)
+ tb, vb, svb = binRV(s.mtime + time_offset, s.mvrad, s.msvrad)
+ tb -= time_offset
   s.vrad = vb
   s.svrad = svb
 
@@ -2057,14 +2099,14 @@ class RV(ISSUES, REPORTS):
 
   # treat date_night specially, basically doing a group-by
   if q == 'date_night':
- inds = binRV(s.mtime, None, None, binning_indices=True)
+ inds = binRV(s.mtime + time_offset, None, None, binning_indices=True)
   setattr(s, q, Q[s.mask][inds])
   continue
 
   # treat ccf_mask specially, doing a 'unique' bin
   if q == 'ccf_mask':
   ccf_mask = getattr(s, q)[s.mask]
- setattr(s, q, bin_ccf_mask(s.mtime, ccf_mask))
+ setattr(s, q, bin_ccf_mask(s.mtime + time_offset, ccf_mask))
   continue
 
   if Q.dtype != np.float64:
@@ -2079,12 +2121,14 @@ class RV(ISSUES, REPORTS):
   elif q + '_err' in s._quantities:
   Qerr = getattr(s, q + '_err')
   if (Qerr == 0.0).all(): # if all errors are NaN, don't use them
- _, yb = binRV(s.mtime, Q[s.mask], stat='mean', tstat='mean')
+ _, yb = binRV(s.mtime + time_offset, Q[s.mask],
+ stat='mean', tstat='mean')
   else:
   if (Qerr <= 0.0).any(): # if any error is <= 0, set it to NaN
   Qerr[Qerr <= 0.0] = np.nan
 
- _, yb, eb = binRV(s.mtime, Q[s.mask], Qerr[s.mask], remove_nans=False)
+ _, yb, eb = binRV(s.mtime + time_offset, Q[s.mask], Qerr[s.mask],
+ remove_nans=False)
   setattr(s, q + '_err', eb)
 
   setattr(s, q, yb)
@@ -2093,7 +2137,7 @@ class RV(ISSUES, REPORTS):
   with warnings.catch_warnings():
   warnings.filterwarnings('ignore', category=RuntimeWarning)
   try:
- _, yb = binRV(s.mtime, Q[s.mask],
+ _, yb = binRV(s.mtime + time_offset, Q[s.mask],
   stat=np.nanmean, tstat=np.nanmean)
   setattr(s, q, yb)
   except TypeError:
@@ -2395,7 +2439,7 @@ class RV(ISSUES, REPORTS):
   if new_units not in possible:
   msg = f"new_units must be one of 'm/s', 'km/s', 'ms', 'kms', got '{new_units}'"
   raise ValueError(msg)
-
+
   new_units = possible[new_units]
   if new_units == self.units:
   return
@@ -2403,11 +2447,14 @@ class RV(ISSUES, REPORTS):
   if self.verbose:
   logger.info(f"changing units from {self.units} to {new_units}")
 
- if new_units == 'm/s' and self.units == 'km/s':
+ if new_units == 'm/s' and self.units in ('km/s', 'kms'):
   factor = 1e3
- elif new_units == 'km/s' and self.units == 'm/s':
+ elif new_units == 'km/s' and self.units in ('m/s', 'ms'):
   factor = 1e-3
-
+ else:
+ logger.warning(f"no known conversion from {self.units} to {new_units}")
+ return
+
   for inst in self.instruments:
   s = getattr(self, inst)
   s.vrad *= factor
@@ -2469,6 +2516,10 @@ class RV(ISSUES, REPORTS):
   by_last_observation (bool, optional):
   Sort by last observation date
   """
+ if self.N == 0:
+ if self.verbose:
+ logger.warning("no observations to sort")
+ return
   if by_last_observation:
   by_first_observation = False
   if by_first_observation:
@@ -1,6 +1,6 @@
   Metadata-Version: 2.4
   Name: arvi
- Version: 0.2.11
+ Version: 0.3.0
   Summary: The Automated RV Inspector
   Author-email: João Faria <joao.faria@unige.ch>
   License: MIT