arvi 0.1.30__tar.gz → 0.2.11__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. {arvi-0.1.30 → arvi-0.2.11}/.github/workflows/docs-gh-pages.yml +7 -2
  2. {arvi-0.1.30 → arvi-0.2.11}/.github/workflows/install.yml +2 -2
  3. {arvi-0.1.30 → arvi-0.2.11}/.github/workflows/python-publish.yml +3 -3
  4. {arvi-0.1.30/arvi.egg-info → arvi-0.2.11}/PKG-INFO +2 -2
  5. {arvi-0.1.30 → arvi-0.2.11}/README.md +1 -1
  6. {arvi-0.1.30 → arvi-0.2.11}/arvi/__init__.py +10 -20
  7. {arvi-0.1.30 → arvi-0.2.11}/arvi/binning.py +2 -1
  8. {arvi-0.1.30 → arvi-0.2.11}/arvi/config.py +15 -4
  9. {arvi-0.1.30 → arvi-0.2.11}/arvi/dace_wrapper.py +132 -72
  10. arvi-0.2.11/arvi/exofop_wrapper.py +62 -0
  11. {arvi-0.1.30 → arvi-0.2.11}/arvi/extra_data.py +11 -7
  12. {arvi-0.1.30 → arvi-0.2.11}/arvi/gaia_wrapper.py +10 -4
  13. {arvi-0.1.30 → arvi-0.2.11}/arvi/instrument_specific.py +178 -40
  14. arvi-0.2.11/arvi/kepmodel_wrapper.py +296 -0
  15. {arvi-0.1.30 → arvi-0.2.11}/arvi/kima_wrapper.py +42 -7
  16. {arvi-0.1.30 → arvi-0.2.11}/arvi/nasaexo_wrapper.py +7 -3
  17. {arvi-0.1.30 → arvi-0.2.11}/arvi/plots.py +142 -33
  18. {arvi-0.1.30 → arvi-0.2.11}/arvi/programs.py +8 -4
  19. arvi-0.2.11/arvi/reports.py +311 -0
  20. arvi-0.2.11/arvi/setup_logger.py +24 -0
  21. {arvi-0.1.30 → arvi-0.2.11}/arvi/simbad_wrapper.py +90 -38
  22. arvi-0.2.11/arvi/sophie_wrapper.py +111 -0
  23. {arvi-0.1.30 → arvi-0.2.11}/arvi/stats.py +30 -5
  24. {arvi-0.1.30 → arvi-0.2.11}/arvi/stellar.py +26 -1
  25. {arvi-0.1.30 → arvi-0.2.11}/arvi/timeseries.py +683 -226
  26. {arvi-0.1.30 → arvi-0.2.11}/arvi/utils.py +152 -11
  27. {arvi-0.1.30 → arvi-0.2.11/arvi.egg-info}/PKG-INFO +2 -2
  28. {arvi-0.1.30 → arvi-0.2.11}/arvi.egg-info/SOURCES.txt +7 -1
  29. {arvi-0.1.30 → arvi-0.2.11}/docs/API.md +10 -6
  30. arvi-0.2.11/docs/detailed.ipynb +660 -0
  31. arvi-0.2.11/docs/downloading_data.md +57 -0
  32. {arvi-0.1.30 → arvi-0.2.11}/docs/index.md +4 -1
  33. arvi-0.2.11/docs/stylesheets/extra.css +3 -0
  34. arvi-0.2.11/mkdocs.yml +79 -0
  35. arvi-0.2.11/tests/test_config.py +22 -0
  36. arvi-0.2.11/tests/test_create_RV.py +43 -0
  37. arvi-0.2.11/tests/test_import_object.py +33 -0
  38. arvi-0.1.30/arvi/reports.py +0 -130
  39. arvi-0.1.30/arvi/setup_logger.py +0 -11
  40. arvi-0.1.30/docs/detailed.md +0 -0
  41. arvi-0.1.30/mkdocs.yml +0 -51
  42. arvi-0.1.30/tests/test_create_RV.py +0 -21
  43. arvi-0.1.30/tests/test_import_object.py +0 -17
  44. {arvi-0.1.30 → arvi-0.2.11}/.github/dependabot.yml +0 -0
  45. {arvi-0.1.30 → arvi-0.2.11}/.gitignore +0 -0
  46. {arvi-0.1.30 → arvi-0.2.11}/LICENSE +0 -0
  47. {arvi-0.1.30 → arvi-0.2.11}/arvi/HZ.py +0 -0
  48. {arvi-0.1.30 → arvi-0.2.11}/arvi/ariadne_wrapper.py +0 -0
  49. {arvi-0.1.30 → arvi-0.2.11}/arvi/berv.py +0 -0
  50. {arvi-0.1.30 → arvi-0.2.11}/arvi/data/extra/HD86226_PFS1.rdb +0 -0
  51. {arvi-0.1.30 → arvi-0.2.11}/arvi/data/extra/HD86226_PFS2.rdb +0 -0
  52. {arvi-0.1.30 → arvi-0.2.11}/arvi/data/extra/metadata.json +0 -0
  53. {arvi-0.1.30 → arvi-0.2.11}/arvi/data/info.svg +0 -0
  54. {arvi-0.1.30 → arvi-0.2.11}/arvi/data/obs_affected_ADC_issues.dat +0 -0
  55. {arvi-0.1.30 → arvi-0.2.11}/arvi/data/obs_affected_blue_cryostat_issues.dat +0 -0
  56. {arvi-0.1.30 → arvi-0.2.11}/arvi/headers.py +0 -0
  57. {arvi-0.1.30 → arvi-0.2.11}/arvi/lbl_wrapper.py +0 -0
  58. {arvi-0.1.30 → arvi-0.2.11}/arvi/spectra.py +0 -0
  59. {arvi-0.1.30 → arvi-0.2.11}/arvi/translations.py +0 -0
  60. {arvi-0.1.30 → arvi-0.2.11}/arvi.egg-info/dependency_links.txt +0 -0
  61. {arvi-0.1.30 → arvi-0.2.11}/arvi.egg-info/requires.txt +0 -0
  62. {arvi-0.1.30 → arvi-0.2.11}/arvi.egg-info/top_level.txt +0 -0
  63. {arvi-0.1.30 → arvi-0.2.11}/docs/logo/detective.png +0 -0
  64. {arvi-0.1.30 → arvi-0.2.11}/docs/logo/logo.png +0 -0
  65. {arvi-0.1.30 → arvi-0.2.11}/pyproject.toml +0 -0
  66. {arvi-0.1.30 → arvi-0.2.11}/setup.cfg +0 -0
  67. {arvi-0.1.30 → arvi-0.2.11}/setup.py +0 -0
  68. {arvi-0.1.30 → arvi-0.2.11}/tests/HD10700-Bcor_ESPRESSO18.rdb +0 -0
  69. {arvi-0.1.30 → arvi-0.2.11}/tests/test_binning.py +0 -0
  70. {arvi-0.1.30 → arvi-0.2.11}/tests/test_simbad.py +0 -0
  71. {arvi-0.1.30 → arvi-0.2.11}/tests/test_stats.py +0 -0
.github/workflows/docs-gh-pages.yml

@@ -26,13 +26,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v5

     - name: Setup Pages
       uses: actions/configure-pages@v5

     - name: Set up Python
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
         python-version: "3.10"

@@ -42,6 +42,11 @@ jobs:
         pip install mkdocs mkdocs-material
         pip install "mkdocstrings[crystal,python]" mkdocs-autorefs mkdocstrings-python
         pip install mkdocs-git-revision-date-localized-plugin
+        pip install mkdocs-jupyter
+
+    - name: Install arvi itself
+      run: |
+        pip install arvi

     - name: Build with Mkdocs
       run: |
.github/workflows/install.yml

@@ -16,9 +16,9 @@ jobs:
         python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]

     steps:
-    - uses: actions/checkout@v4
+    - uses: actions/checkout@v5
     - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
         python-version: ${{ matrix.python-version }}
.github/workflows/python-publish.yml

@@ -22,9 +22,9 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-    - uses: actions/checkout@v4
+    - uses: actions/checkout@v5
     - name: Set up Python
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
         python-version: '3.x'
     - name: Install dependencies

@@ -34,7 +34,7 @@ jobs:
     - name: Build package
       run: python -m build
     - name: Publish package
-      uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
+      uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e
       with:
         user: __token__
         password: ${{ secrets.PYPI_API_TOKEN }}
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: arvi
-Version: 0.1.30
+Version: 0.2.11
 Summary: The Automated RV Inspector
 Author-email: João Faria <joao.faria@unige.ch>
 License: MIT

@@ -23,7 +23,7 @@ Requires-Dist: kepmodel
 Dynamic: license-file

 <p align="center">
-  <img width = "140" src="https://github.com/j-faria/arvi/blob/main/docs/logo/logo.png?raw=true"/>
+  <img width = "140" src="https://raw.githubusercontent.com/j-faria/arvi/refs/heads/main/docs/logo/logo.png"/>
 </p>

 This package sits alongside [DACE](https://dace.unige.ch/) to help with the
README.md

@@ -1,5 +1,5 @@
 <p align="center">
-  <img width = "140" src="https://github.com/j-faria/arvi/blob/main/docs/logo/logo.png?raw=true"/>
+  <img width = "140" src="https://raw.githubusercontent.com/j-faria/arvi/refs/heads/main/docs/logo/logo.png"/>
 </p>

 This package sits alongside [DACE](https://dace.unige.ch/) to help with the
arvi/__init__.py

@@ -1,22 +1,22 @@
-__all__ = ['RV']
+__all__ = ['RV', 'config', 'simbad', 'gaia']

 from importlib.metadata import version, PackageNotFoundError
-
-from .config import config
-from .timeseries import RV
-
 try:
     __version__ = version("arvi")
 except PackageNotFoundError:
     # package is not installed
     pass

-## OLD
-# # the __getattr__ function is always called twice, so we need this
-# # to only build and return the RV object on the second time
-# _ran_once = False
+from .config import config
+from .simbad_wrapper import simbad
+from .gaia_wrapper import gaia
+
+from .timeseries import RV

 def __getattr__(name: str):
+    if not config.fancy_import:
+        raise AttributeError
+
     if name in (
         '_ipython_canary_method_should_not_exist_',
         '_ipython_display_',

@@ -29,15 +29,5 @@ def __getattr__(name: str):
         globals()[name] = RV(name)
         return globals()[name]
     except ValueError as e:
-        raise ImportError(e) from None
-        # raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
-
-## OLD
-# # can't do it any other way :(
-# global _ran_once
+        raise AttributeError(e)

-# if _ran_once:
-#     _ran_once = False
-#     return RV(name)
-# else:
-#     _ran_once = True
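Note: the rewritten __getattr__ above implements arvi's "fancy import" of star names, now gated behind the new config.fancy_import option. A minimal usage sketch (hypothetical star name, not part of this diff; assumes the target resolves on DACE):

    import arvi
    arvi.config.fancy_import = True      # the default, per the config.py hunk below
    from arvi import HD10700             # __getattr__ builds RV('HD10700') on first access

Switching the raise from ImportError to AttributeError also matters here: Python's import machinery converts an AttributeError from a module-level __getattr__ into the usual "cannot import name" ImportError, so from-imports of unknown names still fail cleanly.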
arvi/binning.py

@@ -1,6 +1,6 @@
 import numpy as np

-from .setup_logger import logger
+from .setup_logger import setup_logger

 ###############################################################################
 # the following is mostly a copy of the scipy implementation of

@@ -390,6 +390,7 @@ def binRV(time, rv, err=None, stat='wmean', tstat='wmean', estat='addquad',


 def bin_ccf_mask(time, ccf_mask):
+    logger = setup_logger()
     indices = binRV(time, None, binning_indices=True)
     indices = np.r_[indices, time.size]
     bmask = []
arvi/config.py

@@ -8,6 +8,7 @@ def get_config_path():

 def get_config():
     config = configparser.ConfigParser()
+    config.add_section('config')
     if (path := get_config_path()).exists():
         config.read(path)
     return config

@@ -31,6 +32,10 @@ class config:
        'check_internet': False,
        # make all DACE requests without using a .dacerc file
        'request_as_public': False,
+       # enable from arvi import star_name
+       'fancy_import': True,
+       # use the 'dark_background' matplotlib theme
+       'dark_plots': False,
        # debug
        'debug': False,
    }

@@ -43,10 +48,16 @@ class config:
             # return {'return_self': 'help!'}
             return {}

-        if self.__user_config.has_option('config', name):
-            self.__conf[name] = self.__user_config.get('config', name)
+        try:
+            if self.__user_config.has_option('config', name):
+                value = self.__user_config.get('config', name)
+                value = True if value == 'True' else value
+                value = False if value == 'False' else value
+                self.__conf[name] = value

-        return self.__conf[name]
+            return self.__conf[name]
+        except KeyError:
+            raise KeyError(f"unknown config option '{name}'")

     def __setattr__(self, name, value):
         if name in config.__setters:

@@ -54,7 +65,7 @@ class config:
         else:
             if 'config' not in self.__user_config:
                 self.__user_config.add_section('config')
-            self.__user_config.set('config', name, value)
+            self.__user_config.set('config', name, str(value))
             save_config(self.__user_config)
             # raise NameError(f"unknown configuration name '{name}'")
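Note: the paired get/set changes above make option values survive the round trip through configparser, which only stores strings: __setattr__ now writes str(value), and __getattr__ maps the strings 'True'/'False' back to real booleans. A sketch of the round trip (assuming the option is persisted through the user-config branch shown):

    from arvi import config
    config.dark_plots = True      # saved to the config file as the string 'True'
    print(config.dark_plots)      # read back as the bool True, not the string 'True'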
arvi/dace_wrapper.py

@@ -2,20 +2,25 @@ import os
 import sys
 import tarfile
 import collections
-from functools import lru_cache
+from functools import lru_cache, partial
 from itertools import islice
 import numpy as np
-from dace_query import DaceClass
-from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
-from .setup_logger import logger
-from .utils import create_directory, all_logging_disabled, stdout_disabled, tqdm

+from .setup_logger import setup_logger
+from .utils import create_directory, all_logging_disabled, stdout_disabled, timer, tqdm
+
+
+def load_spectroscopy(user=None, verbose=True):
+    logger = setup_logger()
+    with all_logging_disabled():
+        from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
+        from dace_query import DaceClass

-def load_spectroscopy(user=None) -> SpectroscopyClass:
     from .config import config
     # requesting as public
     if config.request_as_public:
-        logger.warning('requesting DACE data as public')
+        if verbose:
+            logger.warning('requesting DACE data as public')
         with all_logging_disabled():
             dace = DaceClass(dace_rc_config_path='none')
         return SpectroscopyClass(dace_instance=dace)
@@ -39,21 +44,29 @@ def load_spectroscopy(user=None) -> SpectroscopyClass:
         logger.info(f'using credentials for user {user} in ~/.dacerc')
         return SpectroscopyClass(dace_instance=dace)
     # default
+    if not os.path.exists(os.path.expanduser('~/.dacerc')):
+        logger.warning('requesting DACE data as public (no .dacerc file found)')
     return default_Spectroscopy

-@lru_cache()
-def get_dace_id(star, verbose=True):
+
+@lru_cache(maxsize=1024)
+def get_dace_id(star, verbose=True, raise_error=False):
+    logger = setup_logger()
     filters = {"obj_id_catname": {"equal": [star]}}
     try:
         with all_logging_disabled():
             r = load_spectroscopy().query_database(filters=filters, limit=1)
-        return r['obj_id_daceid'][0]
+        return str(r['obj_id_daceid'][0])
     except KeyError:
         if verbose:
             logger.error(f"Could not find DACE ID for {star}")
+        if not raise_error:
+            return None
         raise ValueError from None

+
 def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
+    logger = setup_logger()
     arrays = []
     instruments = [str(i) for i in result.keys()]
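Note: get_dace_id now returns a string id, caches up to 1024 lookups, and by default returns None instead of raising when a star is unknown; callers that need the old behaviour pass raise_error=True. For example (hypothetical star names):

    get_dace_id('HD10700')                        # -> the DACE id string (cached)
    get_dace_id('NOT_A_STAR')                     # -> None, after logging an error
    get_dace_id('NOT_A_STAR', raise_error=True)   # -> raises ValueError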
@@ -62,7 +75,6 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):

         # select ESPRESSO mode, which is defined at the level of the pipeline
         if 'ESPRESSO' in inst:
-
             find_mode = [ESPRESSO_mode in pipe for pipe in pipelines]
             # the mode was not found
             if not any(find_mode):
@@ -75,11 +87,16 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
             i = [i for i, pipe in enumerate(pipelines) if ESPRESSO_mode in pipe][0]
             pipelines = [pipelines[i]]

+        # select NIRPS mode
+        if 'NIRPS' in inst:
+            if any(this_mode := [p for p in pipelines if NIRPS_mode in p]):
+                pipelines = this_mode
+
         if latest_pipeline:
             npipe = len(pipelines)
             if 'NIRPS' in inst and any(['LBL' in p for p in pipelines]):
                 # TODO: correctly load both CCF and LBL
-                pipelines = [pipelines[1]]
+                pipelines = [pipelines[0]]
             if 'HARPS' in inst and npipe > 1 and pipelines[1] == pipelines[0] + '-EGGS':
                 pipelines = pipelines[:2]
             else:
@@ -152,19 +169,20 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None, verbose=True):
     dict:
         dictionary with data from DACE
     """
-    Spectroscopy = load_spectroscopy(user)
+    Spectroscopy = load_spectroscopy(user, verbose)
     found_dace_id = False
-    try:
-        dace_id = get_dace_id(star, verbose=verbose)
-        found_dace_id = True
-    except ValueError as e:
-        if main_id is not None:
-            try:
-                dace_id = get_dace_id(main_id, verbose=verbose)
-                found_dace_id = True
-            except ValueError:
-                pass
-
+    with timer('dace_id query'):
+        try:
+            dace_id = get_dace_id(star, verbose=verbose, raise_error=True)
+            found_dace_id = True
+        except ValueError as e:
+            if main_id is not None:
+                try:
+                    dace_id = get_dace_id(main_id, verbose=verbose, raise_error=True)
+                    found_dace_id = True
+                except ValueError:
+                    pass
+
     if not found_dace_id:
         try:
             with all_logging_disabled():
@@ -176,10 +194,16 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None, verbose=True):
             msg = f'no {instrument} observations for {star}'
             raise ValueError(msg) from None

-    filters = {
-        "ins_name": {"contains": [instrument]},
-        "obj_id_daceid": {"contains": [dace_id]}
-    }
+    if (isinstance(instrument, str)):
+        filters = {
+            "ins_name": {"contains": [instrument]},
+            "obj_id_daceid": {"contains": [dace_id]}
+        }
+    elif (isinstance(instrument, (list, tuple, np.ndarray))):
+        filters = {
+            "ins_name": {"contains": instrument},
+            "obj_id_daceid": {"contains": [dace_id]}
+        }
     with all_logging_disabled():
         result = Spectroscopy.query_database(filters=filters)
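Note: instrument may now be a single name or a sequence of names; both branches build the same filter shape, differing only in how the "contains" list is filled. For illustration (hypothetical DACE id):

    # instrument='ESPRESSO' gives
    {"ins_name": {"contains": ["ESPRESSO"]}, "obj_id_daceid": {"contains": ["<dace-id>"]}}
    # instrument=['ESPRESSO', 'NIRPS'] gives
    {"ins_name": {"contains": ["ESPRESSO", "NIRPS"]}, "obj_id_daceid": {"contains": ["<dace-id>"]}}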
@@ -190,17 +214,22 @@ def get_observations_from_instrument(star, instrument, user=None, main_id=None, verbose=True):

     for inst in np.unique(result['ins_name']):
         mask1 = result['ins_name'] == inst
-        r[inst] = {}
+        r[str(inst)] = {}

-        for pipe in np.unique(result['ins_drs_version'][mask1]):
-            mask2 = mask1 & (result['ins_drs_version'] == pipe)
-            r[inst][pipe] = {}
+        key2 = 'ins_drs_version'
+        n_key2 = len(np.unique(result[key2][mask1]))
+        if len(np.unique(result['pub_bibcode'][mask1])) >= n_key2:
+            key2 = 'pub_bibcode'
+
+        for pipe in np.unique(result[key2][mask1]):
+            mask2 = mask1 & (result[key2] == pipe)
+            r[str(inst)][str(pipe)] = {}

             for ins_mode in np.unique(result['ins_mode'][mask2]):
                 mask3 = mask2 & (result['ins_mode'] == ins_mode)
                 _nan = np.full(mask3.sum(), np.nan)

-                r[inst][pipe][ins_mode] = {
+                r[str(inst)][str(pipe)][str(ins_mode)] = {
                     'texp': result['texp'][mask3],
                     'bispan': result['spectro_ccf_bispan'][mask3],
                     'bispan_err': result['spectro_ccf_bispan_err'][mask3],

@@ -212,7 +241,9 @@
                     'rv': result['spectro_ccf_rv'][mask3],
                     'rv_err': result['spectro_ccf_rv_err'][mask3],
                     'berv': result['spectro_cal_berv'][mask3],
-                    'ccf_noise': _nan,
+                    'ccf_noise': np.sqrt(
+                        np.square(result['spectro_ccf_rv_err'][mask3]) - np.square(result['spectro_cal_drift_noise'][mask3])
+                    ),
                     'rhk': result['spectro_analysis_rhk'][mask3],
                     'rhk_err': result['spectro_analysis_rhk_err'][mask3],
                     'contrast': result['spectro_ccf_contrast'][mask3],
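Note: the placeholder NaN column is replaced by an estimate of the CCF noise, obtained by removing the drift-noise term from the total RV uncertainty in quadrature, i.e. ccf_noise = sqrt(rv_err**2 - drift_noise**2); for instance, rv_err = 1.0 m/s with drift_noise = 0.6 m/s gives ccf_noise = 0.8 m/s.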
@@ -254,8 +285,10 @@
     return r

 def get_observations(star, instrument=None, user=None, main_id=None, verbose=True):
+    logger = setup_logger()
     if instrument is None:
-        Spectroscopy = load_spectroscopy(user)
+        Spectroscopy = load_spectroscopy(user, verbose)
+
     try:
         with stdout_disabled(), all_logging_disabled():
             result = Spectroscopy.get_timeseries(target=star,
@@ -288,12 +321,14 @@ def get_observations(star, instrument=None, user=None, main_id=None, verbose=True):
         result[inst] = dict(result[inst])
     #

-    instruments = list(result.keys())
+    instruments = list(map(str, result.keys()))

     if instrument is not None:
         # select only the provided instrument (if it's there)
-        instruments = [inst for inst in instruments if instrument in inst]
-
+        if (isinstance(instrument, str)):
+            instruments = [inst for inst in instruments if instrument in inst]
+        elif (isinstance(instrument, list)):
+            instruments = [inst for inst in instruments if any(i in inst for i in instrument)]
     if len(instruments) == 0:
         if instrument is None:
             msg = f'no observations for {star}'
@@ -317,38 +352,57 @@
     # else:
     #     return -1

-    # sort pipelines, must be extra careful with HARPS/HARPN pipeline version numbers
-    # got here with the help of DeepSeek
-    from re import match
-    def custom_sort_key(s):
-        s = s[0]
-        print(s)
-        # Check for version number pattern (e.g., 3.2.5 or 3.2.5-EGGS)
-        version_match = match(r'^(\d+(?:\.\d+)*)(?:-(.*))?$', s)
-        if version_match:
-            version_parts = tuple(map(int, version_match.group(1).split('.')))
-            suffix = version_match.group(2)
-
-            if suffix is not None:
-                # Suffixed versions: sort in ascending order (3.2.5-HR11 < 3.3.1-HR11)
-                return (0, 0, version_parts, suffix)
-            else:
-                # Unsuffixed versions: sort in descending order (3.5 > 3.2.5)
-                return (0, 1, tuple(-x for x in version_parts))
-
-        # Check for scientific reference pattern (e.g., 2004A&A...)
-        year_match = match(r'^(\d{4})', s)
-        if year_match:
-            year = int(year_match.group(1))
-            return (1, year)
-
-        # For all other strings, sort alphabetically
-        return (2, s)
-
-    # from functools import cmp_to_key
+    # # sort pipelines, must be extra careful with HARPS/HARPN pipeline version numbers
+    # # got here with the help of DeepSeek
+    # # from functools import cmp_to_key
+    # from re import match
+    # def custom_sort_key(s):
+    #     s = s[0]
+    #     # Check for version number pattern (e.g., 3.2.5 or 3.2.5-EGGS)
+    #     version_match = match(r'^(\d+(?:\.\d+)*)(?:[-\s](.*))?$', s)
+    #     if version_match:
+    #         version_parts = list(map(int, version_match.group(1).split('.')))
+    #         if len(version_parts) == 2:
+    #             version_parts.insert(1, -1)
+    #         # if version_match.group(2) and 'LBL' in version_match.group(2):
+    #         #     version_parts.append(-1)
+    #         # else:
+    #         #     version_parts.append(0)
+    #         if version_match.group(2) is None:
+    #             version_parts.append('')
+    #         else:
+    #             version_parts.append(version_match.group(2))
+    #         return (0, 1, version_parts)
+    #     # Check for scientific reference pattern (e.g., 2004A&A...)
+    #     year_match = match(r'^(\d{4})', s)
+    #     if year_match:
+    #         year = int(year_match.group(1))
+    #         return (1, year)
+    #     # For all other strings, sort alphabetically
+    #     return (2, s)
+
+    def custom_key(val, strip_EGGS=False):
+        if strip_EGGS:
+            val = val.replace('-EGGS', '').replace(' EGGS', '')
+        key = 0
+        key -= 1 if '3.5' in val else 0
+        key -= 1 if 'EGGS' in val else 0
+        key -= 1 if ('UHR' in val or 'MR' in val) else 0
+        key -= 1 if 'LBL' in val else 0
+        return str(key) if key != 0 else val

     new_result = {}
     for inst in instruments:
-        new_result[inst] = dict(sorted(result[inst].items(), key=custom_sort_key, reverse=True))
+        # new_result[inst] = dict(
+        #     sorted(result[inst].items(), key=custom_sort_key, reverse=True)
+        # )
+        if all(['EGGS' in k for k in result[inst].keys()]):
+            custom_key = partial(custom_key, strip_EGGS=True)
+        # WARNING: not the same as reverse=True (not sure why)
+        sorted_keys = sorted(result[inst].keys(), key=custom_key)[::-1]
+        new_result[inst] = {}
+        for key in sorted_keys:
+            new_result[inst][key] = result[inst][key]

     if verbose:
         logger.info('RVs available from')
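Note: the new custom_key maps each pipeline name to a sortable string, collapsing names that match one of the "special" markers ('3.5', 'EGGS', 'UHR'/'MR', 'LBL') to a negative count. For example (values follow directly from the function above; '2.2.8' is a hypothetical plain version string):

    custom_key('3.5')        # -> '-1'
    custom_key('3.5 EGGS')   # -> '-2'
    custom_key('2.2.8')      # -> '2.2.8'  (no marker: sorted by the name itself)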
@@ -382,7 +436,7 @@ def check_existing(output_directory, files, type):
     ]

     if type == 'S2D':
-        existing = [
+        existing += [
             f.partition('.fits')[0] for f in os.listdir(output_directory)
             if 'e2ds' in f
         ]
@@ -446,6 +500,7 @@ def extract_fits(output_directory, filename=None):


 def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_level=None, verbose=True):
+    logger = setup_logger()
     terminations = {
         'CCF': '_CCF_A.fits',
         'S1D': '_S1D_A.fits',
@@ -491,21 +546,23 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=None,
 def do_download_filetype(type, raw_files, output_directory, clobber=False, user=None,
                          verbose=True, chunk_size=20, parallel_limit=30):
     """ Download CCFs / S1Ds / S2Ds from DACE """
+    logger = setup_logger()
     raw_files = np.atleast_1d(raw_files)
+    raw_files_original = raw_files.copy()

     create_directory(output_directory)

     # check existing files to avoid re-downloading
     if not clobber:
         raw_files = check_existing(output_directory, raw_files, type)
-
+
     n = raw_files.size

     # any file left to download?
     if n == 0:
         if verbose:
             logger.info('no files to download')
-        return
+        return list(map(os.path.basename, raw_files_original))

     # avoid an empty chunk
     if chunk_size > n:
@@ -523,7 +580,9 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False, user=None,

     if n < parallel_limit:
         iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
-        for files in tqdm(iterator, total=len(iterator)):
+        if len(iterator) > 1:
+            iterator = tqdm(iterator, total=len(iterator))
+        for files in iterator:
             download(files, type, output_directory, quiet=False, user=user)
             extract_fits(output_directory)
@@ -552,6 +611,7 @@

     sys.stdout.flush()
     logger.info('extracted .fits files')
+    return list(map(os.path.basename, raw_files_original))


 # def do_download_s1d(raw_files, output_directory, clobber=False, verbose=True):
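Note: with the two return statements added above, do_download_filetype now returns the basenames of every originally requested file on both exit paths, whether the files were just downloaded or already present on disk. A sketch of the calling pattern this enables (hypothetical arguments):

    files = do_download_filetype('CCF', raw_files, 'downloads', clobber=False)
    # 'files' lists all requested basenames, including those skipped as existing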
arvi/exofop_wrapper.py (new file)

@@ -0,0 +1,62 @@
+import csv
+import requests
+import time
+import importlib.resources as resources
+import numpy as np
+
+from .setup_logger import setup_logger
+
+def get_toi_list(verbose=True):
+    logger = setup_logger()
+    toi_list = resources.files('arvi') / 'data' / 'exofop_toi_list.csv'
+    now = time.time()
+    download = not toi_list.exists() or toi_list.stat().st_mtime < now - 48 * 60 * 60
+    if download:
+        if verbose:
+            logger.info('Downloading exofop TOI list (can take a while)...')
+        r = requests.get('https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=csv')
+        with open(toi_list, 'wb') as f:
+            f.write(r.content)
+    return toi_list
+
+class exofop:
+    def __init__(self, star: str, verbose=True, _debug=False):
+        self.star = star
+        self.verbose = verbose
+
+        toi_list = get_toi_list(verbose=verbose)
+        tsv = ('|'.join(i) for i in csv.reader(open(toi_list, encoding='utf8')))
+        self.data = np.genfromtxt(tsv, delimiter='|',
+                                  names=True, encoding='utf8', dtype=None)
+
+
+        try:
+            if self.star.startswith('TIC'):
+                self.tic = self.star
+                w = self.data['TIC_ID'] == int(self.star[3:])
+                self.toi = 'TOI-' + str(int(self.data['TOI'][w][0]))
+            else:
+                toi = self.star.replace('TOI-', '')
+                toi = toi if toi.endswith('.01') else toi + '.01'
+                toi_float = float(toi)
+                if toi_float not in self.data['TOI']:
+                    raise ValueError
+                w = self.data['TOI'] == toi_float
+                self.tic = 'TIC' + str(int(self.data['TIC_ID'][w][0]))
+                self.toi = self.star
+        except ValueError:
+            raise ValueError(f'{self.star} not found in exofop TOI list')
+        else:
+            self.ra = str(self.data['RA'][w][0])
+            self.dec = str(self.data['Dec'][w][0])
+
+            self.epoch = float(self.data['Epoch_BJD'][w][0])
+            self.period = float(self.data['Period_days'][w][0])
+            if self.period == 0.0:
+                self.period = np.nan
+            self.duration = float(self.data['Duration_hours'][w][0])
+            self.depth = float(self.data['Depth_ppm'][w][0])
+
+
+    def __repr__(self):
+        return f'{self.star} (TIC={self.tic}, epoch={self.epoch:.3f}, period={self.period:.3f})'
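Note: the new exofop wrapper resolves a TOI or TIC designation against the ExoFOP TOI list, which it caches under arvi/data and refreshes after 48 hours. A usage sketch (hypothetical target; the first call needs network access to download the list):

    from arvi.exofop_wrapper import exofop
    candidate = exofop('TOI-1064')
    print(candidate.tic, candidate.epoch, candidate.period, candidate.duration)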
arvi/extra_data.py

@@ -3,8 +3,7 @@ from glob import glob
 import json

 from numpy import full
-from .setup_logger import logger
-from . import timeseries
+from .setup_logger import setup_logger

 refs = {
     'HD86226': 'Teske et al. 2020 (AJ, 160, 2)'

@@ -12,16 +11,21 @@ refs = {

 def get_extra_data(star, instrument=None, path=None, verbose=True,
                    check_for_kms=True):
+    from . import timeseries
+    logger = setup_logger()
     if path is None:
         path = os.path.dirname(__file__)
         path = os.path.join(path, 'data', 'extra')
+        metadata = json.load(open(os.path.join(path, 'metadata.json'), 'r'))
+        # print(metadata)
+    else:
+        metadata = {}

-    metadata = json.load(open(os.path.join(path, 'metadata.json'), 'r'))
-    # print(metadata)
-
-    files = glob(os.path.join(path, star.replace(' ', '') + '*.rdb'))
+    files = glob(os.path.join(path, star + '*.rdb'))
+    files += glob(os.path.join(path, star.replace(' ', '') + '*.rdb'))
     files = [f for f in files if os.path.isfile(f)]
-    files = [f for f in files if not os.path.basename(f).endswith('.zip')]
+    files = [f for f in files if not f.endswith('_actin.rdb')]
+    files = list(set(files))

     if len(files) == 0:
         raise FileNotFoundError