arvi-0.1.11-py3-none-any.whl → arvi-0.1.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

arvi/HZ.py CHANGED
@@ -1,4 +1,3 @@
-from collections import namedtuple
 import numpy as np
 from astropy.constants import G
 from astropy import units
arvi/__init__.py CHANGED
@@ -8,6 +8,7 @@ def __getattr__(name: str):
     if name in (
         '_ipython_canary_method_should_not_exist_',
         '_repr_mimebundle_',
+        '__wrapped__'
     ):
         return
 
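The `__wrapped__` entry extends the module-level `__getattr__` hook (PEP 562) in `arvi/__init__.py`. Introspection helpers can probe any object for `__wrapped__` (for example, `inspect.unwrap` follows chains of that attribute), and answering such probes early keeps them from falling through to the rest of the hook. A minimal sketch of the mechanism, with names taken from the diff and the fall-through behaviour assumed:

```python
# sketch of a PEP 562 module-level __getattr__, as in arvi/__init__.py
def __getattr__(name: str):
    # attribute names probed by IPython and by introspection tools;
    # answer them cheaply instead of treating them as real lookups
    if name in (
        '_ipython_canary_method_should_not_exist_',
        '_repr_mimebundle_',
        '__wrapped__',
    ):
        return None
    # ... anything else is handled by the rest of the hook (assumed)
    raise AttributeError(name)
```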
@@ -0,0 +1,70 @@
+import os
+import sys
+from matplotlib import pyplot as plt
+
+try:
+    from astroARIADNE.star import Star
+    from astroARIADNE.fitter import Fitter
+except ImportError:
+    print('This module requires astroARIADNE. Install with `pip install astroARIADNE`')
+    sys.exit(0)
+
+
+def run_ariadne(self, fit=True, plot=True, priors={},
+                models = ('phoenix', 'btsettl', 'btnextgen', 'btcond', 'kurucz', 'ck04'),
+                nlive=300, dlogz=1, threads=6, dynamic=False, **kwargs):
+    if hasattr(self, 'gaia'):
+        s = Star(self.star, self.gaia.ra, self.gaia.dec, g_id=self.gaia.dr3_id,
+                 search_radius=1)
+    else:
+        s = Star(self.star, self.simbad.ra, self.simbad.dec, g_id=self.simbad.gaia_id,
+                 search_radius=1)
+
+    out_folder = f'{self.star}_ariadne'
+
+    setup = dict(engine='dynesty', nlive=nlive, dlogz=dlogz,
+                 bound='multi', sample='auto', threads=threads, dynamic=dynamic)
+    setup = list(setup.values())
+
+    f = Fitter()
+    f.star = s
+    f.setup = setup
+    f.av_law = 'fitzpatrick'
+    f.out_folder = out_folder
+    f.bma = True
+    f.models = models
+    f.n_samples = 10_000
+
+    f.prior_setup = {
+        'teff': priors.get('teff', ('default')),
+        'logg': ('default'),
+        'z': priors.get('feh', ('default')),
+        'dist': ('default'),
+        'rad': ('default'),
+        'Av': ('default')
+    }
+
+    if fit:
+        f.initialize()
+        f.fit_bma()
+
+    if plot:
+        from pkg_resources import resource_filename
+        from astroARIADNE.plotter import SEDPlotter
+        modelsdir = resource_filename('astroARIADNE', 'Datafiles/models')
+        artist = SEDPlotter(os.path.join(out_folder, 'BMA.pkl'), out_folder, models_dir=modelsdir)
+
+        artist.plot_SED_no_model()
+        try:
+            artist.plot_SED()
+        except FileNotFoundError as e:
+            print('No model found:', e)
+        except IndexError as e:
+            print('Error!')
+        artist.plot_bma_hist()
+        artist.plot_bma_HR(10)
+        artist.plot_corner()
+        plt.close('all')
+        return s, f, artist
+
+    return s, f
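The new module added here wraps astroARIADNE SED fitting. Since `run_ariadne` takes `self` and reads `self.star`, `self.gaia`, and `self.simbad`, it is presumably attached as a method of arvi's RV object; a hypothetical usage sketch, with the star name, prior format, and method binding all illustrative assumptions:

```python
from arvi import RV

s = RV('HD10180')                # RV time series with .simbad/.gaia metadata
star, fitter = s.run_ariadne(    # with fit=True, plot=False -> returns (s, f)
    fit=True, plot=False,
    priors={'teff': ('normal', 6000, 100)},  # astroARIADNE-style prior (assumed)
    nlive=500, threads=4,
)
```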
arvi/binning.py CHANGED
@@ -1,6 +1,9 @@
 import numpy as np
 from numpy.testing import suppress_warnings
 
+from scipy.stats import binned_statistic as old_binned_statistic,\
+                        binned_statistic_dd as old_binned_statistic_dd
+
 from .setup_logger import logger
 
 ###############################################################################
@@ -212,8 +215,6 @@ def binned_statistic_dd(sample, values, statistic='mean', bins=10, range=None,
 
 
 # put back the documentation
-from scipy.stats import binned_statistic as old_binned_statistic,\
-                        binned_statistic_dd as old_binned_statistic_dd
 doc1 = old_binned_statistic.__doc__
 doc2 = old_binned_statistic_dd.__doc__
 binned_statistic.__doc__ = doc1
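The change moves the scipy import from the bottom of the module to the top; the docstring-reuse pattern is unchanged. In outline, a sketch with arvi's reimplementation elided:

```python
from scipy.stats import binned_statistic as old_binned_statistic

def binned_statistic(x, values, statistic='mean', bins=10, range=None):
    ...  # arvi's wrapper (elided; see arvi/binning.py)

# reuse scipy's documentation for the wrapper of the same name
binned_statistic.__doc__ = old_binned_statistic.__doc__
```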
arvi/config.py CHANGED
@@ -1,2 +1,14 @@
+# whether to return self from (some) RV methods
 return_self = False
+
+# whether to check internet connection before querying DACE
 check_internet = False
+
+# make all DACE requests without using a .dacerc file
+request_as_public = False
+
+# whether to adjust instrument means before gls by default
+adjust_means_gls = True
+
+# debug
+debug = False
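These module-level flags are read at call time (for example, `load_spectroscopy` below does `from .config import request_as_public` inside the function body), so they can be toggled at runtime before querying DACE. A sketch:

```python
from arvi import config

config.request_as_public = True   # query DACE anonymously, ignoring any .dacerc
config.adjust_means_gls = False   # keep per-instrument means in gls periodograms
```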
arvi/dace_wrapper.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import tarfile
 import collections
+from functools import lru_cache
 import numpy as np
 from dace_query import DaceClass
 from dace_query.spectroscopy import SpectroscopyClass, Spectroscopy as default_Spectroscopy
@@ -9,12 +10,28 @@ from .utils import create_directory, all_logging_disabled, stdout_disabled, tqdm
 
 
 def load_spectroscopy() -> SpectroscopyClass:
+    from .config import request_as_public
+    if request_as_public:
+        with all_logging_disabled():
+            dace = DaceClass(dace_rc_config_path='none')
+        return SpectroscopyClass(dace_instance=dace)
     if 'DACERC' in os.environ:
         dace = DaceClass(dace_rc_config_path=os.environ['DACERC'])
         return SpectroscopyClass(dace_instance=dace)
     # elif os.path.exists(os.path.expanduser('~/.dacerc')):
     return default_Spectroscopy
 
+@lru_cache()
+def get_dace_id(star):
+    filters = {"obj_id_catname": {"contains": [star]}}
+    try:
+        with stdout_disabled(), all_logging_disabled():
+            r = load_spectroscopy().query_database(filters=filters, limit=1)
+        return r['obj_id_daceid'][0]
+    except KeyError:
+        logger.error(f"Could not find DACE ID for {star}")
+        raise ValueError from None
+
 def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
     arrays = []
     instruments = list(result.keys())
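`get_dace_id` is memoised with `functools.lru_cache`, so repeated loads of the same target reuse a single DACE-id lookup, and the standard cache introspection applies. A sketch, with the star name illustrative and network access assumed:

```python
from arvi.dace_wrapper import get_dace_id

dace_id = get_dace_id('HD10180')   # network query on the first call
dace_id = get_dace_id('HD10180')   # answered from the cache
print(get_dace_id.cache_info())    # CacheInfo(hits=1, misses=1, ...)
get_dace_id.cache_clear()          # force a fresh lookup next time
```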
@@ -34,7 +51,7 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
             i = [i for i, pipe in enumerate(pipelines) if ESPRESSO_mode in pipe][0]
             pipelines = [pipelines[i]]
         else:
-            if verbose:
+            if len(pipelines) > 1 and verbose:
                 logger.warning(f'no observations for requested ESPRESSO mode ({ESPRESSO_mode})')
 
         if latest_pipeline:
@@ -68,19 +85,118 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='HE', verbose=True):
 
     return arrays
 
-def get_observations(star, instrument=None, verbose=True):
-    Spectroscopy = load_spectroscopy()
+def get_observations_from_instrument(star, instrument, main_id=None):
+    """ Query DACE for all observations of a given star and instrument
+
+    Args:
+        star (str): name of the star
+        instrument (str): instrument name
+        main_id (str, optional): Simbad main id of target to query DACE id. Defaults to None.
+
+    Raises:
+        ValueError: If query for DACE id fails
+
+    Returns:
+        dict: dictionary with data from DACE
+    """
     try:
-        with stdout_disabled(), all_logging_disabled():
-            result = Spectroscopy.get_timeseries(target=star,
-                                                 sorted_by_instrument=True,
-                                                 output_format='numpy')
-    except TypeError:
-        if instrument is None:
-            msg = f'no observations for {star}'
+        dace_id = get_dace_id(star)
+    except ValueError as e:
+        if main_id is not None:
+            dace_id = get_dace_id(main_id)
         else:
+            raise e
+
+    Spectroscopy = load_spectroscopy()
+    filters = {
+        "ins_name": {"contains": [instrument]},
+        "obj_id_daceid": {"contains": [dace_id]}
+    }
+    with stdout_disabled(), all_logging_disabled():
+        result = Spectroscopy.query_database(filters=filters)
+
+    if len(result) == 0:
+        raise ValueError
+
+    r = {}
+    for inst in np.unique(result['ins_name']):
+        mask1 = result['ins_name'] == inst
+        r[inst] = {}
+        for pipe in np.unique(result['ins_drs_version'][mask1]):
+            mask2 = mask1 & (result['ins_drs_version'] == pipe)
+            ins_mode = np.unique(result['ins_mode'][mask2])[0]
+            _nan = np.full(mask2.sum(), np.nan)
+            r[inst][pipe] = {
+                ins_mode: {
+                    'texp': result['texp'][mask2],
+                    'bispan': result['spectro_ccf_bispan'][mask2],
+                    'bispan_err': result['spectro_ccf_bispan_err'][mask2],
+                    'drift_noise': result['spectro_cal_drift_noise'][mask2],
+                    'rjd': result['obj_date_bjd'][mask2],
+                    'cal_therror': _nan,
+                    'fwhm': result['spectro_ccf_fwhm'][mask2],
+                    'fwhm_err': result['spectro_ccf_fwhm_err'][mask2],
+                    'rv': result['spectro_ccf_rv'][mask2],
+                    'rv_err': result['spectro_ccf_rv_err'][mask2],
+                    'berv': result['spectro_cal_berv'][mask2],
+                    'ccf_noise': _nan,
+                    'rhk': result['spectro_analysis_rhk'][mask2],
+                    'rhk_err': result['spectro_analysis_rhk_err'][mask2],
+                    'contrast': result['spectro_ccf_contrast'][mask2],
+                    'contrast_err': result['spectro_ccf_contrast_err'][mask2],
+                    'cal_thfile': result['spectro_cal_thfile'][mask2],
+                    'spectroFluxSn50': result['spectro_flux_sn50'][mask2],
+                    'protm08': result['spectro_analysis_protm08'][mask2],
+                    'protm08_err': result['spectro_analysis_protm08_err'][mask2],
+                    'caindex': result['spectro_analysis_ca'][mask2],
+                    'caindex_err': result['spectro_analysis_ca_err'][mask2],
+                    'pub_reference': result['pub_ref'][mask2],
+                    'drs_qc': result['spectro_drs_qc'][mask2],
+                    'haindex': result['spectro_analysis_halpha'][mask2],
+                    'haindex_err': result['spectro_analysis_halpha_err'][mask2],
+                    'protn84': result['spectro_analysis_protn84'][mask2],
+                    'protn84_err': result['spectro_analysis_protn84_err'][mask2],
+                    'naindex': result['spectro_analysis_na'][mask2],
+                    'naindex_err': result['spectro_analysis_na_err'][mask2],
+                    'snca2': _nan,
+                    'mask': result['spectro_ccf_mask'][mask2],
+                    'public': result['public'][mask2],
+                    'spectroFluxSn20': result['spectro_flux_sn20'][mask2],
+                    'sindex': result['spectro_analysis_smw'][mask2],
+                    'sindex_err': result['spectro_analysis_smw_err'][mask2],
+                    'drift_used': _nan,
+                    'ccf_asym': result['spectro_ccf_asym'][mask2],
+                    'ccf_asym_err': result['spectro_ccf_asym_err'][mask2],
+                    'date_night': result['date_night'][mask2],
+                    'raw_file': result['file_rootpath'][mask2],
+                    'prog_id': result['prog_id'][mask2],
+                    'th_ar': result['th_ar'][mask2],
+                    'th_ar1': result['th_ar1'][mask2],
+                    'th_ar2': result['th_ar2'][mask2],
+                }
+            }
+    return r
+
+def get_observations(star, instrument=None, main_id=None, verbose=True):
+    if instrument is None:
+        Spectroscopy = load_spectroscopy()
+        try:
+            with stdout_disabled(), all_logging_disabled():
+                result = Spectroscopy.get_timeseries(target=star,
+                                                     sorted_by_instrument=True,
+                                                     output_format='numpy')
+        except TypeError:
+            if instrument is None:
+                msg = f'no observations for {star}'
+            else:
+                msg = f'no {instrument} observations for {star}'
+            raise ValueError(msg) from None
+    else:
+        try:
+            result = get_observations_from_instrument(star, instrument, main_id, verbose)
+        except ValueError:
             msg = f'no {instrument} observations for {star}'
-        raise ValueError(msg) from None
+            raise ValueError(msg) from None
 
     # defaultdict --> dict
     if isinstance(result, collections.defaultdict):
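With an explicit `instrument`, `get_observations` now goes through DACE's database API (`query_database`) instead of `get_timeseries`, and reshapes the flat result into the same nested `{instrument: {pipeline: {mode: arrays}}}` layout the rest of arvi expects. (Note that the instrument path passes `verbose` as a fourth positional argument, which the `get_observations_from_instrument` signature above does not declare.) A hypothetical usage sketch:

```python
from arvi.dace_wrapper import get_observations

result = get_observations('HD10180', instrument='ESPRESSO')  # star name illustrative
for inst, pipelines in result.items():
    for pipe, modes in pipelines.items():
        for mode, arrays in modes.items():
            print(inst, pipe, mode, arrays['rv'].size, 'RVs')
```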
@@ -181,9 +297,9 @@ def check_existing(output_directory, files, type):
 def download(files, type, output_directory):
     """ Download files from DACE """
     Spectroscopy = load_spectroscopy()
-    # with stdout_disabled(), all_logging_disabled():
-    Spectroscopy.download_files(files, file_type=type.lower(),
-                                output_directory=output_directory)
+    with stdout_disabled(), all_logging_disabled():
+        Spectroscopy.download_files(files, file_type=type.lower(),
+                                    output_directory=output_directory)
 
 def extract_fits(output_directory):
     """ Extract fits files from tar.gz file """
@@ -201,6 +317,49 @@ def extract_fits(output_directory):
     return files
 
 
+def do_symlink_filetype(type, raw_files, output_directory, clobber=False, top_level=None, verbose=True):
+    terminations = {
+        'CCF': '_CCF_A.fits',
+        'S1D': '_S1D_A.fits',
+        'S2D': '_S2D_A.fits',
+    }
+
+    create_directory(output_directory)
+
+    raw_files = np.atleast_1d(raw_files)
+
+    # check existing files
+    if not clobber:
+        raw_files = check_existing(output_directory, raw_files, type)
+
+    n = raw_files.size
+
+    # any file left?
+    if n == 0:
+        if verbose:
+            logger.info('no files to symlink')
+        return
+
+    if verbose:
+        msg = f"symlinking {n} {type}s into '{output_directory}'..."
+        logger.info(msg)
+
+    for file in tqdm(raw_files):
+        if top_level is not None:
+            top = file.split('/')[0] + '/'
+            if not top_level.endswith('/'):
+                top_level = top_level + '/'
+            file = file.replace(top, top_level)
+
+        file = file.replace('.fits', terminations[type])
+
+        if os.path.exists(file):
+            os.symlink(file, os.path.join(output_directory, os.path.basename(file)))
+            # print(file, os.path.join(output_directory, os.path.basename(file)))
+        else:
+            logger.warning(f'file not found: {file}')
+
+
 def do_download_filetype(type, raw_files, output_directory, clobber=False,
                          verbose=True, chunk_size=20):
     """ Download CCFs / S1Ds / S2Ds from DACE """
@@ -212,21 +371,30 @@ def do_download_filetype(type, raw_files, output_directory, clobber=False,
     if not clobber:
         raw_files = check_existing(output_directory, raw_files, type)
 
+    n = raw_files.size
+
     # any file left to download?
-    if raw_files.size == 0:
+    if n == 0:
         if verbose:
             logger.info('no files to download')
         return
 
-    if verbose:
-        n = raw_files.size
-        logger.info(f"Downloading {n} {type}s into '{output_directory}'...")
-
     # avoid an empty chunk
     if chunk_size > n:
         chunk_size = n
 
-    for files in tqdm(zip(*(iter(raw_files),) * chunk_size), total=n // chunk_size):
+    if verbose:
+        if chunk_size < n:
+            msg = f"Downloading {n} {type}s "
+            msg += f"(in chunks of {chunk_size}) "
+            msg += f"into '{output_directory}'..."
+            logger.info(msg)
+        else:
+            msg = f"Downloading {n} {type}s into '{output_directory}'..."
+            logger.info(msg)
+
+    iterator = [raw_files[i:i + chunk_size] for i in range(0, n, chunk_size)]
+    for files in tqdm(iterator, total=len(iterator)):
         download(files, type, output_directory)
     extract_fits(output_directory)
 
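The rewritten chunking also fixes a subtle loss: grouping with `zip(*(iter(files),) * chunk_size)` silently drops a final partial chunk, while list slicing keeps it. A quick check:

```python
files = list(range(7))
chunk = 3

old = list(zip(*(iter(files),) * chunk))
# [(0, 1, 2), (3, 4, 5)]         file 6 would never be downloaded

new = [files[i:i + chunk] for i in range(0, len(files), chunk)]
# [[0, 1, 2], [3, 4, 5], [6]]    remainder preserved
```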
@@ -155,4 +155,7 @@
 2021-01-28 r.ESPRE.2021-01-29T01:46:34.785 106.21M2.002 2897966
 2021-01-28 r.ESPRE.2021-01-29T02:06:35.596 106.21M2.002 2899965
 2021-01-31 r.ESPRE.2021-02-01T01:34:18.507 106.21M2.001 2899996
-2019-12-22 r.ESPRE.2019-12-23T00:30:55.824 1104.C-0350(A) 2496531
+2019-12-22 r.ESPRE.2019-12-23T00:30:55.824 1104.C-0350(A) 2496531
+# additional frames with ADC2 absent or set to zero
+2021-04-06 r.ESPRE.2021-04-06T23:30:53.085 1104.C-0350 3026604
+2021-04-09 r.ESPRE.2021-04-10T00:40:18.132 1104.C-0350 3027219
arvi/gaia_wrapper.py ADDED
@@ -0,0 +1,98 @@
+import os
+from io import StringIO
+from csv import DictReader
+import requests
+
+from astropy.coordinates import SkyCoord
+
+DATA_PATH = os.path.dirname(__file__)
+DATA_PATH = os.path.join(DATA_PATH, 'data')
+
+QUERY = """
+SELECT TOP 20 gaia_source.designation,gaia_source.source_id,gaia_source.ra,gaia_source.dec,gaia_source.parallax,gaia_source.pmra,gaia_source.pmdec,gaia_source.ruwe,gaia_source.phot_g_mean_mag,gaia_source.bp_rp,gaia_source.radial_velocity,gaia_source.phot_variable_flag,gaia_source.non_single_star,gaia_source.has_xp_continuous,gaia_source.has_xp_sampled,gaia_source.has_rvs,gaia_source.has_epoch_photometry,gaia_source.has_epoch_rv,gaia_source.has_mcmc_gspphot,gaia_source.has_mcmc_msc,gaia_source.teff_gspphot,gaia_source.logg_gspphot,gaia_source.mh_gspphot,gaia_source.distance_gspphot,gaia_source.azero_gspphot,gaia_source.ag_gspphot,gaia_source.ebpminrp_gspphot
+FROM gaiadr3.gaia_source
+WHERE
+    CONTAINS(
+        POINT('ICRS',gaiadr3.gaia_source.ra,gaiadr3.gaia_source.dec),
+        CIRCLE(
+            'ICRS',
+            COORD1(EPOCH_PROP_POS({ra},{dec},{plx},{pmra},{pmdec},{rv},2000,2016.0)),
+            COORD2(EPOCH_PROP_POS({ra},{dec},{plx},{pmra},{pmdec},{rv},2000,2016.0)),
+            0.001388888888888889)
+    )=1
+"""
+
+def run_query(query):
+    url = 'https://gea.esac.esa.int/tap-server/tap/sync'
+    data = dict(query=query, request='doQuery', lang='ADQL', format='csv')
+    try:
+        response = requests.post(url, data=data, timeout=10)
+    except requests.ReadTimeout as err:
+        raise IndexError(err)
+    except requests.ConnectionError as err:
+        raise IndexError(err)
+    return response.content.decode()
+
+def parse_csv(csv):
+    reader = DictReader(StringIO(csv))
+    return list(reader)
+
+
+class gaia:
+    """
+    A very simple wrapper around a TAP query to gaia for a given target. This
+    class simply runs a few TAP queries and stores the result as attributes.
+
+    Attributes:
+        ra (float): right ascension
+        dec (float): declination
+        coords (SkyCoord): coordinates as a SkyCoord object
+        dr3_id (int): Gaia DR3 identifier
+        plx (float): parallax
+        radial_velocity (float): radial velocity
+    """
+    def __init__(self, star: str, simbad=None):
+        """
+        Args:
+            star (str): The name of the star to query simbad
+        """
+        self.star = star
+
+        if simbad is None:
+            from .simbad_wrapper import simbad as Simbad
+            simbad = Simbad(star)
+
+        ra = simbad.ra
+        dec = simbad.dec
+        plx = simbad.plx
+        pmra = simbad.pmra
+        pmdec = simbad.pmdec
+        rv = simbad.rvz_radvel
+        args = dict(ra=ra, dec=dec, plx=plx, pmra=pmra, pmdec=pmdec, rv=rv)
+
+        try:
+            table1 = run_query(query=QUERY.format(**args))
+            results = parse_csv(table1)[0]
+        except IndexError:
+            raise ValueError(f'Gaia query for {star} failed')
+
+        try:
+            self.dr3_id = int(results['source_id'])
+        except KeyError:
+            raise ValueError(f'Gaia query for {star} failed')
+
+        self.ra = float(results['ra'])
+        self.dec = float(results['dec'])
+        self.pmra = float(results['pmra'])
+        self.pmdec = float(results['pmdec'])
+        self.coords = SkyCoord(self.ra, self.dec, unit='deg')
+        self.plx = float(results['parallax'])
+        try:
+            self.radial_velocity = float(results['radial_velocity'])
+        except ValueError:
+            self.radial_velocity = None
+
+        return
+
+    def __repr__(self):
+        return f'{self.star} (DR3 id={self.dr3_id})'
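A hypothetical usage sketch for the new wrapper (requires network access to the Gaia TAP endpoint; the star name is illustrative):

```python
from arvi.gaia_wrapper import gaia

g = gaia('HD 10180')      # resolves coordinates via simbad, then queries Gaia DR3
print(g)                  # HD 10180 (DR3 id=...)
print(g.coords, g.plx)    # SkyCoord and parallax
```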
@@ -1,17 +1,19 @@
 import os, sys
 import numpy as np
-import matplotlib.pyplot as plt
 
 from .setup_logger import logger
 
 # ESPRESSO ADC issues
 from .utils import ESPRESSO_ADC_issues
 
-def ADC_issues(self, plot=True):
+def ADC_issues(self, plot=True, check_headers=False):
     """ Identify and mask points affected by ADC issues (ESPRESSO).
 
     Args:
-        plot (bool, optional): Whether to plot the masked points.
+        plot (bool, optional):
+            Whether to plot the masked points.
+        check_headers (bool, optional):
+            Whether to (double-)check the headers for missing/zero keywords.
     """
     instruments = self._check_instrument('ESPRESSO')
 
@@ -25,6 +27,14 @@ def ADC_issues(self, plot=True):
     file_roots = [os.path.basename(f).replace('.fits', '') for f in self.raw_file]
     intersect = np.in1d(file_roots, affected_file_roots)
 
+    if check_headers:
+        from .headers import get_headers
+        H = get_headers(self, check_lesta=False, check_exo2=False, instrument='ESPRE')
+        badACD2 = np.array([h['*ADC2 RA'][0] for h in H]) == 0
+        badACD2 |= np.array([h['*ADC2 SENS1'][0] for h in H]) == 0
+        badACD2 |= np.array([h['*ADC2 TEMP'][0] for h in H]) == 0
+        intersect = np.logical_or(intersect, badACD2)
+
     total_affected = intersect.sum()
 
     if self.verbose:
@@ -76,6 +86,52 @@ def blue_cryostat_issues(self, plot=True):
     return intersect
 
 
+def qc_scired_issues(self, plot=False, **kwargs):
+    """ Identify and mask points with failed SCIRED QC
+
+    Args:
+        plot (bool, optional): Whether to plot the masked points.
+    """
+    from .headers import get_headers
+
+    instruments = self._check_instrument('ESPRESSO')
+
+    if instruments is None:
+        if self.verbose:
+            logger.error(f"no data from ESPRESSO")
+            logger.info(f'available: {self.instruments}')
+        return
+
+    H = kwargs.get('H', None)
+    if H is None:
+        H = get_headers(self, check_lesta=False, check_exo2=False, instrument='ESPRE')
+    if len(H) == 0:
+        if self.verbose:
+            logger.warning('this function requires access to headers, but none found')
+            logger.warning('trying to download')
+        self.download_ccf()
+        H = get_headers(self, check_lesta=False, check_exo2=False, instrument='ESPRE')
+
+    scired_check = np.array([h['HIERARCH ESO QC SCIRED CHECK'] for h in H])
+    affected = scired_check == 0
+    n = affected.sum()
+
+    if self.verbose:
+        logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
+                    "where QC SCIRED CHECK is 0")
+
+    if n == 0:
+        return
+
+    self.mask[affected] = False
+    self._propagate_mask_changes()
+
+    if plot:
+        self.plot(show_masked=True)
+
+    return affected
+
+
 def known_issues(self, plot=False, **kwargs):
     """ Identify and mask known instrumental issues (ADC and blue cryostat for ESPRESSO)
 
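The log message in `qc_scired_issues` pluralises with slice arithmetic: `n ^ 1` is 0 only when `n == 1`, so `'s'[:n^1]` yields the correct plural for every count. The is/are half of the trick is less robust; a quick check:

```python
for n in (0, 1, 2, 3, 4):
    print(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]}")
# there as 0 frames    <- garbled, since 0 ^ 1 == 1
# there is 1 frame
# there are 2 frames
# there ar 3 frames    <- garbled, since 3 ^ 1 == 2
# there are 4 frames
```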
arvi/nasaexo_wrapper.py CHANGED
@@ -125,7 +125,7 @@ class Planets:
         self.model.keplerian.uid = 0
 
         for i in range(self.np):
-            if self.K[i] == False or np.isnan(self.K[i]):
+            if not self.K[i] or np.isnan(self.K[i]):
                 self.model.add_keplerian_from_period(self.P[i], fit=True)
             else:
                 self.model.add_keplerian([self.P[i], self.K[i], self.e[i], 0.0, self.w[i]],
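`not self.K[i]` is broader than the old `self.K[i] == False`: it also treats `0`, `0.0`, and `None` as a missing semi-amplitude, and it short-circuits before `np.isnan` can be called on a non-float. A quick check of the truthiness involved:

```python
import numpy as np

for K in (False, None, 0.0, np.nan, 12.3):
    missing = not K or (isinstance(K, float) and np.isnan(K))
    print(repr(K), '->', missing)   # only 12.3 counts as a real amplitude
```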