arvi 0.1.16__py3-none-any.whl → 0.1.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- arvi/dace_wrapper.py +84 -54
- arvi/gaia_wrapper.py +17 -2
- arvi/instrument_specific.py +105 -15
- arvi/plots.py +67 -23
- arvi/simbad_wrapper.py +4 -1
- arvi/stellar.py +30 -11
- arvi/utils.py +42 -0
- {arvi-0.1.16.dist-info → arvi-0.1.18.dist-info}/METADATA +1 -1
- {arvi-0.1.16.dist-info → arvi-0.1.18.dist-info}/RECORD +12 -12
- {arvi-0.1.16.dist-info → arvi-0.1.18.dist-info}/LICENSE +0 -0
- {arvi-0.1.16.dist-info → arvi-0.1.18.dist-info}/WHEEL +0 -0
- {arvi-0.1.16.dist-info → arvi-0.1.18.dist-info}/top_level.txt +0 -0
arvi/dace_wrapper.py
CHANGED
@@ -41,14 +41,18 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
 
        # select ESPRESSO mode, which is defined at the level of the pipeline
        if 'ESPRESSO' in inst:
-
+
+            find_mode = [ESPRESSO_mode in pipe for pipe in pipelines]
+            # the mode was not found
+            if not any(find_mode):
+                if len(pipelines) > 1 and verbose:
+                    logger.warning(f'no observations for requested ESPRESSO mode ({ESPRESSO_mode})')
+            # the mode was found but do nothing if it's the only one
+            elif any(find_mode) and not all(find_mode):
                if verbose:
                    logger.info(f'selecting mode {ESPRESSO_mode} for ESPRESSO')
                i = [i for i, pipe in enumerate(pipelines) if ESPRESSO_mode in pipe][0]
                pipelines = [pipelines[i]]
-            else:
-                if len(pipelines) > 1 and verbose:
-                    logger.warning(f'no observations for requested ESPRESSO mode ({ESPRESSO_mode})')
 
        if latest_pipeline:
            if verbose and len(pipelines) > 1:
@@ -59,6 +63,7 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
        for pipe in pipelines:
            modes = list(result[inst][pipe].keys())
 
+
            # select NIRPS mode, which is defined at the level of the mode
            if 'NIRPS' in inst:
                if NIRPS_mode in modes:
@@ -70,6 +75,19 @@ def get_arrays(result, latest_pipeline=True, ESPRESSO_mode='HR11', NIRPS_mode='H
                if verbose:
                    logger.warning(f'no observations for requested NIRPS mode ({NIRPS_mode})')
 
+            # HARPS15 observations should not be separated by 'mode' if some are
+            # done together with NIRPS
+            if 'HARPS15' in inst and 'HARPS+NIRPS' in modes:
+                m0 = modes[0]
+                data = {
+                    k: np.concatenate([result[inst][pipe][m][k] for m in modes])
+                    for k in result[inst][pipe][m0].keys()
+                }
+                arrays.append(
+                    ((inst, pipe, m0), data)
+                )
+                continue
+
            for mode in modes:
                if 'rjd' not in result[inst][pipe][mode]:
                    logger.error(f"No 'rjd' key for {inst} - {pipe}")
@@ -115,62 +133,70 @@ def get_observations_from_instrument(star, instrument, main_id=None):
        raise ValueError
 
    r = {}
+
    for inst in np.unique(result['ins_name']):
        mask1 = result['ins_name'] == inst
        r[inst] = {}
+
        for pipe in np.unique(result['ins_drs_version'][mask1]):
            mask2 = mask1 & (result['ins_drs_version'] == pipe)
-
-
-
-            ins_mode
-
-
-
-            '
-            '
+            r[inst][pipe] = {}
+
+            for ins_mode in np.unique(result['ins_mode'][mask2]):
+                mask3 = mask2 & (result['ins_mode'] == ins_mode)
+                _nan = np.full(mask3.sum(), np.nan)
+
+                r[inst][pipe][ins_mode] = {
+                    'texp': result['texp'][mask3],
+                    'bispan': result['spectro_ccf_bispan'][mask3],
+                    'bispan_err': result['spectro_ccf_bispan_err'][mask3],
+                    'drift_noise': result['spectro_cal_drift_noise'][mask3],
+                    'rjd': result['obj_date_bjd'][mask3],
                    'cal_therror': _nan,
-                    'fwhm': result['spectro_ccf_fwhm'][
-                    'fwhm_err': result['spectro_ccf_fwhm_err'][
-                    'rv': result['spectro_ccf_rv'][
-                    'rv_err': result['spectro_ccf_rv_err'][
-                    'berv': result['spectro_cal_berv'][
+                    'fwhm': result['spectro_ccf_fwhm'][mask3],
+                    'fwhm_err': result['spectro_ccf_fwhm_err'][mask3],
+                    'rv': result['spectro_ccf_rv'][mask3],
+                    'rv_err': result['spectro_ccf_rv_err'][mask3],
+                    'berv': result['spectro_cal_berv'][mask3],
                    'ccf_noise': _nan,
-                    'rhk': result['spectro_analysis_rhk'][
-                    'rhk_err': result['spectro_analysis_rhk_err'][
-                    'contrast': result['spectro_ccf_contrast'][
-                    'contrast_err': result['spectro_ccf_contrast_err'][
-                    'cal_thfile': result['spectro_cal_thfile'][
-                    'spectroFluxSn50': result['spectro_flux_sn50'][
-                    'protm08': result['spectro_analysis_protm08'][
-                    'protm08_err': result['spectro_analysis_protm08_err'][
-                    'caindex': result['spectro_analysis_ca'][
-                    'caindex_err': result['spectro_analysis_ca_err'][
-                    'pub_reference': result['pub_ref'][
-                    'drs_qc': result['spectro_drs_qc'][
-                    'haindex': result['spectro_analysis_halpha'][
-                    'haindex_err': result['spectro_analysis_halpha_err'][
-                    'protn84': result['spectro_analysis_protn84'][
-                    'protn84_err': result['spectro_analysis_protn84_err'][
-                    'naindex': result['spectro_analysis_na'][
-                    'naindex_err': result['spectro_analysis_na_err'][
+                    'rhk': result['spectro_analysis_rhk'][mask3],
+                    'rhk_err': result['spectro_analysis_rhk_err'][mask3],
+                    'contrast': result['spectro_ccf_contrast'][mask3],
+                    'contrast_err': result['spectro_ccf_contrast_err'][mask3],
+                    'cal_thfile': result['spectro_cal_thfile'][mask3],
+                    'spectroFluxSn50': result['spectro_flux_sn50'][mask3],
+                    'protm08': result['spectro_analysis_protm08'][mask3],
+                    'protm08_err': result['spectro_analysis_protm08_err'][mask3],
+                    'caindex': result['spectro_analysis_ca'][mask3],
+                    'caindex_err': result['spectro_analysis_ca_err'][mask3],
+                    'pub_reference': result['pub_ref'][mask3],
+                    'drs_qc': result['spectro_drs_qc'][mask3],
+                    'haindex': result['spectro_analysis_halpha'][mask3],
+                    'haindex_err': result['spectro_analysis_halpha_err'][mask3],
+                    'protn84': result['spectro_analysis_protn84'][mask3],
+                    'protn84_err': result['spectro_analysis_protn84_err'][mask3],
+                    'naindex': result['spectro_analysis_na'][mask3],
+                    'naindex_err': result['spectro_analysis_na_err'][mask3],
                    'snca2': _nan,
-                    'mask': result['spectro_ccf_mask'][
-                    'public': result['public'][
-                    'spectroFluxSn20': result['spectro_flux_sn20'][
-                    'sindex': result['spectro_analysis_smw'][
-                    'sindex_err': result['spectro_analysis_smw_err'][
+                    'mask': result['spectro_ccf_mask'][mask3],
+                    'public': result['public'][mask3],
+                    'spectroFluxSn20': result['spectro_flux_sn20'][mask3],
+                    'sindex': result['spectro_analysis_smw'][mask3],
+                    'sindex_err': result['spectro_analysis_smw_err'][mask3],
                    'drift_used': _nan,
-                    'ccf_asym': result['spectro_ccf_asym'][
-                    'ccf_asym_err': result['spectro_ccf_asym_err'][
-                    'date_night': result['date_night'][
-                    'raw_file': result['file_rootpath'][
-                    'prog_id': result['prog_id'][
-                    'th_ar': result['th_ar'][
-                    'th_ar1': result['th_ar1'][
-                    'th_ar2': result['th_ar2'][
+                    'ccf_asym': result['spectro_ccf_asym'][mask3],
+                    'ccf_asym_err': result['spectro_ccf_asym_err'][mask3],
+                    'date_night': result['date_night'][mask3],
+                    'raw_file': result['file_rootpath'][mask3],
+                    'prog_id': result['prog_id'][mask3],
+                    'th_ar': result['th_ar'][mask3],
+                    'th_ar1': result['th_ar1'][mask3],
+                    'th_ar2': result['th_ar2'][mask3],
                }
-
+
+    # print(r.keys())
+    # print([r[k].keys() for k in r.keys()])
+    # print([r[k1][k2].keys() for k1 in r.keys() for k2 in r[k1].keys()])
    return r
 
 def get_observations(star, instrument=None, main_id=None, verbose=True):
@@ -248,16 +274,20 @@ def get_observations(star, instrument=None, main_id=None, verbose=True):
    _inst = ''
    for inst in instruments:
        pipelines = list(new_result[inst].keys())
+        max_len = max([len(pipe) for pipe in pipelines])
        for pipe in pipelines:
+            last_pipe = pipe == pipelines[-1]
            modes = list(new_result[inst][pipe].keys())
            for mode in modes:
                N = len(new_result[inst][pipe][mode]['rjd'])
                # LOG
-                if inst == _inst:
-                    logger.info(f'{" ":>12s} └ {pipe} - {mode} ({N} observations)')
+                if inst == _inst and last_pipe:
+                    logger.info(f'{" ":>12s} └ {pipe:{max_len}s} - {mode} ({N} observations)')
+                elif inst == _inst:
+                    logger.info(f'{" ":>12s} ├ {pipe:{max_len}s} - {mode} ({N} observations)')
                else:
-                    logger.info(f'{inst:>12s} ├ {pipe} - {mode} ({N} observations)')
-
+                    logger.info(f'{inst:>12s} ├ {pipe:{max_len}s} - {mode} ({N} observations)')
+                _inst = inst
 
        return new_result
 
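The rewritten get_observations_from_instrument nests the returned arrays one level deeper, keyed by instrument, pipeline and instrument mode, and get_arrays consumes that structure. A minimal sketch of how such a nested result can be walked; the helper name and the sample keys are illustrative, not part of arvi:

    # Illustrative sketch only: iterate a dict shaped like r[instrument][pipeline][mode] -> columns
    def iter_observations(r):
        for inst, pipelines in r.items():
            for pipe, modes in pipelines.items():
                for mode, data in modes.items():
                    # `data` holds per-observation columns such as 'rjd', 'rv', 'rv_err', 'fwhm', ...
                    yield (inst, pipe, mode), data

    # for (inst, pipe, mode), data in iter_observations(r):
    #     print(inst, pipe, mode, len(data['rjd']))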
arvi/gaia_wrapper.py
CHANGED
@@ -22,6 +22,18 @@ CONTAINS(
 )=1
 """
 
+QUERY_ID = """
+SELECT TOP 20 gaia_source.designation,gaia_source.source_id,gaia_source.ra,gaia_source.dec,gaia_source.parallax,gaia_source.pmra,gaia_source.pmdec,gaia_source.ruwe,gaia_source.phot_g_mean_mag,gaia_source.bp_rp,gaia_source.radial_velocity,gaia_source.phot_variable_flag,gaia_source.non_single_star,gaia_source.has_xp_continuous,gaia_source.has_xp_sampled,gaia_source.has_rvs,gaia_source.has_epoch_photometry,gaia_source.has_epoch_rv,gaia_source.has_mcmc_gspphot,gaia_source.has_mcmc_msc,gaia_source.teff_gspphot,gaia_source.logg_gspphot,gaia_source.mh_gspphot,gaia_source.distance_gspphot,gaia_source.azero_gspphot,gaia_source.ag_gspphot,gaia_source.ebpminrp_gspphot
+FROM gaiadr3.gaia_source
+WHERE
+gaia_source.source_id = {id}
+"""
+
+translate = {
+    'LS II +14 13': '4318465066420528000',
+}
+
+
 def run_query(query):
    url = 'https://gea.esac.esa.int/tap-server/tap/sync'
    data = dict(query=query, request='doQuery', lang='ADQL', format='csv')
@@ -71,8 +83,11 @@ class gaia:
        args = dict(ra=ra, dec=dec, plx=plx, pmra=pmra, pmdec=pmdec, rv=rv)
 
        try:
-
-
+            if star in translate:
+                table = run_query(query=QUERY_ID.format(id=translate[star]))
+            else:
+                table = run_query(query=QUERY.format(**args))
+            results = parse_csv(table)[0]
        except IndexError:
            raise ValueError(f'Gaia query for {star} failed')
 
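The new QUERY_ID template and the translate table let the Gaia lookup query by DR3 source_id for names known to fail the positional query. A small sketch of that fallback, assuming run_query, parse_csv and the module-level names can be imported from arvi.gaia_wrapper:

    from arvi.gaia_wrapper import QUERY_ID, translate, run_query, parse_csv

    star = 'LS II +14 13'            # one of the entries in the translate table
    if star in translate:
        # query by Gaia DR3 source_id instead of by sky position
        table = run_query(query=QUERY_ID.format(id=translate[star]))
        results = parse_csv(table)[0]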
arvi/instrument_specific.py
CHANGED
@@ -3,13 +3,99 @@ import numpy as np
 
 from .setup_logger import logger
 
+
+# HARPS fiber upgrade (28 May 2015)
+# https://www.eso.org/sci/facilities/lasilla/instruments/harps/news/harps_upgrade_2015.html
+HARPS_technical_intervention = 57170
+
+# ESPRESSO fiber link upgrade (1 July 2019)
+ESPRESSO_technical_intervention = 58665
+
+
+def divide_ESPRESSO(self):
+    """ Split ESPRESSO data into separate sub ESP18 and ESP19 subsets """
+    if self._check_instrument('ESPRESSO', strict=False) is None:
+        return
+    if 'ESPRESSO18' in self.instruments and 'ESPRESSO19' in self.instruments:
+        if self.verbose:
+            logger.info('ESPRESSO data seems to be split already, doing nothing')
+        return
+
+    from .timeseries import RV
+
+    before = self.time < ESPRESSO_technical_intervention
+    after = self.time >= ESPRESSO_technical_intervention
+    new_instruments = []
+
+
+    for inst, mask in zip(['ESPRESSO18', 'ESPRESSO19'], [before, after]):
+        if not mask.any():
+            continue
+
+        _s = RV.from_arrays(self.star, self.time[mask], self.vrad[mask], self.svrad[mask],
+                            inst=inst)
+        for q in self._quantities:
+            setattr(_s, q, getattr(self, q)[mask])
+        setattr(self, inst, _s)
+        _s._quantities = self._quantities
+        _s.mask = self.mask[mask]
+        new_instruments.append(inst)
+
+    delattr(self, 'ESPRESSO')
+    self.instruments = new_instruments
+    self._build_arrays()
+
+    if self.verbose:
+        logger.info(f'divided ESPRESSO into {self.instruments}')
+
+
+def divide_HARPS(self):
+    """ Split HARPS data into separate sub HARPS03 and HARPS15 subsets """
+    if self._check_instrument('HARPS', strict=False) is None:
+        return
+    if 'HARPS03' in self.instruments and 'HARPS15' in self.instruments:
+        if self.verbose:
+            logger.info('HARPS data seems to be split already, doing nothing')
+        return
+
+    from .timeseries import RV
+
+    new_instruments = []
+    before = self.time < HARPS_technical_intervention
+    if before.any():
+        new_instruments += ['HARPS03']
+
+    after = self.time >= HARPS_technical_intervention
+    if after.any():
+        new_instruments += ['HARPS15']
+
+    for inst, mask in zip(new_instruments, [before, after]):
+        _s = RV.from_arrays(self.star, self.time[mask], self.vrad[mask], self.svrad[mask],
+                            inst=inst)
+        for q in self._quantities:
+            setattr(_s, q, getattr(self, q)[mask])
+        setattr(self, inst, _s)
+        _s._quantities = self._quantities
+        _s.mask = self.mask[mask]
+
+    delattr(self, 'HARPS')
+    self.instruments = new_instruments
+    self._build_arrays()
+
+    if self.verbose:
+        logger.info(f'divided HARPS into {self.instruments}')
+
+
+
 # ESPRESSO ADC issues
 from .utils import ESPRESSO_ADC_issues
 
-def ADC_issues(self, plot=True, check_headers=False):
-    """ Identify and mask points affected by ADC issues (ESPRESSO).
+def ADC_issues(self, mask=True, plot=True, check_headers=False):
+    """ Identify and optionally mask points affected by ADC issues (ESPRESSO).
 
    Args:
+        mask (bool, optional):
+            Whether to mask out the points.
        plot (bool, optional):
            Whether to plot the masked points.
        check_headers (bool, optional):
@@ -42,21 +128,23 @@ def ADC_issues(self, plot=True, check_headers=False):
    logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
                "affected by ADC issues")
 
-
-
+    if mask:
+        self.mask[intersect] = False
+        self._propagate_mask_changes()
 
-
-
+    if plot:
+        self.plot(show_masked=True)
 
    return intersect
 
 # ESPRESSO cryostat issues
 from .utils import ESPRESSO_cryostat_issues
 
-def blue_cryostat_issues(self, plot=True):
+def blue_cryostat_issues(self, mask=True, plot=True):
    """ Identify and mask points affected by blue cryostat issues (ESPRESSO).
 
    Args:
+        mask (bool, optional): Whether to mask out the points.
        plot (bool, optional): Whether to plot the masked points.
    """
    instruments = self._check_instrument('ESPRESSO')
@@ -77,11 +165,12 @@ def blue_cryostat_issues(self, plot=True):
    logger.info(f"there {'are'[:n^1]}{'is'[n^1:]} {n} frame{'s'[:n^1]} "
                "affected by blue cryostat issues")
 
-
-
+    if mask:
+        self.mask[intersect] = False
+        self._propagate_mask_changes()
 
-
-
+    if plot:
+        self.plot(show_masked=True)
 
    return intersect
 
@@ -132,20 +221,21 @@ def qc_scired_issues(self, plot=False, **kwargs):
    return affected
 
 
-def known_issues(self, plot=False, **kwargs):
-    """ Identify and mask known instrumental issues (ADC and blue cryostat for ESPRESSO)
+def known_issues(self, mask=True, plot=False, **kwargs):
+    """ Identify and optionally mask known instrumental issues (ADC and blue cryostat for ESPRESSO)
 
    Args:
+        mask (bool, optional): Whether to mask out the points.
        plot (bool, optional): Whether to plot the masked points.
    """
    try:
-        adc = ADC_issues(self, plot, **kwargs)
+        adc = ADC_issues(self, mask, plot, **kwargs)
    except IndexError as e:
        # logger.error(e)
        logger.error('are the data binned? cannot proceed to mask these points...')
 
    try:
-        cryostat = blue_cryostat_issues(self, plot)
+        cryostat = blue_cryostat_issues(self, mask, plot)
    except IndexError as e:
        # logger.error(e)
        logger.error('are the data binned? cannot proceed to mask these points...')
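The issue-detection helpers gain a mask argument, so affected frames can be reported without touching the mask, and the new divide_ESPRESSO / divide_HARPS helpers split a dataset at the fiber-upgrade dates. A hedged usage sketch; it assumes these functions are exposed as methods of an arvi RV object (their self parameter suggests so), that RV is importable from the top-level package, and the target name is only an example:

    from arvi import RV                      # assumed import path

    s = RV('HD69830')                        # example target with ESPRESSO/HARPS data
    s.known_issues(mask=False, plot=False)   # only identify affected frames, leave the mask alone
    s.known_issues(mask=True, plot=True)     # or mask them and plot, as before

    s.divide_ESPRESSO()   # split into ESPRESSO18 / ESPRESSO19 at MJD 58665
    s.divide_HARPS()      # split into HARPS03 / HARPS15 at MJD 57170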
arvi/plots.py
CHANGED
@@ -431,12 +431,16 @@ def plot_quantity(self, quantity, ax=None, show_masked=False, instrument=None,
            p = p.replace('_', '.')
            label = f'{i}-{p}'
 
-        y = getattr(s, quantity)
+        y = getattr(s, quantity).copy()
        try:
-            ye = getattr(s, quantity + '_err')
+            ye = getattr(s, quantity + '_err').copy()
        except AttributeError:
            ye = np.zeros_like(y)
 
+        if (nans := np.isnan(y)).any():
+            if self.verbose:
+                logger.warning(f'{nans.sum()} NaN values for {inst}')
+            ye[nans] = np.nan
 
        if np.isnan(y).all() or np.isnan(ye).all():
            lines, *_ = ax.errorbar([], [], [],
@@ -464,7 +468,7 @@ def plot_quantity(self, quantity, ax=None, show_masked=False, instrument=None,
    ax.minorticks_on()
 
    ylabel = {
-        quantity: quantity,
+        quantity.lower(): quantity,
        'fwhm': f'FWHM [{self.units}]',
        'bispan': f'BIS [{self.units}]',
        'rhk': r"$\log$ R'$_{HK}$",
@@ -485,6 +489,7 @@ def plot_quantity(self, quantity, ax=None, show_masked=False, instrument=None,
 
 plot_fwhm = partialmethod(plot_quantity, quantity='fwhm')
 plot_bis = partialmethod(plot_quantity, quantity='bispan')
+plot_contrast = partialmethod(plot_quantity, quantity='contrast')
 plot_rhk = partialmethod(plot_quantity, quantity='rhk')
 plot_berv = partialmethod(plot_quantity, quantity='berv')
 
@@ -518,7 +523,7 @@ def gls(self, ax=None, label=None, fap=True, instrument=None, adjust_means=confi
 
    if not self._did_adjust_means and not adjust_means:
        logger.warning('gls() called before adjusting instrument means, '
-                       'consider using `adjust_means` argument')
+                       'consider using the `adjust_means` argument')
 
    if instrument is not None:
        strict = kwargs.pop('strict', False)
@@ -536,11 +541,11 @@ def gls(self, ax=None, label=None, fap=True, instrument=None, adjust_means=confi
    if adjust_means and not self._child:
        if self.verbose:
            logger.info('adjusting instrument means before gls')
-        means = np.
+        means = np.zeros_like(y)
        for i in instrument:
            mask = self.instrument_array[instrument_mask & self.mask] == i
            if len(y[mask]) > 0:
-                means
+                means = means + wmean(y[mask], e[mask]) * mask
        y = y - means
 
    else:
@@ -551,11 +556,11 @@ def gls(self, ax=None, label=None, fap=True, instrument=None, adjust_means=confi
    if adjust_means and not self._child:
        if self.verbose:
            logger.info('adjusting instrument means before gls')
-        means = np.
+        means = np.zeros_like(y)
        for i in self.instruments:
            mask = self.instrument_array[self.mask] == i
            if len(y[mask]) > 0:
-                means
+                means = means + wmean(y[mask], e[mask]) * mask
        y = y - means
 
    self._gls = gls = LombScargle(t, y, e)
@@ -623,31 +628,70 @@ def gls(self, ax=None, label=None, fap=True, instrument=None, adjust_means=confi
    return fig, ax
 
 
-@plot_fast
-def gls_quantity(self, quantity, ax=None, fap=True,
+# @plot_fast
+def gls_quantity(self, quantity, ax=None, fap=True, instrument=None,
+                 adjust_means=True, picker=True, **kwargs):
+
    if not hasattr(self, quantity):
-
+        if self.verbose:
+            logger.error(f"cannot find '{quantity}' attribute")
        return
 
-    if
-
-
-
+    if self.N == 0:
+        if self.verbose:
+            logger.error('no data to compute gls')
+        return
+
+    if not self._did_adjust_means and not adjust_means:
+        logger.warning('gls() called before adjusting instrument means, '
+                       'consider using the `adjust_means` argument')
+
+    strict = kwargs.pop('strict', False)
+    instrument = self._check_instrument(instrument, strict=strict, log=True)
+    if instrument is None:
+        return
+
+    instrument_mask = np.isin(self.instrument_array, instrument)
+    final_mask = instrument_mask & self.mask
 
-    t = self.
-    y = getattr(self, quantity)[
+    t = self.time[final_mask].copy()
+    y = getattr(self, quantity)[final_mask].copy()
    try:
-        ye = getattr(self, quantity + '_err')[
+        ye = getattr(self, quantity + '_err')[final_mask].copy()
    except AttributeError:
        ye = None
 
-    if
+    if self.verbose:
+        logger.info(f'calculating periodogram for instrument {instrument}')
+
+    nan_mask = np.isnan(y)
+    if nan_mask.any():
        if self.verbose:
            logger.warning(f'{quantity} contains NaNs, ignoring them')
-
-
-
+
+    if adjust_means and not self._child:
+        if self.verbose:
+            logger.info('adjusting instrument means before gls')
+        means = np.zeros_like(y)
+        for i in instrument:
+            mask = (self.instrument_array[final_mask] == i) & ~nan_mask
+            if len(y[mask]) > 0:
+                # print(wmean(y[mask], ye[mask]))
+                means += wmean(y[mask], ye[mask]) * mask
+        y = y - means
+
+    t = t[~nan_mask]
+    y = y[~nan_mask]
+    ye = ye[~nan_mask]
+
+    if len(y) == 0:
+        logger.error('no data left to compute gls after removing NaNs')
+        return
+
+    if ax is None:
+        fig, ax = plt.subplots(1, 1, constrained_layout=True)
+    else:
+        fig = ax.figure
 
    gls = LombScargle(t, y, ye)
    freq, power = gls.autopower(maximum_frequency=1.0)
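gls_quantity now mirrors gls(): it accepts an instrument selection, removes NaNs, and can adjust per-instrument means before computing the Lomb-Scargle periodogram. A short continuation of the sketch above, under the same assumption that these are methods of an RV object s:

    s.plot_contrast()                      # new shortcut for the CCF contrast time series
    s.gls_quantity('fwhm', instrument='ESPRESSO19', adjust_means=True)   # periodogram of one quantity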
arvi/simbad_wrapper.py
CHANGED
@@ -108,7 +108,10 @@ class simbad:
            for line in f.readlines():
                kobe_id, catname = line.strip().split(',')
                kobe_translate[kobe_id] = catname
-
+            try:
+                self.star = star = kobe_translate[self.star]
+            except KeyError:
+                raise ValueError(f'simbad query for {star} failed')
 
        # oid = run_query(query=OID_QUERY.format(star=star))
        # self.oid = str(oid.split()[-1])
arvi/stellar.py
CHANGED
@@ -1,22 +1,32 @@
+
 import numpy as np
 
 class prot_age_result:
-    prot_n84
-    prot_n84_err
-    prot_m08
-    prot_m08_err
-    age_m08
-    age_m08_err
+    prot_n84 = None      #: float | np.ndarray
+    prot_n84_err = None  #: float | np.ndarray
+    prot_m08 = None      #: float | np.ndarray
+    prot_m08_err = None  #: float | np.ndarray
+    age_m08 = None       #: float | np.ndarray
+    age_m08_err = None   #: float | np.ndarray
    def __init__(self):
        pass
    def __repr__(self):
-
-
-
+        if isinstance(self.prot_n84, np.ndarray):
+            s = f'{self.prot_n84.mean()=:.2f} ± {self.prot_n84_err.mean():.2f}, '
+        else:
+            s = f'{self.prot_n84=:.2f} ± {self.prot_n84_err:.2f}, '
+        if isinstance(self.prot_m08, np.ndarray):
+            s += f'{self.prot_m08.mean()=:.2f} ± {self.prot_m08_err.mean():.2f}, '
+        else:
+            s += f'{self.prot_m08=:.2f} ± {self.prot_m08_err:.2f}, '
+        if isinstance(self.age_m08, np.ndarray):
+            s += f'{self.age_m08.mean()=:.2f} ± {self.age_m08_err.mean():.2f}'
+        else:
+            s += f'{self.age_m08=:.2f} ± {self.age_m08_err:.2f}'
        return s.replace('self.', '')
 
 
-def calc_prot_age(self, bv=None):
+def calc_prot_age(self, bv=None, array=False):
    """
    Calculate rotation period and age from logR'HK activity level, based on the
    empirical relations of Noyes et al. (1984) and Mamajek & Hillenbrand (2008).
@@ -46,7 +56,12 @@ def calc_prot_age(self, bv=None):
    Range of logR'HK-Prot relation: -5.5 < logR'HK < -4.3
    Range of Mamajek & Hillenbrand (2008) relation for ages: 0.5 < B-V < 0.9
    """
-
+
+    if array:
+        log_rhk = self.rhk[~np.isnan(self.rhk)]
+    else:
+        log_rhk = np.nanmean(self.rhk[self.mask])
+
    if bv is None:
        bv = self.simbad.B - self.simbad.V
 
@@ -60,9 +75,13 @@ def calc_prot_age(self, bv=None):
        prot_n84 = 0.324 - 0.400*(5 + log_rhk) - 0.283*(5 + log_rhk)**2 - 1.325*(5 + log_rhk)**3 + tau
        prot_n84 = 10**prot_n84
        prot_n84_err = np.log(10)*0.08*prot_n84
+        if array:
+            prot_n84_err = np.full_like(log_rhk, prot_n84_err)
 
        prot_m08 = (0.808 - 2.966*(log_rhk + 4.52))*10**tau
        prot_m08_err = 4.4*bv*1.7 - 1.7
+        if array:
+            prot_m08_err = np.full_like(log_rhk, prot_m08_err)
    else:
        prot_n84 = np.nan
        prot_n84_err = np.nan
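calc_prot_age gains an array flag: with array=False it works on the mean logR'HK of the unmasked points, with array=True it produces per-observation estimates. A sketch, again assuming the function is exposed as a method of an RV object s and that it returns the prot_age_result shown above:

    res = s.calc_prot_age(bv=0.65)        # single estimates from the mean logR'HK (B-V value is an example)
    print(res)                            # prot_n84, prot_m08 and age_m08 with uncertainties

    res_arr = s.calc_prot_age(array=True) # per-observation numpy arrays instead of scalars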
arvi/utils.py
CHANGED
@@ -1,4 +1,5 @@
 import os
+import sys
 import time
 from contextlib import contextmanager
 try:
@@ -82,6 +83,39 @@ def timer():
    logger.debug(f'elapsed time: {end - start:.2f} seconds')
 
 
+def pretty_print_table(rows, line_between_rows=True, logger=None):
+    """
+    Example Output
+    ┌──────┬─────────────┬────┬───────┐
+    │ True │ short       │ 77 │ catty │
+    ├──────┼─────────────┼────┼───────┤
+    │ 36   │ long phrase │ 9  │ dog   │
+    ├──────┼─────────────┼────┼───────┤
+    │ 8    │ medium      │ 3  │ zebra │
+    └──────┴─────────────┴────┴───────┘
+    """
+    _print = logger.info if logger else print
+
+    # find the max length of each column
+    max_col_lens = list(map(max, zip(*[(len(str(cell)) for cell in row) for row in rows])))
+
+    # print the table's top border
+    _print('┌' + '┬'.join('─' * (n + 2) for n in max_col_lens) + '┐')
+
+    rows_separator = '├' + '┼'.join('─' * (n + 2) for n in max_col_lens) + '┤'
+
+    row_fstring = ' │ '.join("{: <%s}" % n for n in max_col_lens)
+
+    for i, row in enumerate(rows):
+        _print('│ ' + row_fstring.format(*map(str, row)) + ' │')
+
+        if line_between_rows and i < len(rows) - 1:
+            _print(rows_separator)
+
+    # print the table's bottom border
+    _print('└' + '┴'.join('─' * (n + 2) for n in max_col_lens) + '┘')
+
+
 def strtobool(val):
    """Convert a string representation of truth to true (1) or false (0).
 
@@ -163,4 +197,12 @@ def get_max_berv_span(self, n=None):
        inds.append(b2)
    return np.array(inds[:n])
 
+def get_object_fast(file):
+    with open(file, 'rb') as f:
+        f.read(800)  # read first 10 keywords
+        key = f.read(8)
+        assert key == b'OBJECT  ', 'Object keyword not found.'
+        f.read(2)
+        value = f.read(20)
+        return value.decode().split("'")[1].strip()
 
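pretty_print_table renders rows with box-drawing borders, either through print or through a logger, and get_object_fast reads the OBJECT keyword from the first header block of a FITS file. A small example for the table helper; the row values are made up:

    from arvi.utils import pretty_print_table

    rows = [
        ['instrument', 'pipeline', 'N'],
        ['ESPRESSO19', '3.0.0', 42],
        ['HARPS15', '3.5', 17],
    ]
    pretty_print_table(rows)                   # prints a bordered table to stdout
    # pretty_print_table(rows, logger=logger)  # or send each line through a logger's .info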
{arvi-0.1.16.dist-info → arvi-0.1.18.dist-info}/RECORD
CHANGED
@@ -4,32 +4,32 @@ arvi/ariadne_wrapper.py,sha256=jv8Wl35LfHl1UH1EklbvxHcQHqaEDhRGNogSYjFt7Y4,2141
 arvi/berv.py,sha256=5avwcmc2nkYH1KDo-z4eMPsS1ElNvold2DWkziV13gE,17633
 arvi/binning.py,sha256=jbemJ-bM3aqoOsqMo_OhWt_co-JAQ0nhdG_GpTsrRsw,15403
 arvi/config.py,sha256=wyj6FTxN7QOZj8LaHAe_ZdPKVfrNfobNNOHRNLH-S7Q,339
-arvi/dace_wrapper.py,sha256=
+arvi/dace_wrapper.py,sha256=YZ7V_l--weBnsguGEBq3tLTbHbCdKlJUrWmtV6hk8XU,18165
 arvi/extra_data.py,sha256=WEEaYeLh52Zdv0uyHO72Ys5MWS3naTAP4wJV2BJ1mbk,2551
-arvi/gaia_wrapper.py,sha256=
+arvi/gaia_wrapper.py,sha256=R3wHoMuuuQFp9Wa6Ee8dxjffd21xZsns7vNmUrjVogE,4494
 arvi/headers.py,sha256=uvdJebw1M5YkGjE3vJJwYBOnLikib75uuZE9FXB5JJM,1673
-arvi/instrument_specific.py,sha256=
+arvi/instrument_specific.py,sha256=BCRhM74nCIiPfETEZW94zPHTymKnOExPcQ5wPjiblYU,7774
 arvi/lbl_wrapper.py,sha256=_ViGVkpakvuBR_xhu9XJRV5EKHpj5Go6jBZGJZMIS2Y,11850
 arvi/nasaexo_wrapper.py,sha256=mWt7eHgSZe4MBKCmUvMPTyUPGuiwGTqKugNBvmjOg9s,7306
-arvi/plots.py,sha256=
+arvi/plots.py,sha256=WUm-sqN0aZTNXvE1kYpvmHTW9QPWqSCpKhNjwaqxjEk,29628
 arvi/programs.py,sha256=C0Fbldjf-QEZYYJp5wBKP3h7zraD0O2mJC7Su967STg,4607
 arvi/reports.py,sha256=8HiwWdaOh_P2V4-F6PV4TjasZLeZ8kC5dUYB1tQam1o,3368
 arvi/setup_logger.py,sha256=pBzaRTn0hntozjbaRVx0JIbWGuENkvYUApa6uB-FsRo,279
-arvi/simbad_wrapper.py,sha256=
+arvi/simbad_wrapper.py,sha256=d-LX_zBEwcBGo4b5SkzRDb6TanV6n-Tg1OHTcoeRAjA,5709
 arvi/spectra.py,sha256=pTAWSW4vk96DWRQ-6l5mNJHUhiAyaPR-QDjZdOT6Ak0,7489
 arvi/stats.py,sha256=ilzzGL9ew-SyVa9eEdrYCpD3DliOAwhoNUg9LIlHjzU,2583
-arvi/stellar.py,sha256=
+arvi/stellar.py,sha256=veuL_y9kJvvApU_jqYQqP3EkcRnQffTc8Us6iT5UrFI,3790
 arvi/timeseries.py,sha256=sdm9l_LHItZHDAVI6r_wsQwkrfbuMX46q3Wqs0y-KGA,60787
 arvi/translations.py,sha256=SUIrJHt3JZdL_GQh3OJyg2Gm3X5ut86w5zW8hZpxHe0,498
-arvi/utils.py,sha256=
+arvi/utils.py,sha256=DP8ryH7RZCWmV99fUJ2zImyiWn-mHbkbd-hhCHYRsFc,6145
 arvi/data/info.svg,sha256=0IMI6W-eFoTD8acnury79WJJakpBwLa4qKS4JWpsXiI,489
 arvi/data/obs_affected_ADC_issues.dat,sha256=tn93uOL0eCTYhireqp1wG-_c3CbxPA7C-Rf-pejVY8M,10853
 arvi/data/obs_affected_blue_cryostat_issues.dat,sha256=z4AK17xfz8tGTDv1FjRvQFnio4XA6PNNfDXuicewHk4,1771
 arvi/data/extra/HD86226_PFS1.rdb,sha256=vfAozbrKHM_j8dYkCBJsuHyD01KEM1asghe2KInwVao,3475
 arvi/data/extra/HD86226_PFS2.rdb,sha256=F2P7dB6gVyzCglUjNheB0hIHVClC5RmARrGwbrY1cfo,4114
 arvi/data/extra/metadata.json,sha256=C69hIw6CohyES6BI9vDWjxwSz7N4VOYX0PCgjXtYFmU,178
-arvi-0.1.
-arvi-0.1.
-arvi-0.1.
-arvi-0.1.
-arvi-0.1.
+arvi-0.1.18.dist-info/LICENSE,sha256=6JfQgl7SpM55t0EHMFNMnNh-AdkpGW25MwMiTnhdWQg,1068
+arvi-0.1.18.dist-info/METADATA,sha256=KMcNbj2gpWbTJgfLTi6QcnrVhUU3v_ir7v_vx1Q1XFs,1250
+arvi-0.1.18.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+arvi-0.1.18.dist-info/top_level.txt,sha256=4EeiKDVLD45ztuflTGfQ3TU8GVjJg5Y95xS5XjI-utU,5
+arvi-0.1.18.dist-info/RECORD,,
{arvi-0.1.16.dist-info → arvi-0.1.18.dist-info}/LICENSE
File without changes
{arvi-0.1.16.dist-info → arvi-0.1.18.dist-info}/WHEEL
File without changes
{arvi-0.1.16.dist-info → arvi-0.1.18.dist-info}/top_level.txt
File without changes