pz-rail-astro-tools 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pz-rail-astro-tools might be problematic. Click here for more details.
- pz_rail_astro_tools-0.0.1.dist-info/LICENSE +21 -0
- pz_rail_astro_tools-0.0.1.dist-info/METADATA +83 -0
- pz_rail_astro_tools-0.0.1.dist-info/RECORD +17 -0
- pz_rail_astro_tools-0.0.1.dist-info/WHEEL +5 -0
- pz_rail_astro_tools-0.0.1.dist-info/top_level.txt +1 -0
- rail/astro_tools/__init__.py +7 -0
- rail/astro_tools/_version.py +4 -0
- rail/creation/degradation/grid_selection.py +212 -0
- rail/creation/degradation/observing_condition_degrader.py +405 -0
- rail/creation/degradation/spectroscopic_degraders.py +139 -0
- rail/creation/degradation/spectroscopic_selections.py +617 -0
- rail/examples_data/creation_data/data/hsc_ratios_and_specz.hdf5 +0 -0
- rail/examples_data/creation_data/data/survey_conditions/DC2-dr6-galcounts-i20-i25.3-nside-128.fits +0 -0
- rail/examples_data/creation_data/data/survey_conditions/DC2-mask-neg-nside-128.fits +0 -0
- rail/examples_data/creation_data/data/survey_conditions/minion_1016_dc2_Median_airmass_i_and_nightlt1825_HEAL.fits +0 -0
- rail/examples_data/creation_data/data/survey_conditions/minion_1016_dc2_Median_fiveSigmaDepth_i_and_nightlt1825_HEAL.fits +0 -0
- rail/tools/utilPhotometry.py +488 -0
|
@@ -0,0 +1,488 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Module that implements operations on photometric data such as magnitudes and fluxes.
|
|
3
|
+
"""
|
|
4
|
+
from abc import ABC, abstractmethod
|
|
5
|
+
|
|
6
|
+
import os
|
|
7
|
+
import numpy as np
|
|
8
|
+
import pandas as pd
|
|
9
|
+
import tables_io
|
|
10
|
+
from astropy.coordinates import SkyCoord
|
|
11
|
+
|
|
12
|
+
from ceci.config import StageParameter as Param
|
|
13
|
+
from rail.core.data import PqHandle
|
|
14
|
+
from rail.core.stage import RailStage
|
|
15
|
+
from rail.core.data import PqHandle, Hdf5Handle
|
|
16
|
+
|
|
17
|
+
import hyperbolic # https://github.com/jlvdb/hyperbolic
|
|
18
|
+
|
|
19
|
+
dustmaps_config = tables_io.lazy_modules.lazyImport('dustmaps.config')
|
|
20
|
+
dustmaps_sfd = tables_io.lazy_modules.lazyImport('dustmaps.sfd')
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
# default column names in DC2
|
|
26
|
+
LSST_BANDS = 'ugrizy'
|
|
27
|
+
DEFAULT_MAG_COLS = [f"mag_{band}_lsst" for band in LSST_BANDS]
|
|
28
|
+
DEFAULT_MAGERR_COLS = [f"mag_err_{band}_lsst" for band in LSST_BANDS]
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _compute_flux(magnitude, zeropoint):
    """
    Convert a magnitude to a flux for a given photometric zeropoint.

    Parameters
    ----------
    magnitude : array-like
        Magnitude or array of magnitudes.
    zeropoint : array-like
        Photometric zeropoint used for conversion.

    Returns
    -------
    flux : array-like
        Flux value(s).
    """
    # Invert the Pogson relation m = zp - pogson * ln(flux).
    return np.exp((zeropoint - magnitude) / hyperbolic.pogson)
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _compute_flux_error(flux, magnitude_error):
    """
    Propagate a magnitude error to the corresponding flux error.

    Parameters
    ----------
    flux : array-like
        Flux or array of fluxes.
    magnitude_error : array-like
        Magnitude error or array of magnitude errors.

    Returns
    -------
    flux_error : array-like
        Flux error value(s).
    """
    # First-order error propagation of the Pogson relation.
    return (magnitude_error / hyperbolic.pogson) * flux
|
69
|
+
|
|
70
|
+
|
|
71
|
+
class PhotormetryManipulator(RailStage, ABC):
    """
    Base class for stages that operate on photometric measurements.

    A table with input magnitudes (or fluxes) and their errors is processed and
    transformed into an output table with new values and errors.

    Subclasses must implement the run() and compute() method.
    """

    name = 'PhotormetryManipulator'
    config_options = RailStage.config_options.copy()
    config_options.update(
        value_columns=Param(
            list, default=DEFAULT_MAG_COLS,
            msg="list of columns that prove photometric measurements (fluxes or magnitudes)"),
        error_columns=Param(
            list, default=DEFAULT_MAGERR_COLS,
            msg="list of columns with errors corresponding to value_columns "
                "(assuming same ordering)"),
        zeropoints=Param(
            list, default=[], required=False,
            msg="optional list of magnitude zeropoints for value_columns "
                "(assuming same ordering, defaults to 0.0)"),
        is_flux=Param(
            bool, default=False,
            msg="whether the provided quantities are fluxes or magnitudes"))
    inputs = [('input', PqHandle)]
    outputs = [('output', PqHandle)]

    def __init__(self, args, comm=None):
        super().__init__(args, comm)
        self._check_config()
        # expose frequently used configuration entries as plain attributes
        self.value_columns = self.config.value_columns
        self.error_columns = self.config.error_columns
        self.zeropoints = self.config.zeropoints
        self.n_col = len(self.value_columns)

    def _check_config(self):
        # Validate that the column lists are consistent with each other.
        n_val = len(self.config.value_columns)
        n_err = len(self.config.error_columns)
        n_zpt = len(self.config.zeropoints)
        if n_val != n_err:
            raise IndexError(
                f"number of magnitude and error columns do not match ({n_val} != {n_err})")
        # Fall back to a zeropoint of 0.0 per column when none are provided.
        if n_zpt == 0:
            self.config.zeropoints = [0.0] * n_val
        elif n_zpt != n_val:
            raise IndexError(
                f"number of zeropoints and magnitude columns do not match ({n_zpt} != {n_val})")

    def get_as_fluxes(self):
        """
        Loads specified photometric data as fluxes, converting magnitudes on the fly.
        """
        input_data = self.get_data('input', allow_missing=True)
        if self.config.is_flux:
            # already fluxes: just select the configured columns
            return input_data[self.value_columns + self.error_columns]
        table = pd.DataFrame()
        # magnitudes -> fluxes
        for val_name, zeropoint in zip(self.value_columns, self.zeropoints):
            table[val_name] = _compute_flux(
                input_data[val_name], zeropoint=zeropoint)
        # magnitude errors -> flux errors (uses the fluxes computed above)
        for val_name, err_name in zip(self.value_columns, self.error_columns):
            table[err_name] = _compute_flux_error(
                table[val_name], input_data[err_name])
        return table

    @abstractmethod
    def run(self):  # pragma: no cover
        """
        Implements the operation performed on the photometric data.
        """
        data = self.get_as_fluxes()
        # do work
        self.add_data('output', data)

    @abstractmethod
    def compute(self, data):  # pragma: no cover
        """
        Main method to call.

        Parameters
        ----------
        data : `PqHandle`
            Input tabular data with column names as defined in the configuration.

        Returns
        -------
        output: `PqHandle`
            Output tabular data.
        """
        self.set_data('input', data)
        self.run()
        self.finalize()
        return self.get_handle('output')
|
172
|
+
|
|
173
|
+
|
|
174
|
+
class HyperbolicSmoothing(PhotormetryManipulator):
    """
    Initial stage to compute hyperbolic magnitudes (Lupton et al. 1999). Estimates the smoothing
    parameter b that is used by the second stage (`HyperbolicMagnitudes`) to convert classical to
    hyperbolic magnitudes.
    """

    name = 'HyperbolicSmoothing'
    config_options = PhotormetryManipulator.config_options.copy()
    inputs = [('input', PqHandle)]
    outputs = [('parameters', PqHandle)]

    def run(self):
        """
        Computes the smoothing parameter b (see Lupton et al. 1999) per photometric band.

        The result is a table of per-band statistics (indexed by filter and field)
        that is stored in the 'parameters' output handle.
        """
        # get input data (magnitudes are converted to fluxes on the fly)
        data = self.get_as_fluxes()
        # all sources are assigned to a single field; per-field/pointing depth
        # variations are not yet supported
        fields = np.zeros(len(data), dtype=int)  # placeholder

        # compute the optimal smoothing factor b for each photometric band
        stats = []
        for fx_col, fxerr_col, zeropoint in zip(
                self.value_columns, self.error_columns, self.zeropoints):

            # compute the median flux error and zeropoint
            stats_filt = hyperbolic.compute_flux_stats(
                data[fx_col], data[fxerr_col], fields, zeropoint=zeropoint)
            # compute the smoothing parameter b (in normalised flux)
            stats_filt[hyperbolic.Keys.b] = hyperbolic.estimate_b(
                stats_filt[hyperbolic.Keys.zp],
                stats_filt[hyperbolic.Keys.flux_err])
            # compute the smoothing parameter b (in absolute flux)
            stats_filt[hyperbolic.Keys.b_abs] = (
                stats_filt[hyperbolic.Keys.ref_flux] *
                stats_filt[hyperbolic.Keys.b])

            # collect results, indexed by (filter, field) so that
            # HyperbolicMagnitudes can look up parameters per band
            stats_filt[hyperbolic.Keys.filter] = fx_col
            stats_filt = stats_filt.reset_index().set_index([
                hyperbolic.Keys.filter,
                hyperbolic.Keys.field])
            stats.append(stats_filt)

        # store resulting smoothing parameters for next stage
        self.add_data('parameters', pd.concat(stats))

    def compute(self, data):
        """
        Main method to call. Computes the set of smoothing parameters (b) for an input catalogue
        with classical photometry and their respective errors. These parameters are required by the
        follow-up stage `HyperbolicMagnitudes` and are parsed as tabular data.

        Parameters
        ----------
        data : `PqHandle`
            Input table with magnitude and magnitude error columns as defined in the configuration.

        Returns
        -------
        parameters : `PqHandle`
            Table with smoothing parameters per photometric band and additional meta data.
        """
        self.set_data('input', data)
        self.run()
        self.finalize()
        return self.get_handle('parameters')
|
241
|
+
|
|
242
|
+
|
|
243
|
+
class HyperbolicMagnitudes(PhotormetryManipulator):
    """
    Convert a set of classical magnitudes to hyperbolic magnitudes (Lupton et al. 1999). Requires
    input from the initial stage (`HyperbolicSmoothing`) to supply optimal values for the smoothing
    parameters (b).
    """

    name = 'HyperbolicMagnitudes'
    config_options = PhotormetryManipulator.config_options.copy()
    inputs = [('input', PqHandle),
              ('parameters', PqHandle)]
    outputs = [('output', PqHandle)]

    def _check_filters(self, stats_table):
        """
        Check whether the column definition matches the loaded smoothing parameters.

        Parameters
        ----------
        stats_table : `pd.DataFrame`
            Data table that contains smoothing parameters per photometric band (from
            `HyperbolicSmoothing`).

        Raises
        ------
        KeyError
            If a filter defined in value_columns is not found in the smoothing parameter table.
        """
        # filters in the parameter table
        param_filters = set(stats_table.reset_index()[hyperbolic.Keys.filter])
        # filters parsed through configuration
        config_filters = set(self.value_columns)
        # check if the filters match
        filter_diff = config_filters - param_filters
        if len(filter_diff) != 0:
            strdiff = ", ".join(sorted(filter_diff))
            raise KeyError(f"parameter table contains no smoothing parameters for: {strdiff}")

    def run(self):
        """
        Compute hyperbolic magnitudes and their error based on the parameters determined by
        `HyperbolicSmoothing`.
        """
        # get input data
        data = self.get_as_fluxes()
        stats = self.get_data('parameters', allow_missing=True)
        self._check_filters(stats)
        fields = np.zeros(len(data), dtype=int)  # placeholder for variable field/pointing depth

        # initialise the output data
        output = pd.DataFrame(index=data.index)  # allows joining on input

        # median smoothing parameter b in each filter
        # NOTE: .median() skips NaN, matching the former .agg(np.nanmedian),
        # which is deprecated in recent pandas releases.
        b = stats[hyperbolic.Keys.b].groupby(hyperbolic.Keys.filter).median()
        b = b.to_dict()

        # hyperbolic magnitudes
        for val_col, err_col in zip(self.value_columns, self.error_columns):
            # get the smoothing parameters for this filter
            stats_filt = hyperbolic.fill_missing_stats(stats.loc[val_col])

            # map reference flux from fields/pointings to sources
            ref_flux_per_source = hyperbolic.fields_to_source(
                stats_filt[hyperbolic.Keys.ref_flux], fields, index=data.index)
            norm_flux = data[val_col] / ref_flux_per_source
            norm_flux_err = data[err_col] / ref_flux_per_source

            # compute the hyperbolic magnitudes
            hyp_mag = hyperbolic.compute_magnitude(
                norm_flux, b[val_col])
            hyp_mag_err = hyperbolic.compute_magnitude_error(
                norm_flux, b[val_col], norm_flux_err)

            # add data to catalogue; a "mag_" prefix becomes "mag_hyp_"
            key_mag = val_col.replace("mag_", "mag_hyp_")
            key_mag_err = err_col.replace("mag_", "mag_hyp_")
            output[key_mag] = hyp_mag
            output[key_mag_err] = hyp_mag_err

        # store results
        self.add_data('output', output)

    def compute(self, data, parameters):
        """
        Main method to call. Outputs hyperbolic magnitudes computed from a set of smoothing
        parameters and input catalogue with classical magnitudes and their respective errors.

        Parameters
        ----------
        data : `PqHandle`
            Input table with photometry (magnitudes or flux columns and their respective
            uncertainties) as defined by the configuration.
        parameters : `PqHandle`
            Table with smoothing parameters per photometric band, determined by
            `HyperbolicSmoothing`.

        Returns
        -------
        output: `PqHandle`
            Output table containing hyperbolic magnitudes and their uncertainties. If the columns
            in the input table contain a prefix `mag_`, this output table will replace the prefix
            with `mag_hyp_`, otherwise the column names will be identical to the input table.
        """
        self.set_data('input', data)
        self.set_data('parameters', parameters)
        self.run()
        self.finalize()
        return self.get_handle('output')
|
351
|
+
|
|
352
|
+
|
|
353
|
+
class LSSTFluxToMagConverter(RailStage):
    """Utility stage that converts from fluxes to magnitudes

    Note, this is hardwired to take parquet files as input
    and provide hdf5 files as output
    """
    name = 'LSSTFluxToMagConverter'

    config_options = RailStage.config_options.copy()
    config_options.update(bands='ugrizy')
    config_options.update(flux_name="{band}_gaap1p0Flux")
    config_options.update(flux_err_name="{band}_gaap1p0FluxErr")
    config_options.update(mag_name="mag_{band}_lsst")
    config_options.update(mag_err_name="mag_err_{band}_lsst")
    config_options.update(copy_cols={})
    config_options.update(mag_offset=31.4)

    # conversion factor between relative flux error and magnitude error
    mag_conv = np.log(10)*0.4

    inputs = [('input', PqHandle)]
    outputs = [('output', Hdf5Handle)]

    def __init__(self, args, comm=None):
        RailStage.__init__(self, args, comm=comm)

    def _flux_to_mag(self, flux_vals):
        # convert fluxes to magnitudes using the configured zeropoint offset
        return -2.5*np.log10(flux_vals) + self.config.mag_offset

    def _flux_err_to_mag_err(self, flux_vals, flux_err_vals):
        # first-order propagation of the flux error to a magnitude error
        return flux_err_vals / (flux_vals*self.mag_conv)

    def run(self):
        """Convert the configured flux columns of the input table to magnitudes."""
        data = self.get_data('input', allow_missing=True)
        out_data = {}
        # NOTE: removed an unused local `const = np.log(10.)*0.4` that duplicated
        # the class attribute `mag_conv`.
        for band_ in self.config.bands:
            flux_col_name = self.config.flux_name.format(band=band_)
            flux_err_col_name = self.config.flux_err_name.format(band=band_)
            out_data[self.config.mag_name.format(band=band_)] = self._flux_to_mag(
                data[flux_col_name].values)
            out_data[self.config.mag_err_name.format(band=band_)] = self._flux_err_to_mag_err(
                data[flux_col_name].values, data[flux_err_col_name].values)
        # pass through any requested extra columns unchanged
        for key, val in self.config.copy_cols.items():  # pragma: no cover
            out_data[key] = data[val].values
        self.add_data('output', out_data)

    def __call__(self, data):
        """Return a converted table

        Parameters
        ----------
        data : table-like
            The data to be converted

        Returns
        -------
        out_data : table-like
            The converted version of the table
        """
        self.set_data('input', data)
        self.run()
        return self.get_handle('output')
|
413
|
+
|
|
414
|
+
|
|
415
|
+
class Dereddener(RailStage):
    """Utility stage that does dereddening

    Corrects the configured magnitude columns for galactic dust extinction
    using a dust map queried at each source position.
    """
    name = 'Dereddener'

    config_options = RailStage.config_options.copy()
    config_options.update(bands='ugrizy')
    config_options.update(mag_name="mag_{band}_lsst")
    config_options.update(band_a_env=[4.81,3.64,2.70,2.06,1.58,1.31])
    config_options.update(dustmap_name='sfd')
    config_options.update(dustmap_dir=str)
    config_options.update(copy_cols=[])

    inputs = [('input', Hdf5Handle)]
    outputs = [('output', Hdf5Handle)]

    def fetch_map(self):
        """Download the configured dust map unless it is already present on disk."""
        available_maps = dict(sfd=dustmaps_sfd)
        try:
            map_submodule = available_maps[self.config.dustmap_name]
        except KeyError as msg:  # pragma: no cover
            raise KeyError(f"Unknown dustmap {self.config.dustmap_name}, options are {list(available_maps.keys())}") from msg

        map_path = os.path.join(self.config.dustmap_dir, self.config.dustmap_name)
        if os.path.exists(map_path):  # pragma: no cover
            # already downloaded, nothing to do
            return

        # point the dustmaps package at the configured directory, then download
        dustmaps_config.config['data_dir'] = self.config.dustmap_dir
        map_submodule.fetch()

    def __init__(self, args, comm=None):
        RailStage.__init__(self, args, comm=comm)

    def run(self):
        """Query the dust map and subtract the per-band extinction from the magnitudes."""
        data = self.get_data('input', allow_missing=True)
        out_data = {}
        coords = SkyCoord(data['ra'], data['decl'], unit='deg', frame='fk5')
        query_classes = dict(sfd=dustmaps_sfd.SFDQuery)
        try:
            query_class = query_classes[self.config.dustmap_name]
            dustmaps_config.config['data_dir'] = self.config.dustmap_dir
            dust_map = query_class()
        except KeyError as msg:  # pragma: no cover
            raise KeyError(f"Unknown dustmap {self.config.dustmap_name}, options are {list(query_classes.keys())}") from msg
        # E(B-V) reddening at every source position
        ebvvec = dust_map(coords)
        for band_index, band_ in enumerate(self.config.bands):
            column = self.config.mag_name.format(band=band_)
            out_data[column] = data[column] - ebvvec*self.config.band_a_env[band_index]
        # pass through any requested extra columns unchanged
        for col_ in self.config.copy_cols:  # pragma: no cover
            out_data[col_] = data[col_]
        self.add_data('output', out_data)

    def __call__(self, data):
        """Return a converted table

        Parameters
        ----------
        data : table-like
            The data to be converted

        Returns
        -------
        out_data : table-like
            The converted version of the table
        """
        self.set_data('input', data)
        self.run()
        return self.get_handle('output')