astro-otter 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of astro-otter might be problematic.
- {astro_otter-0.3.3.dist-info → astro_otter-0.3.5.dist-info}/METADATA +1 -1
- astro_otter-0.3.5.dist-info/RECORD +18 -0
- otter/_version.py +1 -1
- otter/io/data_finder.py +10 -2
- otter/io/otter.py +275 -50
- otter/io/transient.py +208 -2
- otter/plotter/otter_plotter.py +1 -1
- otter/plotter/plotter.py +1 -9
- otter/schema.py +29 -19
- otter/util.py +14 -0
- astro_otter-0.3.3.dist-info/RECORD +0 -18
- {astro_otter-0.3.3.dist-info → astro_otter-0.3.5.dist-info}/WHEEL +0 -0
- {astro_otter-0.3.3.dist-info → astro_otter-0.3.5.dist-info}/licenses/LICENSE +0 -0
- {astro_otter-0.3.3.dist-info → astro_otter-0.3.5.dist-info}/top_level.txt +0 -0
astro_otter-0.3.5.dist-info/RECORD ADDED
@@ -0,0 +1,18 @@
+astro_otter-0.3.5.dist-info/licenses/LICENSE,sha256=s9IPE8A3CAMEaZpDhj4eaorpmfLYGB0mIGphq301PUY,1067
+otter/__init__.py,sha256=pvX-TN7nLVmvKpkDi89Zxe-jMfHNiVMD3zsd_bPEK9Y,535
+otter/_version.py,sha256=5LVB9FiBWDmYEmn6fLO_jW-OA3WAt_uN2xs5Dc4KpNY,76
+otter/exceptions.py,sha256=3lQF4AXVTfs9VRsVePQoIrXnramsPZbUL5crvf1s9Ng,1702
+otter/schema.py,sha256=J-iI_kEEd0aHN_Hr49HFEa8W07enh1FSDbU99NwIz3Y,11240
+otter/util.py,sha256=G5M2PdtPGKpRXCqkItyXMLj6h5F3EboBBDKWMViw04k,23333
+otter/io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+otter/io/data_finder.py,sha256=M6kzqMyy-yhwQfxbOs2E7i_8bCcZFW4Zyf8z-6LCxNQ,38425
+otter/io/host.py,sha256=xv_SznZuvMoMVsZLqlcmlOyaqKCMZqlTQ_gkN4VBSTw,7139
+otter/io/otter.py,sha256=qTKTfGkBdmsWjLF1cxjiX-SvOaKOPK-nd0aK7TtPQO8,61076
+otter/io/transient.py,sha256=1yVy-9NEo9ozM4B-sqEcGlJW_1abD0sWSXYG0Ex-3Jc,58020
+otter/plotter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+otter/plotter/otter_plotter.py,sha256=yUjGHR0FcbndwC1yLQekJWqX2KBMAJXtjFKbbASG_Cc,2144
+otter/plotter/plotter.py,sha256=ni4WV63wIjhMHStDmuccltaMHSaFbwwfztYpuMdYAz8,9340
+astro_otter-0.3.5.dist-info/METADATA,sha256=CTru7txtl0-m5zmGInfuJ_qd0upiUjF1eGTU6Eo-ZEk,7046
+astro_otter-0.3.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+astro_otter-0.3.5.dist-info/top_level.txt,sha256=Wth72sCwBRUk3KZGknSKvLQDMFuJk6qiaAavMDOdG5k,6
+astro_otter-0.3.5.dist-info/RECORD,,
otter/_version.py
CHANGED
otter/io/data_finder.py
CHANGED
@@ -602,8 +602,16 @@ class DataFinder(object):
         cone_search_res = qc.query(adql=adql, fmt="pandas")

         # then retrieve all of the spectra corresponding to those sparcl_ids
-
-
+        spec_ids = cone_search_res.targetid.tolist()
+        if len(spec_ids) == 0:
+            logger.warn("Object not found in Sparcl!")
+            return
+
+        res = client.retrieve_by_specid(spec_ids, include=include)
+        if res.count == 0:
+            logger.warn("No Spectra available in sparcl!")
+            return
+
         all_spec = pd.concat([pd.DataFrame([record]) for record in res.records])
         return Table.from_pandas(all_spec)
otter/io/otter.py
CHANGED
@@ -154,6 +154,7 @@ class Otter(Database):
         keep_raw=False,
         wave_unit="nm",
         freq_unit="GHz",
+        deduplicate=None,
         **kwargs,
     ) -> Table:
         """
@@ -176,6 +177,11 @@ class Otter(Database):
                 is False.
             wave_unit (str): The astropy wavelength unit to return with
             freq_unit (str): The astropy frequency unit to return with
+            deduplicate (Callable|None|False): if we should deduplicate the dataset
+                                               using the deduplicate Callable. Set to
+                                               False if you don't want this to happen.
+                                               None defaults to
+                                               Transient.deduplicate_photometry
             **kwargs : Arguments to pass to Otter.query(). Can be::

                 names (list[str]): A list of names to get the metadata for
@@ -215,6 +221,7 @@ class Otter(Database):
             wave_unit=wave_unit,
             freq_unit=freq_unit,
             obs_type=obs_type,
+            deduplicate=deduplicate,
         )

         phot["name"] = [default_name] * len(phot)
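Taken together, these hunks thread the new `deduplicate` option from the public photometry getter down to the per-transient cleaning step. A usage sketch (the getter is assumed to be `Otter.get_phot`, which the docstring hunk above belongs to; the connection arguments and transient name are illustrative):

    from otter.io.otter import Otter

    db = Otter()  # connection arguments omitted

    # None (the default) falls back to Transient.deduplicate_photometry
    phot = db.get_phot(names=["AT2018hyz"], deduplicate=None)

    # opt out of deduplication entirely
    phot_raw = db.get_phot(names=["AT2018hyz"], deduplicate=False)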
@@ -291,8 +298,13 @@ class Otter(Database):
         radius: float = 5,
         minz: float = None,
         maxz: float = None,
+        mindec: float = -90,
+        maxdec: float = 90,
         refs: list[str] = None,
         hasphot: bool = False,
+        has_radio_phot: bool = False,
+        has_uvoir_phot: bool = False,
+        has_xray_phot: bool = False,
         hasspec: bool = False,
         spec_classed: bool = False,
         unambiguous: bool = False,
@@ -316,9 +328,14 @@ class Otter(Database):
             radius (float): The radius in arcseconds for a cone search, default is 0.05"
             minz (float): The minimum redshift to search for
             maxz (float): The maximum redshift to search for
+            mindec (float): The minimum declination in degrees
+            maxdec (float): The maximum declination in degrees
             refs (list[str]): A list of ads bibcodes to match to. Will only return
                 metadata for transients that have this as a reference.
             hasphot (bool): if True, only returns transients which have photometry.
+            has_radio_phot (bool): if True, only returns transients with radio phot.
+            has_uvoir_phot (bool): if True, only returns transients with uvoir phot.
+            has_xray_phot (bool): if True, only returns transients with X-ray phot.
             hasspec (bool): NOT IMPLEMENTED! Will return False for all targets!
             spec_classed (bool): If True, only returns transients that have been
                 spectroscopically classified/confirmed
@@ -337,9 +354,18 @@ class Otter(Database):
         # write some AQL filters based on the inputs
         query_filters = ""

-        if hasphot
+        if hasphot or has_radio_phot or has_xray_phot or has_uvoir_phot:
             query_filters += "FILTER 'photometry' IN ATTRIBUTES(transient)\n"

+        if has_radio_phot:
+            query_filters += "FILTER 'radio' IN transient.photometry[*].obs_type\n"
+
+        if has_uvoir_phot:
+            query_filters += "FILTER 'uvoir' IN transient.photometry[*].obs_type\n"
+
+        if has_xray_phot:
+            query_filters += "FILTER 'xray' IN transient.photometry[*].obs_type\n"
+
         if hasspec is True:
             query_filters += "FILTER 'spectra' IN ATTRIBUTES(transient)\n"
@@ -451,6 +477,11 @@ class Otter(Database):
         else:
             arango_query_results = [Transient(res) for res in result.result]

+        # filter based on the min and max declination query options
+        decs = np.array([t.get_skycoord().dec.deg for t in arango_query_results])
+        where_dec = np.where((decs > mindec) * (decs < maxdec))[0]
+        arango_query_results = [arango_query_results[i] for i in where_dec]
+
         if not query_private:
             return arango_query_results
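A short sketch of the new query options in combination (`db` is an Otter instance; the values are illustrative). Note from the hunk above that the declination cut runs client-side on `get_skycoord()` results, after the AQL filters:

    # southern-sky transients that have radio photometry
    res = db.query(mindec=-90, maxdec=0, has_radio_phot=True)

    # X-ray-detected transients in a redshift slice
    res_xray = db.query(minz=0.1, maxz=0.5, has_xray_phot=True)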
@@ -665,7 +696,10 @@ class Otter(Database):
         if len(res) > 1:
             raise OtterLimitationError("Some objects in Otter are too close!")

-        elif len(res) == 1:
+        elif len(res) == 1 and collection != "vetting":
+            # if the collection is the vetting collection we don't want to do the
+            # merging yet, even if the object already exists in OTTER
+
             # this object exists in otter already, let's grab the transient data and
             # merge the files
             merged = t + res[0]
@@ -674,13 +708,6 @@ class Otter(Database):
             merged["_key"] = res[0]["_key"]
             merged["_id"] = res[0]["_id"]

-            # we also have to delete the document from the OTTER database
-            doc = self.fetchDocument(merged["_id"])
-            if not testing:
-                doc.delete()
-            else:
-                print(f"Would delete\n{doc}")
-
         else:
             # this means the object doesn't exist in otter already
             merged = t
@@ -832,7 +859,7 @@ class Otter(Database):
     def from_csvs(
         metafile: str,
         photfile: str = None,
-        local_outpath: Optional[str] =
+        local_outpath: Optional[str] = os.path.join(os.getcwd(), "private-data"),
         db: Otter = None,
     ) -> Otter:
         """
@@ -973,11 +1000,13 @@ class Otter(Database):
             pd.isna(tde["luminosity_distance"])
         ):
             json["distance"].append(
-
-
-
-
-
+                dict(
+                    value=tde.luminosity_distance[0],
+                    reference=[tde.luminosity_distance_bibcode[0]],
+                    unit=tde.luminosity_distance_unit[0],
+                    computed=False,
+                    distance_type="luminosity",
+                )
             )

         # comoving distance
@@ -985,11 +1014,13 @@ class Otter(Database):
             pd.isna(tde["comoving_distance"])
         ):
             json["distance"].append(
-
-
-
-
-
+                dict(
+                    value=tde.comoving_distance[0],
+                    reference=[tde.comoving_distance_bibcode[0]],
+                    unit=tde.comoving_distance_unit[0],
+                    computed=False,
+                    distance_type="comoving",
+                )
             )

         # remove the distance list if it is empty still
@@ -1081,11 +1112,6 @@ class Otter(Database):
                 if src not in phot_sources:
                     phot_sources.append(src)

-                if len(np.unique(p.flux_unit)) == 1:
-                    raw_units = p.flux_unit.tolist()[0]
-                else:
-                    raw_units = p.flux_unit.tolist()
-
                 # add a column to phot with the unique filter key
                 if obstype == "radio":
                     filter_uq_key = (
@@ -1105,17 +1131,73 @@ class Otter(Database):
                 if "upperlimit" not in p:
                     p["upperlimit"] = False

-
-
-
-
-
-
-
-
-
-
-
+                if "raw" in p.columns and "flux" in p.columns:
+                    if len(np.unique(p.raw_unit)) == 1:
+                        raw_units = p.raw_unit.tolist()[0]
+                    else:
+                        raw_units = p.raw_unit.tolist()
+
+                    if len(np.unique(p.flux_unit)) == 1:
+                        val_units = p.flux_unit.tolist()[0]
+                    else:
+                        val_units = p.flux_unit.tolist()
+
+                    # treat "raw" as the "raw" keyword and "flux" as the "value"
+                    json_phot = dict(
+                        reference=src,
+                        raw=p.raw.astype(float).tolist(),
+                        raw_err=p.raw_err.astype(float).tolist(),
+                        raw_units=raw_units,
+                        value=p.flux.astype(float).tolist(),
+                        value_err=p.flux_err.astype(float).tolist(),
+                        value_units=val_units,
+                        date=p.date.tolist(),
+                        date_format=p.date_format.tolist(),
+                        upperlimit=p.upperlimit.tolist(),
+                        filter_key=filter_uq_key,
+                        obs_type=obstype,
+                    )
+
+                elif "flux" in p.columns and "raw" not in p.columns:
+                    if len(np.unique(p.flux_unit)) == 1:
+                        raw_units = p.flux_unit.tolist()[0]
+                    else:
+                        raw_units = p.flux_unit.tolist()
+
+                    # treat "flux" as the "raw" keyword
+                    json_phot = dict(
+                        reference=src,
+                        raw=p.flux.astype(float).tolist(),
+                        raw_err=p.flux_err.astype(float).tolist(),
+                        raw_units=raw_units,
+                        date=p.date.tolist(),
+                        date_format=p.date_format.tolist(),
+                        upperlimit=p.upperlimit.tolist(),
+                        filter_key=filter_uq_key,
+                        obs_type=obstype,
+                    )
+
+                elif "raw" in p.columns and "flux" not in p.columns:
+                    if len(np.unique(p.raw_unit)) == 1:
+                        raw_units = p.raw_unit.tolist()[0]
+                    else:
+                        raw_units = p.raw_unit.tolist()
+
+                    # treat "raw" as the "raw" keyword
+                    json_phot = dict(
+                        reference=src,
+                        raw=p.raw.astype(float).tolist(),
+                        raw_err=p.raw_err.astype(float).tolist(),
+                        raw_units=raw_units,
+                        date=p.date.tolist(),
+                        date_format=p.date_format.tolist(),
+                        upperlimit=p.upperlimit.tolist(),
+                        filter_key=filter_uq_key,
+                        obs_type=obstype,
+                    )
+
+                else:
+                    raise ValueError("`raw` and/or `flux` key(s) must be provided!")

                 if not pd.isna(tele):
                     json_phot["telescope"] = tele
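The three branches above define which flux-like column sets a photometry CSV may carry. Hypothetical header rows for each accepted layout (only the columns named in the hunk are shown; anything else is elided):

    # both column sets: "raw" fills the raw fields, "flux" fills value/value_err
    date,date_format,raw,raw_err,raw_unit,flux,flux_err,flux_unit,...

    # flux only: the flux columns are stored under the "raw" keywords
    date,date_format,flux,flux_err,flux_unit,...

    # raw only
    date,date_format,raw,raw_err,raw_unit,...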
@@ -1131,6 +1213,13 @@ class Otter(Database):
                         "Minimum and maximum filters required for X-ray data!"
                     )

+                if ("date_min" in p and "date_max" not in p) or (
+                    "date_min" not in p and "date_max" in p
+                ):
+                    raise ValueError(
+                        "If date_min/date_max is provided, the other must be too!"
+                    )
+
                 # check optional keys
                 optional_keys = [
                     "date_err",
@@ -1142,6 +1231,8 @@ class Otter(Database):
                     "observer",
                     "reducer",
                     "pipeline",
+                    "date_min",
+                    "date_max",
                 ]
                 for k in optional_keys:
                     if k in p and not np.all(pd.isna(p[k])):
@@ -1154,7 +1245,13 @@ class Otter(Database):

                 # handle more detailed uncertainty information
                 raw_err_detail = {}
-                for key in [
+                for key in [
+                    "statistical_err",
+                    "systematic_err",
+                    "iss_err",
+                    "upper_err",
+                    "lower_err",
+                ]:
                     if key in p and not np.all(pd.isna(p[key])):
                         k = key.split("_")[0]
@@ -1183,6 +1280,126 @@ class Otter(Database):
                     json_phot[c] = p[c].tolist()
                     json_phot[bool_v_key] = [v != "null" for v in json_phot[c]]

+                # deal with the xray model
+                if "xray_model_name" not in p and obstype == "xray":
+                    raise ValueError(
+                        "You must provide the xray model for xray data!"
+                    )
+                if obstype == "xray" and "xray_model_name" in p:
+                    # get various sets of keywords
+                    model_val_cols = list(
+                        p.columns[p.columns.str.contains("xray_model_param_value")]
+                    )
+                    model_up_err_cols = list(
+                        p.columns[p.columns.str.contains("xray_model_param_up_err")]
+                    )
+                    model_lo_err_cols = list(
+                        p.columns[p.columns.str.contains("xray_model_param_lo_err")]
+                    )
+                    model_val_units_cols = list(
+                        p.columns[p.columns.str.contains("xray_model_param_unit")]
+                    )
+                    model_uplim_cols = list(
+                        p.columns[
+                            p.columns.str.contains("xray_model_param_upperlimit")
+                        ]
+                    )
+
+                    param_names = [v.split("::")[-1] for v in model_val_cols]
+
+                    xray_model_info = p[
+                        model_val_cols
+                        + model_up_err_cols
+                        + model_lo_err_cols
+                        + model_val_units_cols
+                        + model_uplim_cols
+                        + [
+                            "xray_model_name",
+                            "xray_model_reference",
+                            "filter_min",
+                            "filter_max",
+                            "filter_eff_units",
+                        ]
+                    ]
+
+                    if len(model_uplim_cols) == 0:
+                        # assume they are all false
+                        for param_name in param_names:
+                            colname = f"xray_model_param_upperlimit::{param_name}"
+                            xray_model_info[colname] = False
+                            model_uplim_cols.append(colname)
+
+                    if not all(
+                        len(model_val_cols) == len(p)
+                        for p in [
+                            model_up_err_cols,
+                            model_lo_err_cols,
+                            model_val_units_cols,
+                            model_uplim_cols,
+                        ]
+                    ):
+                        raise ValueError(
+                            "Missing a column for one of the X-ray parameters!"
+                        )
+
+                    xray_models = []
+                    for _, row in xray_model_info.iterrows():
+                        energy1 = (
+                            (row["filter_min"] * u.Unit(row["filter_eff_units"]))
+                            .to("keV", equivalencies=u.spectral())
+                            .value
+                        )
+                        energy2 = (
+                            (row["filter_max"] * u.Unit(row["filter_eff_units"]))
+                            .to("keV", equivalencies=u.spectral())
+                            .value
+                        )
+
+                        if energy1 > energy2:
+                            min_energy = energy2
+                            max_energy = energy1
+                        else:
+                            min_energy = energy1
+                            max_energy = energy2
+
+                        param_names_not_na = []
+                        for n in param_names:
+                            if not pd.isna(row[f"xray_model_param_value::{n}"]):
+                                param_names_not_na.append(n)
+
+                        xray_models.append(
+                            {
+                                "model_name": row.xray_model_name,
+                                "param_names": [n for n in param_names_not_na],
+                                "param_values": [
+                                    row[f"xray_model_param_value::{n}"]
+                                    for n in param_names_not_na
+                                ],
+                                "param_value_upper_err": [
+                                    row[f"xray_model_param_up_err::{n}"]
+                                    for n in param_names_not_na
+                                ],
+                                "param_value_lower_err": [
+                                    row[f"xray_model_param_lo_err::{n}"]
+                                    for n in param_names_not_na
+                                ],
+                                "param_upperlimit": [
+                                    row[f"xray_model_param_upperlimit::{n}"]
+                                    for n in param_names_not_na
+                                ],
+                                "param_units": [
+                                    row[f"xray_model_param_unit::{n}"]
+                                    for n in param_names_not_na
+                                ],
+                                "model_reference": row["xray_model_reference"],
+                                "min_energy": min_energy,
+                                "max_energy": max_energy,
+                                "energy_units": "keV",
+                            }
+                        )
+
+                    json_phot["xray_model"] = xray_models
+
                 json["photometry"].append(json_phot)

             tde["filter_uq_key"] = pd.Series(
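The column convention in this hunk uses a `::` suffix to tie each model parameter to its value, errors, units, and upper-limit flag. A hypothetical power-law example of how input columns map to the stored record (all values illustrative):

    # input columns                       example value
    # xray_model_name                     "powerlaw"
    # xray_model_reference                "<ads bibcode>"
    # xray_model_param_value::gamma       1.8
    # xray_model_param_up_err::gamma      0.2
    # xray_model_param_lo_err::gamma      0.1
    # xray_model_param_unit::gamma        ""
    #
    # -> {"model_name": "powerlaw", "param_names": ["gamma"],
    #     "param_values": [1.8], "param_value_upper_err": [0.2], ...}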
@@ -1191,14 +1408,20 @@ class Otter(Database):

         # filter alias
         # radio filters first
-        filter_keys1 = [
+        filter_keys1 = [
+            "filter_uq_key",
+            "band_eff_wave",
+            "band_eff_wave_unit",
+            "filter_eff_units",
+        ]
         if "filter_min" in tde:
             filter_keys1.append("filter_min")
         if "filter_max" in tde:
             filter_keys1.append("filter_max")

-
-
+        filt_info = tde[filter_keys1]
+        filter_map = filt_info.drop_duplicates().set_index(
+            "filter_uq_key"
         )  # .to_dict(orient='index')
         try:
             filter_map_radio = filter_map.to_dict(orient="index")
@@ -1229,22 +1452,24 @@ class Otter(Database):
                 wave_units=val["band_eff_wave_unit"],
             )

-            if "filter_min" in val:
+            if "filter_min" in val and not pd.isna(val["filter_min"]):
                 filter_alias_dict["wave_min"] = (
-                    val["filter_min"] * u.Unit(
-
-
+                    (val["filter_min"] * u.Unit(val["filter_eff_units"]))
+                    .to(
+                        u.Unit(val["band_eff_wave_unit"]),
+                        equivalencies=u.spectral(),
                     )
+                    .value
                 )

-            if "filter_max" in val:
+            if "filter_max" in val and not pd.isna(val["filter_max"]):
                 filter_alias_dict["wave_max"] = (
-                    val["filter_max"] * u.Unit(
-
-
+                    (val["filter_max"] * u.Unit(val["filter_eff_units"]))
+                    .to(
+                        u.Unit(val["band_eff_wave_unit"]),
+                        equivalencies=u.spectral(),
                    )
+                    .value
                 )

             json["filter_alias"].append(filter_alias_dict)
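The corrected wave_min/wave_max blocks convert filter edges through astropy's spectral equivalencies before storing plain values. The same conversion in isolation:

    import astropy.units as u

    # a 0.3 keV X-ray filter edge expressed as a wavelength in nm
    edge = (0.3 * u.keV).to(u.nm, equivalencies=u.spectral())
    print(edge)  # ~4.13 nm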
otter/io/transient.py
CHANGED
@@ -8,6 +8,7 @@ import warnings
 from copy import deepcopy
 import re
 from collections.abc import MutableMapping
+from typing import Callable
 from typing_extensions import Self
 import logging
@@ -70,7 +71,7 @@ class Transient(MutableMapping):
         """

         if isinstance(keys, (list, tuple)):
-            return Transient({key:
+            return Transient({key: self[key] for key in keys if key in self})
         elif isinstance(keys, str) and "/" in keys:  # this is for a path
             s = "']['".join(keys.split("/"))
             s = "['" + s
@@ -510,6 +511,7 @@ class Transient(MutableMapping):
         freq_unit: u.Unit = "GHz",
         wave_unit: u.Unit = "nm",
         obs_type: str = None,
+        deduplicate: Callable | None = None,
     ) -> pd.DataFrame:
         """
         Ensure the photometry associated with this transient is all in the same
@@ -529,10 +531,20 @@ class Transient(MutableMapping):
             obs_type (str): "radio", "xray", or "uvoir". If provided, it only returns
                 data taken within that range of wavelengths/frequencies.
                 Default is None which will return all of the data.
-
+            deduplicate (Callable|None): A function to be used to remove duplicate
+                                         reductions of the same data that produces
+                                         different flux values. The default is the
+                                         otter.deduplicate_photometry method,
+                                         but you can pass
+                                         any callable that takes the output pandas
+                                         dataframe as input. Set this to False if you
+                                         don't want deduplication to occur.
         Returns:
             A pandas DataFrame of the cleaned up photometry in the requested units
         """
+        if deduplicate is None:
+            deduplicate = self.deduplicate_photometry
+
         warn_filt = _DuplicateFilter()
         logger.addFilter(warn_filt)
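Any callable with this one-DataFrame-in, one-DataFrame-out shape can be swapped in for the default. A sketch of a custom policy (the policy itself is made up; the column names come from the OTTER schema, and `deduplicate_photometry` is the classmethod added in the next hunk):

    def keep_smallest_err(df):
        # hypothetical policy: among rows sharing (date, filter_key),
        # keep the measurement with the smallest raw_err
        return (
            df.sort_values("raw_err")
            .drop_duplicates(subset=["date", "filter_key"], keep="first")
        )

    # or apply the default deduplicator directly to an existing dataframe
    deduped = Transient.deduplicate_photometry(phot_df, date_tol=2)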
@@ -916,9 +928,203 @@ class Transient(MutableMapping):

         outdata["upperlimit"] = outdata.apply(is_upperlimit, axis=1)

+        # perform some more complex deduplication of the dataset
+        if deduplicate:
+            outdata = deduplicate(outdata)
+
+        # throw a warning if the output dataframe has UV/Optical/IR or Radio data
+        # where we don't know if the dataset has been host corrected or not
+        if ("corr_host" not in outdata) or (
+            len(outdata[pd.isna(outdata.corr_host) * (outdata.obs_type != "xray")]) >= 0
+        ):
+            logger.warning(
+                f"{self.default_name} has at least one photometry point where it is "
+                + "unclear if a host subtraction was performed. This can be especially "
+                + "detrimental for UV data. Please consider filtering out UV/Optical/IR"
+                + " or radio rows where the corr_host column is null/None/NaN."
+            )
+
         logger.removeFilter(warn_filt)
         return outdata

+    @classmethod
+    def deduplicate_photometry(cls, phot: pd.DataFrame, date_tol: int | float = 1):
+        """
+        This deduplicates a pandas dataframe of photometry that could potentially
+        have rows/datasets that are the result of different reductions of the same
+        data. This is especially relevant for X-ray and UV observations where different
+        reductions can produce different flux values from the same observation.
+
+        The algorithm used here first finds duplicates by normalizing the telescope
+        names, then grouping the dataframe by transient name, norm telescope name,
+        filter_key, and the obs_type. It then assumes that data from the same
+        reference will not produce duplicated data. Finally, it finds the overlapping
+        regions within date +/- date_tol (or between date_min and date_max for binned
+        data), and uses any data within that region as duplicated. From there, it
+        first tries to choose the reduction that is host subtracted (if only one is
+        host subtracted), then if neither or more than one of the datasets are host
+        subtracted then it just takes the most recent reduction.
+
+        Args:
+            phot (pd.DataFrame): A pandas dataframe of the photometry with keys defined
+                                 by the OTTER schema
+            date_tol (int|float): The default tolerance (or "uncertainty") to use on the
+                                  dates in the "date" column of phot. In days. Defaults
+                                  to 1 day.
+        """
+        # we need to reset the index to keep track of things appropriately
+        phot = phot.reset_index(drop=True)
+
+        if "telescope" not in phot:
+            phot["telescope"] = np.nan
+
+        # we first have to standardize some columns given some basic assumptions
+        phot["_ref_str"] = phot.reference.astype(str)
+
+        # normalize the telescope name so we can group by it
+        phot["_norm_tele_name"] = phot.telescope.apply(cls._normalize_tele_name)
+
+        # now find the duplicated data
+        dups = []
+        phot_grpby = phot.groupby(
+            ["_norm_tele_name", "filter_key", "obs_type"], dropna=False
+        )
+        for (tele, filter_key, obs_type), grp in phot_grpby:
+            # by definition, there can only be dups if the name, telescope, and filter
+            # are the same
+
+            # if there is only one reference in this group of data, there's no way
+            # there are duplicate reductions of the same dataset
+            if len(grp._ref_str.unique()) <= 1:
+                continue
+
+            # the next trick is that the dates don't need to be the same, but need to
+            # fall inside the same range
+            grp["_mean_dates"] = grp.apply(cls._convert_dates, axis=1)
+
+            if "date_min" in grp and not np.all(pd.isna(grp.date_min)):
+                grp["min_dates"] = grp.apply(
+                    lambda row: cls._convert_dates(row, date_key="date_min"), axis=1
+                ).astype(float)
+                grp["max_dates"] = grp.apply(
+                    lambda row: cls._convert_dates(row, date_key="date_max"), axis=1
+                ).astype(float)
+
+                # in case any of the min_date and max_date in the grp are nan
+                grp.fillna(
+                    {
+                        "min_dates": grp._mean_dates - date_tol,
+                        "max_dates": grp._mean_dates + date_tol,
+                    },
+                    inplace=True,
+                )
+
+            elif "date_err" in grp and not np.any(pd.isna(grp.date_err)):
+                grp["min_dates"] = (grp._mean_dates - grp.date_err).astype(float)
+                grp["max_dates"] = (grp._mean_dates + grp.date_err).astype(float)
+            else:
+                # then assume some uncertainty on the date
+                grp["min_dates"] = (grp._mean_dates - date_tol).astype(float)
+                grp["max_dates"] = (grp._mean_dates + date_tol).astype(float)
+
+            ref_ranges = [
+                (subgrp.min_dates.min(), subgrp.max_dates.max())
+                for _, subgrp in grp.groupby("_ref_str")
+            ]
+
+            overlaps = cls._find_overlapping_regions(ref_ranges)
+
+            if len(overlaps) == 0:
+                continue  # then there are no dups
+
+            for min_overlap, max_overlap in overlaps:
+                dup_data = grp[
+                    (grp.min_dates >= min_overlap) * (grp.max_dates <= max_overlap)
+                ]
+
+                if len(dup_data) == 0:
+                    continue  # no data falls in this range!
+
+                dups.append(dup_data)
+
+        # now that we've found the duplicated datasets, we can iterate through them
+        # and choose the "default"
+        phot_res = deepcopy(phot)
+        undupd = []
+        for dup in dups:
+            try:
+                phot_res = phot_res.drop(dup.index)  # we'll append back in the non dup
+            except KeyError:
+                continue  # we already deleted these ones
+
+            # first, check if only one of the dup reductions host subtracted
+            if "corr_host" in dup:
+                dup_host_corr = dup[dup.corr_host.astype(bool)]
+                host_corr_refs = dup_host_corr.human_readable_refs.unique()
+                if len(host_corr_refs) == 1:
+                    # then one of the reductions is host corrected and the other isn't!
+                    undupd.append(dup[dup.human_readable_refs == host_corr_refs[0]])
+                    continue
+
+            bibcodes_sorted_by_year = sorted(dup._ref_str.unique(), key=cls._find_year)
+            dataset_to_use = dup[dup._ref_str == bibcodes_sorted_by_year[0]]
+            undupd.append(dataset_to_use)
+
+        # then return the full photometry dataset but with the dups removed!
+        return pd.concat([phot_res] + undupd).reset_index()
+
+    @staticmethod
+    def _normalize_tele_name(tele_name):
+        if pd.isna(tele_name):
+            return tele_name
+
+        common_delims = ["-", "/", " ", "."]
+        for delim in common_delims:
+            tele_name = tele_name.replace(delim, ":*:")
+
+        # this assumes that the telescope name will almost always be first,
+        # before other delimiters
+        return tele_name.split(":*:")[0].lower()
+
+    @staticmethod
+    def _convert_dates(row, date_key="date"):
+        """Make sure the dates are in MJD"""
+        if pd.isna(row[date_key]):
+            return row[date_key]
+
+        return Time(row[date_key], format=row.date_format.lower()).mjd
+
+    @staticmethod
+    def _find_overlapping_regions(intervals):
+        """Find the overlaps in a list of tuples of mins and maxs. This is relatively
+        inefficient but the len(intervals) should be < 10 so it should be fine"""
+        overlap_ranges = []
+        for ii, (start_ii, end_ii) in enumerate(intervals):
+            for jj, (start_jj, end_jj) in enumerate(intervals):
+                if ii <= jj:
+                    continue
+
+                if start_ii > start_jj:
+                    start = start_ii
+                else:
+                    start = start_jj
+
+                if end_ii > end_jj:
+                    end = end_jj
+                else:
+                    end = end_ii
+
+                if start < end:
+                    # then there is an overlap!
+                    overlap_ranges.append((start, end))
+
+        return overlap_ranges
+
+    @staticmethod
+    def _find_year(s):
+        match = re.search(r"\d{4}", s)
+        return int(match.group()) if match else float("inf")
+

     def _merge_names(t1, t2, out):  # noqa: N805
         """
         Private method to merge the name data in t1 and t2 and put it in out
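The pairwise overlap finder at the core of the new deduplication algorithm is easy to sanity-check in isolation (a private helper, called here purely for illustration; the MJD intervals are hypothetical):

    from otter.io.transient import Transient

    intervals = [(59000.0, 59010.0), (59008.0, 59015.0), (59020.0, 59021.0)]
    # the first two ranges share 59008-59010; the third touches neither
    print(Transient._find_overlapping_regions(intervals))
    # -> [(59008.0, 59010.0)]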
otter/plotter/otter_plotter.py
CHANGED
@@ -23,7 +23,7 @@ class OtterPlotter:
     def __init__(self, backend):
         if backend == "matplotlib.pyplot":
             self.backend = backend
-        elif backend == "
+        elif backend == "plotly.graph_objects":
             self.backend = backend
         elif "plotly" in backend and "graph_objects" not in backend:
             self.backend = "plotly.graph_objects"
otter/plotter/plotter.py
CHANGED
@@ -15,8 +15,7 @@ from ..io.otter import Transient, Otter


 def query_quick_view(
-    db: Otter
-    otter_path: str = None,
+    db: Otter,
     ptype: str = "both",
     sed_dim: str = "freq",
     dt_over_t: float = 0,
@@ -31,7 +30,6 @@ def query_quick_view(

     Args:
         db (otter.Otter) : The otter object to query
-        otter_path (str) : The path to construct an otter path from
         ptype (str) : The plot type to generate. Valid options are
             - both -> Plot both light curve and sed (default)
             - sed -> Plot just the sed
@@ -55,12 +53,6 @@ def query_quick_view(
         A list of matplotlib pyplot Figure objects that we plotted

     """
-    if db is None:
-        if otter_path is not None:
-            db = Otter(otter_path)
-        else:
-            raise ValueError("Either the db or otter_path arguments must be provided!")
-
     res = db.query(**kwargs)

     if len(res) > result_length_tol:
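With `otter_path` removed, callers construct the database handle themselves and pass it in. A sketch (constructor and query arguments are illustrative):

    from otter.io.otter import Otter
    from otter.plotter.plotter import query_quick_view

    db = Otter()  # connection arguments omitted
    figs = query_quick_view(db, ptype="both", names=["AT2018hyz"])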
otter/schema.py
CHANGED
@@ -2,7 +2,7 @@
 Pydantic Schema Model of our JSON schema
 """

-from pydantic import BaseModel, model_validator, field_validator
+from pydantic import BaseModel, model_validator, field_validator
 from typing import Optional, Union, List

@@ -86,24 +86,24 @@ class CoordinateSchema(BaseModel):

         if uses_ra_dec:
             if self.ra_units is None:
-                raise
+                raise ValueError("ra_units must be provided for RA!")
             if self.dec_units is None:
-                raise
+                raise ValueError("dec_units must be provided for Dec!")

         elif uses_galactic:
             if self.l_units is None:
-                raise
+                raise ValueError("l_units must be provided for RA!")
             if self.b_units is None:
-                raise
+                raise ValueError("b_units must be provided for Dec!")

         elif uses_lon_lat:
             if self.lon_units is None:
-                raise
+                raise ValueError("lon_units must be provided for RA!")
             if self.lat_units is None:
-                raise
+                raise ValueError("lat_units must be provided for Dec!")

         else:
-
+            raise ValueError("Must have RA/Dec, l/b, and/or lon/lat!")

         return self

@@ -122,7 +122,7 @@ class DistanceSchema(BaseModel):
     @model_validator(mode="after")
     def _has_units(self):
         if self.distance_type != "redshift" and self.unit is None:
-            raise
+            raise ValueError("Need units if the distance_type is not redshift!")

         return self

@@ -171,6 +171,8 @@ class PhotometrySchema(BaseModel):
     date: Union[str, float, List[Union[str, float]]]
     date_format: Union[str, List[str]]
     date_err: Optional[Union[str, float, List[Union[str, float]]]] = None
+    date_min: Optional[Union[str, float, List[Union[str, float]]]] = None
+    date_max: Optional[Union[str, float, List[Union[str, float]]]] = None
     ignore: Optional[Union[bool, List[bool]]] = None
     upperlimit: Optional[Union[bool, List[bool]]] = None
     sigma: Optional[Union[str, float, List[Union[str, float]]]] = None

@@ -213,6 +215,18 @@ class PhotometrySchema(BaseModel):
             return [v]
         return v

+    @model_validator(mode="after")
+    def _ensure_min_and_max_date(self):
+        """
+        This will make sure that if date_min is provided so is date_max
+        """
+        if (self.date_min is not None and self.date_max is None) or (
+            self.date_min is None and self.date_max is not None
+        ):
+            raise ValueError(
+                "If you provide date_min or date_max you must provide the other!"
+            )
+
     @model_validator(mode="after")
     def _ensure_xray_model(self):
         """
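The schema-level guard mirrors the CSV-level check added in otter.py. Its condition is a plain exclusive-or on presence, easy to verify standalone:

    def exactly_one_missing(date_min, date_max):
        # the validator raises when exactly one of the pair is provided
        return (date_min is not None and date_max is None) or (
            date_min is None and date_max is not None
        )

    assert exactly_one_missing(58999.0, None)           # would raise ValueError
    assert not exactly_one_missing(58999.0, 59001.0)    # valid
    assert not exactly_one_missing(None, None)          # valid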
@@ -221,7 +235,7 @@ class PhotometrySchema(BaseModel):
         It will be commented out until we get the data setup correctly
         """
         # if self.obs_type == "xray" and self.xray_model is None:
-        #     raise
+        #     raise ValueError(
         #         "Need an xray_model for this xray data!"
         #     )

@@ -262,24 +276,20 @@ class HostSchema(BaseModel):
         # if it has the RA/Dec keys, make sure it also has ra_unit, dec_unit keys
         if has_coordinate:
             if self.host_ra_units is None:
-                raise
+                raise ValueError("Need RA unit if coordinates are provided!")
             if self.host_dec_units is None:
-                raise
+                raise ValueError("Need Dec unit if coordinates are provided!")

         # we need either the coordinate or name to identify this object
         # Both are okay too (more info is always better)
         if not has_coordinate and not has_name:
-            raise
-                "Need to provide a Host name and/or host coordinates!"
-            )
+            raise ValueError("Need to provide a Host name and/or host coordinates!")

         # Make sure that if one of RA/Dec is given then both are given
         if (self.host_ra is None and self.host_dec is not None) or (
             self.host_ra is not None and self.host_dec is None
         ):
-            raise
-                "Please provide RA AND Dec, not just one or the other!"
-            )
+            raise ValueError("Please provide RA AND Dec, not just one or the other!")

         return self

@@ -299,4 +309,4 @@ class OtterSchema(BaseModel):
     @model_validator(mode="after")
     def _verify_filter_alias(self):
         if self.photometry is not None and self.filter_alias is None:
-            raise
+            raise ValueError("filter_alias is needed if photometry is given!")
otter/util.py
CHANGED
@@ -121,6 +121,10 @@ def _bibcode_to_hrn_with_query(bibcode):
         if val in bibcodes:
             bibcodes.pop(bibcodes.index(val))

+    if len(bibcodes) == 0:
+        # then all of the bibcodes were "private"
+        return bibcodes, bibcodes
+
     query = f"bibcode:{bibcodes[0]}"
     if len(bibcodes) > 1:
         for b in bibcodes[1:]:
@@ -469,6 +473,7 @@ Mapping for the effective frequencies in THz for all the filters used in OTTER
 XRAY_AREAS = {
     # https://swift.gsfc.nasa.gov/about_swift/Sci_Fact_Sheet.pdf
     "swift": 135 * u.cm**2,
+    "swift-xrt": 135 * u.cm**2,
     # https://heasarc.gsfc.nasa.gov/docs/rosat/ruh/handbook/node39.html#SECTION00634000000000000000
     "rosat": 400 * u.cm**2,
     # https://www.cosmos.esa.int/web/xmm-newton/technical-details-mirrors
@@ -481,6 +486,15 @@ XRAY_AREAS = {
     "chandra": 600 * u.cm**2,
     # https://www.cosmos.esa.int/documents/332006/954765/Brunner_TopicK.pdf
     "erosita": 1500 * u.cm**2,
+    # https://en.wikipedia.org/wiki/NuSTAR
+    "nustar": 847 * u.cm**2,
+    # https://iss.jaxa.jp/en/kiboexp/ef/maxi/
+    "maxi": 200 * u.cm**2,
+    # https://iopscience.iop.org/article/10.3847/1538-4357/abd569
+    "konus-wind": 120 * u.cm**2,
+    # https://www.cosmos.esa.int/web/einstein-probe/mission
+    "ep": 600 * u.cm**2,
+    "ep-fxt": 600 * u.cm**2,
 }
 """
 X-Ray telescope areas that are used for converting from counts to other units.
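These effective areas feed count-to-flux conversions elsewhere in util.py. A sketch of the generic count-rate-to-flux estimate they enable (the formula here is illustrative, not necessarily the exact one the package applies; the rate and mean energy are hypothetical):

    import astropy.units as u
    from otter.util import XRAY_AREAS

    count_rate = 0.05 / u.s             # hypothetical Swift-XRT count rate
    mean_energy = 2.0 * u.keV           # hypothetical mean photon energy
    area = XRAY_AREAS["swift-xrt"]      # alias added in this release

    flux = (count_rate * mean_energy / area).to(u.erg / u.s / u.cm**2)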
astro_otter-0.3.3.dist-info/RECORD DELETED
@@ -1,18 +0,0 @@
-astro_otter-0.3.3.dist-info/licenses/LICENSE,sha256=s9IPE8A3CAMEaZpDhj4eaorpmfLYGB0mIGphq301PUY,1067
-otter/__init__.py,sha256=pvX-TN7nLVmvKpkDi89Zxe-jMfHNiVMD3zsd_bPEK9Y,535
-otter/_version.py,sha256=IP9YVdiq2R5ldxON8iRmWBlg638Aq0aZCl5syP2mi78,76
-otter/exceptions.py,sha256=3lQF4AXVTfs9VRsVePQoIrXnramsPZbUL5crvf1s9Ng,1702
-otter/schema.py,sha256=lOn-9FX4EuVYkRY97j64yGQM9fsTdKUCeYHqkJ8-kN0,10790
-otter/util.py,sha256=Ve_3iHmTcdcS_X2zzBg07WQTlWPbx4CBAZf4Gye65Og,22831
-otter/io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-otter/io/data_finder.py,sha256=v3jZCOhvysHUQG1FOwHdeJ-psTT-MsdO_GZalBpMBGc,38218
-otter/io/host.py,sha256=xv_SznZuvMoMVsZLqlcmlOyaqKCMZqlTQ_gkN4VBSTw,7139
-otter/io/otter.py,sha256=lJStat9oefnQCT4rHrI39Lq0Of-uWl2Dsq7A8Gsy4SY,49892
-otter/io/transient.py,sha256=ANYnqWN1bJuefkTQqyPdt95V33DaQxUOOG_PPU86l3A,48832
-otter/plotter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-otter/plotter/otter_plotter.py,sha256=OQhuLgnMSzgtAjJF8SYBuQOyYcu7Pr0uia5P0G_7z5Q,2144
-otter/plotter/plotter.py,sha256=z09NwQVJS2tuwH3sv95DZv8xogjvf-7Gvj6iWCEx-gQ,9635
-astro_otter-0.3.3.dist-info/METADATA,sha256=EJGa_8TqBnyNJsxHDLh9h48UcwxgJyVcra4aAW7iATQ,7046
-astro_otter-0.3.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-astro_otter-0.3.3.dist-info/top_level.txt,sha256=Wth72sCwBRUk3KZGknSKvLQDMFuJk6qiaAavMDOdG5k,6
-astro_otter-0.3.3.dist-info/RECORD,,

{astro_otter-0.3.3.dist-info → astro_otter-0.3.5.dist-info}/WHEEL: file without changes
{astro_otter-0.3.3.dist-info → astro_otter-0.3.5.dist-info}/licenses/LICENSE: file without changes
{astro_otter-0.3.3.dist-info → astro_otter-0.3.5.dist-info}/top_level.txt: file without changes