astro-otter 0.3.3__tar.gz → 0.3.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of astro-otter might be problematic.
Files changed (33)
  1. {astro_otter-0.3.3/src/astro_otter.egg-info → astro_otter-0.3.5}/PKG-INFO +1 -1
  2. {astro_otter-0.3.3 → astro_otter-0.3.5/src/astro_otter.egg-info}/PKG-INFO +1 -1
  3. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/astro_otter.egg-info/SOURCES.txt +2 -0
  4. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/_version.py +1 -1
  5. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/io/data_finder.py +10 -2
  6. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/io/otter.py +275 -50
  7. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/io/transient.py +208 -2
  8. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/plotter/otter_plotter.py +1 -1
  9. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/plotter/plotter.py +1 -9
  10. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/schema.py +29 -19
  11. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/util.py +14 -0
  12. {astro_otter-0.3.3 → astro_otter-0.3.5}/tests/test_otter.py +5 -3
  13. astro_otter-0.3.5/tests/test_plotter.py +180 -0
  14. astro_otter-0.3.5/tests/test_schema.py +189 -0
  15. astro_otter-0.3.5/tests/test_util.py +94 -0
  16. astro_otter-0.3.3/tests/test_util.py +0 -44
  17. {astro_otter-0.3.3 → astro_otter-0.3.5}/LICENSE +0 -0
  18. {astro_otter-0.3.3 → astro_otter-0.3.5}/README.md +0 -0
  19. {astro_otter-0.3.3 → astro_otter-0.3.5}/pyproject.toml +0 -0
  20. {astro_otter-0.3.3 → astro_otter-0.3.5}/setup.cfg +0 -0
  21. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/astro_otter.egg-info/dependency_links.txt +0 -0
  22. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/astro_otter.egg-info/requires.txt +0 -0
  23. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/astro_otter.egg-info/top_level.txt +0 -0
  24. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/__init__.py +0 -0
  25. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/exceptions.py +0 -0
  26. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/io/__init__.py +0 -0
  27. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/io/host.py +0 -0
  28. {astro_otter-0.3.3 → astro_otter-0.3.5}/src/otter/plotter/__init__.py +0 -0
  29. {astro_otter-0.3.3 → astro_otter-0.3.5}/tests/test_data_finder.py +0 -0
  30. {astro_otter-0.3.3 → astro_otter-0.3.5}/tests/test_exceptions.py +0 -0
  31. {astro_otter-0.3.3 → astro_otter-0.3.5}/tests/test_host.py +0 -0
  32. {astro_otter-0.3.3 → astro_otter-0.3.5}/tests/test_package.py +0 -0
  33. {astro_otter-0.3.3 → astro_otter-0.3.5}/tests/test_transient.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: astro-otter
- Version: 0.3.3
+ Version: 0.3.5
  Author-email: Noah Franz <nfranz@arizona.edu>
  License: MIT License
 
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: astro-otter
- Version: 0.3.3
+ Version: 0.3.5
  Author-email: Noah Franz <nfranz@arizona.edu>
  License: MIT License
 
src/astro_otter.egg-info/SOURCES.txt
@@ -24,5 +24,7 @@ tests/test_exceptions.py
  tests/test_host.py
  tests/test_otter.py
  tests/test_package.py
+ tests/test_plotter.py
+ tests/test_schema.py
  tests/test_transient.py
  tests/test_util.py
src/otter/_version.py
@@ -2,4 +2,4 @@
  Just define the package version in one place
  """
 
- __version__ = "0.3.3"
+ __version__ = "0.3.5"
src/otter/io/data_finder.py
@@ -602,8 +602,16 @@ class DataFinder(object):
  cone_search_res = qc.query(adql=adql, fmt="pandas")
 
  # then retrieve all of the spectra corresponding to those sparcl_ids
- sparcl_ids = cone_search_res.sparcl_id.tolist()
- res = client.retrieve(uuid_list=sparcl_ids, include=include)
+ spec_ids = cone_search_res.targetid.tolist()
+ if len(spec_ids) == 0:
+     logger.warn("Object not found in Sparcl!")
+     return
+
+ res = client.retrieve_by_specid(spec_ids, include=include)
+ if res.count == 0:
+     logger.warn("No Spectra available in sparcl!")
+     return
+
  all_spec = pd.concat([pd.DataFrame([record]) for record in res.records])
  return Table.from_pandas(all_spec)
 
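For context, the retrieval now goes through the SPARCL client's retrieve_by_specid call and guards against empty results. A minimal standalone sketch of that call pattern, assuming the usual sparclclient import path; the target IDs and include fields are illustrative only:

    from sparcl.client import SparclClient

    client = SparclClient()
    spec_ids = [39627848577059221]  # hypothetical DESI targetid values
    res = client.retrieve_by_specid(spec_ids, include=["specid", "wavelength", "flux"])
    if res.count == 0:
        print("No spectra available in SPARCL for these specids")
    else:
        print(f"Retrieved {res.count} spectra")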
src/otter/io/otter.py
@@ -154,6 +154,7 @@ class Otter(Database):
  keep_raw=False,
  wave_unit="nm",
  freq_unit="GHz",
+ deduplicate=None,
  **kwargs,
  ) -> Table:
  """
@@ -176,6 +177,11 @@ class Otter(Database):
      is False.
  wave_unit (str): The astropy wavelength unit to return with
  freq_unit (str): The astropy frequency unit to return with`
+ deduplicate (Callable|None|False): if we should deduplicate the dataset
+     using the deduplicate Callable. Set to
+     False if you don't want this to happen.
+     None defaults to
+     Transient.deduplicate_photometry
  **kwargs : Arguments to pass to Otter.query(). Can be::
 
      names (list[str]): A list of names to get the metadata for
@@ -215,6 +221,7 @@ class Otter(Database):
  wave_unit=wave_unit,
  freq_unit=freq_unit,
  obs_type=obs_type,
+ deduplicate=deduplicate,
  )
 
  phot["name"] = [default_name] * len(phot)
@@ -291,8 +298,13 @@ class Otter(Database):
  radius: float = 5,
  minz: float = None,
  maxz: float = None,
+ mindec: float = -90,
+ maxdec: float = 90,
  refs: list[str] = None,
  hasphot: bool = False,
+ has_radio_phot: bool = False,
+ has_uvoir_phot: bool = False,
+ has_xray_phot: bool = False,
  hasspec: bool = False,
  spec_classed: bool = False,
  unambiguous: bool = False,
@@ -316,9 +328,14 @@ class Otter(Database):
  radius (float): The radius in arcseconds for a cone search, default is 0.05"
  minz (float): The minimum redshift to search for
  maxz (float): The maximum redshift to search for
+ mindec (float): The minimum declination in degrees
+ maxdec (float): Tje maximum declination in degrees
  refs (list[str]): A list of ads bibcodes to match to. Will only return
      metadata for transients that have this as a reference.
  hasphot (bool): if True, only returns transients which have photometry.
+ has_radio_phot (bool): if True, only returns transients with radio phot.
+ has_uvoir_phot (bool): if True, only returns transients with uvoir phot.
+ has_xray_phot (bool): if True, only returns transients with X-ray phot.
  hasspec (bool): NOT IMPLEMENTED! Will return False for all targets!
  spec_classed (bool): If True, only returns transients that have been
      specotroscopically classified/confirmed
@@ -337,9 +354,18 @@ class Otter(Database):
  # write some AQL filters based on the inputs
  query_filters = ""
 
- if hasphot is True:
+ if hasphot or has_radio_phot or has_xray_phot or has_uvoir_phot:
      query_filters += "FILTER 'photometry' IN ATTRIBUTES(transient)\n"
 
+ if has_radio_phot:
+     query_filters += "FILTER 'radio' IN transient.photometry[*].obs_type\n"
+
+ if has_uvoir_phot:
+     query_filters += "FILTER 'uvoir' IN transient.photometry[*].obs_type\n"
+
+ if has_xray_phot:
+     query_filters += "FILTER 'xray' IN transient.photometry[*].obs_type\n"
+
  if hasspec is True:
      query_filters += "FILTER 'spectra' IN ATTRIBUTES(transient)\n"
 
@@ -451,6 +477,11 @@ class Otter(Database):
  else:
      arango_query_results = [Transient(res) for res in result.result]
 
+ # filter based on the min and max declination query options
+ decs = np.array([t.get_skycoord().dec.deg for t in arango_query_results])
+ where_dec = np.where((decs > mindec) * (decs < maxdec))[0]
+ arango_query_results = [arango_query_results[i] for i in where_dec]
+
  if not query_private:
      return arango_query_results
 
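A hedged usage sketch of the new query filters; the parameter names come from the hunks above, while the constructor arguments and the chosen cuts are illustrative:

    from otter import Otter

    db = Otter()  # assumes the default connection settings
    # northern-hemisphere objects with at least one radio photometry point
    results = db.query(mindec=0, maxdec=90, has_radio_phot=True)
    for t in results:
        print(t.get_skycoord())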
@@ -665,7 +696,10 @@ class Otter(Database):
  if len(res) > 1:
      raise OtterLimitationError("Some objects in Otter are too close!")
 
- elif len(res) == 1:
+ elif len(res) == 1 and collection != "vetting":
+     # if the collection is the vetting collection we don't want to do the
+     # merging yet, even if the object already exists in OTTER
+
      # this object exists in otter already, let's grab the transient data and
      # merge the files
      merged = t + res[0]
@@ -674,13 +708,6 @@ class Otter(Database):
      merged["_key"] = res[0]["_key"]
      merged["_id"] = res[0]["_id"]
 
-     # we also have to delete the document from the OTTER database
-     doc = self.fetchDocument(merged["_id"])
-     if not testing:
-         doc.delete()
-     else:
-         print(f"Would delete\n{doc}")
-
  else:
      # this means the object doesn't exist in otter already
      merged = t
@@ -832,7 +859,7 @@ class Otter(Database):
  def from_csvs(
      metafile: str,
      photfile: str = None,
-     local_outpath: Optional[str] = None,
+     local_outpath: Optional[str] = os.path.join(os.getcwd(), "private-data"),
      db: Otter = None,
  ) -> Otter:
  """
@@ -973,11 +1000,13 @@ class Otter(Database):
      pd.isna(tde["luminosity_distance"])
  ):
      json["distance"].append(
-         value=tde.luminosity_distance[0],
-         reference=[tde.luminosity_distance_bibcode[0]],
-         unit=tde.luminosity_distance_unit[0],
-         computed=False,
-         distance_type="luminosity",
+         dict(
+             value=tde.luminosity_distance[0],
+             reference=[tde.luminosity_distance_bibcode[0]],
+             unit=tde.luminosity_distance_unit[0],
+             computed=False,
+             distance_type="luminosity",
+         )
      )
 
  # comoving distance
@@ -985,11 +1014,13 @@ class Otter(Database):
      pd.isna(tde["comoving_distance"])
  ):
      json["distance"].append(
-         value=tde.comoving_distance[0],
-         reference=[tde.comoving_distance_bibcode[0]],
-         unit=tde.comoving_distance_unit[0],
-         computed=False,
-         distance_type="comoving",
+         dict(
+             value=tde.comoving_distance[0],
+             reference=[tde.comoving_distance_bibcode[0]],
+             unit=tde.comoving_distance_unit[0],
+             computed=False,
+             distance_type="comoving",
+         )
      )
 
  # remove the distance list if it is empty still
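For context, list.append takes a single argument, so the 0.3.3 keyword-style call would raise a TypeError at runtime; 0.3.5 wraps the fields in a dict before appending. A minimal illustration, assuming json["distance"] is a plain list and using made-up values and a hypothetical bibcode:

    json = {"distance": []}
    json["distance"].append(
        dict(
            value=337.2,  # Mpc, illustrative
            reference=["2020ApJ...000....0X"],  # hypothetical bibcode
            unit="Mpc",
            computed=False,
            distance_type="luminosity",
        )
    )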
@@ -1081,11 +1112,6 @@ class Otter(Database):
  if src not in phot_sources:
      phot_sources.append(src)
 
- if len(np.unique(p.flux_unit)) == 1:
-     raw_units = p.flux_unit.tolist()[0]
- else:
-     raw_units = p.flux_unit.tolist()
-
  # add a column to phot with the unique filter key
  if obstype == "radio":
      filter_uq_key = (
@@ -1105,17 +1131,73 @@ class Otter(Database):
  if "upperlimit" not in p:
      p["upperlimit"] = False
 
- json_phot = dict(
-     reference=src,
-     raw=p.flux.astype(float).tolist(),
-     raw_err=p.flux_err.astype(float).tolist(),
-     raw_units=raw_units,
-     date=p.date.tolist(),
-     date_format=p.date_format.tolist(),
-     upperlimit=p.upperlimit.tolist(),
-     filter_key=filter_uq_key,
-     obs_type=obstype,
- )
+ if "raw" in p.columns and "flux" in p.columns:
+     if len(np.unique(p.raw_unit)) == 1:
+         raw_units = p.raw_unit.tolist()[0]
+     else:
+         raw_units = p.raw_unit.tolist()
+
+     if len(np.unique(p.flux_unit)) == 1:
+         val_units = p.flux_unit.tolist()[0]
+     else:
+         val_units = p.flux_unit.tolist()
+
+     # treat "raw" as the "raw" keyword and "flux" as the "value"
+     json_phot = dict(
+         reference=src,
+         raw=p.raw.astype(float).tolist(),
+         raw_err=p.raw_err.astype(float).tolist(),
+         raw_units=raw_units,
+         value=p.flux.astype(float).tolist(),
+         value_err=p.flux_err.astype(float).tolist(),
+         value_units=val_units,
+         date=p.date.tolist(),
+         date_format=p.date_format.tolist(),
+         upperlimit=p.upperlimit.tolist(),
+         filter_key=filter_uq_key,
+         obs_type=obstype,
+     )
+
+ elif "flux" in p.columns and "raw" not in p.columns:
+     if len(np.unique(p.flux_unit)) == 1:
+         raw_units = p.flux_unit.tolist()[0]
+     else:
+         raw_units = p.flux_unit.tolist()
+
+     # treat "flux" as the "raw" keyword
+     json_phot = dict(
+         reference=src,
+         raw=p.flux.astype(float).tolist(),
+         raw_err=p.flux_err.astype(float).tolist(),
+         raw_units=raw_units,
+         date=p.date.tolist(),
+         date_format=p.date_format.tolist(),
+         upperlimit=p.upperlimit.tolist(),
+         filter_key=filter_uq_key,
+         obs_type=obstype,
+     )
+
+ elif "raw" in p.columns and "flux" not in p.columns:
+     if len(np.unique(p.raw_unit)) == 1:
+         raw_units = p.raw_unit.tolist()[0]
+     else:
+         raw_units = p.raw_unit.tolist()
+
+     # treat "raw" as the "raw" keyword
+     json_phot = dict(
+         reference=src,
+         raw=p.raw.astype(float).tolist(),
+         raw_err=p.raw_err.astype(float).tolist(),
+         raw_units=raw_units,
+         date=p.date.tolist(),
+         date_format=p.date_format.tolist(),
+         upperlimit=p.upperlimit.tolist(),
+         filter_key=filter_uq_key,
+         obs_type=obstype,
+     )
+
+ else:
+     raise ValueError("`raw` and/or `flux` key(s) must be provided!")
 
  if not pd.isna(tele):
      json_phot["telescope"] = tele
@@ -1131,6 +1213,13 @@ class Otter(Database):
      "Minimum and maximum filters required for X-ray data!"
  )
 
+ if ("date_min" in p and "date_max" not in p) or (
+     "date_min" not in p and "date_max" in p
+ ):
+     raise ValueError(
+         "If date_min/date_max is provided, the other must be too!"
+     )
+
  # check optional keys
  optional_keys = [
      "date_err",
@@ -1142,6 +1231,8 @@ class Otter(Database):
      "observer",
      "reducer",
      "pipeline",
+     "date_min",
+     "date_max",
  ]
  for k in optional_keys:
      if k in p and not np.all(pd.isna(p[k])):
@@ -1154,7 +1245,13 @@ class Otter(Database):
 
  # handle more detailed uncertainty information
  raw_err_detail = {}
- for key in ["statistical_err", "systematic_err", "iss_err"]:
+ for key in [
+     "statistical_err",
+     "systematic_err",
+     "iss_err",
+     "upper_err",
+     "lower_err",
+ ]:
      if key in p and not np.all(pd.isna(p[key])):
          k = key.split("_")[0]
 
@@ -1183,6 +1280,126 @@ class Otter(Database):
  json_phot[c] = p[c].tolist()
  json_phot[bool_v_key] = [v != "null" for v in json_phot[c]]
 
+ # deal with the xray model
+ if "xray_model_name" not in p and obstype == "xray":
+     raise ValueError(
+         "You must provide the xray model for xray data!"
+     )
+ if obstype == "xray" and "xray_model_name" in p:
+     # get various sets of keywords
+     model_val_cols = list(
+         p.columns[p.columns.str.contains("xray_model_param_value")]
+     )
+     model_up_err_cols = list(
+         p.columns[p.columns.str.contains("xray_model_param_up_err")]
+     )
+     model_lo_err_cols = list(
+         p.columns[p.columns.str.contains("xray_model_param_lo_err")]
+     )
+     model_val_units_cols = list(
+         p.columns[p.columns.str.contains("xray_model_param_unit")]
+     )
+     model_uplim_cols = list(
+         p.columns[
+             p.columns.str.contains("xray_model_param_upperlimit")
+         ]
+     )
+
+     param_names = [v.split("::")[-1] for v in model_val_cols]
+
+     xray_model_info = p[
+         model_val_cols
+         + model_up_err_cols
+         + model_lo_err_cols
+         + model_val_units_cols
+         + model_uplim_cols
+         + [
+             "xray_model_name",
+             "xray_model_reference",
+             "filter_min",
+             "filter_max",
+             "filter_eff_units",
+         ]
+     ]
+
+     if len(model_uplim_cols) == 0:
+         # assume they are all false
+         for param_name in param_names:
+             colname = f"xray_model_param_upperlimit::{param_name}"
+             xray_model_info[colname] = False
+             model_uplim_cols.append(colname)
+
+     if not all(
+         len(model_val_cols) == len(p)
+         for p in [
+             model_up_err_cols,
+             model_lo_err_cols,
+             model_val_units_cols,
+             model_uplim_cols,
+         ]
+     ):
+         raise ValueError(
+             "Missing a column for one of the X-ray parameters!"
+         )
+
+     xray_models = []
+     for _, row in xray_model_info.iterrows():
+         energy1 = (
+             (row["filter_min"] * u.Unit(row["filter_eff_units"]))
+             .to("keV", equivalencies=u.spectral())
+             .value
+         )
+         energy2 = (
+             (row["filter_max"] * u.Unit(row["filter_eff_units"]))
+             .to("keV", equivalencies=u.spectral())
+             .value
+         )
+
+         if energy1 > energy2:
+             min_energy = energy2
+             max_energy = energy1
+         else:
+             min_energy = energy1
+             max_energy = energy2
+
+         param_names_not_na = []
+         for n in param_names:
+             if not pd.isna(row[f"xray_model_param_value::{n}"]):
+                 param_names_not_na.append(n)
+
+         xray_models.append(
+             {
+                 "model_name": row.xray_model_name,
+                 "param_names": [n for n in param_names_not_na],
+                 "param_values": [
+                     row[f"xray_model_param_value::{n}"]
+                     for n in param_names_not_na
+                 ],
+                 "param_value_upper_err": [
+                     row[f"xray_model_param_up_err::{n}"]
+                     for n in param_names_not_na
+                 ],
+                 "param_value_lower_err": [
+                     row[f"xray_model_param_lo_err::{n}"]
+                     for n in param_names_not_na
+                 ],
+                 "param_upperlimit": [
+                     row[f"xray_model_param_upperlimit::{n}"]
+                     for n in param_names_not_na
+                 ],
+                 "param_units": [
+                     row[f"xray_model_param_unit::{n}"]
+                     for n in param_names_not_na
+                 ],
+                 "model_reference": row["xray_model_reference"],
+                 "min_energy": min_energy,
+                 "max_energy": max_energy,
+                 "energy_units": "keV",
+             }
+         )
+
+     json_phot["xray_model"] = xray_models
+
  json["photometry"].append(json_phot)
 
  tde["filter_uq_key"] = pd.Series(
@@ -1191,14 +1408,20 @@ class Otter(Database):
 
  # filter alias
  # radio filters first
- filter_keys1 = ["filter_uq_key", "band_eff_wave", "band_eff_wave_unit"]
+ filter_keys1 = [
+     "filter_uq_key",
+     "band_eff_wave",
+     "band_eff_wave_unit",
+     "filter_eff_units",
+ ]
  if "filter_min" in tde:
      filter_keys1.append("filter_min")
  if "filter_max" in tde:
      filter_keys1.append("filter_max")
 
- filter_map = (
-     tde[filter_keys1].drop_duplicates().set_index("filter_uq_key")
+ filt_info = tde[filter_keys1]
+ filter_map = filt_info.drop_duplicates().set_index(
+     "filter_uq_key"
  ) # .to_dict(orient='index')
  try:
      filter_map_radio = filter_map.to_dict(orient="index")
@@ -1229,22 +1452,24 @@ class Otter(Database):
      wave_units=val["band_eff_wave_unit"],
  )
 
- if "filter_min" in val:
+ if "filter_min" in val and not pd.isna(val["filter_min"]):
      filter_alias_dict["wave_min"] = (
-         val["filter_min"] * u.Unit(phot.filter_eff_units)
-     ).to(
-         u.Unit(
-             val["band_eff_wave_unit"], equivalencies=u.spectral()
+         (val["filter_min"] * u.Unit(val["filter_eff_units"]))
+         .to(
+             u.Unit(val["band_eff_wave_unit"]),
+             equivalencies=u.spectral(),
          )
+         .value
      )
 
- if "filter_max" in val:
+ if "filter_max" in val and not pd.isna(val["filter_max"]):
      filter_alias_dict["wave_max"] = (
-         val["filter_max"] * u.Unit(phot.filter_eff_units)
-     ).to(
-         u.Unit(
-             val["band_eff_wave_unit"], equivalencies=u.spectral()
+         (val["filter_max"] * u.Unit(val["filter_eff_units"]))
+         .to(
+             u.Unit(val["band_eff_wave_unit"]),
+             equivalencies=u.spectral(),
          )
+         .value
      )
 
  json["filter_alias"].append(filter_alias_dict)