dkist-processing-common 10.9.0__py3-none-any.whl → 11.0.0rc1__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those versions.
Files changed (26)
  1. changelog/256.feature.rst +2 -0
  2. changelog/257.feature.rst +1 -0
  3. changelog/259.feature.rst +1 -0
  4. dkist_processing_common/models/dkist_location.py +27 -0
  5. dkist_processing_common/models/metric_code.py +1 -0
  6. dkist_processing_common/models/quality.py +49 -5
  7. dkist_processing_common/parsers/dsps_repeat.py +1 -1
  8. dkist_processing_common/parsers/id_bud.py +1 -1
  9. dkist_processing_common/parsers/near_bud.py +8 -6
  10. dkist_processing_common/parsers/retarder.py +1 -1
  11. dkist_processing_common/parsers/time.py +26 -9
  12. dkist_processing_common/parsers/unique_bud.py +8 -6
  13. dkist_processing_common/parsers/wavelength.py +1 -1
  14. dkist_processing_common/tasks/mixin/quality/_base.py +7 -1
  15. dkist_processing_common/tasks/mixin/quality/_metrics.py +129 -0
  16. dkist_processing_common/tasks/parse_l0_input_data.py +4 -3
  17. dkist_processing_common/tasks/write_l1.py +6 -35
  18. dkist_processing_common/tests/test_assemble_quality.py +33 -4
  19. dkist_processing_common/tests/test_dkist_location.py +15 -0
  20. dkist_processing_common/tests/test_quality_mixin.py +204 -1
  21. dkist_processing_common/tests/test_stems.py +141 -13
  22. dkist_processing_common/tests/test_write_l1.py +0 -12
  23. {dkist_processing_common-10.9.0.dist-info → dkist_processing_common-11.0.0rc1.dist-info}/METADATA +3 -2
  24. {dkist_processing_common-10.9.0.dist-info → dkist_processing_common-11.0.0rc1.dist-info}/RECORD +26 -21
  25. {dkist_processing_common-10.9.0.dist-info → dkist_processing_common-11.0.0rc1.dist-info}/WHEEL +0 -0
  26. {dkist_processing_common-10.9.0.dist-info → dkist_processing_common-11.0.0rc1.dist-info}/top_level.txt +0 -0
dkist_processing_common/tests/test_assemble_quality.py
@@ -220,6 +220,17 @@ def quality_metrics(dataframe_json) -> list[Metric]:
              {"name": "hist 3", "value": 9.35, "warnings": "warning for historical metric 3"},
              ["QUALITY_HISTORICAL"],
          ),
+         Metric(
+             {
+                 "input_wavelength_nm": [1001.0, 1002.0, 1003.0, 1004.0],
+                 "input_spectrum": [1.0, 1.0, 0.5, 1.0],
+                 "best_fit_wavelength_nm": [1001.5, 1002.6, 1003.7, 1004.8],
+                 "best_fit_atlas": [1.0, 1.0, 0.4, 1.0],
+                 "normalized_residuals": [0.0, 0.0, 0.1, 0.0],
+                 "weights": None,
+             },
+             ["QUALITY_WAVECAL_FIT"],
+         ),
      ]
      return metrics

@@ -247,6 +258,20 @@ def plot_data_expected() -> Callable[[str], bool]:
      return expected


+ @pytest.fixture()
+ def vertical_multi_pane_plot_data_expected() -> Callable[[str], bool]:
+     """
+     Tightly coupled with quality_metrics fixture and resultant report metric name
+     """
+     # names where vertical_multi_pane_plot_data is expected to be populated
+     names = {"Wavelength Calibration Results"}
+
+     def expected(name: str) -> bool:
+         return name in names
+
+     return expected
+
+
  @pytest.fixture()
  def table_data_expected() -> Callable[[str], bool]:
      """
@@ -426,6 +451,7 @@ def test_assemble_quality_data(
      assemble_quality_data_task,
      recipe_run_id,
      plot_data_expected,
+     vertical_multi_pane_plot_data_expected,
      table_data_expected,
      modmat_data_expected,
      histogram_data_expected,
@@ -447,14 +473,17 @@ def test_assemble_quality_data(
      quality_data = list(
          chain.from_iterable(task.read(tags=Tag.quality_data(), decoder=json_decoder))
      )
-     # With polcal, this would be 19, but the polcal metrics are not included with this task
-     assert len(quality_data) == 15
+     # With polcal, this would be 20, but the polcal metrics are not included with this task
+     assert len(quality_data) == 16
      for metric_data in quality_data:
          rm: ReportMetric = ReportMetric.from_dict(metric_data)
          assert isinstance(rm.name, str)
          assert isinstance(rm.description, str)
          if plot_data_expected(rm.name):
              assert rm.plot_data
+         if vertical_multi_pane_plot_data_expected(rm.name):
+             # TODO: Update this once `dkist-quality` knows about vertical multi-pane metrics
+             assert True
          if table_data_expected(rm.name):
              assert rm.table_data
          if modmat_data_expected(rm.name):
@@ -496,8 +525,8 @@ def test_assemble_quality_data_for_polcal(
      quality_data = list(
          chain.from_iterable(task.read(tags=Tag.quality_data(), decoder=json_decoder))
      )
-     # this is 19 with polcal
-     assert len(quality_data) == 19
+     # this is 20 with polcal
+     assert len(quality_data) == 20
      for metric_data in quality_data:
          rm: ReportMetric = ReportMetric.from_dict(metric_data)
          assert isinstance(rm.name, str)
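The new "QUALITY_WAVECAL_FIT" tag used above corresponds to the one-line change to `dkist_processing_common/models/metric_code.py` listed in the files-changed table but not shown in this section. A minimal sketch of what that addition implies is below; the enum member name and value are taken from the tests in this diff, while the rest of the enum is elided.

```python
# Sketch only -- the released MetricCode enum contains many other members.
from enum import Enum


class MetricCode(str, Enum):
    # ...existing metric codes...
    wavecal_fit = "QUALITY_WAVECAL_FIT"
```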
dkist_processing_common/tests/test_dkist_location.py (new file)
@@ -0,0 +1,15 @@
+ import pytest
+ from astropy.coordinates import EarthLocation
+
+ from dkist_processing_common.models.dkist_location import location_of_dkist
+
+
+ @pytest.mark.flaky(max_reruns=10)
+ def test_location_of_dkist():
+     """
+     Given: function for retrieving the dkist location on earth
+     When: Call function
+     Then: result is the same as what is in the astropy online database
+     """
+     itrs = location_of_dkist
+     assert itrs == EarthLocation.of_site("dkist")
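The new `dkist_processing_common/models/dkist_location.py` module (+27 lines) is not shown in this section. Based on the test above, which compares a module-level `location_of_dkist` against `EarthLocation.of_site("dkist")`, a minimal sketch of what such a module could provide is given below. The `from_geodetic` construction and the coordinate values are illustrative assumptions, not the released implementation.

```python
# Illustrative sketch only -- not the released dkist_location.py.
# Assumption: the module exposes a module-level EarthLocation built from
# hard-coded coordinates so it does not depend on astropy's online site
# registry (EarthLocation.of_site can require a network fetch, which is
# why the test above is marked flaky).
import astropy.units as u
from astropy.coordinates import EarthLocation

# Approximate Haleakala summit coordinates, for illustration only.
location_of_dkist: EarthLocation = EarthLocation.from_geodetic(
    lon=-156.256 * u.deg, lat=20.707 * u.deg, height=3067 * u.m
)
```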
dkist_processing_common/tests/test_quality_mixin.py
@@ -3,13 +3,21 @@ import json
  from io import StringIO
  from typing import Any

+ import astropy.units as u
  import numpy as np
  import pandas
  import pytest
+ from lmfit.minimizer import MinimizerResult
+ from pydantic import ValidationError
+ from solar_wavelength_calibration.fitter.wavelength_fitter import FitResult
+ from solar_wavelength_calibration.fitter.wavelength_fitter import WavelengthParameters

  from dkist_processing_common._util.scratch import WorkflowFileSystem
  from dkist_processing_common.codecs.json import json_encoder
  from dkist_processing_common.codecs.quality import QualityValueEncoder
+ from dkist_processing_common.models.metric_code import MetricCode
+ from dkist_processing_common.models.quality import Plot2D
+ from dkist_processing_common.models.quality import VerticalMultiPanePlot2D
  from dkist_processing_common.models.tags import Tag
  from dkist_processing_common.tasks import WorkflowTaskBase
  from dkist_processing_common.tasks.mixin.quality import QualityMixin
@@ -21,7 +29,7 @@ class Task(WorkflowTaskBase, QualityMixin):


  @pytest.fixture
- def quality_task(tmp_path, recipe_run_id):
+ def quality_task(tmp_path, recipe_run_id) -> Task:
      with Task(
          recipe_run_id=recipe_run_id,
          workflow_name="workflow_name",
@@ -106,6 +114,7 @@ def test_create_2d_plot_with_datetime_metric(quality_task):
          "facet",
          "statement",
          "plot_data",
+         "vertical_multi_pane_plot_data",
          "histogram_data",
          "table_data",
          "modmat_data",
@@ -1185,6 +1194,170 @@ def test_avg_noise_nan_values(quality_task, array_shape):
      assert not np.isnan(result)


+ @pytest.fixture(scope="session")
+ def wavecal_input_wavelength() -> u.Quantity:
+     return np.arange(100) * u.nm
+
+
+ @pytest.fixture(scope="session")
+ def wavecal_input_spectrum(wavecal_input_wavelength) -> np.ndarray:
+     return (wavecal_input_wavelength.value - wavecal_input_wavelength.size // 2) ** 2 + 10.0
+
+
+ @pytest.fixture(scope="session")
+ def wavecal_weights(wavecal_input_wavelength) -> np.ndarray:
+     return np.arange(wavecal_input_wavelength.size)
+
+
+ @pytest.fixture(scope="session")
+ def wavecal_fit_result(wavecal_input_wavelength) -> FitResult:
+     wavelength_params = WavelengthParameters(
+         crpix=1, crval=10.0, dispersion=1, grating_constant=1, order=1, incident_light_angle=0
+     )
+     minimizer_result = MinimizerResult(residual=np.random.random(wavecal_input_wavelength.size))
+     return FitResult(wavelength_parameters=wavelength_params, minimizer_result=minimizer_result)
+
+
+ @pytest.mark.parametrize(
+     "use_weights",
+     [pytest.param(True, id="custom_weights"), pytest.param(False, id="default_weights")],
+ )
+ def test_wavecal_store_results(
+     quality_task,
+     wavecal_input_wavelength,
+     wavecal_input_spectrum,
+     wavecal_fit_result,
+     wavecal_weights,
+     use_weights,
+ ):
+     """
+     Given: A task with the QualityMixin and the results of a wavecal fit
+     When: Storing the wavecal metric
+     Then: The correct metric json files are written and their contents contain the correct types of data
+     """
+     quality_task.quality_store_wavecal_results(
+         input_wavelength=wavecal_input_wavelength,
+         input_spectrum=wavecal_input_spectrum,
+         fit_result=wavecal_fit_result,
+         weights=wavecal_weights if use_weights else None,
+     )
+
+     wavecal_quality_files = list(quality_task.read(tags=[Tag.quality(MetricCode.wavecal_fit)]))
+     assert len(wavecal_quality_files) == 1
+     with open(wavecal_quality_files[0], "r") as f:
+         results_dict = json.load(f)
+     assert sorted(results_dict.keys()) == sorted(
+         [
+             "input_wavelength_nm",
+             "input_spectrum",
+             "best_fit_wavelength_nm",
+             "best_fit_atlas",
+             "normalized_residuals",
+             "weights",
+         ]
+     )
+     for k, v in results_dict.items():
+         if k != "weights" or use_weights:
+             assert isinstance(v, list)
+             assert len(v) == len(results_dict["input_wavelength_nm"])
+     if not use_weights:
+         assert results_dict["weights"] is None
+
+
+ @pytest.fixture(
+     scope="session",
+     params=[pytest.param([0, 1.0, 0.8, 0.0], id="weights"), pytest.param(None, id="no_weights")],
+ )
+ def wavecal_data_json(request) -> dict:
+     weights = request.param
+     return {
+         "input_wavelength_nm": [1001.0, 1002.0, 1003.0, 1004.0],
+         "input_spectrum": [1.0, 1.0, 0.5, 1.0],
+         "best_fit_wavelength_nm": [1001.5, 1002.6, 1003.7, 1004.8],
+         "best_fit_atlas": [1.0, 1.0, 0.4, 1.0],
+         "normalized_residuals": [0.0, 0.0, 0.1, 0.0],
+         "weights": weights,
+     }
+
+
+ def test_build_wavecal_results(quality_task, wavecal_data_json):
+     """
+     Given: A task with the QualityMixin
+     When: Building the wavecal results quality metric
+     Then: The correct metric model is returned
+     """
+     weights_included = wavecal_data_json["weights"] is not None
+     quality_task.write(
+         data=wavecal_data_json,
+         tags=[Tag.quality(MetricCode.wavecal_fit)],
+         encoder=json_encoder,
+         allow_nan=False,
+         cls=QualityValueEncoder,
+     )
+     metric = quality_task.quality_build_wavecal_results()
+
+     assert metric["name"] == "Wavelength Calibration Results"
+     assert metric["description"] == (
+         "These plots show the wavelength solution computed based on fits to a Solar FTS atlas. "
+         "The top plot shows the input and best-fit spectra along with the best-fit atlas, which is "
+         "a combination of Solar and Telluric spectra. The bottom plot shows the fir residuals."
+     )
+     assert metric["metric_code"] == MetricCode.wavecal_fit.value
+     assert metric["facet"] is None
+     assert metric["statement"] is None
+     assert metric["plot_data"] is None
+     assert metric["histogram_data"] is None
+     assert metric["table_data"] is None
+     assert metric["modmat_data"] is None
+     assert metric["efficiency_data"] is None
+     assert metric["raincloud_data"] is None
+     assert metric["warnings"] is None
+
+     multi_plot_data = metric["vertical_multi_pane_plot_data"]
+     assert multi_plot_data["match_x_axes"] is True
+     assert multi_plot_data["no_gap"] is True
+     assert multi_plot_data["top_to_bottom_height_ratios"] == (
+         [1.5, 1, 1]
+         if weights_included
+         else [1.5, 1]
+     )
+     plot_list = multi_plot_data["top_to_bottom_plot_list"]
+     assert isinstance(plot_list, list)
+     assert len(plot_list) == (3 if weights_included else 2)
+
+     fit_plot = plot_list[0]
+     assert fit_plot["sort_series"] is False
+     assert fit_plot["xlabel"] == "Wavelength [nm]"
+     assert fit_plot["ylabel"] == "Signal"
+     assert fit_plot["series_data"] == {
+         "Input Spectrum": [[1001.0, 1002.0, 1003.0, 1004.0], [1.0, 1.0, 0.5, 1.0]],
+         "Best Fit Observations": [[1001.5, 1002.6, 1003.7, 1004.8], [1.0, 1.0, 0.5, 1.0]],
+         "Best Fit Atlas": [[1001.5, 1002.6, 1003.7, 1004.8], [1.0, 1.0, 0.4, 1.0]],
+     }
+     assert fit_plot["plot_kwargs"] == {
+         "Input Spectrum": {"ls": "-", "alpha": 0.4, "ms": 0},
+         "Best Fit Observations": {"ls": "-", "lw": 4, "alpha": 0.8, "ms": 0},
+         "Best Fit Atlas": {"color": "k", "ls": "-", "ms": 0},
+     }
+
+     residuals_plot = plot_list[1]
+     assert residuals_plot["xlabel"] == "Wavelength [nm]"
+     assert residuals_plot["ylabel"] == r"$\frac{\mathrm{Obs - Atlas}}{\mathrm{Obs}}$"
+     assert residuals_plot["series_data"] == {
+         "Residuals": [[1001.5, 1002.6, 1003.7, 1004.8], [0.0, 0.0, 0.1, 0.0]]
+     }
+     assert residuals_plot["plot_kwargs"] == {"Residuals": {"ls": "-", "color": "k", "ms": 0}}
+
+     if weights_included:
+         weights_plot = plot_list[2]
+         assert weights_plot["xlabel"] == "Wavelength [nm]"
+         assert weights_plot["ylabel"] == "Fit Weights"
+         assert weights_plot["series_data"] == {
+             "Weights": [[1001.5, 1002.6, 1003.7, 1004.8], [0.0, 1.0, 0.8, 0.0]]
+         }
+         assert weights_plot["plot_kwargs"] == {"Weights": {"ls": "-", "color": "k", "ms": 0}}
+
+
  @pytest.mark.parametrize(
      "bin_strs, sampled_bins, expected_bin_str, expected_sample_str",
      [
@@ -1255,3 +1428,33 @@ def test_format_facet(label: str | Any, expected_result: str):
      Then: the label is properly formatted
      """
      assert QualityMixin._format_facet(label) == expected_result
+
+
+ def test_validate_vertical_multi_pane_plot_model():
+     """
+     Given: A `VerticalMultiPanePlot2D` model and some `Plot2D` models
+     When: Instantiating the `VerticalMultiPanePlot2D` with various parameters
+     Then: The `top_to_bottom_plot_ratios` property is correctly populated
+     """
+     plot2d = Plot2D(xlabel="X", ylabel="Y", series_data={"Foo": [[1.0], [2.0]]})
+
+     # Test given ratios valid case
+     _ = VerticalMultiPanePlot2D(
+         top_to_bottom_plot_list=[plot2d, plot2d], top_to_bottom_height_ratios=[1.0, 2.0]
+     )
+
+     # Test None ratios
+     vertical_plots = VerticalMultiPanePlot2D(
+         top_to_bottom_plot_list=[plot2d, plot2d], top_to_bottom_height_ratios=None
+     )
+     assert vertical_plots.top_to_bottom_height_ratios == [1.0, 1.0]
+
+     # Test invalid case
+     with pytest.raises(
+         ValidationError,
+         match="The number of items in `top_to_bottom_height_ratios` list \(3\) is not "
+         "the same as the number of plots \(2\)",
+     ):
+         _ = VerticalMultiPanePlot2D(
+             top_to_bottom_plot_list=[plot2d, plot2d], top_to_bottom_height_ratios=[1.0, 2.0, 3.0]
+         )
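The `models/quality.py` diff (+49 lines) is not included in this section, but the test above pins down the behavior of the new `VerticalMultiPanePlot2D` model: height ratios default to equal panes and must match the number of plots. A rough sketch of a pydantic model with that behavior follows; the field names and error message come from the tests, while the defaults for `match_x_axes` and `no_gap` and the validator structure are assumptions, not the released code.

```python
# Sketch only; the real model lives in dkist_processing_common/models/quality.py.
# Assumes Plot2D (imported from the released package) is itself a pydantic model.
from pydantic import BaseModel, model_validator

from dkist_processing_common.models.quality import Plot2D


class VerticalMultiPanePlot2D(BaseModel):
    """Stack of 2D plots rendered top-to-bottom in a single figure."""

    top_to_bottom_plot_list: list[Plot2D]
    top_to_bottom_height_ratios: list[float] | None = None
    match_x_axes: bool = True  # assumed default
    no_gap: bool = True  # assumed default

    @model_validator(mode="after")
    def _validate_height_ratios(self) -> "VerticalMultiPanePlot2D":
        num_plots = len(self.top_to_bottom_plot_list)
        if self.top_to_bottom_height_ratios is None:
            # Default to equal pane heights, matching the behavior the test asserts.
            self.top_to_bottom_height_ratios = [1.0] * num_plots
        elif len(self.top_to_bottom_height_ratios) != num_plots:
            raise ValueError(
                f"The number of items in `top_to_bottom_height_ratios` list "
                f"({len(self.top_to_bottom_height_ratios)}) is not the same as "
                f"the number of plots ({num_plots})"
            )
        return self
```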
dkist_processing_common/tests/test_stems.py
@@ -32,6 +32,7 @@ from dkist_processing_common.parsers.time import ObsIpStartTimeBud
  from dkist_processing_common.parsers.time import ReadoutExpTimeFlower
  from dkist_processing_common.parsers.time import TaskExposureTimesBud
  from dkist_processing_common.parsers.time import TaskReadoutExpTimesBud
+ from dkist_processing_common.parsers.time import TaskTimeBudBase
  from dkist_processing_common.parsers.time import VarianceCadenceBud
  from dkist_processing_common.parsers.unique_bud import TaskUniqueBud
  from dkist_processing_common.parsers.unique_bud import UniqueBud
@@ -61,6 +62,7 @@ class FitsReader(FitsAccessBase):
          self.gos_retarder_status: str = self.header.get("GOSRET")
          self.gos_polarizer_status: str = self.header.get("GOSPOL")
          self.wavelength: str = self.header.get("LINEWAV")
+         self.roundable_time: float = self.header.get("RTIME", 0.0)


  @pytest.fixture()
@@ -120,6 +122,7 @@ def basic_header_objs():
                  "DKIST011": "1903-01-01T12:00.000",
                  "LINEWAV": 0.0,
                  "GOSRET": "wrong",
+                 "RTIME": 2.3400000009999,
              }
          ),
          "thing3": fits.header.Header(
@@ -139,6 +142,26 @@ def basic_header_objs():
                  "DKIST011": "2023-09-28T10:23.000",
                  "LINEWAV": 666.0,
                  "GOSRET": "clear",
+             },
+         ),
+         "thing4": fits.header.Header(
+             {
+                 "DKIST004": "gain",
+                 "ID___013": "proposal_id_1",
+                 "id_key": 0,
+                 "constant": 6.28,
+                 "near": 1.23,
+                 "ID___012": "experiment_id_1",
+                 "XPOSURE": 100.0,
+                 "TEXPOSUR": 11.0,
+                 "NSUMEXP": 4,
+                 "DSPSNUM": 2,
+                 "DSPSREPS": 2,
+                 "DATE-OBS": "2022-06-17T22:00:03.000",
+                 "DKIST011": "2023-09-28T10:23.000",
+                 "LINEWAV": 666.0,
+                 "GOSRET": "clear",
+                 "RTIME": 2.340000004444,
              }
          ),
      }
@@ -203,6 +226,30 @@ def bad_header_objs():
                  "LINEWAV": 1.0,
              }
          ),
+         "thing2": fits.header.Header(
+             {
+                 "id_key": 1,
+                 "constant": 2.78,
+                 "near": 1.76,
+                 "DKIST004": "gain",
+                 "DSPSREPS": 2,
+                 "DSPSNUM": 2,
+                 "DATE-OBS": "2022-06-17T22:00:03.000",
+                 "LINEWAV": 1.0,
+             }
+         ),
+         "thing4": fits.header.Header(
+             {
+                 "id_key": 1,
+                 "constant": 6.66,
+                 "near": 1.76,
+                 "DKIST004": "dark",
+                 "DSPSREPS": 2,
+                 "DSPSNUM": 2,
+                 "DATE-OBS": "2022-06-17T22:00:03.000",
+                 "LINEWAV": 1.0,
+             }
+         ),
      }
      return (FitsReader.from_header(header, name=path) for path, header in bad_headers.items())

@@ -257,14 +304,21 @@ def test_unique_bud_non_unique_inputs(bad_header_objs):
          assert next(bud.petals)


- def test_task_unique_bud(basic_header_objs):
+ @pytest.mark.parametrize(
+     "ip_task_type",
+     [
+         pytest.param("observe", id="single_task_type"),
+         pytest.param(["observe", "gain"], id="task_type_list"),
+     ],
+ )
+ def test_task_unique_bud(basic_header_objs, ip_task_type):
      """
      Given: A set of headers with a constant value header key
      When: Ingesting headers with a TaskUniqueBud and asking for the value
      Then: The bud's value is the header constant value
      """
      bud = TaskUniqueBud(
-         constant_name="proposal", metadata_key="proposal_id", ip_task_type="observe"
+         constant_name="proposal", metadata_key="proposal_id", ip_task_types=ip_task_type
      )
      assert bud.stem_name == "proposal"
      for fo in basic_header_objs:
@@ -276,14 +330,21 @@ def test_task_unique_bud(basic_header_objs):
      assert petal[0].value == "proposal_id_1"


- def test_task_unique_bud_non_unique_inputs(bad_header_objs):
+ @pytest.mark.parametrize(
+     "ip_task_type",
+     [
+         pytest.param("observe", id="single_task_type"),
+         pytest.param(["dark", "gain"], id="task_type_list"),
+     ],
+ )
+ def test_task_unique_bud_non_unique_inputs(bad_header_objs, ip_task_type):
      """
      Given: A set of headers with a non-constant header key that is expected to be constant
      When: Ingesting headers with a UniqueBud and asking for the value
      Then: An error is raised
      """
      bud = TaskUniqueBud(
-         constant_name="constant", metadata_key="constant_thing", ip_task_type="observe"
+         constant_name="constant", metadata_key="constant_thing", ip_task_types=ip_task_type
      )
      assert bud.stem_name == "constant"
      for fo in bad_header_objs:
@@ -291,7 +352,7 @@ def test_task_unique_bud_non_unique_inputs(bad_header_objs):
          bud.update(key, fo)

      with pytest.raises(ValueError):
-         assert next(bud.petals)
+         list(bud.petals)


  def test_single_value_single_key_flower(basic_header_objs):
@@ -309,13 +370,40 @@ def test_single_value_single_key_flower(basic_header_objs):
      petals = sorted(list(flower.petals), key=lambda x: x.value)
      assert len(petals) == 3
      assert petals[0].value == 0
-     assert petals[0].keys == ["thing0", "thing3"]
+     assert petals[0].keys == ["thing0", "thing3", "thing4"]
      assert petals[1].value == 1
      assert petals[1].keys == ["thing1"]
      assert petals[2].value == 2
      assert petals[2].keys == ["thing2"]


+ @pytest.mark.parametrize(
+     "ip_task_type, expected_value",
+     [
+         pytest.param("dark", (2.34,), id="single_task_type"),
+         pytest.param(["dark", "gain"], (2.34,), id="task_type_list"),
+         pytest.param(["dark", "gain", "observe"], (0.0, 2.34), id="task_type_list2"),
+     ],
+ )
+ def test_task_time_base_bud(basic_header_objs, ip_task_type, expected_value):
+     """
+     Given: A set of headers with a value that needs to be rounded
+     When: Ingesting headers with a `TaskTimeBudBase` bud and asking for the value
+     Then: The bud's value is the header constant value
+     """
+     bud = TaskTimeBudBase(
+         stem_name="rounded_time", metadata_key="roundable_time", ip_task_types=ip_task_type
+     )
+     assert bud.stem_name == "rounded_time"
+     for fo in basic_header_objs:
+         key = fo.name
+         bud.update(key, fo)
+
+     petal = list(bud.petals)
+     assert len(petal) == 1
+     assert petal[0].value == expected_value
+
+
  def test_cs_step_flower(grouped_cal_sequence_headers, non_polcal_headers, max_cs_step_time_sec):
      """
      Given: A set of PolCal headers, non-PolCal headers, and the CSStepFlower
@@ -444,7 +532,7 @@ def test_exp_time_flower(basic_header_objs):
      assert petals[1].value == 12.345
      assert petals[1].keys == ["thing2"]
      assert petals[2].value == 100.0
-     assert petals[2].keys == ["thing3"]
+     assert petals[2].keys == ["thing3", "thing4"]


  def test_readout_exp_time_flower(basic_header_objs):
@@ -466,7 +554,7 @@ def test_readout_exp_time_flower(basic_header_objs):
      assert petals[1].value == 10.0
      assert petals[1].keys == ["thing0", "thing1"]
      assert petals[2].value == 11.0
-     assert petals[2].keys == ["thing3"]
+     assert petals[2].keys == ["thing3", "thing4"]


  def test_task_type_flower(task_with_gains_header_objs):
@@ -534,8 +622,8 @@ def test_fpa_exp_times_bud(basic_header_objs):
      When: Ingesting with a TaskExposureTimesBud
      Then: All (rounded) exposure times are accounted for in the resulting tuple
      """
-     dark_bud = TaskExposureTimesBud(stem_name=BudName.dark_exposure_times, ip_task_type="DARK")
-     obs_bud = TaskExposureTimesBud(stem_name="obs_exp_times", ip_task_type="OBSERVE")
+     dark_bud = TaskExposureTimesBud(stem_name=BudName.dark_exposure_times, ip_task_types="DARK")
+     obs_bud = TaskExposureTimesBud(stem_name="obs_exp_times", ip_task_types="OBSERVE")
      assert dark_bud.stem_name == BudName.dark_exposure_times.value
      for fo in basic_header_objs:
          key = fo.name
@@ -559,8 +647,8 @@ def test_readout_exp_times_bud(basic_header_objs):
      When: Ingesting with a TaskReadoutExpTimesBud
      Then: All (rounded) exposure times are accounted for in the resulting tuple
      """
-     dark_bud = TaskReadoutExpTimesBud(stem_name=BudName.dark_exposure_times, ip_task_type="DARK")
-     obs_bud = TaskReadoutExpTimesBud(stem_name="obs_exp_times", ip_task_type="OBSERVE")
+     dark_bud = TaskReadoutExpTimesBud(stem_name=BudName.dark_exposure_times, ip_task_types="DARK")
+     obs_bud = TaskReadoutExpTimesBud(stem_name="obs_exp_times", ip_task_types="OBSERVE")
      assert dark_bud.stem_name == BudName.dark_exposure_times.value
      for fo in basic_header_objs:
          key = fo.name
@@ -731,7 +819,7 @@ def test_task_near_bud(basic_header_objs):
      Then: The bud's value is the average of the header values of that task type
      """
      bud = TaskNearFloatBud(
-         constant_name="near", metadata_key="near_thing", ip_task_type="observe", tolerance=0.5
+         constant_name="near", metadata_key="near_thing", ip_task_types="observe", tolerance=0.5
      )
      assert bud.stem_name == "near"
      for fo in basic_header_objs:
@@ -743,6 +831,46 @@
      assert round(petal[0].value, 3) == 1.227


+ def test_multi_task_near_bud():
+     """
+     Given: A set of headers where multiple, but not all, task types have the same values
+     When: Ingesting the headers with a `TaskNearBud`
+     Then: When multiple tasks have the same value the correct value is returned. When a task has a different value, an
+         Error is raised.
+     """
+     header_dicts = [
+         {"DKIST004": "observe", "near": 3.2},
+         {"DKIST004": "dark", "near": 3.11},
+         {"DKIST004": "solar", "near": 1e3},
+     ]
+     header_objs = [FitsReader.from_header(h, f"{i}") for i, h in enumerate(header_dicts)]
+
+     bud = TaskNearFloatBud(
+         constant_name="near",
+         metadata_key="near_thing",
+         ip_task_types=["observe", "dark"],
+         tolerance=0.1,
+     )
+     for fo in header_objs:
+         bud.update(fo.name, fo)
+
+     petal = list(bud.petals)
+     assert len(petal) == 1
+     assert round(petal[0].value, 0) == 3.0
+
+     bad_bud = TaskNearFloatBud(
+         constant_name="near",
+         metadata_key="near_thing",
+         ip_task_types=["observe", "solar"],
+         tolerance=0.1,
+     )
+     for fo in header_objs:
+         bad_bud.update(fo.name, fo)
+
+     with pytest.raises(ValueError, match="near values are not close enough"):
+         list(bad_bud.petals)
+
+
  def test_near_bud_not_near_inputs(bad_header_objs):
      """
      Given: A set of headers with a header key that is expected to be in a given range but is not
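Across `test_stems.py` the keyword argument changes from `ip_task_type` (a single string) to `ip_task_types` (a string or a list of strings), a breaking API change consistent with the major version bump. The parser diffs themselves (`parsers/unique_bud.py`, `parsers/near_bud.py`, `parsers/time.py`) are not shown in this section; the sketch below only illustrates the kind of input normalization such an argument implies. The helper name and its behavior are hypothetical.

```python
# Hypothetical helper, for illustration only; the released parsers may
# normalize ip_task_types differently.
def normalize_ip_task_types(ip_task_types: str | list[str]) -> tuple[str, ...]:
    """Accept a single IP task type or a list of them and return an upper-cased tuple."""
    if isinstance(ip_task_types, str):
        ip_task_types = [ip_task_types]
    return tuple(task_type.upper() for task_type in ip_task_types)


# Both call styles seen in the updated tests normalize the same way.
assert normalize_ip_task_types("observe") == ("OBSERVE",)
assert normalize_ip_task_types(["dark", "gain"]) == ("DARK", "GAIN")
```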
dkist_processing_common/tests/test_write_l1.py
@@ -657,18 +657,6 @@ def test_spectral_line_keys(write_l1_task, mocker):
      assert header["SPECLN03"]


- @pytest.mark.flaky(max_reruns=10)
- def test_location_of_dkist(write_l1_task):
-     """
-     Given: function for retrieving the dkist location on earth
-     When: Call function
-     Then: result is the same as what is in the astropy online database
-     """
-     task, _, _ = write_l1_task
-     itrs = task.location_of_dkist
-     assert itrs == EarthLocation.of_site("dkist")
-
-
  def test_check_r0_ao_lock(write_l1_task_no_data):
      """
      :Given: a header
dkist_processing_common-11.0.0rc1.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dkist-processing-common
- Version: 10.9.0
+ Version: 11.0.0rc1
  Summary: Common task classes used by the DKIST science data processing pipelines
  Author-email: NSO / AURA <dkistdc@nso.edu>
  License: BSD-3-Clause
@@ -21,6 +21,7 @@ Requires-Dist: dkist-processing-core==5.1.1
  Requires-Dist: dkist-processing-pac<4.0,>=3.1
  Requires-Dist: dkist-service-configuration<3.0,>=2.0.2
  Requires-Dist: dkist-spectral-lines<4.0,>=3.0.0
+ Requires-Dist: solar-wavelength-calibration<2.0,>=1.0
  Requires-Dist: globus-sdk>=3.12.0
  Requires-Dist: gqlclient[pydantic]==1.2.3
  Requires-Dist: sqids==0.5.1
@@ -60,7 +61,7 @@ Requires-Dist: dkist-inventory<2.0,>=1.6.0; extra == "inventory"
  Provides-Extra: asdf
  Requires-Dist: dkist-inventory[asdf]<2.0,>=1.6.0; extra == "asdf"
  Provides-Extra: quality
- Requires-Dist: dkist-quality<2.0,>=1.2.1; extra == "quality"
+ Requires-Dist: dkist-quality<2.0,>=1.3.0rc1; extra == "quality"

  dkist-processing-common
  =======================