dkist_processing_test-1.21.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of dkist-processing-test has been flagged as potentially problematic.

Files changed (34)
  1. dkist_processing_test/__init__.py +11 -0
  2. dkist_processing_test/config.py +10 -0
  3. dkist_processing_test/models/__init__.py +0 -0
  4. dkist_processing_test/models/constants.py +18 -0
  5. dkist_processing_test/models/parameters.py +35 -0
  6. dkist_processing_test/tasks/__init__.py +10 -0
  7. dkist_processing_test/tasks/exercise_numba.py +42 -0
  8. dkist_processing_test/tasks/fail.py +11 -0
  9. dkist_processing_test/tasks/fake_science.py +101 -0
  10. dkist_processing_test/tasks/high_memory.py +20 -0
  11. dkist_processing_test/tasks/manual.py +26 -0
  12. dkist_processing_test/tasks/movie.py +53 -0
  13. dkist_processing_test/tasks/noop.py +15 -0
  14. dkist_processing_test/tasks/parse.py +88 -0
  15. dkist_processing_test/tasks/quality.py +30 -0
  16. dkist_processing_test/tasks/trial_output_data.py +46 -0
  17. dkist_processing_test/tasks/write_l1.py +64 -0
  18. dkist_processing_test/tests/__init__.py +0 -0
  19. dkist_processing_test/tests/conftest.py +64 -0
  20. dkist_processing_test/tests/test_parameters.py +103 -0
  21. dkist_processing_test/tests/test_tasks.py +724 -0
  22. dkist_processing_test/tests/test_workflows.py +9 -0
  23. dkist_processing_test/workflows/__init__.py +1 -0
  24. dkist_processing_test/workflows/common_tasks.py +231 -0
  25. dkist_processing_test/workflows/end_to_end.py +58 -0
  26. dkist_processing_test/workflows/exercise_numba.py +13 -0
  27. dkist_processing_test/workflows/fail.py +16 -0
  28. dkist_processing_test/workflows/noop.py +28 -0
  29. dkist_processing_test/workflows/resource_queue.py +19 -0
  30. dkist_processing_test/workflows/trial_end_to_end.py +64 -0
  31. dkist_processing_test-1.21.3.dist-info/METADATA +79 -0
  32. dkist_processing_test-1.21.3.dist-info/RECORD +34 -0
  33. dkist_processing_test-1.21.3.dist-info/WHEEL +5 -0
  34. dkist_processing_test-1.21.3.dist-info/top_level.txt +1 -0
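
The diff below shows the new test suite, dkist_processing_test/tests/test_tasks.py (item 21, +724 lines).
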
@@ -0,0 +1,724 @@
+ """
+ Tests for the tasks defined in this repo
+ """
+ import json
+ from dataclasses import asdict
+ from dataclasses import dataclass
+ from dataclasses import is_dataclass
+ from datetime import datetime
+ from random import randint
+ from typing import Type
+ from uuid import uuid4
+
+ import numpy as np
+ import pytest
+ from astropy.io import fits
+ from dkist_data_simulator.spec122 import Spec122Dataset
+ from dkist_header_validator import spec122_validator
+ from dkist_processing_common._util.scratch import WorkflowFileSystem
+ from dkist_processing_common.codecs.fits import fits_hdu_decoder
+ from dkist_processing_common.codecs.fits import fits_hdulist_encoder
+ from dkist_processing_common.models.constants import BudName
+ from dkist_processing_common.models.constants import ConstantsBase
+ from dkist_processing_common.models.tags import Tag
+ from dkist_processing_common.models.task_name import TaskName
+ from dkist_processing_common.tasks import QualityL0Metrics
+ from dkist_processing_common.tests.conftest import FakeGQLClient
+ from dkist_service_configuration.logging import logger
+
+ from dkist_processing_test.models.parameters import TestParameters
+ from dkist_processing_test.tasks import TestQualityL0Metrics
+ from dkist_processing_test.tasks.exercise_numba import ExerciseNumba
+ from dkist_processing_test.tasks.fail import FailTask
+ from dkist_processing_test.tasks.fake_science import GenerateCalibratedData
+ from dkist_processing_test.tasks.movie import AssembleTestMovie
+ from dkist_processing_test.tasks.movie import MakeTestMovieFrames
+ from dkist_processing_test.tasks.noop import NoOpTask
+ from dkist_processing_test.tasks.trial_output_data import TransferTestTrialData
+ from dkist_processing_test.tasks.write_l1 import WriteL1Data
+ from dkist_processing_test.tests.conftest import generate_214_l0_fits_frame
+ from dkist_processing_test.tests.conftest import S122Headers
+
+
+ @dataclass
+ class FakeConstantDb:
+     NUM_DSPS_REPEATS: int = 2
+     OBS_IP_START_TIME: str = "2024-06-12T12:00:00"
+     INSTRUMENT: str = "TEST"
+     AVERAGE_CADENCE: float = 10.0
+     MINIMUM_CADENCE: float = 10.0
+     MAXIMUM_CADENCE: float = 10.0
+     VARIANCE_CADENCE: float = 0.0
+     STOKES_PARAMS: tuple[str] = (
+         "I",
+         "Q",
+         "U",
+         "V",
+     )  # A tuple because lists aren't allowed on dataclasses
+     CONTRIBUTING_PROPOSAL_IDS: tuple[str] = ("abc", "def")
+     CONTRIBUTING_EXPERIMENT_IDS: tuple[str] = ("ghi", "jkl")
+
+
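# Note: FakeConstantDb stands in for the pipeline's constants database. Fixtures below
# convert it to a plain dict with asdict() and load it into the constants store via
# constants_linker(), so each task under test sees a realistic set of constants.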
+ @pytest.fixture()
+ def noop_task():
+     return NoOpTask(recipe_run_id=1, workflow_name="noop", workflow_version="VX.Y")
+
+
+ def test_noop_task(noop_task):
+     """
+     Given: A NoOpTask
+     When: Calling the task instance
+     Then: No errors raised
+     """
+     noop_task()
+
+
+ @pytest.fixture()
+ def fail_task():
+     return FailTask(recipe_run_id=1, workflow_name="fail", workflow_version="VX.Y")
+
+
+ def test_fail_task(fail_task):
+     """
+     Given: A FailTask
+     When: Calling the task instance
+     Then: RuntimeError raised
+     """
+     with pytest.raises(RuntimeError):
+         fail_task()
+
+
+ @pytest.fixture()
+ def generate_calibrated_data_task(
+     tmp_path,
+     recipe_run_id,
+     assign_input_dataset_doc_to_task,
+     link_constants_db,
+     parameter_file_object_key,
+     random_parameter_hdulist,
+ ):
+     number_of_frames = 10
+     link_constants_db(
+         recipe_run_id=recipe_run_id, constants_obj=FakeConstantDb(NUM_DSPS_REPEATS=number_of_frames)
+     )
+     with GenerateCalibratedData(
+         recipe_run_id=recipe_run_id, workflow_name="GenerateCalibratedData", workflow_version="VX.Y"
+     ) as task:
+         # configure input data
+         task.scratch = WorkflowFileSystem(scratch_base_path=tmp_path, recipe_run_id=recipe_run_id)
+         input_frame_set = Spec122Dataset(
+             instrument="vbi",
+             dataset_shape=(number_of_frames, 512, 512),
+             array_shape=(1, 512, 512),
+             time_delta=10,
+         )
+         # load input data
+         for idx, input_frame in enumerate(input_frame_set):
+             hdu = input_frame.hdu()
+             hdu.data = (
+                 np.ones(hdu.data.shape, dtype=int) * 10
+             )  # Because input data will be ints in test system
+             hdu.header["DSPSNUM"] = 1
+             hdul = fits.HDUList([hdu])
+             file_name = f"input_{idx}.fits"
+             task.write(
+                 data=hdul, tags=Tag.input(), relative_path=file_name, encoder=fits_hdulist_encoder
+             )
+
+         # Write parameter file
+         hdul = random_parameter_hdulist[0]
+         task.write(
+             data=hdul, tags=Tag.parameter(parameter_file_object_key), encoder=fits_hdulist_encoder
+         )
+
+         # This needs to be after we've tagged the parameter file
+         assign_input_dataset_doc_to_task(task, obs_ip_start_time=task.constants.obs_ip_start_time)
+
+         # result
+         yield task, number_of_frames
+         # teardown
+         task._purge()
+         # disconnect
+
+
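# Note: the ordering inside this fixture matters. The parameter file is written and tagged
# in scratch before assign_input_dataset_doc_to_task() builds TestParameters, presumably
# because the file-backed parameter can only be resolved once the tagged file exists.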
+ @pytest.fixture(scope="session")
+ def input_dataset_document_parameters_part_with_file(parameter_file_object_key):
+     param_name = "test_random_data"
+     value = {
+         "__file__": {"bucket": "foo", "objectKey": parameter_file_object_key},
+     }
+     value_id = randint(1000, 2000)
+     values = [
+         {
+             "parameterValueId": value_id,
+             "parameterValue": json.dumps(value),
+             "parameterValueStartDate": "1946-11-20",
+         }
+     ]
+     parameter = {"parameterName": param_name, "parameterValues": values}
+     parameters_list = [parameter]
+
+     return parameters_list
+
+
+ @pytest.fixture(scope="session")
+ def input_dataset_document_parameters_part_with_wavelength():
+     param_name = "test_wavelength_category"
+     value = {"wavelength": [1.0, 2.0, 3.0], "values": ["one", "two", "three"]}
+     value_id = randint(2000, 3000)
+     values = [
+         {
+             "parameterValueId": value_id,
+             "parameterValue": json.dumps(value),
+             "parameterValueStartDate": "1946-11-20",
+         }
+     ]
+     parameter = {"parameterName": param_name, "parameterValues": values}
+     parameter_list = [parameter]
+     return parameter_list
+
+
+ @pytest.fixture(scope="session")
+ def assign_input_dataset_doc_to_task(
+     input_dataset_document_parameters_part_with_file,
+     input_dataset_document_parameters_part_with_wavelength,
+ ):
+     def update_task(task, obs_ip_start_time=None):
+         doc_path = task.scratch.workflow_base_path / "dataset_parameters.json"
+         full_parameters = (
+             input_dataset_document_parameters_part_with_file
+             + input_dataset_document_parameters_part_with_wavelength
+         )
+         with open(doc_path, "w") as f:
+             f.write(json.dumps(full_parameters))
+         task.tag(doc_path, Tag.input_dataset_parameters())
+         task.parameters = TestParameters(
+             task.input_dataset_parameters, wavelength=2.0, obs_ip_start_time=obs_ip_start_time
+         )
+
+     return update_task
+
+
+ @pytest.fixture
+ def link_constants_db():
+     return constants_linker
+
+
+ def constants_linker(recipe_run_id: int, constants_obj):
+     """Take a dataclass (or dict) containing a constants DB and link it to a specific recipe run id."""
+     if is_dataclass(constants_obj):
+         constants_obj = asdict(constants_obj)
+     constants = ConstantsBase(recipe_run_id=recipe_run_id, task_name="test")
+     constants._purge()
+     constants._update(constants_obj)
+     return
+
+
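# Note: constants_linker() keys the constants store on recipe_run_id alone, which is what
# lets fixtures preload constants before the task instance that will read them is created.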
+ def test_generate_calibrated_data(generate_calibrated_data_task, mocker):
+     """
+     Given: A GenerateCalibratedData task
+     When: Calling the task instance
+     Then: Output files are generated for each input file with appropriate tags
+     """
+     mocker.patch(
+         "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+     )
+     task, number_of_frames = generate_calibrated_data_task
+     task()
+     # Then
+     calibrated_frame_hdus = list(
+         task.read(tags=[Tag.calibrated(), Tag.frame()], decoder=fits_hdu_decoder)
+     )
+
+     # Verify frames
+     assert len(calibrated_frame_hdus) == number_of_frames
+     for hdu in calibrated_frame_hdus:
+         assert "VBINMOSC" in hdu.header
+         assert "VBICMOSC" in hdu.header
+
+     # Verify debug frame was written
+     debug_frame_paths = list(task.read(tags=[Tag.debug(), Tag.frame()]))
+     assert len(debug_frame_paths) == 1
+     assert debug_frame_paths[0].exists()
+
+
+ class CommonDataset(Spec122Dataset):
+     # NOTE: We use ViSP data for unit tests because ViSP can be polarimetric,
+     # **BUT** in actual integration tests `*-processing-test` processes VBI data
+     def __init__(self):
+         super().__init__(
+             array_shape=(1, 10, 10),
+             time_delta=1,
+             dataset_shape=(2, 10, 10),
+             instrument="visp",
+             start_time=datetime(2020, 1, 1, 0, 0, 0),
+         )
+
+         self.add_constant_key("TELEVATN", 6.28)
+         self.add_constant_key("TAZIMUTH", 3.14)
+         self.add_constant_key("TTBLANGL", 1.23)
+         self.add_constant_key("INST_FOO", "bar")
+         self.add_constant_key("DKIST004", "observe")
+         self.add_constant_key("ID___005", "ip id")
+         self.add_constant_key("PAC__004", "Sapphire Polarizer")
+         self.add_constant_key("PAC__005", "31.2")
+         self.add_constant_key("PAC__006", "clear")
+         self.add_constant_key("PAC__007", "6.66")
+         self.add_constant_key("PAC__008", "DarkShutter")
+         self.add_constant_key("INSTRUME", "VISP")
+         self.add_constant_key("WAVELNTH", 1080.0)
+         self.add_constant_key("DATE-OBS", "2020-01-02T00:00:00.000")
+         self.add_constant_key("DATE-END", "2020-01-03T00:00:00.000")
+         self.add_constant_key("ID___013", "PROPOSAL_ID1")
+         self.add_constant_key("PAC__002", "clear")
+         self.add_constant_key("PAC__003", "on")
+         self.add_constant_key("TELSCAN", "Raster")
+         self.add_constant_key("DKIST008", 1)
+         self.add_constant_key("DKIST009", 1)
+         self.add_constant_key("BZERO", 0)
+         self.add_constant_key("BSCALE", 1)
+
+         # Because these test data are from "ViSP" we need to add these keys, which would
+         # normally be added by the `*-processing-visp` science task (they are not added by
+         # the `*-processing-test` science task because Test calibrates VBI data in
+         # integration tests).
+         self.add_constant_key("VSPMAP", 1)
+         self.add_constant_key("VSPNMAPS", 2)
+
+
+ @pytest.fixture()
+ def complete_common_header():
+     """
+     A header with some common by-frame keywords
+     """
+     # Taken from dkist-processing-common
+     ds = CommonDataset()
+     header_list = [
+         spec122_validator.validate_and_translate_to_214_l0(d.header(), return_type=fits.HDUList)[
+             0
+         ].header
+         for d in ds
+     ]
+
+     return header_list[0]
+
+
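# Note: this fixture runs each simulated SPEC-122 header through the validator's
# translate-to-214-L0 path, so downstream tasks receive the Level-0 SPEC-214 header
# layout they would see in production.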
+ @pytest.fixture(scope="function", params=[1, 4])
+ def write_l1_task(complete_common_header, request):
+     with WriteL1Data(
+         recipe_run_id=randint(0, 99999),
+         workflow_name="workflow_name",
+         workflow_version="workflow_version",
+     ) as task:
+         num_of_stokes_params = request.param
+         stokes_params = ["I", "Q", "U", "V"]
+
+         # Make sure polarimetric header validation happens correctly
+         if num_of_stokes_params == 4:
+             complete_common_header["VSPPOLMD"] = "observe_polarimetric"
+             complete_common_header["POL_NOIS"] = 0.1
+             complete_common_header["POL_SENS"] = 0.2
+         else:
+             complete_common_header["VSPPOLMD"] = "observe_intensity"
+
+         hdu = fits.PrimaryHDU(
+             data=np.random.random(size=(1, 128, 128)) * 10, header=complete_common_header
+         )
+         logger.info(f"{num_of_stokes_params=}")
+         hdul = fits.HDUList([hdu])
+         for i in range(num_of_stokes_params):
+             task.write(
+                 data=hdul,
+                 tags=[Tag.calibrated(), Tag.frame(), Tag.stokes(stokes_params[i])],
+                 encoder=fits_hdulist_encoder,
+             )
+         task.constants._update(
+             asdict(
+                 FakeConstantDb(
+                     AVERAGE_CADENCE=10,
+                     MINIMUM_CADENCE=10,
+                     MAXIMUM_CADENCE=10,
+                     VARIANCE_CADENCE=0,
+                     INSTRUMENT="TEST",
+                 )
+             )
+         )
+         yield task, num_of_stokes_params
+         task._purge()
+
+
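# Note: the fixture is parametrized over 1 (intensity-only) and 4 (full-Stokes) states and
# writes one CALIBRATED frame per Stokes state, so the test below expects exactly that many
# OUTPUT frames back.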
+ def test_write_l1_task(write_l1_task, mocker):
+     """
+     :Given: a write L1 task
+     :When: running the task
+     :Then: no errors are raised
+     """
+     mocker.patch(
+         "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+     )
+     task, num_of_stokes_params = write_l1_task
+     task()
+     files = list(task.read(tags=[Tag.frame(), Tag.output()]))
+     logger.info(f"{files=}")
+     assert len(files) == num_of_stokes_params
+     for file in files:
+         logger.info(f"Checking file {file}")
+         assert file.exists()
+
+
+ class BaseSpec214l0Dataset(Spec122Dataset):
+     def __init__(self, num_tasks: int, instrument: str = "vbi"):
+         super().__init__(
+             dataset_shape=(num_tasks, 4, 4),
+             array_shape=(1, 4, 4),
+             time_delta=1,
+             instrument=instrument,
+             file_schema="level0_spec214",
+         )
+
+     @property
+     def data(self):
+         return np.ones(shape=self.array_shape)
+
+
+ @pytest.fixture()
+ def test_l0_quality_metrics_task_class(quality_l0_task_types):
+     # Just to override `quality_task_types` to make testing more precise
+     class TestingL0QualityMetrics(TestQualityL0Metrics):
+         @property
+         def quality_task_types(self) -> list[str]:
+             return quality_l0_task_types
+
+     return TestingL0QualityMetrics
+
+
+ @pytest.fixture(params=[pytest.param(1, id="no_modstates"), pytest.param(4, id="with_modstates")])
+ def num_modstates(request):
+     return request.param
+
+
+ @pytest.fixture()
+ def quality_l0_task_types() -> list[str]:
+     # The task types we want to build L0 metrics for
+     return [TaskName.lamp_gain.value, TaskName.dark.value]
+
+
+ @pytest.fixture()
+ def dataset_task_types(quality_l0_task_types) -> list[str]:
+     # The task types that exist in the dataset, i.e., a larger set than we want to build metrics for
+     return quality_l0_task_types + [TaskName.solar_gain.value, TaskName.observe.value]
+
+
+ @pytest.fixture()
+ def quality_l0_task(
+     test_l0_quality_metrics_task_class,
+     tmp_path,
+     num_modstates,
+     dataset_task_types,
+     link_constants_db,
+     recipe_run_id,
+ ):
+     link_constants_db(
+         recipe_run_id=recipe_run_id, constants_obj={BudName.num_modstates.value: num_modstates}
+     )
+     with test_l0_quality_metrics_task_class(
+         recipe_run_id=recipe_run_id, workflow_name="TestTasks", workflow_version="vX.Y"
+     ) as task:
+         task.scratch = WorkflowFileSystem(scratch_base_path=tmp_path, recipe_run_id=recipe_run_id)
+         ds = BaseSpec214l0Dataset(num_tasks=len(dataset_task_types) * num_modstates)
+         for modstate in range(1, num_modstates + 1):
+             for frame, task_type in zip(ds, dataset_task_types):
+                 hdu = frame.hdu()
+                 hdul = fits.HDUList([hdu])
+                 task.write(
+                     data=hdul,
+                     tags=[Tag.input(), Tag.task(task_type), Tag.modstate(modstate)],
+                     encoder=fits_hdulist_encoder,
+                 )
+
+         yield task
+         task._purge()
+
+
+ def test_quality_l0_metrics(quality_l0_task, quality_l0_task_types, num_modstates):
+     """
+     Given: A subclassed `QualityL0Metrics` task and some data frames
+     When: Running the task
+     Then: The correct metrics are produced
+     """
+     task = quality_l0_task
+     task()
+
+     task_metric_names = ["FRAME_RMS", "FRAME_AVERAGE"]
+
+     for modstate in range(1, num_modstates + 1):
+         for metric_name in task_metric_names:
+             for task_type in quality_l0_task_types:
+                 tags = [Tag.quality(metric_name), Tag.quality_task(task_type)]
+                 if num_modstates > 1:
+                     tags.append(Tag.modstate(modstate))
+                 files = list(task.read(tags=tags))
+                 assert files  # there are some
+                 for file in files:
+                     with file.open() as f:
+                         data = json.load(f)
+                         assert isinstance(data, dict)
+                         assert data["x_values"]
+                         assert data["y_values"]
+                         assert all(isinstance(item, str) for item in data["x_values"])
+                         assert all(isinstance(item, float) for item in data["y_values"])
+                         assert len(data["x_values"]) == len(data["y_values"])
+
+     global_metric_names = ["DATASET_AVERAGE", "DATASET_RMS"]
+     for metric_name in global_metric_names:
+         files = list(task.read(tags=[Tag.quality(metric_name)]))
+         assert files
+         for file in files:
+             with file.open() as f:
+                 data = json.load(f)
+                 assert isinstance(data, dict)
+
+
+ def test_quality_l0_metrics_task_integration_run(recipe_run_id):
+     """
+     Given: A base `TestQualityL0Metrics` task with no constants or data
+     When: Running the task
+     Then: No error is raised
+     """
+     # I.e., this tests that the fixturization needed to exercise the quality L0 task above
+     # isn't hiding an inability to run in integration tests, where the setup is much more minimal
+     task = TestQualityL0Metrics(
+         recipe_run_id=recipe_run_id, workflow_name="integration-style", workflow_version="vX.Y"
+     )
+     task()
+
+
+ @pytest.fixture()
+ def make_movie_frames_task(tmp_path, recipe_run_id):
+     with MakeTestMovieFrames(
+         recipe_run_id=recipe_run_id, workflow_name="MakeMovieFrames", workflow_version="VX.Y"
+     ) as task:
+         task.scratch = WorkflowFileSystem(scratch_base_path=tmp_path, recipe_run_id=recipe_run_id)
+         task.testing_num_dsps_repeats = 10
+         task.num_steps = 1
+         task.num_exp_per_step = 1
+         task.constants._update(
+             asdict(FakeConstantDb(NUM_DSPS_REPEATS=task.testing_num_dsps_repeats))
+         )
+         ds = S122Headers(
+             array_shape=(1, 10, 10),
+             num_steps=task.num_steps,
+             num_exp_per_step=task.num_exp_per_step,
+             num_dsps_repeats=task.testing_num_dsps_repeats,
+         )
+         header_generator = (d.header() for d in ds)
+         for d, header in enumerate(header_generator):
+             data = np.ones((1, 10, 10))
+             data[:, : d * 10, :] = 0.0
+             hdl = generate_214_l0_fits_frame(data=data, s122_header=header)
+             task.write(
+                 data=hdl,
+                 tags=[
+                     Tag.calibrated(),
+                     Tag.dsps_repeat(d + 1),
+                 ],
+                 encoder=fits_hdulist_encoder,
+             )
+         yield task
+         task._purge()
+
+
+ def test_make_movie_frames_task(make_movie_frames_task, mocker):
+     """
+     :Given: a make_movie_frames_task task
+     :When: running the task
+     :Then: no errors are raised and a movie frame is created for each DSPS repeat
+     """
+     mocker.patch(
+         "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+     )
+     task = make_movie_frames_task
+     task()
+     movie_frames = list(task.read(tags=[Tag.movie_frame()]))
+     logger.info(f"{movie_frames=}")
+     assert len(movie_frames) == task.testing_num_dsps_repeats
+     for frame in movie_frames:
+         assert frame.exists()
+         hdul = fits.open(frame)
+         assert len(hdul[0].data.shape) == 2
+
+
+ @pytest.fixture()
+ def assemble_test_movie_task(tmp_path, recipe_run_id):
+     with AssembleTestMovie(
+         recipe_run_id=recipe_run_id, workflow_name="AssembleTestMovie", workflow_version="VX.Y"
+     ) as task:
+         task.scratch = WorkflowFileSystem(scratch_base_path=tmp_path)
+         task.testing_num_dsps_repeats = 10
+         task.num_steps = 1
+         task.num_exp_per_step = 1
+         task.constants._update(
+             asdict(FakeConstantDb(NUM_DSPS_REPEATS=task.testing_num_dsps_repeats))
+         )
+         ds = S122Headers(
+             array_shape=(1, 10, 10),
+             num_steps=task.num_steps,
+             num_exp_per_step=task.num_exp_per_step,
+             num_dsps_repeats=task.testing_num_dsps_repeats,
+         )
+         header_generator = (d.header() for d in ds)
+         for d, header in enumerate(header_generator):
+             data = np.ones((10, 10))
+             data[: d * 10, :] = 0.0
+             hdl = generate_214_l0_fits_frame(data=data, s122_header=header)
+             task.write(
+                 data=hdl,
+                 tags=[
+                     Tag.movie_frame(),
+                     Tag.dsps_repeat(d + 1),
+                 ],
+                 encoder=fits_hdulist_encoder,
+             )
+         yield task
+         task._purge()
+
+
+ def test_assemble_test_movie_task(assemble_test_movie_task, mocker):
+     """
+     :Given: an assemble_test_movie task
+     :When: running the task
+     :Then: no errors are raised and a movie file is created
+     """
+     mocker.patch(
+         "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+     )
+     task = assemble_test_movie_task
+     task()
+     movie_file = list(task.read(tags=[Tag.movie()]))
+     logger.info(f"{movie_file=}")
+     assert len(movie_file) == 1
+     assert movie_file[0].exists()
+
+
+ @pytest.fixture
+ def trial_output_task(recipe_run_id, tmp_path, mocker):
+     mocker.patch(
+         "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient",
+         new=FakeGQLClient,
+     )
+     proposal_id = "test_proposal_id"
+     with TransferTestTrialData(
+         recipe_run_id=recipe_run_id,
+         workflow_name="workflow_name",
+         workflow_version="workflow_version",
+     ) as task:
+         task.scratch = WorkflowFileSystem(
+             recipe_run_id=recipe_run_id,
+             scratch_base_path=tmp_path,
+         )
+         task.constants._update({"PROPOSAL_ID": proposal_id})
+
+         file_count = 0
+         # Write a debug frame
+         debug_file_obj = uuid4().hex.encode("utf8")
+         task.write(debug_file_obj, tags=[Tag.debug(), Tag.frame()])
+         file_count += 1
+
+         # Write an intermediate frame that we want to transfer
+         intermediate_keep_file_obj = uuid4().hex.encode("utf8")
+         task.write(
+             intermediate_keep_file_obj,
+             tags=[Tag.intermediate(), Tag.frame(), Tag.task("DUMMY")],
+         )
+         file_count += 1
+
+         # Write an intermediate frame that we don't want to transfer
+         intermediate_discard_file_obj = uuid4().hex.encode("utf8")
+         task.write(
+             intermediate_discard_file_obj,
+             tags=[Tag.intermediate(), Tag.frame(), Tag.task("WHO_CARES")],
+         )
+
+         # An output frame
+         output_file_obj = uuid4().hex.encode("utf8")
+         task.write(output_file_obj, tags=[Tag.output(), Tag.frame()])
+         file_count += 1
+
+         # Output dataset inventory
+         dsi_file_obj = uuid4().hex.encode("utf8")
+         task.write(dsi_file_obj, tags=[Tag.output(), Tag.dataset_inventory()])
+         file_count += 1
+
+         # Output asdf
+         asdf_file_obj = uuid4().hex.encode("utf8")
+         task.write(asdf_file_obj, tags=[Tag.output(), Tag.asdf()])
+         file_count += 1
+
+         # Output movie
+         movie_file_obj = uuid4().hex.encode("utf8")
+         task.write(movie_file_obj, tags=[Tag.output(), Tag.movie()])
+         file_count += 1
+
+         # Output quality data
+         quality_data_file_obj = uuid4().hex.encode("utf8")
+         task.write(quality_data_file_obj, tags=Tag.quality_data())
+         file_count += 1
+
+         # Output quality report
+         quality_report_file_obj = uuid4().hex.encode("utf8")
+         task.write(quality_report_file_obj, tags=[Tag.output(), Tag.quality_report()])
+         file_count += 1
+
+         # Specifically tagged files
+         task.write(uuid4().hex.encode("utf8"), tags=[Tag.frame(), "FOO", "BAR"])
+         file_count += 1
+         task.write(uuid4().hex.encode("utf8"), tags=[Tag.frame(), "BAZ"])
+         file_count += 1
+
+         # This one won't get transferred
+         task.write(uuid4().hex.encode("utf8"), tags=[Tag.frame(), "FOO"])
+
+         yield task, file_count
+         task._purge()
+
+
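# Note: file_count tracks only the files the transfer list is expected to pick up. The
# WHO_CARES intermediate frame and the frame tagged with FOO alone are deliberately written
# without incrementing it, so the assertion below also proves they are excluded.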
+ def test_transfer_test_trial_data(trial_output_task, mocker):
+     """
+     Given: A TransferTestTrialData task with associated frames
+     When: Running the task and building the transfer list
+     Then: No errors occur and the transfer list has the correct number of items
+     """
+     task, expected_num_items = trial_output_task
+
+     mocker.patch(
+         "dkist_processing_common.tasks.mixin.globus.GlobusMixin.globus_transfer_scratch_to_object_store"
+     )
+     mocker.patch(
+         "dkist_processing_test.tasks.trial_output_data.TransferTestTrialData.remove_folder_objects"
+     )
+
+     # Just make sure the thing runs with no errors
+     task()
+
+     transfer_list = task.build_transfer_list()
+     assert len(transfer_list) == expected_num_items
+
+
+ @pytest.fixture()
+ def exercise_numba_task(recipe_run_id):
+     with ExerciseNumba(
+         recipe_run_id=recipe_run_id, workflow_name="ExerciseNumba", workflow_version="VX.Y"
+     ) as task:
+         yield task
+
+
+ def test_exercise_numba_task(exercise_numba_task):
+     """
+     :Given: an exercise_numba task
+     :When: running the task
+     :Then: the numba module can be loaded and a simple numba-compiled method runs with a speedup
+     """
+     original = np.linspace(0.0, 10.0, 1001)
+     task = exercise_numba_task
+     task()
+     assert task.speedup > 1.0
+     assert np.all(np.equal(original, task.sorted_array))