dkist-processing-test 1.18.3__tar.gz → 1.18.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dkist-processing-test might be problematic. Click here for more details.

Files changed (43) hide show
  1. {dkist-processing-test-1.18.3/dkist_processing_test.egg-info → dkist-processing-test-1.18.5}/PKG-INFO +1 -1
  2. dkist-processing-test-1.18.5/dkist_processing_test/models/constants.py +18 -0
  3. dkist-processing-test-1.18.5/dkist_processing_test/tasks/quality.py +30 -0
  4. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tests/test_tasks.py +132 -0
  5. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5/dkist_processing_test.egg-info}/PKG-INFO +1 -1
  6. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test.egg-info/SOURCES.txt +1 -0
  7. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test.egg-info/requires.txt +2 -2
  8. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/setup.cfg +2 -2
  9. dkist-processing-test-1.18.3/dkist_processing_test/tasks/quality.py +0 -18
  10. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/.gitignore +0 -0
  11. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/.pre-commit-config.yaml +0 -0
  12. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/README.rst +0 -0
  13. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/bitbucket-pipelines.yml +0 -0
  14. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/__init__.py +0 -0
  15. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/config.py +0 -0
  16. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/models/__init__.py +0 -0
  17. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/models/parameters.py +0 -0
  18. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/__init__.py +0 -0
  19. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/exercise_numba.py +0 -0
  20. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/fail.py +0 -0
  21. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/fake_science.py +0 -0
  22. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/high_memory.py +0 -0
  23. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/manual.py +0 -0
  24. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/movie.py +0 -0
  25. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/noop.py +0 -0
  26. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/parse.py +0 -0
  27. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/trial_output_data.py +0 -0
  28. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tasks/write_l1.py +0 -0
  29. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tests/__init__.py +0 -0
  30. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tests/conftest.py +0 -0
  31. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tests/test_parameters.py +0 -0
  32. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/tests/test_workflows.py +0 -0
  33. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/workflows/__init__.py +0 -0
  34. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/workflows/common_tasks.py +0 -0
  35. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/workflows/end_to_end.py +0 -0
  36. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/workflows/exercise_numba.py +0 -0
  37. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/workflows/fail.py +0 -0
  38. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/workflows/noop.py +0 -0
  39. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/workflows/resource_queue.py +0 -0
  40. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test/workflows/trial_end_to_end.py +0 -0
  41. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test.egg-info/dependency_links.txt +0 -0
  42. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/dkist_processing_test.egg-info/top_level.txt +0 -0
  43. {dkist-processing-test-1.18.3 → dkist-processing-test-1.18.5}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dkist-processing-test
3
- Version: 1.18.3
3
+ Version: 1.18.5
4
4
  Summary: Example Instrument code that is used by the DKIST Science Data Processing pipelines to test processing infrastructure.
5
5
  Home-page: https://bitbucket.org/dkistdc/dkist-processing-test/src/main/
6
6
  Author: NSO / AURA
@@ -0,0 +1,18 @@
1
+ from dkist_processing_common.models.constants import BudName
2
+ from dkist_processing_common.models.constants import ConstantsBase
3
+
4
+
5
+ class TestConstants(ConstantsBase):
6
+ """
7
+ Constants for the test instrument.
8
+
9
+ This class is only used by the `TestQualityL0Metrics` task to allow us to check for `num_modstates`.
10
+ """
11
+
12
+ @property
13
+ def num_modstates(self) -> int:
14
+ """Return the number of modstates."""
15
+ # Use .get with default because integration tests use VBI, which doesn't have a modstate key and thus the db
16
+ # entry won't be there.
17
+ # In other words, we get the actual db value in unit tests and 1 in integration tests
18
+ return self._db_dict.get(BudName.num_modstates, 1)
@@ -0,0 +1,30 @@
1
+ """Quality task definition."""
2
+ from typing import Iterable
3
+ from typing import Type
4
+
5
+ from dkist_processing_common.models.constants import ConstantsBase
6
+ from dkist_processing_common.tasks import AssembleQualityData
7
+ from dkist_processing_common.tasks import QualityL0Metrics
8
+
9
+ __all__ = ["TestQualityL0Metrics", "TestAssembleQualityData"]
10
+
11
+ from dkist_processing_test.models.constants import TestConstants
12
+
13
+
14
+ class TestQualityL0Metrics(QualityL0Metrics):
15
+ @property
16
+ def constants_model_class(self) -> Type[ConstantsBase]:
17
+ return TestConstants
18
+
19
+ @property
20
+ def modstate_list(self) -> Iterable[int] | None:
21
+ if self.constants.num_modstates > 1:
22
+ return range(1, self.constants.num_modstates + 1)
23
+
24
+ return None
25
+
26
+
27
+ class TestAssembleQualityData(AssembleQualityData):
28
+ @property
29
+ def polcal_label_list(self) -> list[str] | None:
30
+ return ["Beam 1"]
@@ -7,6 +7,7 @@ from dataclasses import dataclass
7
7
  from dataclasses import is_dataclass
8
8
  from datetime import datetime
9
9
  from random import randint
10
+ from typing import Type
10
11
  from uuid import uuid4
11
12
 
12
13
  import numpy as np
@@ -17,12 +18,16 @@ from dkist_header_validator import spec122_validator
17
18
  from dkist_processing_common._util.scratch import WorkflowFileSystem
18
19
  from dkist_processing_common.codecs.fits import fits_hdu_decoder
19
20
  from dkist_processing_common.codecs.fits import fits_hdulist_encoder
21
+ from dkist_processing_common.models.constants import BudName
20
22
  from dkist_processing_common.models.constants import ConstantsBase
21
23
  from dkist_processing_common.models.tags import Tag
24
+ from dkist_processing_common.models.task_name import TaskName
25
+ from dkist_processing_common.tasks import QualityL0Metrics
22
26
  from dkist_processing_common.tests.conftest import FakeGQLClient
23
27
  from dkist_service_configuration.logging import logger
24
28
 
25
29
  from dkist_processing_test.models.parameters import TestParameters
30
+ from dkist_processing_test.tasks import TestQualityL0Metrics
26
31
  from dkist_processing_test.tasks.exercise_numba import ExerciseNumba
27
32
  from dkist_processing_test.tasks.fail import FailTask
28
33
  from dkist_processing_test.tasks.fake_science import GenerateCalibratedData
@@ -360,6 +365,133 @@ def test_write_l1_task(write_l1_task, mocker):
360
365
  assert file.exists
361
366
 
362
367
 
368
+ class BaseSpec214l0Dataset(Spec122Dataset):
369
+ def __init__(self, num_tasks: int, instrument: str = "vbi"):
370
+ super().__init__(
371
+ dataset_shape=(num_tasks, 4, 4),
372
+ array_shape=(1, 4, 4),
373
+ time_delta=1,
374
+ instrument=instrument,
375
+ file_schema="level0_spec214",
376
+ )
377
+
378
+ @property
379
+ def data(self):
380
+ return np.ones(shape=self.array_shape)
381
+
382
+
383
+ @pytest.fixture()
384
+ def test_l0_quality_metrics_task_class(quality_l0_task_types):
385
+ # Just to override `quality_task_types` to make testing more precise
386
+ class TestingL0QualityMetrics(TestQualityL0Metrics):
387
+ @property
388
+ def quality_task_types(self) -> list[str]:
389
+ return quality_l0_task_types
390
+
391
+ return TestingL0QualityMetrics
392
+
393
+
394
+ @pytest.fixture(params=[pytest.param(1, id="no_modstates"), pytest.param(4, id="with_modstates")])
395
+ def num_modstates(request):
396
+ return request.param
397
+
398
+
399
+ @pytest.fixture()
400
+ def quality_l0_task_types() -> list[str]:
401
+ # The tasks types we want to build l0 metrics for
402
+ return [TaskName.lamp_gain.value, TaskName.dark.value]
403
+
404
+
405
+ @pytest.fixture()
406
+ def dataset_task_types(quality_l0_task_types) -> list[str]:
407
+ # The task types that exist in the dataset. I.e., a larger set than we want to build metrics for.
408
+ return quality_l0_task_types + [TaskName.solar_gain.value, TaskName.observe.value]
409
+
410
+
411
+ @pytest.fixture()
412
+ def quality_l0_task(
413
+ test_l0_quality_metrics_task_class,
414
+ tmp_path,
415
+ num_modstates,
416
+ dataset_task_types,
417
+ link_constants_db,
418
+ recipe_run_id,
419
+ ):
420
+ link_constants_db(
421
+ recipe_run_id=recipe_run_id, constants_obj={BudName.num_modstates.value: num_modstates}
422
+ )
423
+ with test_l0_quality_metrics_task_class(
424
+ recipe_run_id=recipe_run_id, workflow_name="TestTasks", workflow_version="vX.Y"
425
+ ) as task:
426
+ task.scratch = WorkflowFileSystem(scratch_base_path=tmp_path, recipe_run_id=recipe_run_id)
427
+ ds = BaseSpec214l0Dataset(num_tasks=len(dataset_task_types) * num_modstates)
428
+ for modstate in range(1, num_modstates + 1):
429
+ for frame, task_type in zip(ds, dataset_task_types):
430
+ hdu = frame.hdu()
431
+ hdul = fits.HDUList([hdu])
432
+ task.write(
433
+ data=hdul,
434
+ tags=[Tag.input(), Tag.task(task_type), Tag.modstate(modstate)],
435
+ encoder=fits_hdulist_encoder,
436
+ )
437
+
438
+ yield task
439
+ task._purge()
440
+
441
+
442
+ def test_quality_l0_metrics(quality_l0_task, quality_l0_task_types, num_modstates):
443
+ """
444
+ Given: A subclassed `QualityL0Metrics` task and some data frames
445
+ When: Running the task
446
+ Then: The correct metrics are produced
447
+ """
448
+ task = quality_l0_task
449
+ task()
450
+
451
+ task_metric_names = ["FRAME_RMS", "FRAME_AVERAGE"]
452
+
453
+ for modstate in range(1, num_modstates + 1):
454
+ for metric_name in task_metric_names:
455
+ for task_type in quality_l0_task_types:
456
+ tags = [Tag.quality(metric_name), Tag.quality_task(task_type)]
457
+ if num_modstates > 1:
458
+ tags.append(Tag.modstate(modstate))
459
+ files = list(task.read(tags=tags))
460
+ assert files # there are some
461
+ for file in files:
462
+ with file.open() as f:
463
+ data = json.load(f)
464
+ assert isinstance(data, dict)
465
+ assert data["x_values"]
466
+ assert data["y_values"]
467
+ assert all(isinstance(item, str) for item in data["x_values"])
468
+ assert all(isinstance(item, float) for item in data["y_values"])
469
+ assert len(data["x_values"]) == len(data["y_values"])
470
+
471
+ global_metric_names = ["DATASET_AVERAGE", "DATASET_RMS"]
472
+ for metric_name in global_metric_names:
473
+ files = list(task.read(tags=[Tag.quality(metric_name)]))
474
+ assert files
475
+ for file in files:
476
+ with file.open() as f:
477
+ data = json.load(f)
478
+ assert isinstance(data, dict)
479
+
480
+
481
+ def test_quality_l0_metrics_task_integration_run(recipe_run_id):
482
+ """
483
+ Given: A base `TestQualityL0Metrics` task with no constants or data
484
+ When: Running the task
485
+ Then: No error is raised
486
+ """
487
+ # I.e., this tests that the fixturization needed to get good testing on the quality L0 task isn't hiding
488
+ # an inability to run in integration tests where the setup is much more minimal
489
+ task = TestQualityL0Metrics(
490
+ recipe_run_id=recipe_run_id, workflow_name="integration-style", workflow_version="vX.Y"
491
+ )
492
+ task()
493
+
494
+
363
495
  @pytest.fixture()
364
496
  def make_movie_frames_task(tmp_path, recipe_run_id):
365
497
  with MakeTestMovieFrames(
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dkist-processing-test
3
- Version: 1.18.3
3
+ Version: 1.18.5
4
4
  Summary: Example Instrument code that is used by the DKIST Science Data Processing pipelines to test processing infrastructure.
5
5
  Home-page: https://bitbucket.org/dkistdc/dkist-processing-test/src/main/
6
6
  Author: NSO / AURA
@@ -12,6 +12,7 @@ dkist_processing_test.egg-info/dependency_links.txt
12
12
  dkist_processing_test.egg-info/requires.txt
13
13
  dkist_processing_test.egg-info/top_level.txt
14
14
  dkist_processing_test/models/__init__.py
15
+ dkist_processing_test/models/constants.py
15
16
  dkist_processing_test/models/parameters.py
16
17
  dkist_processing_test/tasks/__init__.py
17
18
  dkist_processing_test/tasks/exercise_numba.py
@@ -1,7 +1,7 @@
1
- dkist-processing-common==8.2.2
1
+ dkist-processing-common==9.0.0
2
2
  dkist-header-validator==5.1.1
3
3
  dkist-service-configuration==2.2
4
- dkist-fits-specifications==4.5.0
4
+ dkist-fits-specifications==4.6.0
5
5
  numba==0.59.1
6
6
  astropy==6.1.0
7
7
  numpy==1.26.4
@@ -17,10 +17,10 @@ setup_requires = setuptools_scm
17
17
  packages = find:
18
18
  include_package_data = True
19
19
  install_requires =
20
- dkist-processing-common == 8.2.2
20
+ dkist-processing-common == 9.0.0
21
21
  dkist-header-validator == 5.1.1
22
22
  dkist-service-configuration == 2.2
23
- dkist-fits-specifications == 4.5.0
23
+ dkist-fits-specifications == 4.6.0
24
24
  numba == 0.59.1
25
25
  astropy == 6.1.0
26
26
  numpy == 1.26.4
@@ -1,18 +0,0 @@
1
- """Quality task definition."""
2
- from dkist_processing_common.models.tags import Tag
3
- from dkist_processing_common.tasks import AssembleQualityData
4
- from dkist_processing_common.tasks import QualityL0Metrics
5
-
6
- __all__ = ["TestQualityL0Metrics", "TestAssembleQualityData"]
7
-
8
-
9
- class TestQualityL0Metrics(QualityL0Metrics):
10
- def run(self) -> None:
11
- paths = self.read(tags=[Tag.input()])
12
- self.calculate_l0_metrics(paths=paths)
13
-
14
-
15
- class TestAssembleQualityData(AssembleQualityData):
16
- @property
17
- def polcal_label_list(self) -> list[str] | None:
18
- return ["Beam 1"]