dkist-processing-test 1.21.3rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dkist-processing-test might be problematic.

Files changed (34)
  1. dkist_processing_test/__init__.py +11 -0
  2. dkist_processing_test/config.py +10 -0
  3. dkist_processing_test/models/__init__.py +0 -0
  4. dkist_processing_test/models/constants.py +18 -0
  5. dkist_processing_test/models/parameters.py +35 -0
  6. dkist_processing_test/tasks/__init__.py +10 -0
  7. dkist_processing_test/tasks/exercise_numba.py +42 -0
  8. dkist_processing_test/tasks/fail.py +11 -0
  9. dkist_processing_test/tasks/fake_science.py +101 -0
  10. dkist_processing_test/tasks/high_memory.py +20 -0
  11. dkist_processing_test/tasks/manual.py +26 -0
  12. dkist_processing_test/tasks/movie.py +53 -0
  13. dkist_processing_test/tasks/noop.py +15 -0
  14. dkist_processing_test/tasks/parse.py +88 -0
  15. dkist_processing_test/tasks/quality.py +30 -0
  16. dkist_processing_test/tasks/trial_output_data.py +46 -0
  17. dkist_processing_test/tasks/write_l1.py +64 -0
  18. dkist_processing_test/tests/__init__.py +0 -0
  19. dkist_processing_test/tests/conftest.py +64 -0
  20. dkist_processing_test/tests/test_parameters.py +103 -0
  21. dkist_processing_test/tests/test_tasks.py +724 -0
  22. dkist_processing_test/tests/test_workflows.py +9 -0
  23. dkist_processing_test/workflows/__init__.py +1 -0
  24. dkist_processing_test/workflows/common_tasks.py +231 -0
  25. dkist_processing_test/workflows/end_to_end.py +58 -0
  26. dkist_processing_test/workflows/exercise_numba.py +13 -0
  27. dkist_processing_test/workflows/fail.py +16 -0
  28. dkist_processing_test/workflows/noop.py +28 -0
  29. dkist_processing_test/workflows/resource_queue.py +19 -0
  30. dkist_processing_test/workflows/trial_end_to_end.py +64 -0
  31. dkist_processing_test-1.21.3rc1.dist-info/METADATA +79 -0
  32. dkist_processing_test-1.21.3rc1.dist-info/RECORD +34 -0
  33. dkist_processing_test-1.21.3rc1.dist-info/WHEEL +5 -0
  34. dkist_processing_test-1.21.3rc1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,11 @@
+ from importlib.metadata import PackageNotFoundError
+ from importlib.metadata import version
+
+ from dkist_service_configuration.logging import logger  # first import, to call logging.basicConfig
+
+
+ try:
+     __version__ = version(distribution_name=__name__)
+ except PackageNotFoundError:
+     # package is not installed
+     __version__ = "unknown"
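
The try/except above is the standard importlib.metadata fallback for code running from a source checkout rather than an installed distribution. A minimal sketch of the same pattern, using a deliberately fake distribution name so the except branch fires:

    from importlib.metadata import PackageNotFoundError, version

    try:
        v = version("a-distribution-that-is-not-installed")  # fake name, for illustration
    except PackageNotFoundError:
        v = "unknown"
    print(v)  # unknown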
@@ -0,0 +1,10 @@
+ """Configurations for the dkist-processing-test package."""
+ from dkist_processing_common.config import DKISTProcessingCommonConfiguration
+
+
+ class DKISTProcessingTestConfigurations(DKISTProcessingCommonConfiguration):
+     pass  # nothing custom yet
+
+
+ dkist_processing_test_configurations = DKISTProcessingTestConfigurations()
+ dkist_processing_test_configurations.log_configurations()
File without changes
@@ -0,0 +1,18 @@
+ from dkist_processing_common.models.constants import BudName
+ from dkist_processing_common.models.constants import ConstantsBase
+
+
+ class TestConstants(ConstantsBase):
+     """
+     Constants for the test instrument.
+
+     This class is only used in the `TestQualityL0Metrics` task to allow us to check for `num_modstates`.
+     """
+
+     @property
+     def num_modstates(self) -> int:
+         """Return the number of modstates."""
+         # Use .get with a default because integration tests use VBI, which doesn't have a modstate key
+         # and thus the db entry won't be there.
+         # In other words, we get the actual db value in unit tests and 1 in integration tests.
+         return self._db_dict.get(BudName.num_modstates, 1)
@@ -0,0 +1,35 @@
+ from datetime import datetime
+
+ import numpy as np
+ from dkist_processing_common.models.parameters import ParameterBase
+ from dkist_processing_common.models.parameters import ParameterWavelengthMixin
+
+
+ class TestParameters(ParameterBase, ParameterWavelengthMixin):
+     """Class to test loading parameters from a file."""
+
+     @property
+     def randomness(self) -> tuple[float, float]:
+         """A dummy parameter that requires loading a file."""
+         param_dict = self._find_most_recent_past_value(
+             "test_random_data", start_date=datetime.now()
+         )
+         data = self._load_param_value_from_fits(param_dict)
+         mean = np.nanmean(data)
+         std = np.nanstd(data)
+
+         return mean, std
+
+     @property
+     def constant(self) -> float:
+         """A dummy parameter that depends on the same file as a different parameter."""
+         param_dict = self._find_most_recent_past_value("test_random_data")
+         data = self._load_param_value_from_fits(param_dict, hdu=1)
+         constant = np.median(data)
+
+         return float(constant)
+
+     @property
+     def wavelength_category(self) -> str:
+         """A dummy parameter that depends on wavelength."""
+         return self._find_parameter_closest_wavelength("test_wavelength_category")
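
Both `randomness` and `constant` read the same parameter file, differing only in which HDU they reduce. A self-contained sketch of that reduction using plain numpy/astropy (array shapes and values are invented for the illustration; the real file comes from the parameter store):

    import numpy as np
    from astropy.io import fits

    rng = np.random.default_rng()
    hdul = fits.HDUList(
        [
            fits.PrimaryHDU(rng.normal(10.0, 2.0, size=(100, 100))),  # HDU 0: random field
            fits.ImageHDU(np.full((10, 10), 5.0)),  # HDU 1: constant field
        ]
    )

    randomness = (np.nanmean(hdul[0].data), np.nanstd(hdul[0].data))  # roughly (10.0, 2.0)
    constant = float(np.median(hdul[1].data))  # exactly 5.0
    print(randomness, constant)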
@@ -0,0 +1,10 @@
+ from dkist_processing_test.tasks.exercise_numba import *
+ from dkist_processing_test.tasks.fail import *
+ from dkist_processing_test.tasks.fake_science import *
+ from dkist_processing_test.tasks.high_memory import *
+ from dkist_processing_test.tasks.movie import *
+ from dkist_processing_test.tasks.noop import *
+ from dkist_processing_test.tasks.parse import *
+ from dkist_processing_test.tasks.quality import *
+ from dkist_processing_test.tasks.trial_output_data import *
+ from dkist_processing_test.tasks.write_l1 import *
@@ -0,0 +1,42 @@
+ """
+ Basic exercising of numba
+ """
+ import timeit
+
+ import numpy as np
+ from dkist_processing_common.tasks import WorkflowTaskBase
+ from dkist_service_configuration.logging import logger
+ from numba import njit
+
+ __all__ = ["ExerciseNumba"]
+
+
+ class ExerciseNumba(WorkflowTaskBase):
+     def run(self):
+         bubblesort_numba = njit(self.bubblesort)
+         original = np.linspace(0.0, 10.0, 1001)
+         shuffled_1 = original.copy()
+         np.random.shuffle(shuffled_1)
+         shuffled_2 = shuffled_1.copy()
+         foo_1 = timeit.Timer(lambda: self.bubblesort(shuffled_1), globals=globals())
+         time_1 = foo_1.timeit(100)
+         foo_2 = timeit.Timer(lambda: bubblesort_numba(shuffled_2), globals=globals())
+         time_2 = foo_2.timeit(100)
+         speedup = time_1 / time_2
+         logger.info(f"Normal task execution time: {time_1} secs")
+         logger.info(f"Numba task execution time: {time_2} secs")
+         logger.info(f"ExerciseNumba: Achieved a speedup of {speedup} using numba.")
+         self.speedup = speedup
+         self.sorted_array = shuffled_2
+
+     @staticmethod
+     def bubblesort(x):
+         """Simple bubblesort algorithm copied from numba documentation"""
+         n = len(x)
+         for end in range(n, 1, -1):
+             for i in range(end - 1):
+                 cur = x[i]
+                 if cur > x[i + 1]:
+                     tmp = x[i]
+                     x[i] = x[i + 1]
+                     x[i + 1] = tmp
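
The task above times the interpreted and njit-compiled versions head to head; note that the first compiled call happens inside the timed loop, so that run also pays the JIT compilation cost. A standalone sketch of the same measurement that instead compiles outside the timed region (requires numpy and numba; absolute numbers vary by machine):

    import timeit

    import numpy as np
    from numba import njit


    def bubblesort(x):
        n = len(x)
        for end in range(n, 1, -1):
            for i in range(end - 1):
                if x[i] > x[i + 1]:
                    x[i], x[i + 1] = x[i + 1], x[i]


    fast = njit(bubblesort)
    data = np.random.permutation(np.linspace(0.0, 10.0, 1001))
    fast(data.copy())  # warm-up call: trigger compilation before timing
    t_py = timeit.timeit(lambda: bubblesort(data.copy()), number=10)
    t_nb = timeit.timeit(lambda: fast(data.copy()), number=10)
    print(f"python: {t_py:.3f}s  numba: {t_nb:.3f}s  speedup: {t_py / t_nb:.1f}x")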
@@ -0,0 +1,11 @@
+ """
+ Test task for infrastructure integration that will always fail
+ """
+ from dkist_processing_core import TaskBase
+
+ __all__ = ["FailTask"]
+
+
+ class FailTask(TaskBase):
+     def run(self) -> None:
+         raise RuntimeError("Failure is guaranteed")
@@ -0,0 +1,101 @@
+ """
+ Fake science task
+ """
+ import numpy as np
+ from astropy.io import fits
+ from dkist_processing_common.codecs.fits import fits_array_encoder
+ from dkist_processing_common.codecs.fits import fits_hdu_decoder
+ from dkist_processing_common.codecs.fits import fits_hdulist_encoder
+ from dkist_processing_common.codecs.json import json_encoder
+ from dkist_processing_common.models.tags import Tag
+ from dkist_processing_common.tasks import WorkflowTaskBase
+ from dkist_processing_common.tasks.mixin.input_dataset import InputDatasetMixin
+
+ from dkist_processing_test.models.parameters import TestParameters
+
+ __all__ = ["GenerateCalibratedData"]
+
+
+ class GenerateCalibratedData(WorkflowTaskBase, InputDatasetMixin):
+
+     record_provenance = True
+
+     def __init__(
+         self,
+         recipe_run_id: int,
+         workflow_name: str,
+         workflow_version: str,
+     ):
+         super().__init__(
+             recipe_run_id=recipe_run_id,
+             workflow_name=workflow_name,
+             workflow_version=workflow_version,
+         )
+         self.parameters = TestParameters(
+             self.input_dataset_parameters,
+             obs_ip_start_time=self.constants.obs_ip_start_time,
+             wavelength=2.0,
+         )
+
+     def run(self):
+         rng = np.random.default_rng()
+         with self.apm_task_step("Create debug frame"):
+             self.write(
+                 data=np.arange(10), tags=[Tag.frame(), Tag.debug()], encoder=fits_array_encoder
+             )
+
+         with self.apm_task_step("Creating intermediate frame"):
+             self.write(
+                 data=np.arange(5),
+                 tags=[Tag.frame(), Tag.intermediate(), Tag.task("DUMMY")],
+                 encoder=fits_array_encoder,
+             )
+
+         with self.apm_task_step("Creating unique frames"):
+             for _ in range(2):
+                 self.write(data=np.arange(3), tags=["FOO", "BAR"], encoder=fits_array_encoder)
+
+             self.write(data={"test": "dictionary"}, tags=["BAZ"], encoder=json_encoder)
+
+         with self.apm_task_step("Creating frames that won't be used"):
+             self.write(data=b"123", tags=[Tag.intermediate(), Tag.task("NOT_USED"), Tag.frame()])
+             self.write(data=b"123", tags=["FOO"])
+
+         with self.apm_task_step("Loop over inputs"):
+             count = 1  # keep a running count to increment the dsps repeat number
+             for hdu in self.read(tags=Tag.input(), decoder=fits_hdu_decoder):
+                 header = hdu.header
+                 with self.apm_processing_step("Doing some calculations"):
+                     header["DSPSNUM"] = count
+                     data = hdu.data
+
+                     # Just do some weird crap. We don't use the loaded random array directly so that we
+                     # don't have to care that the shapes are the same as the "real" data.
+                     random_signal = rng.normal(*self.parameters.randomness, size=data.shape)
+                     data = (
+                         data + random_signal
+                     )  # Needs to be like this because data will start as int-type
+                     data += self.parameters.constant
+
+                     # Add needed VBI L1 keys that would be computed during real VBI science
+                     header["VBINMOSC"] = self.constants.num_dsps_repeats
+                     header["VBICMOSC"] = count
+
+                     # Set WAVECAT before constructing the HDU, which copies the header
+                     wavelength_category = self.parameters.wavelength_category
+                     header["WAVECAT"] = wavelength_category
+
+                     output_hdu = fits.PrimaryHDU(data=data, header=header)
+
+                 with self.apm_writing_step("Writing data"):
+                     output_hdul = fits.HDUList([output_hdu])
+                     self.write(
+                         data=output_hdul,
+                         tags=[
+                             Tag.calibrated(),
+                             Tag.frame(),
+                             Tag.stokes("I"),
+                             Tag.dsps_repeat(count),
+                         ],
+                         encoder=fits_hdulist_encoder,
+                     )
+                 count += 1
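
One ordering subtlety in the loop above: `fits.PrimaryHDU` takes its own copy of the header it is given, which is why `WAVECAT` is set before the HDU is constructed rather than after. A quick empirical check of that behavior:

    import numpy as np
    from astropy.io import fits

    header = fits.Header()
    header["DSPSNUM"] = 1
    hdu = fits.PrimaryHDU(data=np.zeros((2, 2)), header=header)
    header["WAVECAT"] = "late"  # mutate the original header after construction
    print("WAVECAT" in hdu.header)  # False: the HDU kept its own copy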
@@ -0,0 +1,20 @@
+ """
+ Test task for the 'high_memory' resource queue
+ """
+ from time import sleep
+
+ from dkist_processing_core import TaskBase
+ from dkist_service_configuration.logging import logger
+
+ __all__ = ["HighMemoryTask"]
+
+
+ class HighMemoryTask(TaskBase):
+     def run(self) -> None:
+         logger.info("Starting High Memory Task")
+         num_bytes = int(9.7e9)  # ~9.7 GB, enough to exercise the high-memory queue
+         use_memory = bytearray(num_bytes)
+         logger.info(f"Using Memory {num_bytes = }")
+         sleep(5)
+         use_memory = None
+         logger.info("Memory De-Referenced.")
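
`bytearray(n)` allocates n zeroed bytes up front, so the task above really does hold roughly 9.7 GB of resident memory until the reference is dropped. The same trick at a demo-friendly size:

    import sys

    n = 10 * 1024 * 1024  # 10 MiB rather than ~9.7 GB, to keep the demo safe
    buf = bytearray(n)
    print(sys.getsizeof(buf) >= n)  # True: the buffer is fully allocated
    buf = None  # dropping the last reference releases the memory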
@@ -0,0 +1,26 @@
+ """
+ Tasks simulating manual intervention.
+ Manual tasks are expected to write provenance records regardless of the value of `record_provenance`.
+ """
+ from dkist_processing_common.tasks import WorkflowTaskBase
+
+
+ __all__ = ["ManualWithProvenance", "ManualWithoutProvenance"]
+
+
+ class ManualBase(WorkflowTaskBase):
+     def run(self):
+         with self.apm_task_step("NoOp"):
+             pass
+
+
+ class ManualWithProvenance(ManualBase):
+
+     record_provenance = True
+     is_task_manual = True
+
+
+ class ManualWithoutProvenance(ManualBase):
+
+     record_provenance = False
+     is_task_manual = True
@@ -0,0 +1,53 @@
+ """
+ Fake MakeMovieFrames and AssembleTestMovie
+ """
+ import numpy as np
+ from astropy.io import fits
+ from dkist_processing_common.codecs.fits import fits_hdu_decoder
+ from dkist_processing_common.codecs.fits import fits_hdulist_encoder
+ from dkist_processing_common.models.tags import Tag
+ from dkist_processing_common.parsers.l1_fits_access import L1FitsAccess
+ from dkist_processing_common.tasks import AssembleMovie
+ from dkist_processing_common.tasks import WorkflowTaskBase
+ from PIL import ImageDraw
+
+
+ __all__ = ["MakeTestMovieFrames", "AssembleTestMovie"]
+
+
+ class MakeTestMovieFrames(WorkflowTaskBase):
+     """
+     Take each output frame, copy the header and data, and write the copy
+     out as a movie frame
+     """
+
+     def run(self):
+         for d in range(1, self.constants.num_dsps_repeats + 1):
+             with self.apm_task_step(f"Working on dsps repeat {d}"):
+                 for hdu in self.read(
+                     tags=[Tag.calibrated(), Tag.dsps_repeat(d)], decoder=fits_hdu_decoder
+                 ):
+                     header = hdu.header
+                     data = np.squeeze(hdu.data)
+                     output_hdu = fits.PrimaryHDU(data=data, header=header)
+                     output_hdul = fits.HDUList([output_hdu])
+
+                     with self.apm_writing_step("Writing data"):
+                         self.write(
+                             data=output_hdul,
+                             tags=[Tag.movie_frame(), Tag.dsps_repeat(d)],
+                             encoder=fits_hdulist_encoder,
+                         )
+
+
+ class AssembleTestMovie(AssembleMovie):
+     """
+     A shell to extend the AssembleMovie class for the end-to-end test.
+     """
+
+     @property
+     def fits_parsing_class(self):
+         return L1FitsAccess
+
+     def write_overlay(self, draw: ImageDraw, fits_obj: L1FitsAccess) -> None:
+         pass
@@ -0,0 +1,15 @@
+ """
+ Test task for infrastructure integration
+ """
+ from dkist_processing_core import TaskBase
+
+ __all__ = ["NoOpTask", "NoOpTask2"]
+
+
+ class NoOpTask(TaskBase):
+     def run(self) -> None:
+         pass
+
+
+ class NoOpTask2(NoOpTask):
+     pass
@@ -0,0 +1,88 @@
+ """Parse Task definition."""
+ from typing import TypeVar
+
+ from dkist_processing_common.models.constants import BudName
+ from dkist_processing_common.models.flower_pot import SpilledDirt
+ from dkist_processing_common.models.flower_pot import Stem
+ from dkist_processing_common.models.flower_pot import Thorn
+ from dkist_processing_common.models.tags import StemName
+ from dkist_processing_common.parsers.l0_fits_access import L0FitsAccess
+ from dkist_processing_common.parsers.single_value_single_key_flower import (
+     SingleValueSingleKeyFlower,
+ )
+ from dkist_processing_common.parsers.unique_bud import UniqueBud
+ from dkist_processing_common.tasks import ParseL0InputDataBase
+
+ __all__ = ["ParseL0TestInputData"]
+
+ S = TypeVar("S", bound=Stem)
+
+
+ class TotalDspsRepeatsBud(UniqueBud):
+     def __init__(self):
+         super().__init__(
+             constant_name=BudName.num_dsps_repeats.value, metadata_key="num_dsps_repeats"
+         )
+
+     def setter(self, fits_obj: L0FitsAccess):
+         if fits_obj.ip_task_type != "observe":
+             return SpilledDirt
+         return super().setter(fits_obj)
+
+
+ class ObsIpStartTimeBud(UniqueBud):
+     def __init__(self):
+         super().__init__(
+             constant_name=BudName.obs_ip_start_time.value, metadata_key="ip_start_time"
+         )
+
+     def setter(self, fits_obj: L0FitsAccess):
+         if fits_obj.ip_task_type != "observe":
+             return SpilledDirt
+         return super().setter(fits_obj)
+
+
+ class DspsRepeatNumberFlower(SingleValueSingleKeyFlower):
+     def __init__(self):
+         super().__init__(
+             tag_stem_name=StemName.dsps_repeat.value, metadata_key="current_dsps_repeat"
+         )
+
+     def setter(self, fits_obj: L0FitsAccess):
+         if fits_obj.ip_task_type != "observe":
+             return SpilledDirt
+         return super().setter(fits_obj)
+
+
+ class PickyDummyBud(Stem):
+     """Exists to do literally nothing"""
+
+     def setter(self, fits_obj: L0FitsAccess):
+         if fits_obj.ip_task_type == "bad value":
+             raise ValueError("This task type is bad!")
+
+     def getter(self, key):
+         return Thorn
+
+
+ class ParseL0TestInputData(ParseL0InputDataBase):
+     @property
+     def fits_parsing_class(self):
+         return L0FitsAccess
+
+     @property
+     def tag_flowers(self) -> list[S]:
+         return super().tag_flowers + [
+             SingleValueSingleKeyFlower(
+                 tag_stem_name=StemName.task.value, metadata_key="ip_task_type"
+             ),
+             DspsRepeatNumberFlower(),
+         ]
+
+     @property
+     def constant_buds(self) -> list[S]:
+         return super().constant_buds + [
+             TotalDspsRepeatsBud(),
+             PickyDummyBud(stem_name="PICKY_BUD"),
+             ObsIpStartTimeBud(),
+         ]
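
The repeated `setter` pattern above (return `SpilledDirt` for non-observe frames, otherwise defer to the parent) is what filters which frames feed each bud. A schematic pure-Python stand-in for that mechanism, with all names invented for the sketch rather than taken from dkist-processing-common:

    class Skip:
        """Sentinel standing in for SpilledDirt: contribute nothing for this frame."""


    class MiniBud:
        def __init__(self, metadata_key: str):
            self.metadata_key = metadata_key
            self.values = set()

        def setter(self, header: dict):
            if header.get("ip_task_type") != "observe":
                return Skip  # analogous to returning SpilledDirt
            return header[self.metadata_key]

        def update(self, header: dict):
            value = self.setter(header)
            if value is not Skip:
                self.values.add(value)


    bud = MiniBud("num_dsps_repeats")
    for hdr in [
        {"ip_task_type": "observe", "num_dsps_repeats": 5},
        {"ip_task_type": "dark", "num_dsps_repeats": 99},
    ]:
        bud.update(hdr)
    print(bud.values)  # {5}: the dark frame was skipped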
@@ -0,0 +1,30 @@
+ """Quality task definition."""
+ from typing import Iterable
+ from typing import Type
+
+ from dkist_processing_common.models.constants import ConstantsBase
+ from dkist_processing_common.tasks import AssembleQualityData
+ from dkist_processing_common.tasks import QualityL0Metrics
+
+ __all__ = ["TestQualityL0Metrics", "TestAssembleQualityData"]
+
+ from dkist_processing_test.models.constants import TestConstants
+
+
+ class TestQualityL0Metrics(QualityL0Metrics):
+     @property
+     def constants_model_class(self) -> Type[ConstantsBase]:
+         return TestConstants
+
+     @property
+     def modstate_list(self) -> Iterable[int] | None:
+         if self.constants.num_modstates > 1:
+             return range(1, self.constants.num_modstates + 1)
+
+         return None
+
+
+ class TestAssembleQualityData(AssembleQualityData):
+     @property
+     def polcal_label_list(self) -> list[str] | None:
+         return ["Beam 1"]
@@ -0,0 +1,46 @@
+ """Trial tasks"""
+ from dkist_processing_common.tasks.mixin.globus import GlobusTransferItem
+ from dkist_processing_common.tasks.trial_output_data import TransferTrialDataBase
+ from dkist_service_configuration.logging import logger
+
+ __all__ = ["TransferTestTrialData"]
+
+
+ class TransferTestTrialData(TransferTrialDataBase):
+     @property
+     def intermediate_task_names(self) -> list[str]:
+         """Return a list of intermediate tasks we want to transfer.
+
+         Just a dummy task for testing.
+         """
+         return ["DUMMY"]
+
+     def build_transfer_list(self) -> list[GlobusTransferItem]:
+         """
+         Build a list containing all files we want to transfer to the trial environment.
+
+         For the purposes of testing we try to exercise all of the provided helper methods.
+         """
+         transfer_list = []
+
+         transfer_list += self.build_debug_frame_transfer_list()
+
+         transfer_list += self.build_intermediate_frame_transfer_list()
+
+         transfer_list += self.build_output_frame_transfer_list()
+
+         transfer_list += self.build_output_dataset_inventory_transfer_list()
+
+         transfer_list += self.build_output_asdf_transfer_list()
+
+         transfer_list += self.build_output_quality_data_transfer_list()
+
+         transfer_list += self.build_output_quality_report_transfer_list()
+
+         transfer_list += self.build_output_movie_transfer_list()
+
+         transfer_list += self.build_transfer_list_from_tag_lists([["FOO", "BAR"], ["BAZ"]])
+
+         logger.info(f"{transfer_list = }")
+
+         return transfer_list
@@ -0,0 +1,64 @@
+ from typing import Literal
+
+ import astropy.units as u
+ from astropy.io import fits
+ from astropy.time import Time
+ from astropy.time import TimeDelta
+ from dkist_processing_common.models.wavelength import WavelengthRange
+ from dkist_processing_common.tasks import WriteL1Frame
+
+ __all__ = ["WriteL1Data"]
+
+
+ class WriteL1Data(WriteL1Frame):
+     def add_dataset_headers(
+         self, header: fits.Header, stokes: Literal["I", "Q", "U", "V"]
+     ) -> fits.Header:
+         header["DAAXES"] = 2
+         header["DEAXES"] = 1
+         header["DNAXIS"] = 3
+         header["LEVEL"] = 1
+         header["WAVEREF"] = "Air"
+         header["WAVEUNIT"] = -9
+         header["DINDEX3"] = 3
+         header["DNAXIS1"] = header["NAXIS1"]
+         header["DNAXIS2"] = header["NAXIS2"]
+         header["DNAXIS3"] = 10
+         header["DPNAME1"] = "spatial x"
+         header["DPNAME2"] = "spatial y"
+         header["DPNAME3"] = "frame number"
+         header["DTYPE1"] = "SPATIAL"
+         header["DTYPE2"] = "SPATIAL"
+         header["DTYPE3"] = "TEMPORAL"
+         header["DUNIT1"] = "arcsec"
+         header["DUNIT2"] = "arcsec"
+         header["DUNIT3"] = "s"
+         header["DWNAME1"] = "helioprojective longitude"
+         header["DWNAME2"] = "helioprojective latitude"
+         header["DWNAME3"] = "time"
+         header["NBIN"] = 1
+         for i in range(1, header["NAXIS"] + 1):
+             header[f"NBIN{i}"] = 1
+
+         return header
+
+     def calculate_date_end(self, header: fits.Header) -> str:
+         """
+         Calculate the VBI specific version of the "DATE-END" keyword.
+
+         Parameters
+         ----------
+         header
+             The input fits header
+
+         Returns
+         -------
+         The isot formatted string of the DATE-END keyword value
+         """
+         return (
+             Time(header["DATE-BEG"], format="isot", precision=6)
+             + TimeDelta(float(header["TEXPOSUR"]) / 1000, format="sec")
+         ).to_value("isot")
+
+     def get_wavelength_range(self, header: fits.Header) -> WavelengthRange:
+         return WavelengthRange(min=1075.0 * u.nm, max=1085.0 * u.nm)
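
The DATE-END arithmetic in `calculate_date_end` is plain astropy: parse DATE-BEG as an isot Time, then add TEXPOSUR converted from milliseconds to seconds. A standalone check with made-up header values:

    from astropy.time import Time, TimeDelta

    date_beg = "2023-01-01T12:00:00.000000"  # made-up DATE-BEG
    texposur_ms = 1500.0  # made-up TEXPOSUR, in milliseconds
    date_end = (
        Time(date_beg, format="isot", precision=6)
        + TimeDelta(texposur_ms / 1000, format="sec")
    ).to_value("isot")
    print(date_end)  # 2023-01-01T12:00:01.500000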
File without changes
@@ -0,0 +1,64 @@
+ """Global test fixture configuration"""
+ from random import randint
+
+ import numpy as np
+ import pytest
+ from astropy.io import fits
+ from dkist_data_simulator.spec122 import Spec122Dataset
+ from dkist_header_validator.translator import translate_spec122_to_spec214_l0
+
+
+ @pytest.fixture()
+ def recipe_run_id():
+     return randint(0, 99999)
+
+
+ class S122Headers(Spec122Dataset):
+     def __init__(
+         self,
+         array_shape: tuple[int, ...],
+         num_steps: int = 4,
+         num_exp_per_step: int = 1,
+         num_dsps_repeats: int = 5,
+         time_delta: float = 10.0,
+         instrument: str = "vbi",
+     ):
+         dataset_shape = (num_exp_per_step * num_steps * num_dsps_repeats,) + array_shape[-2:]
+         super().__init__(
+             dataset_shape=dataset_shape,
+             array_shape=array_shape,
+             time_delta=time_delta,
+             instrument=instrument,
+         )
+         self.num_steps = num_steps
+         self.num_exp_per_step = num_exp_per_step
+         self.num_dsps_repeats = num_dsps_repeats
+
+
+ def generate_214_l0_fits_frame(
+     s122_header: fits.Header, data: np.ndarray | None = None
+ ) -> fits.HDUList:
+     """Convert S122 header into 214 L0"""
+     if data is None:
+         data = np.ones((1, 10, 10))
+     translated_header = translate_spec122_to_spec214_l0(s122_header)
+     del translated_header["COMMENT"]
+     hdu = fits.PrimaryHDU(data=data, header=fits.Header(translated_header))
+     return fits.HDUList([hdu])
+
+
+ @pytest.fixture(scope="session")
+ def parameter_file_object_key() -> str:
+     return "random.fits"
+
+
+ @pytest.fixture(scope="session")
+ def random_parameter_hdulist() -> tuple[fits.HDUList, float, float, float]:
+     rng = np.random.default_rng()
+     mu, std = 10.0, 2.0
+     const = 5.0
+     rand_data = rng.normal(mu, std, size=(100, 100))
+     const_data = np.ones((10, 10)) * const
+     hdul = fits.HDUList([fits.PrimaryHDU(rand_data), fits.ImageHDU(const_data)])
+
+     return hdul, mu, std, const