dkist-processing-common 11.0.0rc2__py3-none-any.whl → 11.1.0__py3-none-any.whl

This diff compares two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (29)
  1. dkist_processing_common/manual.py +2 -2
  2. dkist_processing_common/models/graphql.py +34 -22
  3. dkist_processing_common/models/input_dataset.py +4 -1
  4. dkist_processing_common/tasks/assemble_movie.py +2 -2
  5. dkist_processing_common/tasks/mixin/quality/_metrics.py +16 -7
  6. dkist_processing_common/tasks/trial_output_data.py +0 -3
  7. dkist_processing_common/tests/conftest.py +28 -221
  8. dkist_processing_common/tests/mock_metadata_store.py +237 -0
  9. dkist_processing_common/tests/test_assemble_movie.py +4 -3
  10. dkist_processing_common/tests/test_base.py +2 -3
  11. dkist_processing_common/tests/test_interservice_bus_mixin.py +0 -1
  12. dkist_processing_common/tests/test_output_data_base.py +4 -5
  13. dkist_processing_common/tests/test_publish_catalog_messages.py +2 -3
  14. dkist_processing_common/tests/test_quality_mixin.py +7 -3
  15. dkist_processing_common/tests/test_submit_dataset_metadata.py +2 -2
  16. dkist_processing_common/tests/test_teardown.py +14 -11
  17. dkist_processing_common/tests/test_transfer_input_data.py +79 -22
  18. dkist_processing_common/tests/test_transfer_l1_output_data.py +2 -3
  19. dkist_processing_common/tests/test_trial_catalog.py +7 -3
  20. dkist_processing_common/tests/test_trial_output_data.py +44 -64
  21. dkist_processing_common/tests/test_workflow_task_base.py +2 -3
  22. dkist_processing_common/tests/test_write_l1.py +82 -54
  23. {dkist_processing_common-11.0.0rc2.dist-info → dkist_processing_common-11.1.0.dist-info}/METADATA +2 -2
  24. {dkist_processing_common-11.0.0rc2.dist-info → dkist_processing_common-11.1.0.dist-info}/RECORD +26 -28
  25. changelog/256.feature.rst +0 -2
  26. changelog/257.feature.rst +0 -1
  27. changelog/259.feature.rst +0 -1
  28. {dkist_processing_common-11.0.0rc2.dist-info → dkist_processing_common-11.1.0.dist-info}/WHEEL +0 -0
  29. {dkist_processing_common-11.0.0rc2.dist-info → dkist_processing_common-11.1.0.dist-info}/top_level.txt +0 -0
dkist_processing_common/manual.py

@@ -12,7 +12,7 @@ from dkist_processing_common.codecs.basemodel import basemodel_encoder
 from dkist_processing_common.models.graphql import RecipeRunProvenanceMutation
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks.base import WorkflowTaskBase
-from dkist_processing_common.tests.conftest import FakeGQLClient
+from dkist_processing_common.tests.mock_metadata_store import fake_gql_client_factory


 logger = logging.getLogger(__name__)
@@ -67,7 +67,7 @@ class ManualProcessing:

         with patch(
             "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient",
-            new=FakeGQLClient,
+            new=fake_gql_client_factory(),
         ) as foo:
             # Run the task with a FakeGQLClient. This will handle pre_run(), run(), and post_run()
             with patch(
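Note on the change above: `patch(..., new=...)` substitutes the replacement object itself, so the factory must be called, and each call can return a freshly built fake client class, which keeps class-level state from leaking between runs. Only the name `fake_gql_client_factory` and the patch target come from this diff; the factory internals below are an illustrative assumption (the real implementation lives in the new `mock_metadata_store.py`, not shown here).

```python
from unittest.mock import patch


def fake_gql_client_factory():
    """Sketch: build a new fake client class per call so tests stay isolated."""

    class FakeGQLClient:
        def __init__(self, *args, **kwargs):
            pass

        def execute_gql_query(self, **kwargs):
            return []  # canned per-query responses would go here

        @staticmethod
        def execute_gql_mutation(**kwargs):
            ...

    return FakeGQLClient


# Usage mirrors manual.py (requires the package installed so the patch
# target imports). Note the call: `new=` wants the class, not the factory.
with patch(
    "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient",
    new=fake_gql_client_factory(),
):
    pass  # run the task here; it instantiates the fake instead of the real client
```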
dkist_processing_common/models/graphql.py

@@ -1,5 +1,7 @@
 """GraphQL Data models for the metadata store api."""
 from pydantic import BaseModel
+from pydantic import ConfigDict
+from pydantic import field_serializer
 from pydantic import field_validator
 from pydantic import Json

@@ -7,20 +9,26 @@ from dkist_processing_common.models.input_dataset import InputDatasetBaseModel
 from dkist_processing_common.models.input_dataset import InputDatasetPartDocumentList


-class RecipeRunMutation(BaseModel):
+class GraphqlBaseModel(BaseModel):
+    """Custom BaseModel for input datasets."""
+
+    model_config = ConfigDict(validate_assignment=True)
+
+
+class RecipeRunMutation(GraphqlBaseModel):
     """Recipe run mutation record."""

     recipeRunId: int
     recipeRunStatusId: int


-class RecipeRunStatusQuery(BaseModel):
+class RecipeRunStatusQuery(GraphqlBaseModel):
     """Recipe run status query for the recipeRunStatuses endpoint."""

     recipeRunStatusName: str


-class RecipeRunStatusMutation(BaseModel):
+class RecipeRunStatusMutation(GraphqlBaseModel):
     """Recipe run status mutation record."""

     recipeRunStatusName: str
@@ -28,13 +36,13 @@ class RecipeRunStatusMutation(BaseModel):
     recipeRunStatusDescription: str


-class RecipeRunStatusResponse(BaseModel):
+class RecipeRunStatusResponse(GraphqlBaseModel):
     """Response to a recipe run status query."""

     recipeRunStatusId: int


-class InputDatasetPartTypeResponse(BaseModel):
+class InputDatasetPartTypeResponse(GraphqlBaseModel):
     """Response class for the input dataset part type entity."""

     inputDatasetPartTypeName: str
@@ -54,13 +62,13 @@ class InputDatasetPartResponse(InputDatasetBaseModel):
         return InputDatasetPartDocumentList(doc_list=value_list)


-class InputDatasetInputDatasetPartResponse(BaseModel):
+class InputDatasetInputDatasetPartResponse(GraphqlBaseModel):
     """Response class for the join entity between input datasets and input dataset parts."""

     inputDatasetPart: InputDatasetPartResponse


-class InputDatasetResponse(BaseModel):
+class InputDatasetResponse(GraphqlBaseModel):
     """Input dataset query response."""

     inputDatasetId: int
@@ -68,33 +76,33 @@ class InputDatasetResponse(BaseModel):
     inputDatasetInputDatasetParts: list[InputDatasetInputDatasetPartResponse]


-class InputDatasetRecipeInstanceResponse(BaseModel):
+class InputDatasetRecipeInstanceResponse(GraphqlBaseModel):
     """Recipe instance query response."""

     inputDataset: InputDatasetResponse


-class InputDatasetRecipeRunResponse(BaseModel):
+class InputDatasetRecipeRunResponse(GraphqlBaseModel):
     """Recipe run query response."""

     recipeInstance: InputDatasetRecipeInstanceResponse


-class RecipeInstanceResponse(BaseModel):
+class RecipeInstanceResponse(GraphqlBaseModel):
     """Recipe instance query response."""

     recipeId: int
     inputDatasetId: int


-class RecipeRunProvenanceResponse(BaseModel):
+class RecipeRunProvenanceResponse(GraphqlBaseModel):
     """Response for the metadata store recipeRunProvenances and mutations endpoints."""

     recipeRunProvenanceId: int
     isTaskManual: bool


-class RecipeRunConfiguration(BaseModel):
+class RecipeRunConfiguration(GraphqlBaseModel):
     """Response class for a recipe run configuration dictionary."""

     validate_l1_on_write: bool = True
@@ -103,10 +111,10 @@ class RecipeRunConfiguration(BaseModel):
     trial_directory_name: str | None = None
     trial_root_directory_name: str | None = None
     teardown_enabled: bool = True
-    trial_exclusive_transfer_tag_lists: list[str] | None = None
+    trial_exclusive_transfer_tag_lists: list[list[str]] | None = None


-class RecipeRunResponse(BaseModel):
+class RecipeRunResponse(GraphqlBaseModel):
     """Recipe run query response."""

     recipeInstance: RecipeInstanceResponse
@@ -122,20 +130,24 @@ class RecipeRunResponse(BaseModel):
             return RecipeRunConfiguration()
         return RecipeRunConfiguration.model_validate(value)

+    @field_serializer("configuration")
+    def _serialize_as_basemodel(self, config: RecipeRunConfiguration):
+        return config.model_dump()
+

-class RecipeRunMutationResponse(BaseModel):
+class RecipeRunMutationResponse(GraphqlBaseModel):
     """Recipe run mutation response."""

     recipeRunId: int


-class RecipeRunQuery(BaseModel):
+class RecipeRunQuery(GraphqlBaseModel):
     """Query parameters for the metadata store endpoint recipeRuns."""

     recipeRunId: int


-class DatasetCatalogReceiptAccountMutation(BaseModel):
+class DatasetCatalogReceiptAccountMutation(GraphqlBaseModel):
     """
     Dataset catalog receipt account mutation record.

@@ -147,13 +159,13 @@ class DatasetCatalogReceiptAccountMutation(BaseModel):
     expectedObjectCount: int


-class DatasetCatalogReceiptAccountResponse(BaseModel):
+class DatasetCatalogReceiptAccountResponse(GraphqlBaseModel):
     """Dataset catalog receipt account response for query and mutation endpoints."""

     datasetCatalogReceiptAccountId: int


-class RecipeRunProvenanceMutation(BaseModel):
+class RecipeRunProvenanceMutation(GraphqlBaseModel):
     """Recipe run provenance mutation record."""

     inputDatasetId: int
@@ -165,7 +177,7 @@ class RecipeRunProvenanceMutation(BaseModel):
     codeVersion: str | None = None


-class QualityCreation(BaseModel):
+class QualityCreation(GraphqlBaseModel):
     """Quality data creation record."""

     datasetId: str
@@ -185,13 +197,13 @@ class QualityCreation(BaseModel):
     efficiencyData: str | None = None


-class QualitiesRequest(BaseModel):
+class QualitiesRequest(GraphqlBaseModel):
     """Query parameters for quality data."""

     datasetId: str


-class QualityResponse(BaseModel):
+class QualityResponse(GraphqlBaseModel):
     """Query Response for quality data."""

     qualityId: int
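Two pydantic features appear in this file for the first time: `ConfigDict(validate_assignment=True)` on the new shared base class, and a `field_serializer` on `RecipeRunResponse.configuration`. In the real model, `configuration` arrives as a JSON string and is parsed into `RecipeRunConfiguration` by the `field_validator` shown in the hunk above; the serializer makes `model_dump()` emit it as a plain dict again. The standalone sketch below mirrors the structure without copying the package's models:

```python
from pydantic import BaseModel, ConfigDict, ValidationError, field_serializer


class GraphqlBaseModel(BaseModel):
    # Attribute writes after construction are re-validated, not just __init__:
    model_config = ConfigDict(validate_assignment=True)


class RecipeRunConfiguration(GraphqlBaseModel):
    validate_l1_on_write: bool = True
    teardown_enabled: bool = True


class RecipeRunResponse(GraphqlBaseModel):
    configuration: RecipeRunConfiguration

    @field_serializer("configuration")
    def _serialize_as_basemodel(self, config: RecipeRunConfiguration) -> dict:
        return config.model_dump()


run = RecipeRunResponse(configuration=RecipeRunConfiguration())
run.configuration = RecipeRunConfiguration(teardown_enabled=False)  # re-validated, fine

try:
    run.configuration = "not a configuration"  # a plain BaseModel would accept this silently
except ValidationError:
    print("assignment rejected by validate_assignment")

print(run.model_dump())  # configuration appears as a plain dict
```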
dkist_processing_common/models/input_dataset.py

@@ -18,7 +18,10 @@ class InputDatasetBaseModel(BaseModel):
     """Custom BaseModel for input datasets."""

     model_config = ConfigDict(
-        alias_generator=to_camel, validate_by_name=True, validate_by_alias=True
+        alias_generator=to_camel,
+        validate_by_name=True,
+        validate_by_alias=True,
+        validate_assignment=True,
     )

     def model_dump(self, **kwargs) -> dict:
dkist_processing_common/tasks/assemble_movie.py

@@ -2,10 +2,10 @@
 import logging
 from abc import ABC
 from abc import abstractmethod
+from importlib.resources import files
 from typing import Literal

 import numpy as np
-import pkg_resources
 from matplotlib import colormaps
 from moviepy import VideoClip
 from PIL import Image
@@ -66,7 +66,7 @@ class AssembleMovie(WorkflowTaskBase, ABC):
     MINIMUM_DURATION = 10  # seconds
     MAXIMUM_DURATION = 60  # seconds
     FPS = 15
-    FONT_FILE = pkg_resources.resource_filename("dkist_processing_common", "fonts/Lato-Regular.ttf")
+    FONT_FILE = files("dkist_processing_common").joinpath("fonts/Lato-Regular.ttf")
     TEXT_MARGIN_PX = 5
     MPL_COLOR_MAP = "viridis"
dkist_processing_common/tasks/mixin/quality/_metrics.py

@@ -1365,13 +1365,22 @@ class _WavecalQualityMixin:
         wcs = WCS(best_fit_header)
         best_fit_wavelength = wcs.spectral.pixel_to_world(np.arange(input_spectrum.size))

+        finite_idx = (
+            np.isfinite(input_wavelength)
+            * np.isfinite(input_spectrum)
+            * np.isfinite(best_fit_wavelength)
+            * np.isfinite(best_fit_atlas)
+            * np.isfinite(normalized_residuals)
+            * np.isfinite(weight_data)
+        )
+
         data = {
-            "input_wavelength_nm": input_wavelength.to_value(u.nm).tolist(),
-            "input_spectrum": input_spectrum.tolist(),
-            "best_fit_wavelength_nm": best_fit_wavelength.to_value(u.nm).tolist(),
-            "best_fit_atlas": best_fit_atlas.tolist(),
-            "normalized_residuals": normalized_residuals.tolist(),
-            "weights": None if weights is None else weight_data.tolist(),
+            "input_wavelength_nm": input_wavelength.to_value(u.nm)[finite_idx].tolist(),
+            "input_spectrum": input_spectrum[finite_idx].tolist(),
+            "best_fit_wavelength_nm": best_fit_wavelength.to_value(u.nm)[finite_idx].tolist(),
+            "best_fit_atlas": best_fit_atlas[finite_idx].tolist(),
+            "normalized_residuals": normalized_residuals[finite_idx].tolist(),
+            "weights": None if weights is None else weight_data[finite_idx].tolist(),
         }

         self._record_values(values=data, tags=[Tag.quality(MetricCode.wavecal_fit)])
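The idiom added here is worth spelling out: multiplying boolean arrays is an elementwise AND, so `finite_idx` keeps only samples where every quantity is finite, and a NaN or inf in any one array drops that sample from all of them. A likely motivation (my reading, not stated in the diff) is keeping the `.tolist()` payloads JSON-safe, since strict JSON has no NaN or Infinity. A standalone sketch:

```python
import numpy as np

wavelength = np.array([500.0, 500.1, np.nan, 500.3])
spectrum = np.array([1.0, np.inf, 0.9, 0.8])
residuals = np.array([0.01, -0.02, 0.00, 0.03])

# Elementwise AND via multiplication of boolean arrays:
finite_idx = (
    np.isfinite(wavelength)
    * np.isfinite(spectrum)
    * np.isfinite(residuals)
)  # array([ True, False, False,  True])

# The same mask applied everywhere keeps the arrays aligned sample-for-sample:
print(wavelength[finite_idx].tolist())  # [500.0, 500.3]
print(spectrum[finite_idx].tolist())    # [1.0, 0.8]
```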
@@ -1447,7 +1456,7 @@ class _WavecalQualityMixin:
             name="Wavelength Calibration Results",
             description="These plots show the wavelength solution computed based on fits to a Solar FTS atlas. "
             "The top plot shows the input and best-fit spectra along with the best-fit atlas, which is "
-            "a combination of Solar and Telluric spectra. The bottom plot shows the fir residuals.",
+            "a combination of Solar and Telluric spectra. The bottom plot shows the fit residuals.",
             metric_code=MetricCode.wavecal_fit,
             vertical_multi_pane_plot_data=full_plot,
         )
dkist_processing_common/tasks/trial_output_data.py

@@ -129,9 +129,6 @@ class TransferTrialData(TransferDataBase, GlobusMixin):
         """
         tag_lists = self.transfer_tag_lists

-        if not isinstance(tag_lists[0], list):
-            raise ValueError(f"{tag_lists=} must be a list of tag set lists")
-
        transfer_items = []
        for tag_set in tag_lists:
dkist_processing_common/tests/conftest.py

@@ -32,21 +32,9 @@ from dkist_processing_pac.optics.telescope import Telescope
 from dkist_processing_common._util.constants import ConstantsDb
 from dkist_processing_common._util.scratch import WorkflowFileSystem
 from dkist_processing_common._util.tags import TagDB
-from dkist_processing_common.models.graphql import InputDatasetInputDatasetPartResponse
-from dkist_processing_common.models.graphql import InputDatasetPartResponse
-from dkist_processing_common.models.graphql import InputDatasetPartTypeResponse
-from dkist_processing_common.models.graphql import InputDatasetRecipeInstanceResponse
-from dkist_processing_common.models.graphql import InputDatasetRecipeRunResponse
-from dkist_processing_common.models.graphql import InputDatasetResponse
-from dkist_processing_common.models.graphql import RecipeInstanceResponse
-from dkist_processing_common.models.graphql import RecipeRunProvenanceResponse
-from dkist_processing_common.models.graphql import RecipeRunResponse
-from dkist_processing_common.models.graphql import RecipeRunStatusResponse
-from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.parsers.l0_fits_access import L0FitsAccess
 from dkist_processing_common.tasks import WorkflowTaskBase
-
-TILE_SIZE = 64
+from dkist_processing_common.tests.mock_metadata_store import fake_gql_client


 @pytest.fixture()
@@ -99,6 +87,21 @@ def constants_db(recipe_run_id) -> ConstantsDb:
     constants.close()


+@pytest.fixture()
+def fake_constants_db() -> dict:
+    """
+    A fake constants DB to prevent key errors.
+
+    Usage on a task: task.constants._update(fake_constants_db)
+    """
+    db = {
+        "PROPOSAL_ID": "PROPID",
+        "INSTRUMENT": "INSTRUMENT",
+        "OBS_IP_START_TIME": "20240416T160000",
+    }
+    return db
+
+
 class CommonDataset(Spec122Dataset):
     def __init__(self, polarimetric: bool = True):
         super().__init__(
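The fixture's docstring doubles as its API. A minimal sketch of a test consuming it; pairing it with the `task_with_input_dataset` fixture seen later in this file is purely illustrative:

```python
def test_with_seeded_constants(fake_constants_db, task_with_input_dataset):
    task = task_with_input_dataset
    # Seed constants per the fixture docstring so lookups don't KeyError:
    task.constants._update(fake_constants_db)
    ...  # exercise code that reads PROPOSAL_ID / INSTRUMENT / OBS_IP_START_TIME
```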
@@ -285,18 +288,18 @@ def cs_step_angle_round_ndigits() -> int:


 @pytest.fixture(scope="session")
-def angle_random_max_perturabtion(cs_step_angle_round_ndigits) -> float:
+def angle_random_max_perturbation(cs_step_angle_round_ndigits) -> float:
     # Ensures that we always round down to zero.
     # E.g., if ndigits = 1 then this value will be 0.049.
     return 10**-cs_step_angle_round_ndigits / 2 - 10 ** -(cs_step_angle_round_ndigits + 2)


 @pytest.fixture(scope="session")
-def grouped_cal_sequence_headers(angle_random_max_perturabtion) -> dict[int, list[L0FitsAccess]]:
+def grouped_cal_sequence_headers(angle_random_max_perturbation) -> dict[int, list[L0FitsAccess]]:
     ds = CalibrationSequenceDataset(
         array_shape=(1, 2, 2),
         time_delta=2.0,
-        angle_max_random_perturbation=angle_random_max_perturabtion,
+        angle_max_random_perturbation=angle_random_max_perturbation,
     )
     header_list = [
         spec122_validator.validate_and_translate_to_214_l0(d.header(), return_type=fits.HDUList)[
@@ -331,155 +334,9 @@ def max_cs_step_time_sec() -> float:
     return 20.0


-class FakeGQLClient:
-
-    observe_frames_doc_object = [
-        {
-            "bucket": uuid4().hex[:6],
-            "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
-        }
-    ]
-
-    calibration_frames_doc_object = [
-        {
-            "bucket": uuid4().hex[:6],
-            "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
-        },
-        {
-            "bucket": uuid4().hex[:6],
-            "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
-        },
-    ]
-
-    parameters_doc_object = [
-        {
-            "parameterName": "param_name_1",
-            "parameterValues": [
-                {
-                    "parameterValueId": 1,
-                    "parameterValue": json.dumps([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
-                    "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
-                }
-            ],
-        },
-        {
-            "parameterName": "param_name_2",
-            "parameterValues": [
-                {
-                    "parameterValueId": 2,
-                    "parameterValue": json.dumps(
-                        {
-                            "__file__": {
-                                "bucket": "data",
-                                "objectKey": f"parameters/param_name/{uuid4().hex}.dat",
-                            }
-                        }
-                    ),
-                    "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
-                },
-                {
-                    "parameterValueId": 3,
-                    "parameterValue": json.dumps(
-                        {
-                            "__file__": {
-                                "bucket": "data",
-                                "objectKey": f"parameters/param_name/{uuid4().hex}.dat",
-                            }
-                        }
-                    ),
-                    "parameterValueStartDate": datetime(2000, 1, 2).isoformat(),
-                },
-            ],
-        },
-        {
-            "parameterName": "param_name_4",
-            "parameterValues": [
-                {
-                    "parameterValueId": 4,
-                    "parameterValue": json.dumps(
-                        {"a": 1, "b": 3.14159, "c": "foo", "d": [1, 2, 3]}
-                    ),
-                    "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
-                }
-            ],
-        },
-    ]
-
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def execute_gql_query(self, **kwargs):
-        query_base = kwargs["query_base"]
-        if query_base == "recipeRunStatuses":
-            return [RecipeRunStatusResponse(recipeRunStatusId=1)]
-        if query_base == "recipeRuns":
-            if kwargs.get("query_response_cls") == InputDatasetRecipeRunResponse:
-                return [
-                    InputDatasetRecipeRunResponse(
-                        recipeInstance=InputDatasetRecipeInstanceResponse(
-                            inputDataset=InputDatasetResponse(
-                                inputDatasetId=1,
-                                isActive=True,
-                                inputDatasetInputDatasetParts=[
-                                    InputDatasetInputDatasetPartResponse(
-                                        inputDatasetPart=InputDatasetPartResponse(
-                                            inputDatasetPartId=1,
-                                            inputDatasetPartDocument=json.dumps(
-                                                self.parameters_doc_object
-                                            ),
-                                            inputDatasetPartType=InputDatasetPartTypeResponse(
-                                                inputDatasetPartTypeName="parameters"
-                                            ),
-                                        )
-                                    ),
-                                    InputDatasetInputDatasetPartResponse(
-                                        inputDatasetPart=InputDatasetPartResponse(
-                                            inputDatasetPartId=2,
-                                            inputDatasetPartDocument=json.dumps(
-                                                self.observe_frames_doc_object
-                                            ),
-                                            inputDatasetPartType=InputDatasetPartTypeResponse(
-                                                inputDatasetPartTypeName="observe_frames"
-                                            ),
-                                        )
-                                    ),
-                                    InputDatasetInputDatasetPartResponse(
-                                        inputDatasetPart=InputDatasetPartResponse(
-                                            inputDatasetPartId=3,
-                                            inputDatasetPartDocument=json.dumps(
-                                                self.calibration_frames_doc_object
-                                            ),
-                                            inputDatasetPartType=InputDatasetPartTypeResponse(
-                                                inputDatasetPartTypeName="calibration_frames"
-                                            ),
-                                        )
-                                    ),
-                                ],
-                            ),
-                        ),
-                    ),
-                ]
-
-        return [
-            RecipeRunResponse(
-                recipeInstanceId=1,
-                recipeInstance=RecipeInstanceResponse(
-                    recipeId=1,
-                    inputDatasetId=1,
-                ),
-                configuration=f'{{"tile_size": {TILE_SIZE}}}',
-                recipeRunProvenances=[
-                    RecipeRunProvenanceResponse(recipeRunProvenanceId=1, isTaskManual=False),
-                ],
-            ),
-        ]
-
-    @staticmethod
-    def execute_gql_mutation(**kwargs):
-        ...
-
-
-# All the following stuff is copied from dkist-processing-pac
+####################################
+# Copied from dkist-processing-pac #
+####################################
 def compute_telgeom(time_hst: Time):
     dkist_lon = (156 + 15 / 60.0 + 21.7 / 3600.0) * (-1)
     dkist_lat = 20 + 42 / 60.0 + 27.0 / 3600.0
@@ -543,7 +400,7 @@ class CalibrationSequenceStepDataset(Spec122Dataset):
         return "none" if self.pol_status == "clear" else str(self.pol_theta)

     @key_function("PAC__006")
-    def retarter_status(self, key: str) -> str:
+    def retarder_status(self, key: str) -> str:
         return self.ret_status

     @key_function("PAC__007")
@@ -795,6 +652,11 @@ def post_fit_polcal_fitter(
     return fitter


+#################
+# Input Dataset #
+#################
+
+
 class InputDatasetTask(WorkflowTaskBase):
     def run(self):
         pass
@@ -826,58 +688,3 @@ def task_with_input_dataset(
         file_path.write_text(data=json.dumps({"doc_list": part}))
         task.tag(path=file_path, tags=tag)
     yield task
-
-
-def create_parameter_files(
-    task: WorkflowTaskBase, parameters_doc: list[dict] = FakeGQLClient.parameters_doc_object
-):
-    """
-    Create the parameter files specified in the parameters document returned by the metadata store.
-
-    This fixture assumes that the JSON parameters document has already been loaded into a python
-    structure, but the parameter values themselves are still JSON.
-    """
-    for parameter in parameters_doc:
-        for value in parameter["parameterValues"]:
-            if "__file__" not in value["parameterValue"]:
-                continue
-            parameter_value = json.loads(value["parameterValue"])
-            param_path = parameter_value["__file__"]["objectKey"]
-            file_path = task.scratch.workflow_base_path / Path(param_path)
-            if not file_path.parent.exists():
-                file_path.parent.mkdir(parents=True, exist_ok=True)
-            file_path.write_text(data="")
-            task.tag(path=file_path, tags=Tag.parameter(param_path))
-
-
-def create_input_frames(
-    task: WorkflowTaskBase,
-    input_frame_docs: list[dict] = FakeGQLClient.observe_frames_doc_object
-    + FakeGQLClient.calibration_frames_doc_object,
-):
-    """
-    Create the observe and calibration frame files specified in the input dataset documents
-    returned by the metadata store.
-    """
-    for frame in input_frame_docs:
-        for object_key in frame["object_keys"]:
-            file_path = task.scratch.workflow_base_path / Path(object_key)
-            if not file_path.parent.exists():
-                file_path.parent.mkdir(parents=True, exist_ok=True)
-            file_path.write_text(data="")
-            task.tag(path=file_path, tags=[Tag.frame(), Tag.input()])
-
-
-@pytest.fixture()
-def fake_constants_db() -> dict:
-    """
-    A fake constants DB to prevent key errors.
-
-    Usage on a task: task.constants._update(fake_constants_db)
-    """
-    db = {
-        "PROPOSAL_ID": "PROPID",
-        "INSTRUMENT": "INSTRUMENT",
-        "OBS_IP_START_TIME": "20240416T160000",
-    }
-    return db