dkist-processing-common 11.0.0rc1__py3-none-any.whl → 11.1.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. changelog/255.misc.rst +3 -0
  2. dkist_processing_common/manual.py +2 -2
  3. dkist_processing_common/models/graphql.py +29 -22
  4. dkist_processing_common/models/input_dataset.py +4 -1
  5. dkist_processing_common/tasks/mixin/quality/_metrics.py +1 -1
  6. dkist_processing_common/tasks/trial_output_data.py +0 -3
  7. dkist_processing_common/tests/conftest.py +28 -221
  8. dkist_processing_common/tests/mock_metadata_store.py +237 -0
  9. dkist_processing_common/tests/test_assemble_movie.py +4 -3
  10. dkist_processing_common/tests/test_assemble_quality.py +1 -2
  11. dkist_processing_common/tests/test_base.py +2 -3
  12. dkist_processing_common/tests/test_interservice_bus_mixin.py +0 -1
  13. dkist_processing_common/tests/test_output_data_base.py +4 -5
  14. dkist_processing_common/tests/test_publish_catalog_messages.py +2 -3
  15. dkist_processing_common/tests/test_quality_mixin.py +1 -1
  16. dkist_processing_common/tests/test_submit_dataset_metadata.py +2 -2
  17. dkist_processing_common/tests/test_teardown.py +14 -11
  18. dkist_processing_common/tests/test_transfer_input_data.py +79 -22
  19. dkist_processing_common/tests/test_transfer_l1_output_data.py +2 -3
  20. dkist_processing_common/tests/test_trial_catalog.py +7 -3
  21. dkist_processing_common/tests/test_trial_output_data.py +44 -64
  22. dkist_processing_common/tests/test_write_l1.py +82 -54
  23. {dkist_processing_common-11.0.0rc1.dist-info → dkist_processing_common-11.1.0rc1.dist-info}/METADATA +2 -2
  24. {dkist_processing_common-11.0.0rc1.dist-info → dkist_processing_common-11.1.0rc1.dist-info}/RECORD +26 -27
  25. changelog/256.feature.rst +0 -2
  26. changelog/257.feature.rst +0 -1
  27. changelog/259.feature.rst +0 -1
  28. {dkist_processing_common-11.0.0rc1.dist-info → dkist_processing_common-11.1.0rc1.dist-info}/WHEEL +0 -0
  29. {dkist_processing_common-11.0.0rc1.dist-info → dkist_processing_common-11.1.0rc1.dist-info}/top_level.txt +0 -0
changelog/255.misc.rst ADDED
@@ -0,0 +1,3 @@
+ Create a factory for custom `FakeGQLClient` used in tests. The factory and associated default returns live in a
+ new `mock_metadata_store.py` module in the tests directory. `FakeGQLClient` is now a test fixture and does not need to
+ be imported in tests.
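The hunks below show the resulting call sites. As a rough sketch of the pattern this entry describes (the factory's actual signature and override mechanism are assumptions, not taken from the new module), a factory that builds a fresh `FakeGQLClient` class per test might look like:

```python
# Hypothetical sketch of the factory pattern described in the changelog entry;
# the real implementation lives in dkist_processing_common/tests/mock_metadata_store.py
# and its signature may differ.
def fake_gql_client_factory(response_overrides: dict | None = None) -> type:
    """Build a fresh FakeGQLClient class, optionally overriding canned responses."""

    class FakeGQLClient:
        overrides = response_overrides or {}

        def __init__(self, *args, **kwargs):
            pass

        def execute_gql_query(self, **kwargs):
            ...  # return canned default responses, merged with any overrides

        @staticmethod
        def execute_gql_mutation(**kwargs):
            ...

    return FakeGQLClient
```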
dkist_processing_common/manual.py CHANGED
@@ -12,7 +12,7 @@ from dkist_processing_common.codecs.basemodel import basemodel_encoder
  from dkist_processing_common.models.graphql import RecipeRunProvenanceMutation
  from dkist_processing_common.models.tags import Tag
  from dkist_processing_common.tasks.base import WorkflowTaskBase
- from dkist_processing_common.tests.conftest import FakeGQLClient
+ from dkist_processing_common.tests.mock_metadata_store import fake_gql_client_factory
 
 
  logger = logging.getLogger(__name__)
@@ -67,7 +67,7 @@ class ManualProcessing:
 
          with patch(
              "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient",
-             new=FakeGQLClient,
+             new=fake_gql_client_factory(),
          ) as foo:
              # Run the task with a FakeGQLClient. This will handle pre_run(), run(), and post_run()
              with patch(
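In context, `ManualProcessing` swaps the real metadata-store client for the factory-built fake before running a task. A standalone sketch of the same pattern, taken from the patch call above (the task object is whatever `WorkflowTaskBase` subclass you are exercising):

```python
from unittest.mock import patch

from dkist_processing_common.tests.mock_metadata_store import fake_gql_client_factory

# Sketch: any metadata-store call made by the task now hits the fake client
# class built by the factory instead of the real GraphQL API.
with patch(
    "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient",
    new=fake_gql_client_factory(),
):
    ...  # construct and run the task; pre_run(), run(), and post_run() all use the fake
```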
dkist_processing_common/models/graphql.py CHANGED
@@ -1,5 +1,6 @@
  """GraphQL Data models for the metadata store api."""
  from pydantic import BaseModel
+ from pydantic import ConfigDict
  from pydantic import field_validator
  from pydantic import Json
 
@@ -7,20 +8,26 @@ from dkist_processing_common.models.input_dataset import InputDatasetBaseModel
  from dkist_processing_common.models.input_dataset import InputDatasetPartDocumentList
 
 
- class RecipeRunMutation(BaseModel):
+ class GraphqlBaseModel(BaseModel):
+     """Custom BaseModel for input datasets."""
+
+     model_config = ConfigDict(validate_assignment=True)
+
+
+ class RecipeRunMutation(GraphqlBaseModel):
      """Recipe run mutation record."""
 
      recipeRunId: int
      recipeRunStatusId: int
 
 
- class RecipeRunStatusQuery(BaseModel):
+ class RecipeRunStatusQuery(GraphqlBaseModel):
      """Recipe run status query for the recipeRunStatuses endpoint."""
 
      recipeRunStatusName: str
 
 
- class RecipeRunStatusMutation(BaseModel):
+ class RecipeRunStatusMutation(GraphqlBaseModel):
      """Recipe run status mutation record."""
 
      recipeRunStatusName: str
@@ -28,13 +35,13 @@ class RecipeRunStatusMutation(BaseModel):
      recipeRunStatusDescription: str
 
 
- class RecipeRunStatusResponse(BaseModel):
+ class RecipeRunStatusResponse(GraphqlBaseModel):
      """Response to a recipe run status query."""
 
      recipeRunStatusId: int
 
 
- class InputDatasetPartTypeResponse(BaseModel):
+ class InputDatasetPartTypeResponse(GraphqlBaseModel):
      """Response class for the input dataset part type entity."""
 
      inputDatasetPartTypeName: str
@@ -54,13 +61,13 @@ class InputDatasetPartResponse(InputDatasetBaseModel):
          return InputDatasetPartDocumentList(doc_list=value_list)
 
 
- class InputDatasetInputDatasetPartResponse(BaseModel):
+ class InputDatasetInputDatasetPartResponse(GraphqlBaseModel):
      """Response class for the join entity between input datasets and input dataset parts."""
 
      inputDatasetPart: InputDatasetPartResponse
 
 
- class InputDatasetResponse(BaseModel):
+ class InputDatasetResponse(GraphqlBaseModel):
      """Input dataset query response."""
 
      inputDatasetId: int
@@ -68,33 +75,33 @@ class InputDatasetResponse(BaseModel):
      inputDatasetInputDatasetParts: list[InputDatasetInputDatasetPartResponse]
 
 
- class InputDatasetRecipeInstanceResponse(BaseModel):
+ class InputDatasetRecipeInstanceResponse(GraphqlBaseModel):
      """Recipe instance query response."""
 
      inputDataset: InputDatasetResponse
 
 
- class InputDatasetRecipeRunResponse(BaseModel):
+ class InputDatasetRecipeRunResponse(GraphqlBaseModel):
      """Recipe run query response."""
 
      recipeInstance: InputDatasetRecipeInstanceResponse
 
 
- class RecipeInstanceResponse(BaseModel):
+ class RecipeInstanceResponse(GraphqlBaseModel):
      """Recipe instance query response."""
 
      recipeId: int
      inputDatasetId: int
 
 
- class RecipeRunProvenanceResponse(BaseModel):
+ class RecipeRunProvenanceResponse(GraphqlBaseModel):
      """Response for the metadata store recipeRunProvenances and mutations endpoints."""
 
      recipeRunProvenanceId: int
      isTaskManual: bool
 
 
- class RecipeRunConfiguration(BaseModel):
+ class RecipeRunConfiguration(GraphqlBaseModel):
      """Response class for a recipe run configuration dictionary."""
 
      validate_l1_on_write: bool = True
@@ -103,10 +110,10 @@ class RecipeRunConfiguration(BaseModel):
      trial_directory_name: str | None = None
      trial_root_directory_name: str | None = None
      teardown_enabled: bool = True
-     trial_exclusive_transfer_tag_lists: list[str] | None = None
+     trial_exclusive_transfer_tag_lists: list[list[str]] | None = None
 
 
- class RecipeRunResponse(BaseModel):
+ class RecipeRunResponse(GraphqlBaseModel):
      """Recipe run query response."""
 
      recipeInstance: RecipeInstanceResponse
@@ -123,19 +130,19 @@ class RecipeRunResponse(BaseModel):
          return RecipeRunConfiguration.model_validate(value)
 
 
- class RecipeRunMutationResponse(BaseModel):
+ class RecipeRunMutationResponse(GraphqlBaseModel):
      """Recipe run mutation response."""
 
      recipeRunId: int
 
 
- class RecipeRunQuery(BaseModel):
+ class RecipeRunQuery(GraphqlBaseModel):
      """Query parameters for the metadata store endpoint recipeRuns."""
 
      recipeRunId: int
 
 
- class DatasetCatalogReceiptAccountMutation(BaseModel):
+ class DatasetCatalogReceiptAccountMutation(GraphqlBaseModel):
      """
      Dataset catalog receipt account mutation record.
 
@@ -147,13 +154,13 @@ class DatasetCatalogReceiptAccountMutation(BaseModel):
      expectedObjectCount: int
 
 
- class DatasetCatalogReceiptAccountResponse(BaseModel):
+ class DatasetCatalogReceiptAccountResponse(GraphqlBaseModel):
      """Dataset catalog receipt account response for query and mutation endpoints."""
 
      datasetCatalogReceiptAccountId: int
 
 
- class RecipeRunProvenanceMutation(BaseModel):
+ class RecipeRunProvenanceMutation(GraphqlBaseModel):
      """Recipe run provenance mutation record."""
 
      inputDatasetId: int
@@ -165,7 +172,7 @@ class RecipeRunProvenanceMutation(BaseModel):
      codeVersion: str | None = None
 
 
- class QualityCreation(BaseModel):
+ class QualityCreation(GraphqlBaseModel):
      """Quality data creation record."""
 
      datasetId: str
@@ -185,13 +192,13 @@ class QualityCreation(BaseModel):
      efficiencyData: str | None = None
 
 
- class QualitiesRequest(BaseModel):
+ class QualitiesRequest(GraphqlBaseModel):
      """Query parameters for quality data."""
 
      datasetId: str
 
 
- class QualityResponse(BaseModel):
+ class QualityResponse(GraphqlBaseModel):
      """Query Response for quality data."""
 
      qualityId: int
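The new `GraphqlBaseModel` only adds `validate_assignment=True`, which re-runs field validation when an attribute is set on an existing instance. A minimal standalone pydantic v2 sketch of the effect:

```python
from pydantic import BaseModel, ConfigDict, ValidationError


class GraphqlBaseModel(BaseModel):
    model_config = ConfigDict(validate_assignment=True)


class RecipeRunMutation(GraphqlBaseModel):
    recipeRunId: int
    recipeRunStatusId: int


mutation = RecipeRunMutation(recipeRunId=1, recipeRunStatusId=2)
try:
    mutation.recipeRunStatusId = "not-an-int"  # re-validated at assignment time
except ValidationError as err:
    print(err)  # the bad value is caught here, not at the next model_dump()
```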
dkist_processing_common/models/input_dataset.py CHANGED
@@ -18,7 +18,10 @@ class InputDatasetBaseModel(BaseModel):
      """Custom BaseModel for input datasets."""
 
      model_config = ConfigDict(
-         alias_generator=to_camel, validate_by_name=True, validate_by_alias=True
+         alias_generator=to_camel,
+         validate_by_name=True,
+         validate_by_alias=True,
+         validate_assignment=True,
      )
 
      def model_dump(self, **kwargs) -> dict:
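For `InputDatasetBaseModel` the new flag stacks on the existing camelCase aliasing, so instances validate whether fields are populated by name or by alias, and again whenever they are later mutated. A sketch with an illustrative field name (not the package's real model):

```python
from pydantic import BaseModel, ConfigDict
from pydantic.alias_generators import to_camel


class InputDatasetBaseModel(BaseModel):
    model_config = ConfigDict(
        alias_generator=to_camel,
        validate_by_name=True,
        validate_by_alias=True,
        validate_assignment=True,
    )


class PartDemo(InputDatasetBaseModel):
    input_dataset_part_id: int  # generated alias: inputDatasetPartId


# Both spellings are accepted, and later assignments are re-validated:
print(PartDemo(inputDatasetPartId=1))
print(PartDemo(input_dataset_part_id=1))
```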
dkist_processing_common/tasks/mixin/quality/_metrics.py CHANGED
@@ -1447,7 +1447,7 @@ class _WavecalQualityMixin:
              name="Wavelength Calibration Results",
              description="These plots show the wavelength solution computed based on fits to a Solar FTS atlas. "
              "The top plot shows the input and best-fit spectra along with the best-fit atlas, which is "
-             "a combination of Solar and Telluric spectra. The bottom plot shows the fir residuals.",
+             "a combination of Solar and Telluric spectra. The bottom plot shows the fit residuals.",
              metric_code=MetricCode.wavecal_fit,
              vertical_multi_pane_plot_data=full_plot,
          )
dkist_processing_common/tasks/trial_output_data.py CHANGED
@@ -129,9 +129,6 @@ class TransferTrialData(TransferDataBase, GlobusMixin):
          """
          tag_lists = self.transfer_tag_lists
 
-         if not isinstance(tag_lists[0], list):
-             raise ValueError(f"{tag_lists=} must be a list of tag set lists")
-
          transfer_items = []
          for tag_set in tag_lists:
 
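This runtime guard is now redundant: with `trial_exclusive_transfer_tag_lists` typed as `list[list[str]] | None` on `RecipeRunConfiguration` (see the graphql.py hunk above), a flat list of tags fails pydantic validation before `build_transfer_list` ever runs. A sketch of the behavior the annotation now enforces:

```python
from pydantic import BaseModel, ValidationError


class ConfigDemo(BaseModel):
    # Same annotation as RecipeRunConfiguration.trial_exclusive_transfer_tag_lists
    trial_exclusive_transfer_tag_lists: list[list[str]] | None = None


try:
    ConfigDemo(trial_exclusive_transfer_tag_lists=["FRAME", "OUTPUT"])  # flat list
except ValidationError:
    print("rejected: must be a list of tag lists")

ConfigDemo(trial_exclusive_transfer_tag_lists=[["FRAME", "OUTPUT"]])  # validates
```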
dkist_processing_common/tests/conftest.py CHANGED
@@ -32,21 +32,9 @@ from dkist_processing_pac.optics.telescope import Telescope
  from dkist_processing_common._util.constants import ConstantsDb
  from dkist_processing_common._util.scratch import WorkflowFileSystem
  from dkist_processing_common._util.tags import TagDB
- from dkist_processing_common.models.graphql import InputDatasetInputDatasetPartResponse
- from dkist_processing_common.models.graphql import InputDatasetPartResponse
- from dkist_processing_common.models.graphql import InputDatasetPartTypeResponse
- from dkist_processing_common.models.graphql import InputDatasetRecipeInstanceResponse
- from dkist_processing_common.models.graphql import InputDatasetRecipeRunResponse
- from dkist_processing_common.models.graphql import InputDatasetResponse
- from dkist_processing_common.models.graphql import RecipeInstanceResponse
- from dkist_processing_common.models.graphql import RecipeRunProvenanceResponse
- from dkist_processing_common.models.graphql import RecipeRunResponse
- from dkist_processing_common.models.graphql import RecipeRunStatusResponse
- from dkist_processing_common.models.tags import Tag
  from dkist_processing_common.parsers.l0_fits_access import L0FitsAccess
  from dkist_processing_common.tasks import WorkflowTaskBase
-
- TILE_SIZE = 64
+ from dkist_processing_common.tests.mock_metadata_store import fake_gql_client
 
 
  @pytest.fixture()
@@ -99,6 +87,21 @@ def constants_db(recipe_run_id) -> ConstantsDb:
      constants.close()
 
 
+ @pytest.fixture()
+ def fake_constants_db() -> dict:
+     """
+     A fake constants DB to prevent key errors.
+
+     Usage on a task: task.constants._update(fake_constants_db)
+     """
+     db = {
+         "PROPOSAL_ID": "PROPID",
+         "INSTRUMENT": "INSTRUMENT",
+         "OBS_IP_START_TIME": "20240416T160000",
+     }
+     return db
+
+
  class CommonDataset(Spec122Dataset):
      def __init__(self, polarimetric: bool = True):
          super().__init__(
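The relocated fixture keeps the usage pattern from its docstring; in a test it might be consumed like this (a sketch assuming some other fixture provides the task instance and that the constants object exposes its backing dict as `_db_dict`):

```python
# Sketch: seed a task's constants so key lookups (PROPOSAL_ID, INSTRUMENT, ...)
# resolve during the test; "task" is assumed to come from another fixture.
def test_task_constants(task, fake_constants_db):
    task.constants._update(fake_constants_db)
    assert task.constants._db_dict["PROPOSAL_ID"] == "PROPID"
```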
@@ -285,18 +288,18 @@ def cs_step_angle_round_ndigits() -> int:
 
 
  @pytest.fixture(scope="session")
- def angle_random_max_perturabtion(cs_step_angle_round_ndigits) -> float:
+ def angle_random_max_perturbation(cs_step_angle_round_ndigits) -> float:
      # Ensures that we always round down to zero.
      # E.g., if ndigits = 1 then this value will be 0.049.
      return 10**-cs_step_angle_round_ndigits / 2 - 10 ** -(cs_step_angle_round_ndigits + 2)
 
 
  @pytest.fixture(scope="session")
- def grouped_cal_sequence_headers(angle_random_max_perturabtion) -> dict[int, list[L0FitsAccess]]:
+ def grouped_cal_sequence_headers(angle_random_max_perturbation) -> dict[int, list[L0FitsAccess]]:
      ds = CalibrationSequenceDataset(
          array_shape=(1, 2, 2),
          time_delta=2.0,
-         angle_max_random_perturbation=angle_random_max_perturabtion,
+         angle_max_random_perturbation=angle_random_max_perturbation,
      )
      header_list = [
          spec122_validator.validate_and_translate_to_214_l0(d.header(), return_type=fits.HDUList)[
@@ -331,155 +334,9 @@ def max_cs_step_time_sec() -> float:
      return 20.0
 
 
- class FakeGQLClient:
-
-     observe_frames_doc_object = [
-         {
-             "bucket": uuid4().hex[:6],
-             "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
-         }
-     ]
-
-     calibration_frames_doc_object = [
-         {
-             "bucket": uuid4().hex[:6],
-             "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
-         },
-         {
-             "bucket": uuid4().hex[:6],
-             "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
-         },
-     ]
-
-     parameters_doc_object = [
-         {
-             "parameterName": "param_name_1",
-             "parameterValues": [
-                 {
-                     "parameterValueId": 1,
-                     "parameterValue": json.dumps([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
-                     "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
-                 }
-             ],
-         },
-         {
-             "parameterName": "param_name_2",
-             "parameterValues": [
-                 {
-                     "parameterValueId": 2,
-                     "parameterValue": json.dumps(
-                         {
-                             "__file__": {
-                                 "bucket": "data",
-                                 "objectKey": f"parameters/param_name/{uuid4().hex}.dat",
-                             }
-                         }
-                     ),
-                     "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
-                 },
-                 {
-                     "parameterValueId": 3,
-                     "parameterValue": json.dumps(
-                         {
-                             "__file__": {
-                                 "bucket": "data",
-                                 "objectKey": f"parameters/param_name/{uuid4().hex}.dat",
-                             }
-                         }
-                     ),
-                     "parameterValueStartDate": datetime(2000, 1, 2).isoformat(),
-                 },
-             ],
-         },
-         {
-             "parameterName": "param_name_4",
-             "parameterValues": [
-                 {
-                     "parameterValueId": 4,
-                     "parameterValue": json.dumps(
-                         {"a": 1, "b": 3.14159, "c": "foo", "d": [1, 2, 3]}
-                     ),
-                     "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
-                 }
-             ],
-         },
-     ]
-
-     def __init__(self, *args, **kwargs):
-         pass
-
-     def execute_gql_query(self, **kwargs):
-         query_base = kwargs["query_base"]
-         if query_base == "recipeRunStatuses":
-             return [RecipeRunStatusResponse(recipeRunStatusId=1)]
-         if query_base == "recipeRuns":
-             if kwargs.get("query_response_cls") == InputDatasetRecipeRunResponse:
-                 return [
-                     InputDatasetRecipeRunResponse(
-                         recipeInstance=InputDatasetRecipeInstanceResponse(
-                             inputDataset=InputDatasetResponse(
-                                 inputDatasetId=1,
-                                 isActive=True,
-                                 inputDatasetInputDatasetParts=[
-                                     InputDatasetInputDatasetPartResponse(
-                                         inputDatasetPart=InputDatasetPartResponse(
-                                             inputDatasetPartId=1,
-                                             inputDatasetPartDocument=json.dumps(
-                                                 self.parameters_doc_object
-                                             ),
-                                             inputDatasetPartType=InputDatasetPartTypeResponse(
-                                                 inputDatasetPartTypeName="parameters"
-                                             ),
-                                         )
-                                     ),
-                                     InputDatasetInputDatasetPartResponse(
-                                         inputDatasetPart=InputDatasetPartResponse(
-                                             inputDatasetPartId=2,
-                                             inputDatasetPartDocument=json.dumps(
-                                                 self.observe_frames_doc_object
-                                             ),
-                                             inputDatasetPartType=InputDatasetPartTypeResponse(
-                                                 inputDatasetPartTypeName="observe_frames"
-                                             ),
-                                         )
-                                     ),
-                                     InputDatasetInputDatasetPartResponse(
-                                         inputDatasetPart=InputDatasetPartResponse(
-                                             inputDatasetPartId=3,
-                                             inputDatasetPartDocument=json.dumps(
-                                                 self.calibration_frames_doc_object
-                                             ),
-                                             inputDatasetPartType=InputDatasetPartTypeResponse(
-                                                 inputDatasetPartTypeName="calibration_frames"
-                                             ),
-                                         )
-                                     ),
-                                 ],
-                             ),
-                         ),
-                     ),
-                 ]
-
-         return [
-             RecipeRunResponse(
-                 recipeInstanceId=1,
-                 recipeInstance=RecipeInstanceResponse(
-                     recipeId=1,
-                     inputDatasetId=1,
-                 ),
-                 configuration=f'{{"tile_size": {TILE_SIZE}}}',
-                 recipeRunProvenances=[
-                     RecipeRunProvenanceResponse(recipeRunProvenanceId=1, isTaskManual=False),
-                 ],
-             ),
-         ]
-
-     @staticmethod
-     def execute_gql_mutation(**kwargs):
-         ...
-
-
- # All the following stuff is copied from dkist-processing-pac
+ ####################################
+ # Copied from dkist-processing-pac #
+ ####################################
  def compute_telgeom(time_hst: Time):
      dkist_lon = (156 + 15 / 60.0 + 21.7 / 3600.0) * (-1)
      dkist_lat = 20 + 42 / 60.0 + 27.0 / 3600.0
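With the class deleted here, downstream tests no longer do `from dkist_processing_common.tests.conftest import FakeGQLClient`; per the changelog they take the new `fake_gql_client` fixture (imported at the top of this file) from `mock_metadata_store.py` instead. A hedged sketch of a migrated test, assuming the fixture yields a client class suitable for patching (its exact semantics live in the new module):

```python
# Sketch of a migrated test using the new fixture (requires pytest-mock's "mocker").
def test_change_status_to_in_progress(fake_gql_client, mocker):
    mocker.patch(
        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient",
        new=fake_gql_client,
    )
    ...  # build the task and assert on its metadata-store interactions
```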
@@ -543,7 +400,7 @@ class CalibrationSequenceStepDataset(Spec122Dataset):
          return "none" if self.pol_status == "clear" else str(self.pol_theta)
 
      @key_function("PAC__006")
-     def retarter_status(self, key: str) -> str:
+     def retarder_status(self, key: str) -> str:
          return self.ret_status
 
      @key_function("PAC__007")
@@ -795,6 +652,11 @@ def post_fit_polcal_fitter(
      return fitter
 
 
+ #################
+ # Input Dataset #
+ #################
+
+
  class InputDatasetTask(WorkflowTaskBase):
      def run(self):
          pass
@@ -826,58 +688,3 @@ def task_with_input_dataset(
          file_path.write_text(data=json.dumps({"doc_list": part}))
          task.tag(path=file_path, tags=tag)
      yield task
-
-
- def create_parameter_files(
-     task: WorkflowTaskBase, parameters_doc: list[dict] = FakeGQLClient.parameters_doc_object
- ):
-     """
-     Create the parameter files specified in the parameters document returned by the metadata store.
-
-     This fixture assumes that the JSON parameters document has already been loaded into a python
-     structure, but the parameter values themselves are still JSON.
-     """
-     for parameter in parameters_doc:
-         for value in parameter["parameterValues"]:
-             if "__file__" not in value["parameterValue"]:
-                 continue
-             parameter_value = json.loads(value["parameterValue"])
-             param_path = parameter_value["__file__"]["objectKey"]
-             file_path = task.scratch.workflow_base_path / Path(param_path)
-             if not file_path.parent.exists():
-                 file_path.parent.mkdir(parents=True, exist_ok=True)
-             file_path.write_text(data="")
-             task.tag(path=file_path, tags=Tag.parameter(param_path))
-
-
- def create_input_frames(
-     task: WorkflowTaskBase,
-     input_frame_docs: list[dict] = FakeGQLClient.observe_frames_doc_object
-     + FakeGQLClient.calibration_frames_doc_object,
- ):
-     """
-     Create the observe and calibration frame files specified in the input dataset documents
-     returned by the metadata store.
-     """
-     for frame in input_frame_docs:
-         for object_key in frame["object_keys"]:
-             file_path = task.scratch.workflow_base_path / Path(object_key)
-             if not file_path.parent.exists():
-                 file_path.parent.mkdir(parents=True, exist_ok=True)
-             file_path.write_text(data="")
-             task.tag(path=file_path, tags=[Tag.frame(), Tag.input()])
-
-
- @pytest.fixture()
- def fake_constants_db() -> dict:
-     """
-     A fake constants DB to prevent key errors.
-
-     Usage on a task: task.constants._update(fake_constants_db)
-     """
-     db = {
-         "PROPOSAL_ID": "PROPID",
-         "INSTRUMENT": "INSTRUMENT",
-         "OBS_IP_START_TIME": "20240416T160000",
-     }
-     return db