dkist-processing-common 11.0.0rc1__py3-none-any.whl → 11.1.0rc1__py3-none-any.whl

Files changed (29)
  1. changelog/255.misc.rst +3 -0
  2. dkist_processing_common/manual.py +2 -2
  3. dkist_processing_common/models/graphql.py +29 -22
  4. dkist_processing_common/models/input_dataset.py +4 -1
  5. dkist_processing_common/tasks/mixin/quality/_metrics.py +1 -1
  6. dkist_processing_common/tasks/trial_output_data.py +0 -3
  7. dkist_processing_common/tests/conftest.py +28 -221
  8. dkist_processing_common/tests/mock_metadata_store.py +237 -0
  9. dkist_processing_common/tests/test_assemble_movie.py +4 -3
  10. dkist_processing_common/tests/test_assemble_quality.py +1 -2
  11. dkist_processing_common/tests/test_base.py +2 -3
  12. dkist_processing_common/tests/test_interservice_bus_mixin.py +0 -1
  13. dkist_processing_common/tests/test_output_data_base.py +4 -5
  14. dkist_processing_common/tests/test_publish_catalog_messages.py +2 -3
  15. dkist_processing_common/tests/test_quality_mixin.py +1 -1
  16. dkist_processing_common/tests/test_submit_dataset_metadata.py +2 -2
  17. dkist_processing_common/tests/test_teardown.py +14 -11
  18. dkist_processing_common/tests/test_transfer_input_data.py +79 -22
  19. dkist_processing_common/tests/test_transfer_l1_output_data.py +2 -3
  20. dkist_processing_common/tests/test_trial_catalog.py +7 -3
  21. dkist_processing_common/tests/test_trial_output_data.py +44 -64
  22. dkist_processing_common/tests/test_write_l1.py +82 -54
  23. {dkist_processing_common-11.0.0rc1.dist-info → dkist_processing_common-11.1.0rc1.dist-info}/METADATA +2 -2
  24. {dkist_processing_common-11.0.0rc1.dist-info → dkist_processing_common-11.1.0rc1.dist-info}/RECORD +26 -27
  25. changelog/256.feature.rst +0 -2
  26. changelog/257.feature.rst +0 -1
  27. changelog/259.feature.rst +0 -1
  28. {dkist_processing_common-11.0.0rc1.dist-info → dkist_processing_common-11.1.0rc1.dist-info}/WHEEL +0 -0
  29. {dkist_processing_common-11.0.0rc1.dist-info → dkist_processing_common-11.1.0rc1.dist-info}/top_level.txt +0 -0
dkist_processing_common/tests/mock_metadata_store.py
@@ -0,0 +1,237 @@
+"""
+Support functions and constants for customized FakeGQLClient
+"""
+import json
+from abc import ABC
+from abc import abstractmethod
+from datetime import datetime
+from pathlib import Path
+from uuid import uuid4
+
+import pytest
+from pydantic import BaseModel
+
+from dkist_processing_common.models.graphql import InputDatasetInputDatasetPartResponse
+from dkist_processing_common.models.graphql import InputDatasetPartResponse
+from dkist_processing_common.models.graphql import InputDatasetPartTypeResponse
+from dkist_processing_common.models.graphql import InputDatasetRecipeInstanceResponse
+from dkist_processing_common.models.graphql import InputDatasetRecipeRunResponse
+from dkist_processing_common.models.graphql import InputDatasetResponse
+from dkist_processing_common.models.graphql import RecipeInstanceResponse
+from dkist_processing_common.models.graphql import RecipeRunProvenanceResponse
+from dkist_processing_common.models.graphql import RecipeRunResponse
+from dkist_processing_common.models.graphql import RecipeRunStatusResponse
+
+
+TILE_SIZE = 64
+
+default_observe_frames_doc = [
+    {
+        "bucket": uuid4().hex[:6],
+        "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
+    }
+]
+
+default_calibration_frames_doc = [
+    {
+        "bucket": uuid4().hex[:6],
+        "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
+    },
+    {
+        "bucket": uuid4().hex[:6],
+        "object_keys": [Path(uuid4().hex[:6]).as_posix() for _ in range(3)],
+    },
+]
+
+default_parameters_doc = [
+    {
+        "parameterName": "param_name_1",
+        "parameterValues": [
+            {
+                "parameterValueId": 1,
+                "parameterValue": json.dumps([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
+                "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
+            }
+        ],
+    },
+    {
+        "parameterName": "param_name_2",
+        "parameterValues": [
+            {
+                "parameterValueId": 2,
+                "parameterValue": json.dumps(
+                    {
+                        "__file__": {
+                            "bucket": "data",
+                            "objectKey": f"parameters/param_name/{uuid4().hex}.dat",
+                        }
+                    }
+                ),
+                "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
+            },
+            {
+                "parameterValueId": 3,
+                "parameterValue": json.dumps(
+                    {
+                        "__file__": {
+                            "bucket": "data",
+                            "objectKey": f"parameters/param_name/{uuid4().hex}.dat",
+                        }
+                    }
+                ),
+                "parameterValueStartDate": datetime(2000, 1, 2).isoformat(),
+            },
+        ],
+    },
+    {
+        "parameterName": "param_name_4",
+        "parameterValues": [
+            {
+                "parameterValueId": 4,
+                "parameterValue": json.dumps({"a": 1, "b": 3.14159, "c": "foo", "d": [1, 2, 3]}),
+                "parameterValueStartDate": datetime(2000, 1, 1).isoformat(),
+            }
+        ],
+    },
+]
+
+default_recipe_run_configuration = {"tile_size": TILE_SIZE}
+
+
+class Unset:
+    pass
+
+
+class ResponseMapping(BaseModel, ABC):
+    response: BaseModel
+
+    @abstractmethod
+    def match_query(self, query_base: str, query_response_cls: type):
+        pass
+
+
+class RecipeRunStatusResponseMapping(ResponseMapping):
+    def match_query(self, query_base: str, query_response_cls: type):
+        if query_base == "recipeRunStatuses":
+            if query_response_cls == RecipeRunStatusResponse:
+                return self.response
+        return Unset
+
+
+class RecipeRunResponseMapping(ResponseMapping):
+    def match_query(self, query_base: str, query_response_cls: type):
+        if query_base == "recipeRuns":
+            if query_response_cls == RecipeRunResponse:
+                return self.response
+        return Unset
+
+
+class InputDatasetRecipeRunResponseMapping(ResponseMapping):
+    def match_query(self, query_base: str, query_response_cls: type):
+        if query_base == "recipeRuns":
+            if query_response_cls == InputDatasetRecipeRunResponse:
+                return self.response
+        return Unset
+
+
+class QualityResponseMapping(ResponseMapping):
+    pass  # TODO
+
+
+def make_default_recipe_run_status_response() -> RecipeRunStatusResponse:
+    return RecipeRunStatusResponse(recipeRunStatusId=1)
+
+
+def make_default_recipe_run_response() -> RecipeRunResponse:
+    return RecipeRunResponse(
+        recipeInstanceId=1,
+        recipeInstance=RecipeInstanceResponse(
+            recipeId=1,
+            inputDatasetId=1,
+        ),
+        configuration=json.dumps(default_recipe_run_configuration),
+        recipeRunProvenances=[
+            RecipeRunProvenanceResponse(recipeRunProvenanceId=1, isTaskManual=False),
+        ],
+    )
+
+
+def make_default_input_dataset_recipe_run_response() -> InputDatasetRecipeRunResponse:
+    return InputDatasetRecipeRunResponse(
+        recipeInstance=InputDatasetRecipeInstanceResponse(
+            inputDataset=InputDatasetResponse(
+                inputDatasetId=1,
+                isActive=True,
+                inputDatasetInputDatasetParts=[
+                    InputDatasetInputDatasetPartResponse(
+                        inputDatasetPart=InputDatasetPartResponse(
+                            inputDatasetPartId=1,
+                            inputDatasetPartDocument=json.dumps(default_parameters_doc),
+                            inputDatasetPartType=InputDatasetPartTypeResponse(
+                                inputDatasetPartTypeName="parameters"
+                            ),
+                        )
+                    ),
+                    InputDatasetInputDatasetPartResponse(
+                        inputDatasetPart=InputDatasetPartResponse(
+                            inputDatasetPartId=2,
+                            inputDatasetPartDocument=json.dumps(default_observe_frames_doc),
+                            inputDatasetPartType=InputDatasetPartTypeResponse(
+                                inputDatasetPartTypeName="observe_frames"
+                            ),
+                        )
+                    ),
+                    InputDatasetInputDatasetPartResponse(
+                        inputDatasetPart=InputDatasetPartResponse(
+                            inputDatasetPartId=3,
+                            inputDatasetPartDocument=json.dumps(default_calibration_frames_doc),
+                            inputDatasetPartType=InputDatasetPartTypeResponse(
+                                inputDatasetPartTypeName="calibration_frames"
+                            ),
+                        )
+                    ),
+                ],
+            ),
+        ),
+    )
+
+
+default_response_mappings = (
+    RecipeRunStatusResponseMapping(response=make_default_recipe_run_status_response()),
+    RecipeRunResponseMapping(response=make_default_recipe_run_response()),
+    InputDatasetRecipeRunResponseMapping(response=make_default_input_dataset_recipe_run_response()),
+)
+
+
+def fake_gql_client_factory(response_mapping_override: ResponseMapping | None = None):
+
+    if response_mapping_override:
+        response_mappings = (response_mapping_override,) + default_response_mappings
+    else:
+        response_mappings = default_response_mappings
+
+    class FakeGQLClientClass:
+        def __init__(self, *args, **kwargs):
+            pass
+
+        def execute_gql_query(self, query_base: str, query_response_cls: type, *args, **kwargs):
+            # Overrides are prepended; first match is returned.
+            for rm in response_mappings:
+                response = rm.match_query(query_base, query_response_cls)
+                if response is not Unset:
+                    return [response]
+            raise ValueError(f"Mocked response not found for {query_base=}, {query_response_cls=}")
+
+        @staticmethod
+        def execute_gql_mutation(**kwargs):
+            ...
+
+    return FakeGQLClientClass
+
+
+@pytest.fixture()
+def fake_gql_client():
+    """
+    Convenience fixture for default mock GQL client. To customize, use fake_gql_client_factory.
+    """
+    return fake_gql_client_factory()
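
For orientation, a minimal usage sketch of the new factory (a hypothetical test, not part of this diff; it assumes, as the test_teardown.py changes below do, that RecipeRunResponse parses its configuration into an object with a teardown_enabled field):

def test_with_custom_recipe_run(mocker):
    # Build a response that differs from the default in one field.
    response = make_default_recipe_run_response()
    response.configuration.teardown_enabled = False
    # The override is prepended to default_response_mappings, so it wins for
    # recipeRuns queries; all other queries fall through to the defaults.
    client_cls = fake_gql_client_factory(
        response_mapping_override=RecipeRunResponseMapping(response=response)
    )
    mocker.patch(
        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=client_cls
    )

Note the design choice in match_query: returning the Unset class rather than None as the "no match" sentinel presumably keeps None available as a legitimate mocked response value.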
dkist_processing_common/tests/test_assemble_movie.py
@@ -10,7 +10,6 @@ from dkist_processing_common.models.constants import BudName
 from dkist_processing_common.models.fits_access import FitsAccessBase
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks.assemble_movie import AssembleMovie
-from dkist_processing_common.tests.conftest import FakeGQLClient


 @pytest.fixture
@@ -85,14 +84,16 @@ def assemble_task_with_tagged_movie_frames(
     "movie_dimensions",
     [pytest.param((2048, 1536), id="Even_dims"), pytest.param((2047, 1535), id="Odd_dims")],
 )
-def test_assemble_movie(assemble_task_with_tagged_movie_frames, mocker, movie_dimensions):
+def test_assemble_movie(
+    assemble_task_with_tagged_movie_frames, mocker, movie_dimensions, fake_gql_client
+):
     """
     Given: An AssembleMovie subclass with movie frames in scratch
     When: Calling the task
     Then: The movie is written and has an even number of pixels in both dimensions
     """
     mocker.patch(
-        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
     )
     assemble_task_with_tagged_movie_frames()
     expected_dimensions = tuple([size + 1 if size % 2 else size for size in movie_dimensions])
dkist_processing_common/tests/test_assemble_quality.py
@@ -482,8 +482,7 @@ def test_assemble_quality_data(
     if plot_data_expected(rm.name):
         assert rm.plot_data
     if vertical_multi_pane_plot_data_expected(rm.name):
-        # TODO: Update this once `dkist-quality` knows about vertical multi-pane metrics
-        assert True
+        assert rm.vertical_multi_pane_plot_data
     if table_data_expected(rm.name):
         assert rm.table_data
     if modmat_data_expected(rm.name):
dkist_processing_common/tests/test_base.py
@@ -7,7 +7,6 @@ from dkist_processing_common._util.scratch import WorkflowFileSystem
 from dkist_processing_common.models.tags import StemName
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks import WorkflowTaskBase
-from dkist_processing_common.tests.conftest import FakeGQLClient


 class Task(WorkflowTaskBase):
@@ -144,10 +143,10 @@ def test_write_workflow_task_tag(base_task, other_tags: str | list[str]):


 @pytest.fixture
-def rollback_task_setup(tmp_path, recipe_run_id, base_task, mocker) -> dict:
+def rollback_task_setup(tmp_path, recipe_run_id, base_task, mocker, fake_gql_client) -> dict:
     """Return setup data for a task that has data in scratch/constants written by 2 task names (the one from base_task and the RollbackTask)."""
     mocker.patch(
-        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
     )
     # add data that should remain
     keep_tag = "test_keep_tag"
dkist_processing_common/tests/test_interservice_bus_mixin.py
@@ -15,7 +15,6 @@ from dkist_processing_common.config import common_configurations
 from dkist_processing_common.models.message_queue_binding import common_message_queue_bindings
 from dkist_processing_common.tasks import WorkflowTaskBase
 from dkist_processing_common.tasks.mixin.interservice_bus import InterserviceBusMixin
-from dkist_processing_common.tests.conftest import recipe_run_id

 logger = logging.getLogger(__name__)

dkist_processing_common/tests/test_output_data_base.py
@@ -7,7 +7,6 @@ from dkist_processing_common._util.scratch import WorkflowFileSystem
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks.output_data_base import OutputDataBase
 from dkist_processing_common.tasks.output_data_base import TransferDataBase
-from dkist_processing_common.tests.conftest import FakeGQLClient


 class OutputDataBaseTask(OutputDataBase):
@@ -16,9 +15,9 @@ class OutputDataBaseTask(OutputDataBase):


 @pytest.fixture
-def output_data_base_task(recipe_run_id, mocker):
+def output_data_base_task(recipe_run_id, mocker, fake_gql_client):
     mocker.patch(
-        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
     )
     proposal_id = "test_proposal_id"
     with OutputDataBaseTask(
@@ -37,9 +36,9 @@ class TransferDataTask(TransferDataBase):


 @pytest.fixture
-def transfer_data_task(recipe_run_id, tmp_path, mocker):
+def transfer_data_task(recipe_run_id, tmp_path, mocker, fake_gql_client):
     mocker.patch(
-        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
     )
     with TransferDataTask(
         recipe_run_id=recipe_run_id,
dkist_processing_common/tests/test_publish_catalog_messages.py
@@ -4,13 +4,12 @@ import pytest

 from dkist_processing_common.models.message import CreateQualityReportMessage
 from dkist_processing_common.tasks.l1_output_data import PublishCatalogAndQualityMessages
-from dkist_processing_common.tests.conftest import FakeGQLClient


 @pytest.fixture
-def publish_catalog_and_quality_messages_task(recipe_run_id, mocker):
+def publish_catalog_and_quality_messages_task(recipe_run_id, mocker, fake_gql_client):
     mocker.patch(
-        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
     )
     with PublishCatalogAndQualityMessages(
         recipe_run_id=recipe_run_id,
dkist_processing_common/tests/test_quality_mixin.py
@@ -1300,7 +1300,7 @@ def test_build_wavecal_results(quality_task, wavecal_data_json):
     assert metric["description"] == (
         "These plots show the wavelength solution computed based on fits to a Solar FTS atlas. "
         "The top plot shows the input and best-fit spectra along with the best-fit atlas, which is "
-        "a combination of Solar and Telluric spectra. The bottom plot shows the fir residuals."
+        "a combination of Solar and Telluric spectra. The bottom plot shows the fit residuals."
     )
     assert metric["metric_code"] == MetricCode.wavecal_fit.value
     assert metric["facet"] is None
dkist_processing_common/tests/test_submit_dataset_metadata.py
@@ -7,7 +7,6 @@ from dkist_processing_common._util.scratch import WorkflowFileSystem
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks import SubmitDatasetMetadata
 from dkist_processing_common.tasks.mixin import metadata_store
-from dkist_processing_common.tests.conftest import FakeGQLClient


 @pytest.fixture()
@@ -86,6 +85,7 @@ def submit_dataset_metadata_task(
 def test_submit_dataset_metadata(
     submit_dataset_metadata_task,
     mocker,
+    fake_gql_client,
 ):
     """
     :Given: An instance of SubmitDatasetMetadata with tagged processed data
@@ -93,7 +93,7 @@ def test_submit_dataset_metadata(
     :Then: Metadata files for the dataset are saved to the remote database
     """
     mocker.patch(
-        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
    )
     # intercept these two GraphQLClient calls so they can be confirmed
     mocked_metadata_store_add_dataset_receipt_account = mocker.patch.object(
dkist_processing_common/tests/test_teardown.py
@@ -5,10 +5,11 @@ import pytest

 from dkist_processing_common._util.scratch import WorkflowFileSystem
 from dkist_processing_common.codecs.str import str_encoder
-from dkist_processing_common.models.graphql import RecipeRunResponse
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks.teardown import Teardown
-from dkist_processing_common.tests.conftest import FakeGQLClient
+from dkist_processing_common.tests.mock_metadata_store import fake_gql_client_factory
+from dkist_processing_common.tests.mock_metadata_store import make_default_recipe_run_response
+from dkist_processing_common.tests.mock_metadata_store import RecipeRunResponseMapping


 class TeardownTest(Teardown):
@@ -19,15 +20,15 @@ class TeardownTest(Teardown):
 @pytest.fixture()
 def make_mock_GQL_with_configuration():
     def class_generator(teardown_option: bool | None):
-        class TeardownFakeGQLClient(FakeGQLClient):
-            def execute_gql_query(self, **kwargs):
-                response = super().execute_gql_query(**kwargs)
-                if isinstance(response, list):
-                    if isinstance(response[0], RecipeRunResponse):
-                        if isinstance(teardown_option, bool):
-                            response[0].configuration.teardown_enabled = teardown_option
-                return response
-
+        recipe_run_response = make_default_recipe_run_response()
+        if isinstance(teardown_option, bool):
+            recipe_run_response.configuration.teardown_enabled = teardown_option
+        # When teardown_option is None, the default configuration (which does
+        # not set teardown_enabled) is used unchanged.
+        response_mapping_override = RecipeRunResponseMapping(response=recipe_run_response)
+        TeardownFakeGQLClient = fake_gql_client_factory(
+            response_mapping_override=response_mapping_override
+        )
         return TeardownFakeGQLClient

     return class_generator
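
A hedged sketch of how this fixture is presumably consumed elsewhere in test_teardown.py (the consuming tests are outside this hunk; the test name and behavior comment are hypothetical):

def test_teardown_respects_configuration(make_mock_GQL_with_configuration, mocker):
    # With teardown_enabled=False the task is expected to skip purging scratch.
    client_cls = make_mock_GQL_with_configuration(teardown_option=False)
    mocker.patch(
        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=client_cls
    )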
dkist_processing_common/tests/test_transfer_input_data.py
@@ -6,13 +6,57 @@ import pytest

 from dkist_processing_common._util.scratch import WorkflowFileSystem
 from dkist_processing_common.codecs.basemodel import basemodel_decoder
-from dkist_processing_common.models.graphql import InputDatasetRecipeRunResponse
 from dkist_processing_common.models.input_dataset import InputDatasetPartDocumentList
 from dkist_processing_common.models.tags import Tag
+from dkist_processing_common.tasks import WorkflowTaskBase
 from dkist_processing_common.tasks.transfer_input_data import TransferL0Data
-from dkist_processing_common.tests.conftest import create_input_frames
-from dkist_processing_common.tests.conftest import create_parameter_files
-from dkist_processing_common.tests.conftest import FakeGQLClient
+from dkist_processing_common.tests.mock_metadata_store import default_calibration_frames_doc
+from dkist_processing_common.tests.mock_metadata_store import default_observe_frames_doc
+from dkist_processing_common.tests.mock_metadata_store import default_parameters_doc
+from dkist_processing_common.tests.mock_metadata_store import fake_gql_client_factory
+from dkist_processing_common.tests.mock_metadata_store import InputDatasetRecipeRunResponseMapping
+from dkist_processing_common.tests.mock_metadata_store import (
+    make_default_input_dataset_recipe_run_response,
+)
+
+
+def create_parameter_files(
+    task: WorkflowTaskBase, parameters_doc: list[dict] = default_parameters_doc
+):
+    """
+    Create the parameter files specified in the parameters document returned by the metadata store.
+
+    This helper assumes that the JSON parameters document has already been loaded into a python
+    structure (a list of dicts), but the parameter values themselves are still JSON.
+    """
+    for parameter in parameters_doc:
+        for value in parameter["parameterValues"]:
+            if "__file__" not in value["parameterValue"]:
+                continue
+            parameter_value = json.loads(value["parameterValue"])
+            param_path = parameter_value["__file__"]["objectKey"]
+            file_path = task.scratch.workflow_base_path / Path(param_path)
+            if not file_path.parent.exists():
+                file_path.parent.mkdir(parents=True, exist_ok=True)
+            file_path.write_text(data="")
+            task.tag(path=file_path, tags=Tag.parameter(param_path))
+
+
+def create_input_frames(
+    task: WorkflowTaskBase,
+    input_frame_docs: list[dict] = default_observe_frames_doc + default_calibration_frames_doc,
+):
+    """
+    Create the observe and calibration frame files specified in the input dataset documents
+    returned by the metadata store.
+    """
+    for frame in input_frame_docs:
+        for object_key in frame["object_keys"]:
+            file_path = task.scratch.workflow_base_path / Path(object_key)
+            if not file_path.parent.exists():
+                file_path.parent.mkdir(parents=True, exist_ok=True)
+            file_path.write_text(data="")
+            task.tag(path=file_path, tags=[Tag.frame(), Tag.input()])


 class TransferL0DataTask(TransferL0Data):
@@ -20,15 +64,26 @@ class TransferL0DataTask(TransferL0Data):
     ...


-class FakeGQLClientMissingInputDatasetCalibrationPart(FakeGQLClient):
-    """Same metadata mocker with calibration input dataset part missing."""
+@pytest.fixture
+def fake_gql_client_class_missing_calibration_part():
+    input_dataset_recipe_run_response = make_default_input_dataset_recipe_run_response()
+    dataset_parts = (
+        input_dataset_recipe_run_response.recipeInstance.inputDataset.inputDatasetInputDatasetParts
+    )
+    for index, part in enumerate(dataset_parts):
+        if (
+            part.inputDatasetPart.inputDatasetPartType.inputDatasetPartTypeName
+            == "calibration_frames"
+        ):
+            del dataset_parts[index]
+    new_response_mapping = InputDatasetRecipeRunResponseMapping(
+        response=input_dataset_recipe_run_response
+    )
+    FakeGQLClientMissingInputDatasetCalibrationPart = fake_gql_client_factory(
+        response_mapping_override=new_response_mapping
+    )

-    def execute_gql_query(self, **kwargs):
-        original_response = super().execute_gql_query(**kwargs)
-        # Remove calibration frames part if getting InputDatasetRecipeRunResponse:
-        if kwargs.get("query_response_cls") == InputDatasetRecipeRunResponse:
-            del original_response[0].recipeInstance.inputDataset.inputDatasetInputDatasetParts[2]
-        return original_response
+    return FakeGQLClientMissingInputDatasetCalibrationPart


 def _transfer_l0_data_task_with_client(recipe_run_id, tmp_path, mocker, client_cls):
@@ -50,14 +105,16 @@ def _transfer_l0_data_task_with_client(recipe_run_id, tmp_path, mocker, client_cls):


 @pytest.fixture
-def transfer_l0_data_task(recipe_run_id, tmp_path, mocker):
-    yield from _transfer_l0_data_task_with_client(recipe_run_id, tmp_path, mocker, FakeGQLClient)
+def transfer_l0_data_task(recipe_run_id, tmp_path, mocker, fake_gql_client):
+    yield from _transfer_l0_data_task_with_client(recipe_run_id, tmp_path, mocker, fake_gql_client)


 @pytest.fixture
-def transfer_l0_data_task_missing_calibration_part(recipe_run_id, tmp_path, mocker):
+def transfer_l0_data_task_missing_calibration_part(
+    recipe_run_id, tmp_path, mocker, fake_gql_client_class_missing_calibration_part
+):
     yield from _transfer_l0_data_task_with_client(
-        recipe_run_id, tmp_path, mocker, FakeGQLClientMissingInputDatasetCalibrationPart
+        recipe_run_id, tmp_path, mocker, fake_gql_client_class_missing_calibration_part
    )


@@ -65,17 +122,17 @@ def transfer_l0_data_task_missing_calibration_part(recipe_run_id, tmp_path, mocker):
     "expected_doc, tag",
     [
         pytest.param(
-            FakeGQLClient.observe_frames_doc_object,
+            default_observe_frames_doc,
             Tag.input_dataset_observe_frames(),
             id="observe_frames",
         ),
         pytest.param(
-            FakeGQLClient.calibration_frames_doc_object,
+            default_calibration_frames_doc,
            Tag.input_dataset_calibration_frames(),
            id="calibration_frames",
        ),
        pytest.param(
-            FakeGQLClient.parameters_doc_object,
+            default_parameters_doc,
             Tag.input_dataset_parameters(),
             id="parameters",
         ),
@@ -175,9 +232,9 @@ def test_build_frame_transfer_list_formatted(request, task_name):
     # Then
     source_filenames = []
     destination_filenames = []
-    expected_frames = list(FakeGQLClient.observe_frames_doc_object)
+    expected_frames = list(default_observe_frames_doc)
     if "missing_calibration_part" not in task_name:
-        expected_frames += FakeGQLClient.calibration_frames_doc_object
+        expected_frames += default_calibration_frames_doc
     for frame_set in expected_frames:
         for key in frame_set["object_keys"]:
             source_filenames.append(os.path.join("/", frame_set["bucket"], key))
@@ -204,7 +261,7 @@ def test_build_parameter_file_transfer_items(transfer_l0_data_task):
     # Then
     source_filenames = []
     destination_filenames = []
-    parameters = FakeGQLClient.parameters_doc_object
+    parameters = default_parameters_doc
    for param in parameters:
        for value in param["parameterValues"]:
            if "__file__" in value["parameterValue"]:
dkist_processing_common/tests/test_transfer_l1_output_data.py
@@ -5,7 +5,6 @@ import pytest
 from dkist_processing_common._util.scratch import WorkflowFileSystem
 from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks import TransferL1Data
-from dkist_processing_common.tests.conftest import FakeGQLClient


 def fake_list_objects(self, bucket, prefix=None):
@@ -37,7 +36,7 @@ def transfer_l1_data_task(recipe_run_id, tmp_path, fake_constants_db):
         task._purge()


-def test_transfer_l1_data(transfer_l1_data_task, mocker):
+def test_transfer_l1_data(transfer_l1_data_task, mocker, fake_gql_client):
     """
     Given: A task with frames and movies tagged as output
     When: Transferring the L1 data
@@ -45,7 +44,7 @@ def test_transfer_l1_data(transfer_l1_data_task, mocker):
     """
     # Yeah, we mock a whole bunch of stuff here, but this test at least confirms that the setup to these calls is correct
     mocker.patch(
-        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=FakeGQLClient
+        "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient", new=fake_gql_client
     )
     mocker.patch(
         "dkist_processing_common.tasks.mixin.globus.GlobusMixin.globus_transfer_scratch_to_object_store"
dkist_processing_common/tests/test_trial_catalog.py
@@ -19,7 +19,6 @@ from dkist_processing_common.models.tags import Tag
 from dkist_processing_common.tasks import CreateTrialAsdf
 from dkist_processing_common.tasks import CreateTrialDatasetInventory
 from dkist_processing_common.tasks import CreateTrialQualityReport
-from dkist_processing_common.tests.conftest import FakeGQLClient


 @pytest.fixture()
@@ -46,12 +45,17 @@ def scratch_with_l1_frames(recipe_run_id, tmp_path) -> WorkflowFileSystem:

 @pytest.fixture()
 def create_trial_dataset_inventory_task(
-    recipe_run_id, tmp_path, scratch_with_l1_frames, fake_constants_db, mocker
+    recipe_run_id,
+    tmp_path,
+    scratch_with_l1_frames,
+    fake_constants_db,
+    mocker,
+    fake_gql_client,
 ) -> CreateTrialDatasetInventory:
     """An instance of CreateTrialDatasetInventory with L1 frames tagged in scratch."""
     mocker.patch(
         "dkist_processing_common.tasks.mixin.metadata_store.GraphQLClient",
-        new=FakeGQLClient,
+        new=fake_gql_client,
     )
     task = CreateTrialDatasetInventory(
         recipe_run_id=recipe_run_id,