runnable-0.50.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. extensions/README.md +0 -0
  2. extensions/__init__.py +0 -0
  3. extensions/catalog/README.md +0 -0
  4. extensions/catalog/any_path.py +214 -0
  5. extensions/catalog/file_system.py +52 -0
  6. extensions/catalog/minio.py +72 -0
  7. extensions/catalog/pyproject.toml +14 -0
  8. extensions/catalog/s3.py +11 -0
  9. extensions/job_executor/README.md +0 -0
  10. extensions/job_executor/__init__.py +236 -0
  11. extensions/job_executor/emulate.py +70 -0
  12. extensions/job_executor/k8s.py +553 -0
  13. extensions/job_executor/k8s_job_spec.yaml +37 -0
  14. extensions/job_executor/local.py +35 -0
  15. extensions/job_executor/local_container.py +161 -0
  16. extensions/job_executor/pyproject.toml +16 -0
  17. extensions/nodes/README.md +0 -0
  18. extensions/nodes/__init__.py +0 -0
  19. extensions/nodes/conditional.py +301 -0
  20. extensions/nodes/fail.py +78 -0
  21. extensions/nodes/loop.py +394 -0
  22. extensions/nodes/map.py +477 -0
  23. extensions/nodes/parallel.py +281 -0
  24. extensions/nodes/pyproject.toml +15 -0
  25. extensions/nodes/stub.py +93 -0
  26. extensions/nodes/success.py +78 -0
  27. extensions/nodes/task.py +156 -0
  28. extensions/pipeline_executor/README.md +0 -0
  29. extensions/pipeline_executor/__init__.py +871 -0
  30. extensions/pipeline_executor/argo.py +1266 -0
  31. extensions/pipeline_executor/emulate.py +119 -0
  32. extensions/pipeline_executor/local.py +226 -0
  33. extensions/pipeline_executor/local_container.py +369 -0
  34. extensions/pipeline_executor/mocked.py +159 -0
  35. extensions/pipeline_executor/pyproject.toml +16 -0
  36. extensions/run_log_store/README.md +0 -0
  37. extensions/run_log_store/__init__.py +0 -0
  38. extensions/run_log_store/any_path.py +100 -0
  39. extensions/run_log_store/chunked_fs.py +122 -0
  40. extensions/run_log_store/chunked_minio.py +141 -0
  41. extensions/run_log_store/file_system.py +91 -0
  42. extensions/run_log_store/generic_chunked.py +549 -0
  43. extensions/run_log_store/minio.py +114 -0
  44. extensions/run_log_store/pyproject.toml +15 -0
  45. extensions/secrets/README.md +0 -0
  46. extensions/secrets/dotenv.py +62 -0
  47. extensions/secrets/pyproject.toml +15 -0
  48. runnable/__init__.py +108 -0
  49. runnable/catalog.py +141 -0
  50. runnable/cli.py +484 -0
  51. runnable/context.py +730 -0
  52. runnable/datastore.py +1058 -0
  53. runnable/defaults.py +159 -0
  54. runnable/entrypoints.py +390 -0
  55. runnable/exceptions.py +137 -0
  56. runnable/executor.py +561 -0
  57. runnable/gantt.py +1646 -0
  58. runnable/graph.py +501 -0
  59. runnable/names.py +546 -0
  60. runnable/nodes.py +593 -0
  61. runnable/parameters.py +217 -0
  62. runnable/pickler.py +96 -0
  63. runnable/sdk.py +1277 -0
  64. runnable/secrets.py +92 -0
  65. runnable/tasks.py +1268 -0
  66. runnable/telemetry.py +142 -0
  67. runnable/utils.py +423 -0
  68. runnable-0.50.0.dist-info/METADATA +189 -0
  69. runnable-0.50.0.dist-info/RECORD +72 -0
  70. runnable-0.50.0.dist-info/WHEEL +4 -0
  71. runnable-0.50.0.dist-info/entry_points.txt +53 -0
  72. runnable-0.50.0.dist-info/licenses/LICENSE +201 -0
extensions/pipeline_executor/local_container.py
@@ -0,0 +1,369 @@
+ import logging
+ import os
+ from pathlib import Path
+ from typing import Dict, Optional
+
+ from pydantic import Field, PrivateAttr
+
+ from extensions.pipeline_executor import GenericPipelineExecutor
+ from runnable import defaults
+ from runnable.datastore import StepAttempt
+ from runnable.defaults import IterableParameterModel
+ from runnable.nodes import BaseNode
+
+ logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+ class LocalContainerExecutor(GenericPipelineExecutor):
+     """
+     In the local-container mode, we execute all the commands in a container.
+
+     Ensure that the local compute has enough resources to finish all your jobs.
+
+     Configuration options:
+
+     ```yaml
+     pipeline-executor:
+       type: local-container
+       config:
+         docker_image: <required>
+         auto_remove_container: true/false
+         environment:
+           key: value
+         overrides:
+           alternate_config:
+             docker_image: <required>
+             auto_remove_container: true/false
+             environment:
+               key: value
+     ```
+
+     - ```docker_image```: The default docker image to use for all the steps.
+     - ```auto_remove_container```: Remove the container after execution.
+     - ```environment```: Environment variables to pass to the container.
+
+     Overrides give you the ability to override the default docker image for a single step.
+     A step can then refer to the alternate_config in the task definition.
+
+     Example:
+
+     ```python
+     from runnable import PythonTask
+
+     task = PythonTask(
+         name="alt_task",
+         overrides={
+             "local-container": "alternate_config"
+         }
+     )
+     ```
+
+     In the above example, ```alt_task``` will run in the docker image/configuration
+     as defined in the alternate_config.
+
+     ```runnable``` does not build the docker image for you; it is left to the user to build
+     it and to ensure that the docker image provided is the correct one.
+
+     """
+
+     service_name: str = "local-container"
+     enable_parallel: bool = Field(default=False)
+
+     docker_image: str
+     auto_remove_container: bool = True
+     environment: Dict[str, str] = Field(default_factory=dict)
+
+     _should_setup_run_log_at_traversal: bool = PrivateAttr(default=True)
+
+     _container_log_location = "/tmp/run_logs/"
+     _container_catalog_location = "/tmp/catalog/"
+     _container_secrets_location = "/tmp/dotenv"
+     _volumes: Dict[str, Dict[str, str]] = {}
+
+     def _get_docker_image_digest(self, docker_image: str) -> str | None:
+         """
+         Retrieve the docker image digest, trying local first, then pulling if needed.
+
+         Args:
+             docker_image: The docker image name/tag
+
+         Returns:
+             The image digest (sha256:...) or None if retrieval fails
+         """
+         import docker  # pylint: disable=C0415
+
+         try:
+             client = docker.from_env()
+
+             # Try to get the digest from a local image first
+             try:
+                 image = client.images.get(docker_image)
+                 # Get the RepoDigest which contains the sha256 digest
+                 if image.attrs.get("RepoDigests"):
+                     # RepoDigests is a list like ["registry/repo@sha256:..."]
+                     for digest in image.attrs["RepoDigests"]:
+                         if "@sha256:" in digest:
+                             return digest.split("@")[1]  # Return just "sha256:..."
+
+                 # If there is no RepoDigest, fall back to the image ID (less ideal but better than nothing)
+                 if image.id:
+                     return image.id
+
+             except docker.errors.ImageNotFound:
+                 # Image not found locally, try to pull it
+                 logger.info(
+                     f"Docker image {docker_image} not found locally, pulling..."
+                 )
+                 try:
+                     pulled_image = client.images.pull(docker_image)
+
+                     # Get the digest from the pulled image
+                     if pulled_image.attrs.get("RepoDigests"):
+                         for digest in pulled_image.attrs["RepoDigests"]:
+                             if "@sha256:" in digest:
+                                 return digest.split("@")[1]  # Return just "sha256:..."
+
+                     if pulled_image.id:
+                         return pulled_image.id
+
+                 except Exception as pull_ex:
+                     logger.warning(
+                         f"Failed to pull docker image {docker_image}: {pull_ex}"
+                     )
+
+         except Exception as ex:
+             logger.warning(
+                 f"Failed to retrieve docker image digest for {docker_image}: {ex}"
+             )
+
+         return None
+
+     def add_code_identities(self, node: BaseNode, attempt_log: StepAttempt):
+         """
+         Call the base class to add the git code identity, then add the docker identity.
+
+         Args:
+             node (BaseNode): The node we are adding the code identity for
+             attempt_log (StepAttempt): The step attempt log corresponding to the node
+         """
+
+         super().add_code_identities(node, attempt_log)
+
+         if node.node_type in ["success", "fail"]:
+             # No code identities are needed for success or fail nodes
+             return
+
+         # Add the docker image digest as a code identity if available; fall back to the image name
+         docker_digest = os.getenv("RUNNABLE_CODE_ID_DOCKER_IMAGE_DIGEST")
+
+         if not docker_digest:
+             # Fall back to the docker image name if the digest is not available
+             executor_config = self._resolve_executor_config(node)
+             docker_digest = executor_config.get("docker_image", None)
+
+         if docker_digest:
+             code_id = self._context.run_log_store.create_code_identity()
+
+             code_id.code_identifier = docker_digest
+             code_id.code_identifier_type = "docker"
+             code_id.code_identifier_dependable = True
+             code_id.code_identifier_url = "local docker host"
+             attempt_log.code_identities.append(code_id)
+
+             logger.debug(f"Added docker image code identity: {docker_digest[:50]}...")
+
+     def execute_node(
+         self,
+         node: BaseNode,
+         iter_variable: Optional[IterableParameterModel] = None,
+     ):
+         """
+         We are already in the container; we just execute the node.
+         The node is already prepared for execution.
+         """
+         self._use_volumes()
+         return self._execute_node(node, iter_variable)
+
+     def trigger_node_execution(
+         self,
+         node: BaseNode,
+         iter_variable: Optional[IterableParameterModel] = None,
+     ):
+         """
+         We come into this step via execute_from_graph and use a trigger job to spin up the container.
+
+         In local container execution, we just spin up the container to execute runnable execute_single_node.
+
+         Args:
+             node (BaseNode): The node we are currently executing
+             iter_variable (IterableParameterModel, optional): Set if the node is part of a map branch.
+                 Defaults to None.
+         """
+         self._mount_volumes()
+         executor_config = self._resolve_executor_config(node)
+         auto_remove_container = executor_config.get("auto_remove_container", True)
+
+         logger.debug("Here is the resolved executor config")
+         logger.debug(executor_config)
+
+         command = self._context.get_node_callable_command(
+             node, iter_variable=iter_variable
+         )
+
+         self._spin_container(
+             node=node,
+             command=command,
+             iter_variable=iter_variable,
+             auto_remove_container=auto_remove_container,
+         )
+
+         step_log = self._context.run_log_store.get_step_log(
+             node._get_step_log_name(iter_variable), self._context.run_id
+         )
+         if step_log.status != defaults.SUCCESS:
+             msg = (
+                 "Node execution inside the container failed. Please check the logs.\n"
+                 "Note: If you do not see any docker issue from your side and the code works properly on local execution, "
+                 "please raise a bug report."
+             )
+             logger.error(msg)
+             step_log.status = defaults.FAIL
+             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+             raise Exception(msg)
+
+     def _spin_container(
+         self,
+         node: BaseNode,
+         command: str,
+         iter_variable: Optional[IterableParameterModel] = None,
+         auto_remove_container: bool = True,
+     ):
+         """
+         During the flow run, we have to spin up a container with the docker image mentioned
+         and the right log locations.
+         """
+         # Conditional import
+         import docker  # pylint: disable=C0415
+
+         try:
+             client = docker.from_env()
+             api_client = docker.APIClient()
+         except Exception as ex:
+             logger.exception("Could not get access to docker")
+             raise Exception(
+                 "Could not get the docker socket file, do you have docker installed?"
+             ) from ex
+
+         try:
+             logger.info(f"Running the command {command}")
+             # Overrides the global config with the local one
+             executor_config = self._resolve_executor_config(node)
+
+             docker_image = executor_config.get("docker_image", None)
+             environment = executor_config.get("environment", {})
+             environment.update(self._context.variables)
+             if not docker_image:
+                 raise Exception(
+                     f"Please provide a docker_image using executor_config of the step {node.name} or at global config"
+                 )
+
+             # Retrieve the docker image digest and pass it as an environment variable
+             digest = self._get_docker_image_digest(docker_image)
+             if digest:
+                 environment["RUNNABLE_CODE_ID_DOCKER_IMAGE_DIGEST"] = digest
+                 logger.info(f"Retrieved docker image digest: {digest[:12]}...")
+             else:
+                 logger.warning(
+                     f"Could not retrieve digest for docker image: {docker_image}"
+                 )
+
+             container = client.containers.create(
+                 image=docker_image,
+                 command=command,
+                 auto_remove=False,
+                 volumes=self._volumes,
+                 # network_mode="host",
+                 environment=environment,
+             )
+
+             container.start()
+             stream = api_client.logs(
+                 container=container.id, timestamps=True, stream=True, follow=True
+             )
+             while True:
+                 try:
+                     output = next(stream).decode("utf-8")
+                     output = output.strip("\r\n")
+                     logger.info(output)
+                     print(output)
+                 except StopIteration:
+                     logger.info("Docker Run completed")
+                     break
+
+             exit_status = api_client.inspect_container(container.id)["State"][
+                 "ExitCode"
+             ]
+
+             if auto_remove_container:
+                 container.remove(force=True)
+
+             if exit_status != 0:
+                 msg = f"Docker command failed with exit code {exit_status}"
+                 raise Exception(msg)
+
+         except Exception as _e:
+             logger.exception("Problems with spinning/running the container")
+             raise _e
+
+     def _mount_volumes(self):
+         """
+         Mount the volumes for the container.
+         """
+         # TODO: There should be an abstraction on top of service providers
+         match self._context.run_log_store.service_name:
+             case "file-system":
+                 write_to = self._context.run_log_store.log_folder
+                 self._volumes[str(Path(write_to).resolve())] = {
+                     "bind": f"{self._container_log_location}",
+                     "mode": "rw",
+                 }
+             case "chunked-fs":
+                 write_to = self._context.run_log_store.log_folder
+                 self._volumes[str(Path(write_to).resolve())] = {
+                     "bind": f"{self._container_log_location}",
+                     "mode": "rw",
+                 }
+
+         match self._context.catalog.service_name:
+             case "file-system":
+                 catalog_location = self._context.catalog.catalog_location
+                 self._volumes[str(Path(catalog_location).resolve())] = {
+                     "bind": f"{self._container_catalog_location}",
+                     "mode": "rw",
+                 }
+
+         match self._context.secrets.service_name:
+             case "dotenv":
+                 secrets_location = self._context.secrets.location
+                 self._volumes[str(Path(secrets_location).resolve())] = {
+                     "bind": f"{self._container_secrets_location}",
+                     "mode": "ro",
+                 }
+
+     def _use_volumes(self):
+         match self._context.run_log_store.service_name:
+             case "file-system":
+                 self._context.run_log_store.log_folder = self._container_log_location
+             case "chunked-fs":
+                 self._context.run_log_store.log_folder = self._container_log_location
+
+         match self._context.catalog.service_name:
+             case "file-system":
+                 self._context.catalog.catalog_location = (
+                     self._container_catalog_location
+                 )
+
+         match self._context.secrets.service_name:
+             case "dotenv":
+                 self._context.secrets.location = self._container_secrets_location
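
To make the override flow above concrete, here is a minimal end-to-end sketch. Only `PythonTask` and its `overrides` argument are taken from the docstring above; the `Pipeline` class, the `function` parameter, and `pipeline.execute()` are assumptions about runnable's SDK and may differ in the released package.

```python
# Hypothetical config.yaml, following the docstring's schema:
#
# pipeline-executor:
#   type: local-container
#   config:
#     docker_image: python:3.10-slim
#     overrides:
#       alternate_config:
#         docker_image: my-registry/heavy-image:latest  # hypothetical image

from runnable import Pipeline, PythonTask  # Pipeline is an assumed import


def light() -> None:
    print("runs in the default image")


def heavy() -> None:
    print("runs in the alternate_config image")


light_task = PythonTask(name="light_task", function=light)  # `function=` is assumed
heavy_task = PythonTask(
    name="heavy_task",
    function=heavy,
    # Opt this single step into the override block defined in the config.
    overrides={"local-container": "alternate_config"},
)

pipeline = Pipeline(steps=[light_task, heavy_task])  # assumed SDK shape
pipeline.execute()  # assumed entry point; the config file is supplied at launch
```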
extensions/pipeline_executor/mocked.py
@@ -0,0 +1,159 @@
+ import copy
+ import logging
+ from typing import Any, Dict, Optional, Type, cast
+
+ from pydantic import ConfigDict, Field
+
+ from extensions.nodes.task import TaskNode
+ from extensions.pipeline_executor import GenericPipelineExecutor
+ from runnable import defaults
+ from runnable.defaults import IterableParameterModel
+ from runnable.nodes import BaseNode
+ from runnable.tasks import BaseTaskType
+
+ logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+ def create_executable(
+     params: Dict[str, Any],
+     model: Type[BaseTaskType],
+     node_name: str,
+ ) -> BaseTaskType:
+     class EasyModel(model):  # type: ignore
+         model_config = ConfigDict(extra="ignore")
+
+     swallow_all = EasyModel(node_name=node_name, **params)
+     return swallow_all
+
+
+ class MockedExecutor(GenericPipelineExecutor):
+     service_name: str = "mocked"
+     _is_local: bool = True
+
+     model_config = ConfigDict(extra="ignore")
+
+     patches: Dict[str, Any] = Field(default_factory=dict)
+
+     def execute_from_graph(
+         self,
+         node: BaseNode,
+         iter_variable: Optional[IterableParameterModel] = None,
+     ):
+         """
+         This is the entry point from the graph execution.
+
+         While self.execute_graph is responsible for traversing the graph, this function is
+         responsible for the actual execution of the node.
+
+         If the node type is:
+             * task: We can delegate to _execute_node after checking the eligibility for re-run
+             * success: We can delegate to _execute_node
+             * fail: We can delegate to _execute_node
+
+         For nodes that are internally graphs:
+             * parallel: Delegate the responsibility of execution to node.execute_as_graph()
+             * dag: Delegate the responsibility of execution to node.execute_as_graph()
+             * map: Delegate the responsibility of execution to node.execute_as_graph()
+
+         Transpilers will NEVER call this method; it should only be used by interactive executors.
+
+         Args:
+             node (Node): The node to execute
+             iter_variable (IterableParameterModel, optional): If the node is of a map state, this
+                 corresponds to the value of the iterable. Defaults to None.
+         """
+         step_log = self._context.run_log_store.create_step_log(
+             node.name, node._get_step_log_name(iter_variable)
+         )
+
+         step_log.step_type = node.node_type
+         step_log.status = defaults.PROCESSING
+
+         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+         logger.info(f"Executing node: {node.get_summary()}")
+
+         # Add the step log to the database as per the situation.
+         # If it is a terminal node, complete it now
+         if node.node_type in ["success", "fail"]:
+             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+             self._execute_node(node, iter_variable=iter_variable)
+             return
+
+         # We call an internal function to iterate the sub graphs and execute them
+         if node.is_composite:
+             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+             node.execute_as_graph(iter_variable=iter_variable)
+             return
+
+         if node.name not in self.patches:
+             # The node is not patched, so mock it
+             self._execute_node(node, iter_variable=iter_variable, mock=True)
+         else:
+             # The node is patched; rebuild its executable from the patch value
+             node_to_send: TaskNode = cast(TaskNode, node).model_copy(deep=True)
+             executable_type = node_to_send.executable.__class__
+             executable = create_executable(
+                 self.patches[node.name],
+                 executable_type,
+                 node_name=node.name,
+             )
+             node_to_send.executable = executable
+             self._execute_node(node_to_send, iter_variable=iter_variable, mock=False)
+
+     def _resolve_executor_config(self, node: BaseNode):
+         """
+         The overrides section can contain specific overrides to the global executor config.
+         To avoid too much clutter in the dag definition, we allow the configuration file to have an overrides block.
+         The nodes can override the global config by referring to a key in the overrides.
+
+         This function also applies variables to the effective node config.
+
+         For example:
+         # configuration.yaml
+         execution:
+           type: cloud-implementation
+           config:
+             k1: v1
+             k3: v3
+           overrides:
+             custom_config:
+               k1: v11
+               k2: v2 # Could be a mapping internally.
+
+         # in pipeline definition.yaml
+         dag:
+           steps:
+             step1:
+               overrides:
+                 cloud-implementation: custom_config
+
+         This method should resolve the node_config to {'k1': 'v11', 'k2': 'v2', 'k3': 'v3'}
+
+         Args:
+             node (BaseNode): The current node being processed.
+
+         """
+         # The mocked executor ignores step-level overrides and returns the global config as-is.
+         effective_node_config = copy.deepcopy(self.model_dump())
+
+         return effective_node_config
+
+     def execute_node(
+         self,
+         node: BaseNode,
+         iter_variable: Optional[IterableParameterModel] = None,
+     ):
+         """
+         The entry point for all executors apart from local.
+         We have already prepared for node execution; the mocked executor does nothing here.
+
+         Args:
+             node (BaseNode): The node to execute
+             iter_variable (IterableParameterModel, optional): If the node is part of a map, send in
+                 the map dictionary. Defaults to None.
+         """
+         ...
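
For orientation, here is a hedged sketch of how the `patches` field above might be used: every step runs as a no-op mock except the ones named in `patches`, whose task definition is rebuilt from the patch values (unknown keys are dropped via `extra="ignore"`). The config layout mirrors the local-container docstring; the `command` key and the exact task fields accepted are assumptions.

```yaml
# Hypothetical mocked-config.yaml
pipeline-executor:
  type: mocked
  config:
    patches:
      train_model:
        # Keys here are fed to the step's task type; unrecognised
        # keys are silently ignored because of extra="ignore".
        command: tests.stubs.fake_train_model  # assumed task field
```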
extensions/pipeline_executor/pyproject.toml
@@ -0,0 +1,16 @@
+ [project]
+ name = "pipeline_executor"
+ version = "0.0.0"
+ description = "Add your description here"
+ readme = "README.md"
+ requires-python = ">=3.10"
+ dependencies = []
+
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["."]
extensions/run_log_store/README.md: File without changes
extensions/run_log_store/__init__.py: File without changes
extensions/run_log_store/any_path.py
@@ -0,0 +1,100 @@
+ import logging
+ from abc import abstractmethod
+ from typing import Any, Dict
+
+ from runnable import defaults, exceptions
+ from runnable.datastore import BaseRunLogStore, RunLog
+
+ logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+ class AnyPathRunLogStore(BaseRunLogStore):
+     """
+     In this type of run log store, we use a file system to store the JSON run log.
+
+     Every single run is stored as a different file, which makes it compatible across other store types.
+
+     When to use:
+         When locally testing a pipeline and you need to compare across runs.
+         It is fully featured and perfectly fine if your local environment is where you would do everything.
+
+     Do not use:
+         If you need parallelization locally, this run log store does not support it.
+
+     Example config:
+
+     run_log:
+       type: file-system
+       config:
+         log_folder: The folder to write the logs to. Defaults to .run_log_store
+
+     """
+
+     service_name: str = "file-system"
+     log_folder: str = defaults.LOG_LOCATION_FOLDER
+
+     def get_summary(self) -> Dict[str, Any]:
+         summary = {"Type": self.service_name, "Location": self.log_folder}
+
+         return summary
+
+     @abstractmethod
+     def write_to_path(self, run_log: RunLog): ...
+
+     @abstractmethod
+     def read_from_path(self, run_id: str) -> RunLog: ...
+
+     def create_run_log(
+         self,
+         run_id: str,
+         dag_hash: str = "",
+         use_cached: bool = False,
+         tag: str = "",
+         original_run_id: str = "",
+         status: str = defaults.CREATED,
+     ) -> RunLog:
+         """
+         Creates a run log and adds it to the store.
+         Raises RunLogExistsError if a run log for the run_id already exists.
+         """
+
+         try:
+             self.get_run_log_by_id(run_id=run_id, full=False)
+             raise exceptions.RunLogExistsError(run_id=run_id)
+         except exceptions.RunLogNotFoundError:
+             pass
+
+         logger.info(f"{self.service_name} Creating a Run Log for : {run_id}")
+         run_log = RunLog(
+             run_id=run_id,
+             dag_hash=dag_hash,
+             tag=tag,
+             status=status,
+         )
+         self.write_to_path(run_log)
+         return run_log
+
+     def get_run_log_by_id(
+         self,
+         run_id: str,
+         full: bool = False,
+     ) -> RunLog:
+         """
+         Returns the run log defined by the id.
+         Raises RunLogNotFoundError if it is not found.
+         """
+         try:
+             logger.info(f"{self.service_name} Getting a Run Log for : {run_id}")
+             run_log = self.read_from_path(run_id)
+             return run_log
+         except FileNotFoundError as e:
+             raise exceptions.RunLogNotFoundError(run_id) from e
+
+     def put_run_log(self, run_log: RunLog):
+         """
+         Puts the run log into the store.
+         """
+         logger.info(
+             f"{self.service_name} Putting the run log in the DB: {run_log.run_id}"
+         )
+         self.write_to_path(run_log)
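
Since `write_to_path` and `read_from_path` are the only abstract members, a concrete store needs very little code. Below is an illustrative sketch of a file-system style subclass, not the packaged `file_system.py`; it assumes `RunLog` is a pydantic v2 model (the package imports `ConfigDict`/`Field` from pydantic), so `model_dump_json` is available for serialization.

```python
import json
from pathlib import Path

from runnable.datastore import RunLog


class SketchFileSystemRunLogStore(AnyPathRunLogStore):
    """Illustrative only: one JSON file per run_id under log_folder."""

    def write_to_path(self, run_log: RunLog):
        folder = Path(self.log_folder)
        folder.mkdir(parents=True, exist_ok=True)
        # model_dump_json is assumed from pydantic v2.
        (folder / f"{run_log.run_id}.json").write_text(run_log.model_dump_json())

    def read_from_path(self, run_id: str) -> RunLog:
        path = Path(self.log_folder) / f"{run_id}.json"
        # A missing file raises FileNotFoundError, which get_run_log_by_id
        # above translates into RunLogNotFoundError.
        return RunLog(**json.loads(path.read_text()))
```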