runnable-0.50.0-py3-none-any.whl

This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the package as it appears in its public registry.
Files changed (72)
  1. extensions/README.md +0 -0
  2. extensions/__init__.py +0 -0
  3. extensions/catalog/README.md +0 -0
  4. extensions/catalog/any_path.py +214 -0
  5. extensions/catalog/file_system.py +52 -0
  6. extensions/catalog/minio.py +72 -0
  7. extensions/catalog/pyproject.toml +14 -0
  8. extensions/catalog/s3.py +11 -0
  9. extensions/job_executor/README.md +0 -0
  10. extensions/job_executor/__init__.py +236 -0
  11. extensions/job_executor/emulate.py +70 -0
  12. extensions/job_executor/k8s.py +553 -0
  13. extensions/job_executor/k8s_job_spec.yaml +37 -0
  14. extensions/job_executor/local.py +35 -0
  15. extensions/job_executor/local_container.py +161 -0
  16. extensions/job_executor/pyproject.toml +16 -0
  17. extensions/nodes/README.md +0 -0
  18. extensions/nodes/__init__.py +0 -0
  19. extensions/nodes/conditional.py +301 -0
  20. extensions/nodes/fail.py +78 -0
  21. extensions/nodes/loop.py +394 -0
  22. extensions/nodes/map.py +477 -0
  23. extensions/nodes/parallel.py +281 -0
  24. extensions/nodes/pyproject.toml +15 -0
  25. extensions/nodes/stub.py +93 -0
  26. extensions/nodes/success.py +78 -0
  27. extensions/nodes/task.py +156 -0
  28. extensions/pipeline_executor/README.md +0 -0
  29. extensions/pipeline_executor/__init__.py +871 -0
  30. extensions/pipeline_executor/argo.py +1266 -0
  31. extensions/pipeline_executor/emulate.py +119 -0
  32. extensions/pipeline_executor/local.py +226 -0
  33. extensions/pipeline_executor/local_container.py +369 -0
  34. extensions/pipeline_executor/mocked.py +159 -0
  35. extensions/pipeline_executor/pyproject.toml +16 -0
  36. extensions/run_log_store/README.md +0 -0
  37. extensions/run_log_store/__init__.py +0 -0
  38. extensions/run_log_store/any_path.py +100 -0
  39. extensions/run_log_store/chunked_fs.py +122 -0
  40. extensions/run_log_store/chunked_minio.py +141 -0
  41. extensions/run_log_store/file_system.py +91 -0
  42. extensions/run_log_store/generic_chunked.py +549 -0
  43. extensions/run_log_store/minio.py +114 -0
  44. extensions/run_log_store/pyproject.toml +15 -0
  45. extensions/secrets/README.md +0 -0
  46. extensions/secrets/dotenv.py +62 -0
  47. extensions/secrets/pyproject.toml +15 -0
  48. runnable/__init__.py +108 -0
  49. runnable/catalog.py +141 -0
  50. runnable/cli.py +484 -0
  51. runnable/context.py +730 -0
  52. runnable/datastore.py +1058 -0
  53. runnable/defaults.py +159 -0
  54. runnable/entrypoints.py +390 -0
  55. runnable/exceptions.py +137 -0
  56. runnable/executor.py +561 -0
  57. runnable/gantt.py +1646 -0
  58. runnable/graph.py +501 -0
  59. runnable/names.py +546 -0
  60. runnable/nodes.py +593 -0
  61. runnable/parameters.py +217 -0
  62. runnable/pickler.py +96 -0
  63. runnable/sdk.py +1277 -0
  64. runnable/secrets.py +92 -0
  65. runnable/tasks.py +1268 -0
  66. runnable/telemetry.py +142 -0
  67. runnable/utils.py +423 -0
  68. runnable-0.50.0.dist-info/METADATA +189 -0
  69. runnable-0.50.0.dist-info/RECORD +72 -0
  70. runnable-0.50.0.dist-info/WHEEL +4 -0
  71. runnable-0.50.0.dist-info/entry_points.txt +53 -0
  72. runnable-0.50.0.dist-info/licenses/LICENSE +201 -0
extensions/job_executor/k8s.py
@@ -0,0 +1,553 @@
+ import logging
+ import re
+ import shlex
+ from enum import Enum
+ from typing import Annotated, List, Optional
+
+ from pydantic import (
+     BaseModel,
+     ConfigDict,
+     Field,
+     PlainSerializer,
+     PrivateAttr,
+     field_validator,
+ )
+ from pydantic.alias_generators import to_camel
+
+ from extensions.job_executor import GenericJobExecutor
+ from runnable import console, context, defaults
+ from runnable.tasks import BaseTaskType
+
+ logger = logging.getLogger(defaults.NAME)
+
+
+ # TODO: Secrets should be exposed
+ class Operator(str, Enum):
+     NOT_IN = "NotIn"
+     EXISTS = "Exists"
+     DOES_NOT_EXIST = "DoesNotExist"
+
+
+ class RestartPolicy(str, Enum):
+     ALWAYS = "Always"
+     ON_FAILURE = "OnFailure"
+     NEVER = "Never"
+
+
+ class ImagePullPolicy(str, Enum):
+     ALWAYS = "Always"
+     IF_NOT_PRESENT = "IfNotPresent"
+     NEVER = "Never"
+
+
+ class TolerationOperator(str, Enum):
+     EXISTS = "Exists"
+     EQUAL = "Equal"
+
+
+ class BaseModelWIthConfig(BaseModel, use_enum_values=True):
+     model_config = ConfigDict(
+         extra="forbid",
+         alias_generator=to_camel,
+         populate_by_name=True,
+         from_attributes=True,
+         validate_default=True,
+     )
+
+
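This shared config is what lets every model below accept both the Python snake_case field names and the camelCase keys used in Kubernetes manifests. A minimal standalone sketch of that behavior (`Demo` is a stand-in, not part of the package):

```python
from typing import Optional

from pydantic import BaseModel, ConfigDict
from pydantic.alias_generators import to_camel


class Demo(BaseModel):
    # the same knobs as BaseModelWIthConfig above
    model_config = ConfigDict(
        extra="forbid",
        alias_generator=to_camel,
        populate_by_name=True,
    )

    toleration_seconds: Optional[int] = None


print(Demo(tolerationSeconds=30))   # populated via the generated camelCase alias
print(Demo(toleration_seconds=30))  # populated via the field name (populate_by_name=True)
print(Demo(toleration_seconds=30).model_dump(by_alias=True))  # {'tolerationSeconds': 30}
```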
+ class Toleration(BaseModelWIthConfig):
+     key: str
+     operator: TolerationOperator = TolerationOperator.EQUAL
+     value: Optional[str]
+     effect: str
+     toleration_seconds: Optional[int] = Field(default=None)
+
+
+ class LabelSelectorRequirement(BaseModelWIthConfig):
+     key: str
+     operator: Operator
+     values: list[str]
+
+
+ class LabelSelector(BaseModelWIthConfig):
+     match_expressions: list[LabelSelectorRequirement]
+     match_labels: dict[str, str]
+
+
+ class ObjectMetaData(BaseModelWIthConfig):
+     generate_name: Optional[str]
+     annotations: Optional[dict[str, str]]
+     namespace: Optional[str] = "default"
+
+
+ class EnvVar(BaseModelWIthConfig):
+     name: str
+     value: str
+
+
+ VendorGPU = Annotated[
+     Optional[int],
+     PlainSerializer(lambda x: str(x), return_type=str, when_used="unless-none"),
+ ]
+
+
+ class Request(BaseModelWIthConfig):
+     """
+     The default requests
+     """
+
+     memory: str = "1Gi"
+     cpu: str = "250m"
+     gpu: VendorGPU = Field(default=None, serialization_alias="nvidia.com/gpu")
+
+
+ class Limit(BaseModelWIthConfig):
+     """
+     The default limits
+     """
+
+     memory: str = "1Gi"
+     cpu: str = "250m"
+     gpu: VendorGPU = Field(default=None, serialization_alias="nvidia.com/gpu")
+
+
+ class Resources(BaseModelWIthConfig):
+     limits: Limit = Limit()
+     requests: Optional[Request] = Field(default=None)
+
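The `VendorGPU` annotation and the `serialization_alias` combine so that an integer GPU count serializes as the string Kubernetes expects, under the vendor-specific resource key, and `exclude_none` drops it entirely when unset. This mirrors the `model_dump(by_alias=True, exclude_none=True)` call made later in `submit_k8s_job`. A standalone sketch:

```python
from typing import Annotated, Optional

from pydantic import BaseModel, Field, PlainSerializer

VendorGPU = Annotated[
    Optional[int],
    PlainSerializer(lambda x: str(x), return_type=str, when_used="unless-none"),
]


class DemoRequest(BaseModel):
    memory: str = "1Gi"
    cpu: str = "250m"
    gpu: VendorGPU = Field(default=None, serialization_alias="nvidia.com/gpu")


print(DemoRequest(gpu=2).model_dump(by_alias=True, exclude_none=True))
# -> {'memory': '1Gi', 'cpu': '250m', 'nvidia.com/gpu': '2'}
print(DemoRequest().model_dump(by_alias=True, exclude_none=True))
# -> {'memory': '1Gi', 'cpu': '250m'}  (gpu omitted entirely)
```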
+
+ class VolumeMount(BaseModelWIthConfig):
+     name: str
+     mount_path: str
+
+
+ class Container(BaseModelWIthConfig):
+     image: str
+     env: list[EnvVar] = Field(default_factory=list)
+     image_pull_policy: ImagePullPolicy = Field(default=ImagePullPolicy.NEVER)
+     resources: Resources = Resources()
+     volume_mounts: Optional[list[VolumeMount]] = Field(default_factory=lambda: [])
+
+
+ class HostPath(BaseModelWIthConfig):
+     path: str
+
+
+ class HostPathVolume(BaseModelWIthConfig):
+     name: str
+     host_path: HostPath
+
+
+ class PVCClaim(BaseModelWIthConfig):
+     claimName: str
+
+
+ class PVCVolume(BaseModelWIthConfig):
+     name: str
+     persistent_volume_claim: PVCClaim
+
+
+ class K8sTemplateSpec(BaseModelWIthConfig):
+     active_deadline_seconds: int = Field(default=60 * 60 * 2)  # 2 hours
+     node_selector: Optional[dict[str, str]] = None
+     tolerations: Optional[list[Toleration]] = None
+     volumes: Optional[list[HostPathVolume | PVCVolume]] = Field(
+         default_factory=lambda: []
+     )
+     service_account_name: Optional[str] = "default"
+     restart_policy: RestartPolicy = RestartPolicy.NEVER
+     container: Container
+
+
+ class K8sTemplate(BaseModelWIthConfig):
+     spec: K8sTemplateSpec
+     metadata: Optional[ObjectMetaData] = None
+
+
+ class Spec(BaseModelWIthConfig):
+     active_deadline_seconds: Optional[int] = Field(default=60 * 60 * 2)  # 2 hours
+     backoff_limit: int = 6
+     selector: Optional[LabelSelector] = None
+     template: K8sTemplate
+     ttl_seconds_after_finished: Optional[int] = Field(default=60 * 60 * 24)  # 24 hours
+
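Because all of these models share the camelCase alias config, a nested job spec written with Kubernetes-style keys validates directly into `Spec`. A sketch using the models just defined (the image name and values are made up):

```python
spec = Spec.model_validate(
    {
        "backoffLimit": 3,
        "template": {
            "spec": {
                "activeDeadlineSeconds": 600,
                "container": {
                    "image": "my-image:latest",  # hypothetical image
                    "env": [{"name": "FOO", "value": "bar"}],
                },
            }
        },
    }
)
print(spec.template.spec.container.image)  # my-image:latest
print(spec.ttl_seconds_after_finished)     # 86400, the 24 hour default
```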
+
+ class GenericK8sJobExecutor(GenericJobExecutor):
+     service_name: str = "k8s-job"
+     config_path: Optional[str] = None
+     job_spec: Spec
+     mock: bool = False
+     namespace: str = Field(default="default")
+     schedule: Optional[str] = Field(
+         default=None, description="Cron expression for scheduling (e.g., '0 2 * * *')"
+     )
+
+     @field_validator("schedule")
+     @classmethod
+     def validate_schedule(cls, v):
+         if v is not None:
+             # Validate cron expression format (5 fields: minute hour day month weekday)
+             if not re.match(r"^(\S+\s+){4}\S+$", v):
+                 raise ValueError(
+                     "Schedule must be a valid cron expression with 5 fields (minute hour day month weekday)"
+                 )
+         return v
+
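Note that the validator is a shape check only: it requires five whitespace-separated fields but does not range-check them, so an out-of-range expression still passes. A standalone sketch of the regex:

```python
import re

CRON_SHAPE = re.compile(r"^(\S+\s+){4}\S+$")

print(bool(CRON_SHAPE.match("0 2 * * *")))     # True: nightly at 02:00
print(bool(CRON_SHAPE.match("*/15 * * * *")))  # True: every 15 minutes
print(bool(CRON_SHAPE.match("0 2 * *")))       # False: only four fields
print(bool(CRON_SHAPE.match("99 99 * * *")))   # True: shape-valid, but not a real schedule
```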
+     _should_setup_run_log_at_traversal: bool = PrivateAttr(default=False)
+     _volume_mounts: list[VolumeMount] = PrivateAttr(default_factory=lambda: [])
+     _volumes: list[HostPathVolume | PVCVolume] = PrivateAttr(default_factory=lambda: [])
+
+     _container_log_location: str = PrivateAttr(default="/tmp/run_logs/")
+     _container_catalog_location: str = PrivateAttr(default="/tmp/catalog/")
+     _container_secrets_location: str = PrivateAttr(default="/tmp/dotenv")
+
+     model_config = ConfigDict(
+         alias_generator=to_camel,
+         populate_by_name=True,
+         from_attributes=True,
+     )
+
+     def submit_job(self, job: BaseTaskType, catalog_settings: Optional[List[str]] = None):
+         """
+         This method gets invoked by the CLI.
+         """
+         self._set_up_run_log()
+
+         # Call the container job
+         job_log = self._context.run_log_store.create_job_log()
+         self._context.run_log_store.add_job_log(
+             run_id=self._context.run_id, job_log=job_log
+         )
+         # create volumes and volume mounts for the job
+         self._create_volumes()
+
+         # submit_k8s_job now handles both regular jobs and cronjobs
+         self.submit_k8s_job(job)
+
+     @property
+     def _client(self):
+         # Lazy import kubernetes dependencies to avoid import-time failures in tests
+         try:
+             from kubernetes import client
+             from kubernetes import config as k8s_config
+         except ImportError:
+             raise ImportError(
+                 "Kubernetes Python client is required but not installed. "
+                 "Install it with: uv add 'runnable[k8s]'"
+             )
+
+         if self.config_path:
+             k8s_config.load_kube_config(config_file=self.config_path)
+         else:
+             # https://github.com/kubernetes-client/python/blob/master/kubernetes/base/config/__init__.py
+             k8s_config.load_config()
+         return client
+
+     def submit_k8s_job(self, task: BaseTaskType):
+         """
+         Submit a Kubernetes Job or CronJob based on whether schedule is configured.
+         This method builds the job specification once and then creates either a Job or CronJob.
+         """
+         # Build volume mounts
+         if self.job_spec.template.spec.container.volume_mounts:
+             self._volume_mounts += self.job_spec.template.spec.container.volume_mounts
+
+         container_volume_mounts = [
+             self._client.V1VolumeMount(**vol.model_dump())
+             for vol in self._volume_mounts
+         ]
+
+         # Get command
+         assert isinstance(self._context, context.JobContext)
+         command = self._context.get_job_callable_command()
+
+         # Build container env
+         container_env = [
+             self._client.V1EnvVar(**env.model_dump())
+             for env in self.job_spec.template.spec.container.env
+         ]
+
+         # Build container
+         base_container = self._client.V1Container(
+             command=shlex.split(command),
+             env=container_env,
+             name="default",
+             volume_mounts=container_volume_mounts,
+             resources=self.job_spec.template.spec.container.resources.model_dump(
+                 by_alias=True, exclude_none=True
+             ),
+             **self.job_spec.template.spec.container.model_dump(
+                 exclude_none=True,
+                 exclude={"volume_mounts", "command", "env", "resources"},
+             ),
+         )
+
+         # Build volumes
+         if self.job_spec.template.spec.volumes:
+             self._volumes += self.job_spec.template.spec.volumes
+
+         spec_volumes = [
+             self._client.V1Volume(**vol.model_dump()) for vol in self._volumes
+         ]
+
+         # Build tolerations
+         tolerations = None
+         if self.job_spec.template.spec.tolerations:
+             tolerations = [
+                 self._client.V1Toleration(**toleration.model_dump())
+                 for toleration in self.job_spec.template.spec.tolerations
+             ]
+
+         # Build pod spec
+         pod_spec = self._client.V1PodSpec(
+             containers=[base_container],
+             volumes=spec_volumes,
+             tolerations=tolerations,
+             **self.job_spec.template.spec.model_dump(
+                 exclude_none=True, exclude={"container", "volumes", "tolerations"}
+             ),
+         )
+
+         # Build pod template metadata
+         pod_template_metadata = None
+         if self.job_spec.template.metadata:
+             pod_template_metadata = self._client.V1ObjectMeta(
+                 **self.job_spec.template.metadata.model_dump(exclude_none=True)
+             )
+
+         # Build pod template
+         pod_template = self._client.V1PodTemplateSpec(
+             spec=pod_spec,
+             metadata=pod_template_metadata,
+         )
+
+         # Build job spec
+         job_spec = self._client.V1JobSpec(
+             template=pod_template,
+             **self.job_spec.model_dump(exclude_none=True, exclude={"template"}),
+         )
+
+         # Decision point: Create Job or CronJob based on schedule
+         if self.schedule:
+             # Create CronJob
+             cronjob_spec = self._client.V1CronJobSpec(
+                 schedule=self.schedule,
+                 job_template=self._client.V1JobTemplateSpec(spec=job_spec),
+             )
+
+             cronjob = self._client.V1CronJob(
+                 api_version="batch/v1",
+                 kind="CronJob",
+                 metadata=self._client.V1ObjectMeta(name=self._context.run_id),
+                 spec=cronjob_spec,
+             )
+
+             logger.info(f"Submitting CronJob: {cronjob.__dict__}")
+             self._display_scheduled_job_info(cronjob)
+
+             if self.mock:
+                 logger.info(cronjob.__dict__)
+                 return
+
+             try:
+                 k8s_batch = self._client.BatchV1Api()
+                 response = k8s_batch.create_namespaced_cron_job(
+                     body=cronjob,
+                     namespace=self.namespace,
+                 )
+                 logger.debug(f"Kubernetes CronJob response: {response}")
+             except Exception as e:
+                 logger.exception(e)
+                 print(e)
+                 raise
+         else:
+             # Create regular Job
+             job = self._client.V1Job(
+                 api_version="batch/v1",
+                 kind="Job",
+                 metadata=self._client.V1ObjectMeta(name=self._context.run_id),
+                 spec=job_spec,
+             )
+
+             logger.info(f"Submitting job: {job.__dict__}")
+             if self.mock:
+                 logger.info(job.__dict__)
+                 return
+
+             try:
+                 k8s_batch = self._client.BatchV1Api()
+                 response = k8s_batch.create_namespaced_job(
+                     body=job,
+                     _preload_content=False,
+                     pretty=True,
+                     namespace=self.namespace,
+                 )
+                 logger.debug(f"Kubernetes job response: {response}")
+             except Exception as e:
+                 logger.exception(e)
+                 print(e)
+                 raise
+
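`submit_k8s_job` leans on one recurring pattern: dump a pydantic model, exclude the keys that are assembled by hand, and spread the remainder into the client constructor. A standalone sketch of the idea (`DemoSpec` and `DemoTarget` are hypothetical stand-ins, not kubernetes client classes):

```python
from pydantic import BaseModel


class DemoSpec(BaseModel):
    backoff_limit: int = 6
    template: str = "built-by-hand"


class DemoTarget:
    def __init__(self, template, backoff_limit):
        self.template = template
        self.backoff_limit = backoff_limit


spec = DemoSpec()
target = DemoTarget(
    template="V1PodTemplateSpec(...)",  # constructed separately, like pod_template above
    **spec.model_dump(exclude_none=True, exclude={"template"}),
)
print(target.backoff_limit)  # 6
```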
+     def _display_scheduled_job_info(self, cronjob):
+         """Display information about the scheduled CronJob to the console"""
+
+         console.print("✓ CronJob scheduled successfully")
+         console.print(f"  Name: {cronjob.metadata.name}")
+         console.print(f"  Namespace: {self.namespace}")
+         console.print(f"  Schedule: {cronjob.spec.schedule}")
+         console.print("")
+         console.print("  Job Spec:")
+         console.print(f"    - Image: {self.job_spec.template.spec.container.image}")
+         console.print(
+             f"    - Resources: {self.job_spec.template.spec.container.resources.model_dump()}"
+         )
+
+     def _create_volumes(self): ...
+
+     def _use_volumes(self):
+         match self._context.run_log_store.service_name:
+             case "file-system":
+                 self._context.run_log_store.log_folder = self._container_log_location
+             case "chunked-fs":
+                 self._context.run_log_store.log_folder = self._container_log_location
+
+         match self._context.catalog.service_name:
+             case "file-system":
+                 self._context.catalog.catalog_location = (
+                     self._container_catalog_location
+                 )
+
+
+ class MiniK8sJobExecutor(GenericK8sJobExecutor):
+     service_name: str = "k8s-job"
+     config_path: Optional[str] = None
+     job_spec: Spec
+     mock: bool = False
+
+     # The location the mount of .run_log_store is mounted to in minikube
+     # ensure that minikube mount $HOME/workspace/runnable/.run_log_store:/volume/run_logs is executed first
+     # $HOME/workspace/runnable/.catalog:/volume/catalog
+     # Ensure that the docker build is done with eval $(minikube docker-env)
+     mini_k8s_run_log_location: str = Field(default="/volume/run_logs/")
+     mini_k8s_catalog_location: str = Field(default="/volume/catalog/")
+
+     _is_local: bool = PrivateAttr(default=False)
+
+     model_config = ConfigDict(
+         alias_generator=to_camel,
+         populate_by_name=True,
+         from_attributes=True,
+     )
+
+     def execute_job(self, job: BaseTaskType, catalog_settings: Optional[List[str]] = None):
+         self._use_volumes()
+         super().execute_job(job, catalog_settings=catalog_settings)
+
+     def _create_volumes(self):
+         match self._context.run_log_store.service_name:
+             case "file-system":
+                 self._volumes.append(
+                     # When you do: # minikube mount $HOME:/tmp/run_logs
+                     # This .run_log_store is mounted to /tmp/run_logs of minikube
+                     # You then are creating a volume that is mounted to /tmp/run_logs in the container
+                     # You are then referring to it.
+                     # https://stackoverflow.com/questions/57411456/minikube-mounted-host-folders-are-not-working
+                     HostPathVolume(
+                         name="run-logs",
+                         host_path=HostPath(path=self.mini_k8s_run_log_location),
+                     )
+                 )
+                 self._volume_mounts.append(
+                     VolumeMount(
+                         name="run-logs", mount_path=self._container_log_location
+                     )
+                 )
+             case "chunked-fs":
+                 self._volumes.append(
+                     HostPathVolume(
+                         name="run-logs",
+                         host_path=HostPath(path=self.mini_k8s_run_log_location),
+                     )
+                 )
+                 self._volume_mounts.append(
+                     VolumeMount(
+                         name="run-logs", mount_path=self._container_log_location
+                     )
+                 )
+
+         match self._context.catalog.service_name:
+             case "file-system":
+                 self._volumes.append(
+                     HostPathVolume(
+                         name="catalog",
+                         host_path=HostPath(path=self.mini_k8s_catalog_location),
+                     )
+                 )
+                 self._volume_mounts.append(
+                     VolumeMount(
+                         name="catalog", mount_path=self._container_catalog_location
+                     )
+                 )
+
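The minikube mounting described in the comments above relies on the volume models dumping cleanly for two audiences: the plain dump matches the snake_case kwargs of the kubernetes Python client (which is how `submit_k8s_job` consumes them via `V1Volume(**vol.model_dump())`), while the alias dump matches the camelCase of a manifest. A sketch using the models from this file:

```python
vol = HostPathVolume(
    name="run-logs",
    host_path=HostPath(path="/volume/run_logs/"),
)
print(vol.model_dump())
# -> {'name': 'run-logs', 'host_path': {'path': '/volume/run_logs/'}}
print(vol.model_dump(by_alias=True))
# -> {'name': 'run-logs', 'hostPath': {'path': '/volume/run_logs/'}}
```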
+
+ class K8sJobExecutor(GenericK8sJobExecutor):
+     service_name: str = "k8s-job"
+     config_path: Optional[str] = None
+     job_spec: Spec
+     mock: bool = False
+     pvc_claim_name: str
+
+     # change the spec to always pull the image
+     def model_post_init(self, __context):
+         self.job_spec.template.spec.container.image_pull_policy = ImagePullPolicy.ALWAYS
+
+     _is_local: bool = PrivateAttr(default=False)
+
+     model_config = ConfigDict(
+         alias_generator=to_camel,
+         populate_by_name=True,
+         from_attributes=True,
+     )
+
+     def execute_job(self, job: BaseTaskType, catalog_settings: Optional[List[str]] = None):
+         self._use_volumes()
+         self._set_up_run_log()
+
+         job_log = self._context.run_log_store.create_job_log()
+         self._context.run_log_store.add_job_log(
+             run_id=self._context.run_id, job_log=job_log
+         )
+
+         super().execute_job(job, catalog_settings=catalog_settings)
+
+     def _create_volumes(self):
+         self._volumes.append(
+             PVCVolume(
+                 name=self.pvc_claim_name,
+                 persistent_volume_claim=PVCClaim(claimName=self.pvc_claim_name),
+             )
+         )
+         match self._context.run_log_store.service_name:
+             case "file-system":
+                 self._volume_mounts.append(
+                     VolumeMount(
+                         name=self.pvc_claim_name,
+                         mount_path=self._container_log_location,
+                     )
+                 )
+             case "chunked-fs":
+                 self._volume_mounts.append(
+                     VolumeMount(
+                         name=self.pvc_claim_name,
+                         mount_path=self._container_log_location,
+                     )
+                 )
+
+         match self._context.catalog.service_name:
+             case "file-system":
+                 self._volume_mounts.append(
+                     VolumeMount(
+                         name=self.pvc_claim_name,
+                         mount_path=self._container_catalog_location,
+                     )
+                 )
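`K8sJobExecutor.model_post_init` above mutates the already-validated spec to force `ImagePullPolicy.ALWAYS`. A minimal standalone sketch of that pydantic v2 hook (`DemoExecutor` is a stand-in, not the package class):

```python
from pydantic import BaseModel


class DemoExecutor(BaseModel):
    image_pull_policy: str = "Never"

    def model_post_init(self, __context):
        # runs after validation, mirroring the override above
        self.image_pull_policy = "Always"


print(DemoExecutor().image_pull_policy)  # Always
```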
extensions/job_executor/k8s_job_spec.yaml
@@ -0,0 +1,37 @@
+ # Follow this as a template https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1JobSpec.md
+
+ jobSpec:
+   activeDeadlineSeconds: Optional[int]
+   selector: Optional[LabelSelector]
+   ttlSecondsAfterFinished: Optional[int]
+   template:
+     metadata:
+       annotations: Optional[Dict[str, str]]
+       generate_name: Optional[str] = run_id
+       namespace: Optional[str] = "default"
+     spec:
+       activeDeadlineSeconds: Optional[int]
+       nodeSelector: Optional[Dict[str, str]]
+       tolerations: Optional[List[Toleration]]
+       volumes: Optional[List[str]]
+       serviceAccountName: Optional[str]
+       restartPolicy: Optional[str] = Choose from [Always, OnFailure, Never]
+       container:
+         command: List[str]
+         env:
+           - name: str
+             value: str
+         image: str
+         imagePullPolicy: Optional[str] = choose from [Always, Never, IfNotPresent]
+         resources:
+           limits:
+             cpu: str
+             memory: str
+             gpu: str
+           requests:
+             cpu: str
+             memory: str
+             gpu: str
+         volumeMounts:
+           - name: str
+             mountPath: str
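A filled-in version of this template validates directly against the `Spec` model from extensions/job_executor/k8s.py. A sketch, assuming PyYAML is installed and the package is importable (the image and resource values are made up):

```python
import yaml

from extensions.job_executor.k8s import Spec

manifest = """
activeDeadlineSeconds: 3600
backoffLimit: 3
template:
  spec:
    restartPolicy: Never
    container:
      image: my-job:latest
      imagePullPolicy: IfNotPresent
      resources:
        limits:
          cpu: 500m
          memory: 2Gi
"""

spec = Spec.model_validate(yaml.safe_load(manifest))
print(spec.template.spec.container.resources.limits.memory)  # 2Gi
```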
extensions/job_executor/local.py
@@ -0,0 +1,35 @@
+ import logging
+ from typing import List, Optional
+
+ from extensions.job_executor import GenericJobExecutor
+ from runnable import defaults
+ from runnable.tasks import BaseTaskType
+
+ logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+ class LocalJobExecutor(GenericJobExecutor):
+     """
+     The LocalJobExecutor is a job executor that runs the job locally.
+
+     Configuration:
+
+     job-executor:
+       type: local
+
+     """
+
+     service_name: str = "local"
+
+     def submit_job(self, job: BaseTaskType, catalog_settings: Optional[List[str]] = None):
+         """
+         This method gets invoked by the CLI.
+         """
+         self._set_up_run_log()
+
+         job_log = self._context.run_log_store.create_job_log()
+         self._context.run_log_store.add_job_log(
+             run_id=self._context.run_id, job_log=job_log
+         )
+
+         self.execute_job(job, catalog_settings=catalog_settings)