metaflow-2.12.8-py2.py3-none-any.whl → metaflow-2.12.10-py2.py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- metaflow/__init__.py +2 -0
- metaflow/cli.py +12 -4
- metaflow/extension_support/plugins.py +1 -0
- metaflow/flowspec.py +8 -1
- metaflow/lint.py +13 -0
- metaflow/metaflow_current.py +0 -8
- metaflow/plugins/__init__.py +12 -0
- metaflow/plugins/argo/argo_workflows.py +616 -46
- metaflow/plugins/argo/argo_workflows_cli.py +70 -3
- metaflow/plugins/argo/argo_workflows_decorator.py +38 -7
- metaflow/plugins/argo/argo_workflows_deployer.py +290 -0
- metaflow/plugins/argo/daemon.py +59 -0
- metaflow/plugins/argo/jobset_input_paths.py +16 -0
- metaflow/plugins/aws/batch/batch_decorator.py +16 -13
- metaflow/plugins/aws/step_functions/step_functions_cli.py +45 -3
- metaflow/plugins/aws/step_functions/step_functions_deployer.py +251 -0
- metaflow/plugins/cards/card_cli.py +1 -1
- metaflow/plugins/kubernetes/kubernetes.py +279 -52
- metaflow/plugins/kubernetes/kubernetes_cli.py +26 -8
- metaflow/plugins/kubernetes/kubernetes_client.py +0 -1
- metaflow/plugins/kubernetes/kubernetes_decorator.py +56 -44
- metaflow/plugins/kubernetes/kubernetes_job.py +7 -6
- metaflow/plugins/kubernetes/kubernetes_jobsets.py +511 -272
- metaflow/plugins/parallel_decorator.py +108 -8
- metaflow/plugins/secrets/secrets_decorator.py +12 -3
- metaflow/plugins/test_unbounded_foreach_decorator.py +39 -4
- metaflow/runner/deployer.py +386 -0
- metaflow/runner/metaflow_runner.py +1 -20
- metaflow/runner/nbdeploy.py +130 -0
- metaflow/runner/nbrun.py +4 -28
- metaflow/runner/utils.py +49 -0
- metaflow/runtime.py +246 -134
- metaflow/version.py +1 -1
- {metaflow-2.12.8.dist-info → metaflow-2.12.10.dist-info}/METADATA +2 -2
- {metaflow-2.12.8.dist-info → metaflow-2.12.10.dist-info}/RECORD +39 -32
- {metaflow-2.12.8.dist-info → metaflow-2.12.10.dist-info}/WHEEL +1 -1
- {metaflow-2.12.8.dist-info → metaflow-2.12.10.dist-info}/LICENSE +0 -0
- {metaflow-2.12.8.dist-info → metaflow-2.12.10.dist-info}/entry_points.txt +0 -0
- {metaflow-2.12.8.dist-info → metaflow-2.12.10.dist-info}/top_level.txt +0 -0
--- a/metaflow/plugins/argo/argo_workflows_cli.py
+++ b/metaflow/plugins/argo/argo_workflows_cli.py
@@ -5,7 +5,9 @@ import re
 import sys
 from hashlib import sha1
 
-from metaflow import JSONType, current, decorators, parameters
+from metaflow import Run, JSONType, current, decorators, parameters
+from metaflow.client.core import get_metadata
+from metaflow.exception import MetaflowNotFound
 from metaflow._vendor import click
 from metaflow.exception import MetaflowException, MetaflowInternalError
 from metaflow.metaflow_config import (
@@ -165,6 +167,20 @@ def argo_workflows(obj, name=None):
     default="",
     help="PagerDuty Events API V2 Integration key for workflow success/failure notifications.",
 )
+@click.option(
+    "--enable-heartbeat-daemon/--no-enable-heartbeat-daemon",
+    default=False,
+    show_default=True,
+    help="Use a daemon container to broadcast heartbeats.",
+)
+@click.option(
+    "--deployer-attribute-file",
+    default=None,
+    show_default=True,
+    type=str,
+    help="Write the workflow name to the file specified. Used internally for Metaflow's Deployer API.",
+    hidden=True,
+)
 @click.pass_obj
 def create(
     obj,
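For orientation (not part of the diff itself): both new options attach to the existing `argo-workflows create` command, so opting into the heartbeat daemon for a hypothetical flow script `flow.py` would look roughly like:

```
python flow.py argo-workflows create --enable-heartbeat-daemon
```

`--deployer-attribute-file` is hidden from `--help`; it appears to exist so that Metaflow's Deployer API can pass a temporary file path and read back the deployed workflow's name, flow name, and metadata (see the JSON written in the next hunk).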
@@ -182,9 +198,22 @@ def create(
     notify_on_success=False,
     notify_slack_webhook_url=None,
     notify_pager_duty_integration_key=None,
+    enable_heartbeat_daemon=True,
+    deployer_attribute_file=None,
 ):
     validate_tags(tags)
 
+    if deployer_attribute_file:
+        with open(deployer_attribute_file, "w") as f:
+            json.dump(
+                {
+                    "name": obj.workflow_name,
+                    "flow_name": obj.flow.name,
+                    "metadata": get_metadata(),
+                },
+                f,
+            )
+
     obj.echo("Deploying *%s* to Argo Workflows..." % obj.workflow_name, bold=True)
 
     if SERVICE_VERSION_CHECK:
@@ -218,6 +247,7 @@ def create(
         notify_on_success,
         notify_slack_webhook_url,
         notify_pager_duty_integration_key,
+        enable_heartbeat_daemon,
     )
 
     if only_json:
@@ -390,6 +420,7 @@ def make_flow(
     notify_on_success,
     notify_slack_webhook_url,
     notify_pager_duty_integration_key,
+    enable_heartbeat_daemon,
 ):
     # TODO: Make this check less specific to Amazon S3 as we introduce
     # support for more cloud object stores.
@@ -452,6 +483,7 @@ def make_flow(
         notify_on_success=notify_on_success,
         notify_slack_webhook_url=notify_slack_webhook_url,
         notify_pager_duty_integration_key=notify_pager_duty_integration_key,
+        enable_heartbeat_daemon=enable_heartbeat_daemon,
     )
 
 
@@ -563,8 +595,16 @@ def resolve_token(
     type=str,
     help="Write the ID of this run to the file specified.",
 )
+@click.option(
+    "--deployer-attribute-file",
+    default=None,
+    show_default=True,
+    type=str,
+    help="Write the metadata and pathspec of this run to the file specified.\nUsed internally for Metaflow's Deployer API.",
+    hidden=True,
+)
 @click.pass_obj
-def trigger(obj, run_id_file=None, **kwargs):
+def trigger(obj, run_id_file=None, deployer_attribute_file=None, **kwargs):
     def _convert_value(param):
         # Swap `-` with `_` in parameter name to match click's behavior
         val = kwargs.get(param.name.replace("-", "_").lower())
@@ -587,6 +627,17 @@ def trigger(obj, run_id_file=None, **kwargs):
         with open(run_id_file, "w") as f:
             f.write(str(run_id))
 
+    if deployer_attribute_file:
+        with open(deployer_attribute_file, "w") as f:
+            json.dump(
+                {
+                    "name": obj.workflow_name,
+                    "metadata": get_metadata(),
+                    "pathspec": "/".join((obj.flow.name, run_id)),
+                },
+                f,
+            )
+
     obj.echo(
         "Workflow *{name}* triggered on Argo Workflows "
         "(run-id *{run_id}*).".format(name=obj.workflow_name, run_id=run_id),
@@ -786,6 +837,20 @@ def validate_token(name, token_prefix, authorize, instructions_fn=None):
     return True
 
 
+def get_run_object(pathspec: str):
+    try:
+        return Run(pathspec, _namespace_check=False)
+    except MetaflowNotFound:
+        return None
+
+
+def get_status_considering_run_object(status, run_obj):
+    remapped_status = remap_status(status)
+    if remapped_status == "Running" and run_obj is None:
+        return "Pending"
+    return remapped_status
+
+
 @argo_workflows.command(help="Fetch flow execution status on Argo Workflows.")
 @click.argument("run-id", required=True, type=str)
 @click.pass_obj
@@ -803,8 +868,10 @@ def status(obj, run_id):
     # Trim prefix from run_id
     name = run_id[5:]
     status = ArgoWorkflows.get_workflow_status(obj.flow.name, name)
+    run_obj = get_run_object("/".join((obj.flow.name, run_id)))
     if status is not None:
-        obj.echo_always(status)
+        status = get_status_considering_run_object(status, run_obj)
+        obj.echo_always(status)
 
 
 @argo_workflows.command(help="Terminate flow execution on Argo Workflows.")
--- a/metaflow/plugins/argo/argo_workflows_decorator.py
+++ b/metaflow/plugins/argo/argo_workflows_decorator.py
@@ -2,12 +2,14 @@ import json
 import os
 import time
 
+
 from metaflow import current
 from metaflow.decorators import StepDecorator
 from metaflow.events import Trigger
 from metaflow.metadata import MetaDatum
 from metaflow.metaflow_config import ARGO_EVENTS_WEBHOOK_URL
-
+from metaflow.graph import DAGNode, FlowGraph
+from metaflow.flowspec import FlowSpec
 from .argo_events import ArgoEvent
 
 
@@ -83,7 +85,13 @@ class ArgoWorkflowsInternalDecorator(StepDecorator):
         metadata.register_metadata(run_id, step_name, task_id, entries)
 
     def task_finished(
-        self, step_name, flow, graph, is_task_ok, retry_count, max_user_code_retries
+        self,
+        step_name,
+        flow: FlowSpec,
+        graph: FlowGraph,
+        is_task_ok,
+        retry_count,
+        max_user_code_retries,
     ):
         if not is_task_ok:
             # The task finished with an exception - execution won't
@@ -100,16 +108,39 @@ class ArgoWorkflowsInternalDecorator(StepDecorator):
         # we run pods with a security context. We work around this constraint by
         # mounting an emptyDir volume.
         if graph[step_name].type == "foreach":
+            # A DAGNode is considered a `parallel_step` if it is annotated by the @parallel decorator.
+            # A DAGNode is considered a `parallel_foreach` if it contains a `num_parallel` kwarg provided to the
+            # `next` method of that DAGNode.
+            # At this moment in the code we care if a node is marked as a `parallel_foreach` so that we can pass down the
+            # value of `num_parallel` to the subsequent steps.
+            # For @parallel, the implmentation uses 1 jobset object. That one jobset
+            # object internally creates 'num_parallel' jobs. So, we set foreach_num_splits
+            # to 1 here for @parallel. The parallelism of jobset is handled in
+            # kubernetes_job.py.
+            if graph[step_name].parallel_foreach:
+                with open("/mnt/out/num_parallel", "w") as f:
+                    json.dump(flow._parallel_ubf_iter.num_parallel, f)
+                flow._foreach_num_splits = 1
+                with open("/mnt/out/task_id_entropy", "w") as file:
+                    import uuid
+
+                    file.write(uuid.uuid4().hex[:6])
+
             with open("/mnt/out/splits", "w") as file:
                 json.dump(list(range(flow._foreach_num_splits)), file)
             with open("/mnt/out/split_cardinality", "w") as file:
                 json.dump(flow._foreach_num_splits, file)
 
-        # Unfortunately, we can't always use pod names as task-ids since the pod names
-        # are not static across retries. We write the task-id to a file that is read
-        # by the next task here.
-        with open("/mnt/out/task_id", "w") as file:
-            file.write(self.task_id)
+        # for steps that have a `@parallel` decorator set to them, we will be relying on Jobsets
+        # to run the task. In this case, we cannot set anything in the
+        # `/mnt/out` directory, since such form of output mounts are not available to jobset execution as
+        # argo just treats it like A K8s resource that it throws in the cluster.
+        if not graph[step_name].parallel_step:
+            # Unfortunately, we can't always use pod names as task-ids since the pod names
+            # are not static across retries. We write the task-id to a file that is read
+            # by the next task here.
+            with open("/mnt/out/task_id", "w") as file:
+                file.write(self.task_id)
 
         # Emit Argo Events given that the flow has succeeded. Given that we only
         # emit events when the task succeeds, we can piggy back on this decorator
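The comments above rely on Metaflow's `@parallel` / `num_parallel` semantics. For reference, a minimal sketch of a flow that produces a `parallel_foreach` node (illustrative only, not part of this diff; it assumes the standard `@parallel` decorator and the `current.parallel` interface):

```python
from metaflow import FlowSpec, current, parallel, step


class ParallelDemoFlow(FlowSpec):
    @step
    def start(self):
        # Passing num_parallel to self.next marks `train` as a parallel_foreach node.
        self.next(self.train, num_parallel=4)

    @parallel
    @step
    def train(self):
        # Each worker in the gang sees its own index via current.parallel.
        print(current.parallel.node_index, "of", current.parallel.num_nodes)
        self.next(self.join)

    @step
    def join(self, inputs):
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    ParallelDemoFlow()
```

On Argo Workflows, such a node is executed as a single Kubernetes JobSet that fans out `num_parallel` pods, which is why `foreach_num_splits` is forced to 1 above.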
--- /dev/null
+++ b/metaflow/plugins/argo/argo_workflows_deployer.py
@@ -0,0 +1,290 @@
+import sys
+import tempfile
+from typing import Optional, ClassVar
+
+from metaflow.plugins.argo.argo_workflows import ArgoWorkflows
+from metaflow.runner.deployer import (
+    DeployerImpl,
+    DeployedFlow,
+    TriggeredRun,
+    get_lower_level_group,
+    handle_timeout,
+)
+
+
+def suspend(instance: TriggeredRun, **kwargs):
+    """
+    Suspend the running workflow.
+
+    Parameters
+    ----------
+    **kwargs : Any
+        Additional arguments to pass to the suspend command.
+
+    Returns
+    -------
+    bool
+        True if the command was successful, False otherwise.
+    """
+    _, run_id = instance.pathspec.split("/")
+
+    # every subclass needs to have `self.deployer_kwargs`
+    command = get_lower_level_group(
+        instance.deployer.api,
+        instance.deployer.top_level_kwargs,
+        instance.deployer.TYPE,
+        instance.deployer.deployer_kwargs,
+    ).suspend(run_id=run_id, **kwargs)
+
+    pid = instance.deployer.spm.run_command(
+        [sys.executable, *command],
+        env=instance.deployer.env_vars,
+        cwd=instance.deployer.cwd,
+        show_output=instance.deployer.show_output,
+    )
+
+    command_obj = instance.deployer.spm.get(pid)
+    return command_obj.process.returncode == 0
+
+
+def unsuspend(instance: TriggeredRun, **kwargs):
+    """
+    Unsuspend the suspended workflow.
+
+    Parameters
+    ----------
+    **kwargs : Any
+        Additional arguments to pass to the unsuspend command.
+
+    Returns
+    -------
+    bool
+        True if the command was successful, False otherwise.
+    """
+    _, run_id = instance.pathspec.split("/")
+
+    # every subclass needs to have `self.deployer_kwargs`
+    command = get_lower_level_group(
+        instance.deployer.api,
+        instance.deployer.top_level_kwargs,
+        instance.deployer.TYPE,
+        instance.deployer.deployer_kwargs,
+    ).unsuspend(run_id=run_id, **kwargs)
+
+    pid = instance.deployer.spm.run_command(
+        [sys.executable, *command],
+        env=instance.deployer.env_vars,
+        cwd=instance.deployer.cwd,
+        show_output=instance.deployer.show_output,
+    )
+
+    command_obj = instance.deployer.spm.get(pid)
+    return command_obj.process.returncode == 0
+
+
+def terminate(instance: TriggeredRun, **kwargs):
+    """
+    Terminate the running workflow.
+
+    Parameters
+    ----------
+    **kwargs : Any
+        Additional arguments to pass to the terminate command.
+
+    Returns
+    -------
+    bool
+        True if the command was successful, False otherwise.
+    """
+    _, run_id = instance.pathspec.split("/")
+
+    # every subclass needs to have `self.deployer_kwargs`
+    command = get_lower_level_group(
+        instance.deployer.api,
+        instance.deployer.top_level_kwargs,
+        instance.deployer.TYPE,
+        instance.deployer.deployer_kwargs,
+    ).terminate(run_id=run_id, **kwargs)
+
+    pid = instance.deployer.spm.run_command(
+        [sys.executable, *command],
+        env=instance.deployer.env_vars,
+        cwd=instance.deployer.cwd,
+        show_output=instance.deployer.show_output,
+    )
+
+    command_obj = instance.deployer.spm.get(pid)
+    return command_obj.process.returncode == 0
+
+
+def status(instance: TriggeredRun):
+    """
+    Get the status of the triggered run.
+
+    Returns
+    -------
+    str, optional
+        The status of the workflow considering the run object, or None if the status could not be retrieved.
+    """
+    from metaflow.plugins.argo.argo_workflows_cli import (
+        get_status_considering_run_object,
+    )
+
+    flow_name, run_id = instance.pathspec.split("/")
+    name = run_id[5:]
+    status = ArgoWorkflows.get_workflow_status(flow_name, name)
+    if status is not None:
+        return get_status_considering_run_object(status, instance.run)
+    return None
+
+
+def production_token(instance: DeployedFlow):
+    """
+    Get the production token for the deployed flow.
+
+    Returns
+    -------
+    str, optional
+        The production token, None if it cannot be retrieved.
+    """
+    try:
+        _, production_token = ArgoWorkflows.get_existing_deployment(
+            instance.deployer.name
+        )
+        return production_token
+    except TypeError:
+        return None
+
+
+def delete(instance: DeployedFlow, **kwargs):
+    """
+    Delete the deployed flow.
+
+    Parameters
+    ----------
+    **kwargs : Any
+        Additional arguments to pass to the delete command.
+
+    Returns
+    -------
+    bool
+        True if the command was successful, False otherwise.
+    """
+    command = get_lower_level_group(
+        instance.deployer.api,
+        instance.deployer.top_level_kwargs,
+        instance.deployer.TYPE,
+        instance.deployer.deployer_kwargs,
+    ).delete(**kwargs)
+
+    pid = instance.deployer.spm.run_command(
+        [sys.executable, *command],
+        env=instance.deployer.env_vars,
+        cwd=instance.deployer.cwd,
+        show_output=instance.deployer.show_output,
+    )
+
+    command_obj = instance.deployer.spm.get(pid)
+    return command_obj.process.returncode == 0
+
+
+def trigger(instance: DeployedFlow, **kwargs):
+    """
+    Trigger a new run for the deployed flow.
+
+    Parameters
+    ----------
+    **kwargs : Any
+        Additional arguments to pass to the trigger command, `Parameters` in particular
+
+    Returns
+    -------
+    ArgoWorkflowsTriggeredRun
+        The triggered run instance.
+
+    Raises
+    ------
+    Exception
+        If there is an error during the trigger process.
+    """
+    with tempfile.TemporaryDirectory() as temp_dir:
+        tfp_runner_attribute = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)
+
+        # every subclass needs to have `self.deployer_kwargs`
+        command = get_lower_level_group(
+            instance.deployer.api,
+            instance.deployer.top_level_kwargs,
+            instance.deployer.TYPE,
+            instance.deployer.deployer_kwargs,
+        ).trigger(deployer_attribute_file=tfp_runner_attribute.name, **kwargs)
+
+        pid = instance.deployer.spm.run_command(
+            [sys.executable, *command],
+            env=instance.deployer.env_vars,
+            cwd=instance.deployer.cwd,
+            show_output=instance.deployer.show_output,
+        )
+
+        command_obj = instance.deployer.spm.get(pid)
+        content = handle_timeout(tfp_runner_attribute, command_obj)
+
+        if command_obj.process.returncode == 0:
+            triggered_run = TriggeredRun(deployer=instance.deployer, content=content)
+            triggered_run._enrich_object(
+                {
+                    "status": property(status),
+                    "terminate": terminate,
+                    "suspend": suspend,
+                    "unsuspend": unsuspend,
+                }
+            )
+            return triggered_run
+
+    raise Exception(
+        "Error triggering %s on %s for %s"
+        % (instance.deployer.name, instance.deployer.TYPE, instance.deployer.flow_file)
+    )
+
+
+class ArgoWorkflowsDeployer(DeployerImpl):
+    """
+    Deployer implementation for Argo Workflows.
+
+    Attributes
+    ----------
+    TYPE : ClassVar[Optional[str]]
+        The type of the deployer, which is "argo-workflows".
+    """
+
+    TYPE: ClassVar[Optional[str]] = "argo-workflows"
+
+    def __init__(self, deployer_kwargs, **kwargs):
+        """
+        Initialize the ArgoWorkflowsDeployer.
+
+        Parameters
+        ----------
+        deployer_kwargs : dict
+            The deployer-specific keyword arguments.
+        **kwargs : Any
+            Additional arguments to pass to the superclass constructor.
+        """
+        self.deployer_kwargs = deployer_kwargs
+        super().__init__(**kwargs)
+
+    def _enrich_deployed_flow(self, deployed_flow: DeployedFlow):
+        """
+        Enrich the DeployedFlow object with additional properties and methods.
+
+        Parameters
+        ----------
+        deployed_flow : DeployedFlow
+            The deployed flow object to enrich.
+        """
+        deployed_flow._enrich_object(
+            {
+                "production_token": property(production_token),
+                "trigger": trigger,
+                "delete": delete,
+            }
+        )
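Together with the new `metaflow/runner/deployer.py`, the module above backs the programmatic Deployer API. A rough sketch of how it is meant to be driven (assuming the top-level `Deployer` export added in `metaflow/__init__.py`; the flow file and parameter names are hypothetical):

```python
from metaflow import Deployer

# Deploy flow.py to Argo Workflows; the resulting DeployedFlow is enriched
# with production_token / trigger / delete by ArgoWorkflowsDeployer above.
deployed_flow = Deployer("flow.py").argo_workflows().create()

# Trigger a run; kwargs are forwarded as flow parameters. The TriggeredRun
# comes back enriched with status / terminate / suspend / unsuspend.
triggered_run = deployed_flow.trigger(alpha=0.5)

print(triggered_run.status)  # may report "Pending" until the run is registered
triggered_run.terminate()
```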
--- /dev/null
+++ b/metaflow/plugins/argo/daemon.py
@@ -0,0 +1,59 @@
+from collections import namedtuple
+from time import sleep
+from metaflow.metaflow_config import DEFAULT_METADATA
+from metaflow.metaflow_environment import MetaflowEnvironment
+from metaflow.plugins import METADATA_PROVIDERS
+from metaflow._vendor import click
+
+
+class CliState:
+    pass
+
+
+@click.group()
+@click.option("--flow_name", required=True)
+@click.option("--run_id", required=True)
+@click.option(
+    "--tag",
+    "tags",
+    multiple=True,
+    default=None,
+    help="Annotate all objects produced by Argo Workflows runs "
+    "with the given tag. You can specify this option multiple "
+    "times to attach multiple tags.",
+)
+@click.pass_context
+def cli(ctx, flow_name, run_id, tags=None):
+    ctx.obj = CliState()
+    ctx.obj.flow_name = flow_name
+    ctx.obj.run_id = run_id
+    ctx.obj.tags = tags
+    # Use a dummy flow to initialize the environment and metadata service,
+    # as we only need a name for the flow object.
+    flow = namedtuple("DummyFlow", "name")
+    dummyflow = flow(flow_name)
+
+    # Initialize a proper metadata service instance
+    environment = MetaflowEnvironment(dummyflow)
+
+    ctx.obj.metadata = [m for m in METADATA_PROVIDERS if m.TYPE == DEFAULT_METADATA][0](
+        environment, dummyflow, None, None
+    )
+
+
+@cli.command(help="start heartbeat process for a run")
+@click.pass_obj
+def heartbeat(obj):
+    # Try to register a run in case the start task has not taken care of it yet.
+    obj.metadata.register_run_id(obj.run_id, obj.tags)
+    # Start run heartbeat
+    obj.metadata.start_run_heartbeat(obj.flow_name, obj.run_id)
+    # Keepalive loop
+    while True:
+        # Do not pollute daemon logs with anything unnecessary,
+        # as they might be extremely long running.
+        sleep(10)
+
+
+if __name__ == "__main__":
+    cli()
--- /dev/null
+++ b/metaflow/plugins/argo/jobset_input_paths.py
@@ -0,0 +1,16 @@
+import sys
+from hashlib import md5
+
+
+def generate_input_paths(run_id, step_name, task_id_entropy, num_parallel):
+    # => run_id/step/:foo,bar
+    control_id = "control-{}-0".format(task_id_entropy)
+    worker_ids = [
+        "worker-{}-{}".format(task_id_entropy, i) for i in range(int(num_parallel) - 1)
+    ]
+    ids = [control_id] + worker_ids
+    return "{}/{}/:{}".format(run_id, step_name, ",".join(ids))
+
+
+if __name__ == "__main__":
+    print(generate_input_paths(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]))
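To make the `# => run_id/step/:foo,bar` comment concrete, a worked example of the helper's output (argument values are made up): one control task plus `num_parallel - 1` workers are joined into a single Metaflow input-paths spec.

```python
>>> generate_input_paths("argo-demo-xyz", "start", "1a2b3c", 3)
'argo-demo-xyz/start/:control-1a2b3c-0,worker-1a2b3c-0,worker-1a2b3c-1'
```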
--- a/metaflow/plugins/aws/batch/batch_decorator.py
+++ b/metaflow/plugins/aws/batch/batch_decorator.py
@@ -261,8 +261,8 @@ class BatchDecorator(StepDecorator):
         # metadata. A rudimentary way to detect non-local execution is to
         # check for the existence of AWS_BATCH_JOB_ID environment variable.
 
+        meta = {}
         if "AWS_BATCH_JOB_ID" in os.environ:
-            meta = {}
             meta["aws-batch-job-id"] = os.environ["AWS_BATCH_JOB_ID"]
             meta["aws-batch-job-attempt"] = os.environ["AWS_BATCH_JOB_ATTEMPT"]
             meta["aws-batch-ce-name"] = os.environ["AWS_BATCH_CE_NAME"]
@@ -290,18 +290,6 @@
             instance_meta = get_ec2_instance_metadata()
             meta.update(instance_meta)
 
-            entries = [
-                MetaDatum(
-                    field=k,
-                    value=v,
-                    type=k,
-                    tags=["attempt_id:{0}".format(retry_count)],
-                )
-                for k, v in meta.items()
-            ]
-            # Register book-keeping metadata for debugging.
-            metadata.register_metadata(run_id, step_name, task_id, entries)
-
         self._save_logs_sidecar = Sidecar("save_logs_periodically")
         self._save_logs_sidecar.start()
 
@@ -322,6 +310,21 @@
 
         if num_parallel >= 1:
             _setup_multinode_environment()
+            # current.parallel.node_index will be correctly available over here.
+            meta.update({"parallel-node-index": current.parallel.node_index})
+
+        if len(meta) > 0:
+            entries = [
+                MetaDatum(
+                    field=k,
+                    value=v,
+                    type=k,
+                    tags=["attempt_id:{0}".format(retry_count)],
+                )
+                for k, v in meta.items()
+            ]
+            # Register book-keeping metadata for debugging.
+            metadata.register_metadata(run_id, step_name, task_id, entries)
 
     def task_finished(
         self, step_name, flow, graph, is_task_ok, retry_count, max_retries
|