metaflow-stubs 2.12.6__py2.py3-none-any.whl → 2.12.7__py2.py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- metaflow-stubs/__init__.pyi +606 -606
- metaflow-stubs/cards.pyi +5 -5
- metaflow-stubs/cli.pyi +2 -2
- metaflow-stubs/client/__init__.pyi +4 -4
- metaflow-stubs/client/core.pyi +7 -7
- metaflow-stubs/client/filecache.pyi +2 -2
- metaflow-stubs/clone_util.pyi +2 -2
- metaflow-stubs/events.pyi +3 -3
- metaflow-stubs/exception.pyi +2 -2
- metaflow-stubs/flowspec.pyi +4 -4
- metaflow-stubs/generated_for.txt +1 -1
- metaflow-stubs/includefile.pyi +3 -3
- metaflow-stubs/metadata/metadata.pyi +3 -3
- metaflow-stubs/metadata/util.pyi +2 -2
- metaflow-stubs/metaflow_config.pyi +2 -2
- metaflow-stubs/metaflow_current.pyi +17 -17
- metaflow-stubs/mflog/mflog.pyi +2 -2
- metaflow-stubs/multicore_utils.pyi +2 -2
- metaflow-stubs/parameters.pyi +3 -3
- metaflow-stubs/plugins/__init__.pyi +2 -2
- metaflow-stubs/plugins/airflow/__init__.pyi +2 -2
- metaflow-stubs/plugins/airflow/airflow.pyi +2 -2
- metaflow-stubs/plugins/airflow/airflow_cli.pyi +2 -2
- metaflow-stubs/plugins/airflow/airflow_decorator.pyi +2 -2
- metaflow-stubs/plugins/airflow/airflow_utils.pyi +2 -2
- metaflow-stubs/plugins/airflow/exception.pyi +2 -2
- metaflow-stubs/plugins/airflow/sensors/__init__.pyi +2 -2
- metaflow-stubs/plugins/airflow/sensors/base_sensor.pyi +2 -2
- metaflow-stubs/plugins/airflow/sensors/external_task_sensor.pyi +3 -3
- metaflow-stubs/plugins/airflow/sensors/s3_sensor.pyi +3 -3
- metaflow-stubs/plugins/argo/__init__.pyi +2 -2
- metaflow-stubs/plugins/argo/argo_client.pyi +2 -2
- metaflow-stubs/plugins/argo/argo_events.pyi +2 -2
- metaflow-stubs/plugins/argo/argo_workflows.pyi +4 -4
- metaflow-stubs/plugins/argo/argo_workflows_cli.pyi +3 -3
- metaflow-stubs/plugins/argo/argo_workflows_decorator.pyi +3 -3
- metaflow-stubs/plugins/aws/__init__.pyi +2 -2
- metaflow-stubs/plugins/aws/aws_client.pyi +2 -2
- metaflow-stubs/plugins/aws/aws_utils.pyi +2 -2
- metaflow-stubs/plugins/aws/batch/__init__.pyi +2 -2
- metaflow-stubs/plugins/aws/batch/batch.pyi +2 -2
- metaflow-stubs/plugins/aws/batch/batch_cli.pyi +2 -2
- metaflow-stubs/plugins/aws/batch/batch_client.pyi +2 -2
- metaflow-stubs/plugins/aws/batch/batch_decorator.pyi +2 -2
- metaflow-stubs/plugins/aws/secrets_manager/__init__.pyi +2 -2
- metaflow-stubs/plugins/aws/secrets_manager/aws_secrets_manager_secrets_provider.pyi +3 -3
- metaflow-stubs/plugins/aws/step_functions/__init__.pyi +2 -2
- metaflow-stubs/plugins/aws/step_functions/dynamo_db_client.pyi +2 -2
- metaflow-stubs/plugins/aws/step_functions/event_bridge_client.pyi +2 -2
- metaflow-stubs/plugins/aws/step_functions/production_token.pyi +2 -2
- metaflow-stubs/plugins/aws/step_functions/schedule_decorator.pyi +2 -2
- metaflow-stubs/plugins/aws/step_functions/step_functions.pyi +2 -2
- metaflow-stubs/plugins/aws/step_functions/step_functions_cli.pyi +3 -3
- metaflow-stubs/plugins/aws/step_functions/step_functions_client.pyi +2 -2
- metaflow-stubs/plugins/aws/step_functions/step_functions_decorator.pyi +2 -2
- metaflow-stubs/plugins/azure/__init__.pyi +2 -2
- metaflow-stubs/plugins/azure/azure_credential.pyi +2 -2
- metaflow-stubs/plugins/azure/azure_exceptions.pyi +2 -2
- metaflow-stubs/plugins/azure/azure_secret_manager_secrets_provider.pyi +3 -3
- metaflow-stubs/plugins/azure/azure_utils.pyi +2 -2
- metaflow-stubs/plugins/azure/blob_service_client_factory.pyi +2 -2
- metaflow-stubs/plugins/azure/includefile_support.pyi +2 -2
- metaflow-stubs/plugins/cards/__init__.pyi +2 -2
- metaflow-stubs/plugins/cards/card_cli.pyi +4 -4
- metaflow-stubs/plugins/cards/card_client.pyi +3 -3
- metaflow-stubs/plugins/cards/card_creator.pyi +2 -2
- metaflow-stubs/plugins/cards/card_datastore.pyi +2 -2
- metaflow-stubs/plugins/cards/card_decorator.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/__init__.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/basic.pyi +3 -3
- metaflow-stubs/plugins/cards/card_modules/card.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/chevron/__init__.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/chevron/main.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/chevron/metadata.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/chevron/renderer.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/chevron/tokenizer.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/components.pyi +4 -4
- metaflow-stubs/plugins/cards/card_modules/convert_to_native_type.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/renderer_tools.pyi +2 -2
- metaflow-stubs/plugins/cards/card_modules/test_cards.pyi +3 -3
- metaflow-stubs/plugins/cards/card_resolver.pyi +2 -2
- metaflow-stubs/plugins/cards/component_serializer.pyi +4 -4
- metaflow-stubs/plugins/cards/exception.pyi +2 -2
- metaflow-stubs/plugins/catch_decorator.pyi +2 -2
- metaflow-stubs/plugins/datatools/__init__.pyi +2 -2
- metaflow-stubs/plugins/datatools/local.pyi +2 -2
- metaflow-stubs/plugins/datatools/s3/__init__.pyi +4 -4
- metaflow-stubs/plugins/datatools/s3/s3.pyi +6 -6
- metaflow-stubs/plugins/datatools/s3/s3tail.pyi +2 -2
- metaflow-stubs/plugins/datatools/s3/s3util.pyi +2 -2
- metaflow-stubs/plugins/debug_logger.pyi +2 -2
- metaflow-stubs/plugins/debug_monitor.pyi +2 -2
- metaflow-stubs/plugins/environment_decorator.pyi +2 -2
- metaflow-stubs/plugins/events_decorator.pyi +2 -2
- metaflow-stubs/plugins/frameworks/__init__.pyi +2 -2
- metaflow-stubs/plugins/frameworks/pytorch.pyi +3 -3
- metaflow-stubs/plugins/gcp/__init__.pyi +2 -2
- metaflow-stubs/plugins/gcp/gcp_secret_manager_secrets_provider.pyi +3 -3
- metaflow-stubs/plugins/gcp/gs_exceptions.pyi +2 -2
- metaflow-stubs/plugins/gcp/gs_storage_client_factory.pyi +2 -2
- metaflow-stubs/plugins/gcp/gs_utils.pyi +2 -2
- metaflow-stubs/plugins/gcp/includefile_support.pyi +2 -2
- metaflow-stubs/plugins/kubernetes/__init__.pyi +2 -2
- metaflow-stubs/plugins/kubernetes/kubernetes.pyi +2 -2
- metaflow-stubs/plugins/kubernetes/kubernetes_cli.pyi +3 -3
- metaflow-stubs/plugins/kubernetes/kubernetes_client.pyi +2 -2
- metaflow-stubs/plugins/kubernetes/kubernetes_decorator.pyi +2 -2
- metaflow-stubs/plugins/kubernetes/kubernetes_job.pyi +2 -2
- metaflow-stubs/plugins/kubernetes/kubernetes_jobsets.pyi +2 -2
- metaflow-stubs/plugins/logs_cli.pyi +2 -2
- metaflow-stubs/plugins/package_cli.pyi +2 -2
- metaflow-stubs/plugins/parallel_decorator.pyi +2 -2
- metaflow-stubs/plugins/project_decorator.pyi +2 -2
- metaflow-stubs/plugins/pypi/__init__.pyi +2 -2
- metaflow-stubs/plugins/pypi/conda_decorator.pyi +2 -2
- metaflow-stubs/plugins/pypi/conda_environment.pyi +3 -3
- metaflow-stubs/plugins/pypi/pypi_decorator.pyi +2 -2
- metaflow-stubs/plugins/pypi/pypi_environment.pyi +2 -2
- metaflow-stubs/plugins/pypi/utils.pyi +2 -2
- metaflow-stubs/plugins/resources_decorator.pyi +2 -2
- metaflow-stubs/plugins/retry_decorator.pyi +2 -2
- metaflow-stubs/plugins/secrets/__init__.pyi +2 -2
- metaflow-stubs/plugins/secrets/inline_secrets_provider.pyi +3 -3
- metaflow-stubs/plugins/secrets/secrets_decorator.pyi +2 -2
- metaflow-stubs/plugins/storage_executor.pyi +2 -2
- metaflow-stubs/plugins/tag_cli.pyi +5 -5
- metaflow-stubs/plugins/test_unbounded_foreach_decorator.pyi +3 -3
- metaflow-stubs/plugins/timeout_decorator.pyi +2 -2
- metaflow-stubs/procpoll.pyi +2 -2
- metaflow-stubs/pylint_wrapper.pyi +2 -2
- metaflow-stubs/runner/__init__.pyi +2 -2
- metaflow-stubs/runner/metaflow_runner.pyi +5 -5
- metaflow-stubs/runner/nbrun.pyi +2 -2
- metaflow-stubs/runner/subprocess_manager.pyi +2 -2
- metaflow-stubs/system/__init__.pyi +112 -0
- metaflow-stubs/system/system_logger.pyi +51 -0
- metaflow-stubs/system/system_monitor.pyi +73 -0
- metaflow-stubs/tagging_util.pyi +2 -2
- metaflow-stubs/tuple_util.pyi +2 -2
- metaflow-stubs/version.pyi +2 -2
- {metaflow_stubs-2.12.6.dist-info → metaflow_stubs-2.12.7.dist-info}/METADATA +2 -2
- metaflow_stubs-2.12.7.dist-info/RECORD +145 -0
- metaflow_stubs-2.12.6.dist-info/RECORD +0 -142
- {metaflow_stubs-2.12.6.dist-info → metaflow_stubs-2.12.7.dist-info}/WHEEL +0 -0
- {metaflow_stubs-2.12.6.dist-info → metaflow_stubs-2.12.7.dist-info}/top_level.txt +0 -0
metaflow-stubs/__init__.pyi
CHANGED
@@ -1,25 +1,25 @@
 ##################################################################################
 # Auto-generated Metaflow stub file #
-# MF version: 2.12.6 #
-# Generated on 2024-07-… #
+# MF version: 2.12.7 #
+# Generated on 2024-07-03T19:20:47.569890 #
 ##################################################################################
 
 from __future__ import annotations
 
 import typing
 if typing.TYPE_CHECKING:
-    import metaflow.parameters
-    import metaflow.flowspec
-    import datetime
-    import metaflow.datastore.inputs
-    import metaflow.plugins.datatools.s3.s3
     import metaflow.client.core
-    import metaflow.…
-    import metaflow.…
+    import metaflow.datastore.inputs
+    import metaflow.parameters
+    import metaflow.metaflow_current
     import metaflow._vendor.click.types
     import typing
-    import metaflow.…
+    import metaflow.runner.metaflow_runner
     import io
+    import metaflow.plugins.datatools.s3.s3
+    import datetime
+    import metaflow.events
+    import metaflow.flowspec
 FlowSpecDerived = typing.TypeVar("FlowSpecDerived", bound="FlowSpec", contravariant=False, covariant=False)
 StepFlag = typing.NewType("StepFlag", bool)
 
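The hunk above only reorders the `TYPE_CHECKING` imports; the typing machinery itself is unchanged. For orientation, the `FlowSpecDerived` TypeVar and `StepFlag` NewType at the bottom of the hunk are what let the decorator stubs below preserve the concrete flow subclass through decoration. A minimal sketch of a flow these stubs would type-check (the flow and step names are hypothetical):

```python
from metaflow import FlowSpec, step

class HelloFlow(FlowSpec):  # hypothetical example flow
    @step
    def start(self):
        # Under the stubs, @step is typed over FlowSpecDerived, so `self`
        # is still seen as HelloFlow here rather than a bare FlowSpec.
        self.greeting = "hello"
        self.next(self.end)

    @step
    def end(self):
        print(self.greeting)

if __name__ == "__main__":
    HelloFlow()
```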
@@ -727,59 +727,6 @@ def step(f: typing.Union[typing.Callable[[FlowSpecDerived], None], typing.Callab
     """
     ...
 
-@typing.overload
-def retry(*, times: int = 3, minutes_between_retries: int = 2) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
-    """
-    Specifies the number of times the task corresponding
-    to a step needs to be retried.
-
-    This decorator is useful for handling transient errors, such as networking issues.
-    If your task contains operations that can't be retried safely, e.g. database updates,
-    it is advisable to annotate it with `@retry(times=0)`.
-
-    This can be used in conjunction with the `@catch` decorator. The `@catch`
-    decorator will execute a no-op task after all retries have been exhausted,
-    ensuring that the flow execution can continue.
-
-    Parameters
-    ----------
-    times : int, default 3
-        Number of times to retry this task.
-    minutes_between_retries : int, default 2
-        Number of minutes between retries.
-    """
-    ...
-
-@typing.overload
-def retry(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
-    ...
-
-@typing.overload
-def retry(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
-    ...
-
-def retry(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, times: int = 3, minutes_between_retries: int = 2):
-    """
-    Specifies the number of times the task corresponding
-    to a step needs to be retried.
-
-    This decorator is useful for handling transient errors, such as networking issues.
-    If your task contains operations that can't be retried safely, e.g. database updates,
-    it is advisable to annotate it with `@retry(times=0)`.
-
-    This can be used in conjunction with the `@catch` decorator. The `@catch`
-    decorator will execute a no-op task after all retries have been exhausted,
-    ensuring that the flow execution can continue.
-
-    Parameters
-    ----------
-    times : int, default 3
-        Number of times to retry this task.
-    minutes_between_retries : int, default 2
-        Number of minutes between retries.
-    """
-    ...
-
 @typing.overload
 def conda(*, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
     """
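The `@retry` overloads deleted above are not removed from the stub file: identical definitions reappear further down (at new line 1084 in the last hunk), so this hunk is purely a reordering by the stub generator. As the docstring describes, a usage sketch (the flow name and fetched URL are hypothetical):

```python
import urllib.request

from metaflow import FlowSpec, step, retry

class FetchFlow(FlowSpec):  # hypothetical example flow
    # Retry transient failures (here, a network fetch) up to 3 times,
    # waiting 2 minutes between attempts. Steps with side effects that
    # cannot be retried safely should use @retry(times=0) instead.
    @retry(times=3, minutes_between_retries=2)
    @step
    def start(self):
        self.page = urllib.request.urlopen("https://example.com").read()
        self.next(self.end)

    @step
    def end(self):
        print(len(self.page), "bytes fetched")

if __name__ == "__main__":
    FetchFlow()
```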
@@ -838,228 +785,200 @@ def conda(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], ty
     ...
 
 @typing.overload
-def …
+def card(*, type: str = "default", id: typing.Optional[str] = None, options: typing.Dict[str, typing.Any] = {}, timeout: int = 45) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
     """
-
-
+    Creates a human-readable report, a Metaflow Card, after this step completes.
+
+    Note that you may add multiple `@card` decorators in a step with different parameters.
 
     Parameters
     ----------
-
-
+    type : str, default 'default'
+        Card type.
+    id : str, optional, default None
+        If multiple cards are present, use this id to identify this card.
+    options : Dict[str, Any], default {}
+        Options passed to the card. The contents depend on the card type.
+    timeout : int, default 45
+        Interrupt reporting if it takes more than this many seconds.
+
+
     """
     ...
 
 @typing.overload
-def …
+def card(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
     ...
 
 @typing.overload
-def …
+def card(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
     ...
 
-def …
+def card(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, type: str = "default", id: typing.Optional[str] = None, options: typing.Dict[str, typing.Any] = {}, timeout: int = 45):
     """
-
-
+    Creates a human-readable report, a Metaflow Card, after this step completes.
+
+    Note that you may add multiple `@card` decorators in a step with different parameters.
 
     Parameters
     ----------
-
-
+    type : str, default 'default'
+        Card type.
+    id : str, optional, default None
+        If multiple cards are present, use this id to identify this card.
+    options : Dict[str, Any], default {}
+        Options passed to the card. The contents depend on the card type.
+    timeout : int, default 45
+        Interrupt reporting if it takes more than this many seconds.
+
+
     """
     ...
 
-
+@typing.overload
+def batch(*, cpu: int = 1, gpu: int = 0, memory: int = 4096, image: typing.Optional[str] = None, queue: str = "METAFLOW_BATCH_JOB_QUEUE", iam_role: str = "METAFLOW_ECS_S3_ACCESS_IAM_ROLE", execution_role: str = "METAFLOW_ECS_FARGATE_EXECUTION_ROLE", shared_memory: typing.Optional[int] = None, max_swap: typing.Optional[int] = None, swappiness: typing.Optional[int] = None, use_tmpfs: bool = False, tmpfs_tempdir: bool = True, tmpfs_size: typing.Optional[int] = None, tmpfs_path: typing.Optional[str] = None, inferentia: int = 0, trainium: int = None, efa: int = 0, ephemeral_storage: int = None, log_driver: typing.Optional[str] = None, log_options: typing.Optional[typing.List[str]] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
     """
-    Specifies that this step should execute on …
+    Specifies that this step should execute on [AWS Batch](https://aws.amazon.com/batch/).
 
     Parameters
     ----------
     cpu : int, default 1
         Number of CPUs required for this step. If `@resources` is
         also present, the maximum value from all decorators is used.
+    gpu : int, default 0
+        Number of GPUs required for this step. If `@resources` is
+        also present, the maximum value from all decorators is used.
     memory : int, default 4096
         Memory size (in MB) required for this step. If
         `@resources` is also present, the maximum value from all decorators is
         used.
-    disk : int, default 10240
-        Disk size (in MB) required for this step. If
-        `@resources` is also present, the maximum value from all decorators is
-        used.
     image : str, optional, default None
-        Docker image to use when launching on …
-
+        Docker image to use when launching on AWS Batch. If not specified, and
+        METAFLOW_BATCH_CONTAINER_IMAGE is specified, that image is used. If
         not, a default Docker image mapping to the current version of Python is used.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    queue : str, default METAFLOW_BATCH_JOB_QUEUE
+        AWS Batch Job Queue to submit the job to.
+    iam_role : str, default METAFLOW_ECS_S3_ACCESS_IAM_ROLE
+        AWS IAM role that AWS Batch container uses to access AWS cloud resources.
+    execution_role : str, default METAFLOW_ECS_FARGATE_EXECUTION_ROLE
+        AWS IAM role that AWS Batch can use [to trigger AWS Fargate tasks]
+        (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html).
+    shared_memory : int, optional, default None
+        The value for the size (in MiB) of the /dev/shm volume for this step.
+        This parameter maps to the `--shm-size` option in Docker.
+    max_swap : int, optional, default None
+        The total amount of swap memory (in MiB) a container can use for this
+        step. This parameter is translated to the `--memory-swap` option in
+        Docker where the value is the sum of the container memory plus the
+        `max_swap` value.
+    swappiness : int, optional, default None
+        This allows you to tune memory swappiness behavior for this step.
+        A swappiness value of 0 causes swapping not to happen unless absolutely
+        necessary. A swappiness value of 100 causes pages to be swapped very
+        aggressively. Accepted values are whole numbers between 0 and 100.
     use_tmpfs : bool, default False
-        This enables an explicit tmpfs mount for this step.
+        This enables an explicit tmpfs mount for this step. Note that tmpfs is
+        not available on Fargate compute environments
     tmpfs_tempdir : bool, default True
         sets METAFLOW_TEMPDIR to tmpfs_path if set for this step.
-    tmpfs_size : int, optional, default …
+    tmpfs_size : int, optional, default None
         The value for the size (in MiB) of the tmpfs mount for this step.
         This parameter maps to the `--tmpfs` option in Docker. Defaults to 50% of the
         memory allocated for this step.
-    tmpfs_path : str, optional, default …
-        Path to tmpfs mount for this step.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        This decorator is useful if this step may hang indefinitely.
-
-    This can be used in conjunction with the `@retry` decorator as well as the `@catch` decorator.
-    A timeout is considered to be an exception thrown by the step. It will cause the step to be
-    retried if needed and the exception will be caught by the `@catch` decorator, if present.
-
-    Note that all the values specified in parameters are added together so if you specify
-    60 seconds and 1 hour, the decorator will have an effective timeout of 1 hour and 1 minute.
-
-    Parameters
-    ----------
-    seconds : int, default 0
-        Number of seconds to wait prior to timing out.
-    minutes : int, default 0
-        Number of minutes to wait prior to timing out.
-    hours : int, default 0
-        Number of hours to wait prior to timing out.
+    tmpfs_path : str, optional, default None
+        Path to tmpfs mount for this step. Defaults to /metaflow_temp.
+    inferentia : int, default 0
+        Number of Inferentia chips required for this step.
+    trainium : int, default None
+        Alias for inferentia. Use only one of the two.
+    efa : int, default 0
+        Number of elastic fabric adapter network devices to attach to container
+    ephemeral_storage : int, default None
+        The total amount, in GiB, of ephemeral storage to set for the task, 21-200GiB.
+        This is only relevant for Fargate compute environments
+    log_driver: str, optional, default None
+        The log driver to use for the Amazon ECS container.
+    log_options: List[str], optional, default None
+        List of strings containing options for the chosen log driver. The configurable values
+        depend on the `log driver` chosen. Validation of these options is not supported yet.
+        Example: [`awslogs-group:aws/batch/job`]
     """
     ...
 
 @typing.overload
-def …
+def batch(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
     ...
 
 @typing.overload
-def …
+def batch(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
     ...
 
-def …
+def batch(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, cpu: int = 1, gpu: int = 0, memory: int = 4096, image: typing.Optional[str] = None, queue: str = "METAFLOW_BATCH_JOB_QUEUE", iam_role: str = "METAFLOW_ECS_S3_ACCESS_IAM_ROLE", execution_role: str = "METAFLOW_ECS_FARGATE_EXECUTION_ROLE", shared_memory: typing.Optional[int] = None, max_swap: typing.Optional[int] = None, swappiness: typing.Optional[int] = None, use_tmpfs: bool = False, tmpfs_tempdir: bool = True, tmpfs_size: typing.Optional[int] = None, tmpfs_path: typing.Optional[str] = None, inferentia: int = 0, trainium: int = None, efa: int = 0, ephemeral_storage: int = None, log_driver: typing.Optional[str] = None, log_options: typing.Optional[typing.List[str]] = None):
     """
-    Specifies …
-
-    This decorator is useful if this step may hang indefinitely.
-
-    This can be used in conjunction with the `@retry` decorator as well as the `@catch` decorator.
-    A timeout is considered to be an exception thrown by the step. It will cause the step to be
-    retried if needed and the exception will be caught by the `@catch` decorator, if present.
-
-    Note that all the values specified in parameters are added together so if you specify
-    60 seconds and 1 hour, the decorator will have an effective timeout of 1 hour and 1 minute.
-
-    Parameters
-    ----------
-    seconds : int, default 0
-        Number of seconds to wait prior to timing out.
-    minutes : int, default 0
-        Number of minutes to wait prior to timing out.
-    hours : int, default 0
-        Number of hours to wait prior to timing out.
-    """
-    ...
-
-@typing.overload
-def resources(*, cpu: int = 1, gpu: int = 0, disk: typing.Optional[int] = None, memory: int = 4096, shared_memory: typing.Optional[int] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
-    """
-    Specifies the resources needed when executing this step.
-
-    Use `@resources` to specify the resource requirements
-    independently of the specific compute layer (`@batch`, `@kubernetes`).
-
-    You can choose the compute layer on the command line by executing e.g.
-    ```
-    python myflow.py run --with batch
-    ```
-    or
-    ```
-    python myflow.py run --with kubernetes
-    ```
-    which executes the flow on the desired system using the
-    requirements specified in `@resources`.
-
-    Parameters
-    ----------
-    cpu : int, default 1
-        Number of CPUs required for this step.
-    gpu : int, default 0
-        Number of GPUs required for this step.
-    disk : int, optional, default None
-        Disk size (in MB) required for this step. Only applies on Kubernetes.
-    memory : int, default 4096
-        Memory size (in MB) required for this step.
-    shared_memory : int, optional, default None
-        The value for the size (in MiB) of the /dev/shm volume for this step.
-        This parameter maps to the `--shm-size` option in Docker.
-    """
-    ...
-
-@typing.overload
-def resources(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
-    ...
-
-@typing.overload
-def resources(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
-    ...
-
-def resources(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, cpu: int = 1, gpu: int = 0, disk: typing.Optional[int] = None, memory: int = 4096, shared_memory: typing.Optional[int] = None):
-    """
-    Specifies the resources needed when executing this step.
-
-    Use `@resources` to specify the resource requirements
-    independently of the specific compute layer (`@batch`, `@kubernetes`).
-
-    You can choose the compute layer on the command line by executing e.g.
-    ```
-    python myflow.py run --with batch
-    ```
-    or
-    ```
-    python myflow.py run --with kubernetes
-    ```
-    which executes the flow on the desired system using the
-    requirements specified in `@resources`.
+    Specifies that this step should execute on [AWS Batch](https://aws.amazon.com/batch/).
 
     Parameters
     ----------
     cpu : int, default 1
-        Number of CPUs required for this step.
+        Number of CPUs required for this step. If `@resources` is
+        also present, the maximum value from all decorators is used.
     gpu : int, default 0
-        Number of GPUs required for this step.
-
-        Disk size (in MB) required for this step. Only applies on Kubernetes.
+        Number of GPUs required for this step. If `@resources` is
+        also present, the maximum value from all decorators is used.
     memory : int, default 4096
-        Memory size (in MB) required for this step.
+        Memory size (in MB) required for this step. If
+        `@resources` is also present, the maximum value from all decorators is
+        used.
+    image : str, optional, default None
+        Docker image to use when launching on AWS Batch. If not specified, and
+        METAFLOW_BATCH_CONTAINER_IMAGE is specified, that image is used. If
+        not, a default Docker image mapping to the current version of Python is used.
+    queue : str, default METAFLOW_BATCH_JOB_QUEUE
+        AWS Batch Job Queue to submit the job to.
+    iam_role : str, default METAFLOW_ECS_S3_ACCESS_IAM_ROLE
+        AWS IAM role that AWS Batch container uses to access AWS cloud resources.
+    execution_role : str, default METAFLOW_ECS_FARGATE_EXECUTION_ROLE
+        AWS IAM role that AWS Batch can use [to trigger AWS Fargate tasks]
+        (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html).
     shared_memory : int, optional, default None
         The value for the size (in MiB) of the /dev/shm volume for this step.
         This parameter maps to the `--shm-size` option in Docker.
+    max_swap : int, optional, default None
+        The total amount of swap memory (in MiB) a container can use for this
+        step. This parameter is translated to the `--memory-swap` option in
+        Docker where the value is the sum of the container memory plus the
+        `max_swap` value.
+    swappiness : int, optional, default None
+        This allows you to tune memory swappiness behavior for this step.
+        A swappiness value of 0 causes swapping not to happen unless absolutely
+        necessary. A swappiness value of 100 causes pages to be swapped very
+        aggressively. Accepted values are whole numbers between 0 and 100.
+    use_tmpfs : bool, default False
+        This enables an explicit tmpfs mount for this step. Note that tmpfs is
+        not available on Fargate compute environments
+    tmpfs_tempdir : bool, default True
+        sets METAFLOW_TEMPDIR to tmpfs_path if set for this step.
+    tmpfs_size : int, optional, default None
+        The value for the size (in MiB) of the tmpfs mount for this step.
+        This parameter maps to the `--tmpfs` option in Docker. Defaults to 50% of the
+        memory allocated for this step.
+    tmpfs_path : str, optional, default None
+        Path to tmpfs mount for this step. Defaults to /metaflow_temp.
+    inferentia : int, default 0
+        Number of Inferentia chips required for this step.
+    trainium : int, default None
+        Alias for inferentia. Use only one of the two.
+    efa : int, default 0
+        Number of elastic fabric adapter network devices to attach to container
+    ephemeral_storage : int, default None
+        The total amount, in GiB, of ephemeral storage to set for the task, 21-200GiB.
+        This is only relevant for Fargate compute environments
+    log_driver: str, optional, default None
+        The log driver to use for the Amazon ECS container.
+    log_options: List[str], optional, default None
+        List of strings containing options for the chosen log driver. The configurable values
+        depend on the `log driver` chosen. Validation of these options is not supported yet.
+        Example: [`awslogs-group:aws/batch/job`]
     """
     ...
 
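Net effect of this hunk: the truncated definitions removed on the left are replaced by the `card` and `batch` stubs at their new positions, with the `@batch` docstring gaining `gpu`, `queue`, `iam_role`, `execution_role`, swap/tmpfs, and logging parameters. A sketch of the two decorators together, matching the signatures above (flow name and parameter values are hypothetical; cpu/memory defaults follow the stub):

```python
from metaflow import FlowSpec, step, batch, card, current
from metaflow.cards import Markdown

class ReportFlow(FlowSpec):  # hypothetical example flow
    # Run the step on AWS Batch and attach a default card to its task.
    # use_tmpfs mounts an explicit tmpfs volume (per the docstring above,
    # tmpfs is not available on Fargate compute environments).
    @card(type="default", timeout=45)
    @batch(cpu=2, memory=8192, use_tmpfs=True)
    @step
    def start(self):
        current.card.append(Markdown("# Batch task report"))
        self.next(self.end)

    @step
    def end(self):
        pass

if __name__ == "__main__":
    ReportFlow()
```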
@@ -1113,450 +1032,421 @@ def pypi(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typ
|
|
1113
1032
|
...
|
1114
1033
|
|
1115
1034
|
@typing.overload
|
1116
|
-
def
|
1035
|
+
def catch(*, var: typing.Optional[str] = None, print_exception: bool = True) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
|
1117
1036
|
"""
|
1118
|
-
Specifies
|
1037
|
+
Specifies that the step will success under all circumstances.
|
1038
|
+
|
1039
|
+
The decorator will create an optional artifact, specified by `var`, which
|
1040
|
+
contains the exception raised. You can use it to detect the presence
|
1041
|
+
of errors, indicating that all happy-path artifacts produced by the step
|
1042
|
+
are missing.
|
1119
1043
|
|
1120
1044
|
Parameters
|
1121
1045
|
----------
|
1122
|
-
|
1123
|
-
|
1046
|
+
var : str, optional, default None
|
1047
|
+
Name of the artifact in which to store the caught exception.
|
1048
|
+
If not specified, the exception is not stored.
|
1049
|
+
print_exception : bool, default True
|
1050
|
+
Determines whether or not the exception is printed to
|
1051
|
+
stdout when caught.
|
1124
1052
|
"""
|
1125
1053
|
...
|
1126
1054
|
|
1127
1055
|
@typing.overload
|
1128
|
-
def
|
1056
|
+
def catch(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
|
1129
1057
|
...
|
1130
1058
|
|
1131
1059
|
@typing.overload
|
1132
|
-
def
|
1060
|
+
def catch(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
|
1133
1061
|
...
|
1134
1062
|
|
1135
|
-
def
|
1063
|
+
def catch(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, var: typing.Optional[str] = None, print_exception: bool = True):
|
1136
1064
|
"""
|
1137
|
-
Specifies
|
1065
|
+
Specifies that the step will success under all circumstances.
|
1066
|
+
|
1067
|
+
The decorator will create an optional artifact, specified by `var`, which
|
1068
|
+
contains the exception raised. You can use it to detect the presence
|
1069
|
+
of errors, indicating that all happy-path artifacts produced by the step
|
1070
|
+
are missing.
|
1138
1071
|
|
1139
1072
|
Parameters
|
1140
1073
|
----------
|
1141
|
-
|
1142
|
-
|
1074
|
+
var : str, optional, default None
|
1075
|
+
Name of the artifact in which to store the caught exception.
|
1076
|
+
If not specified, the exception is not stored.
|
1077
|
+
print_exception : bool, default True
|
1078
|
+
Determines whether or not the exception is printed to
|
1079
|
+
stdout when caught.
|
1143
1080
|
"""
|
1144
1081
|
...
|
1145
1082
|
|
1146
1083
|
@typing.overload
|
1147
|
-
def
|
1084
|
+
def retry(*, times: int = 3, minutes_between_retries: int = 2) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
|
1148
1085
|
"""
|
1149
|
-
|
1086
|
+
Specifies the number of times the task corresponding
|
1087
|
+
to a step needs to be retried.
|
1150
1088
|
|
1151
|
-
|
1089
|
+
This decorator is useful for handling transient errors, such as networking issues.
|
1090
|
+
If your task contains operations that can't be retried safely, e.g. database updates,
|
1091
|
+
it is advisable to annotate it with `@retry(times=0)`.
|
1092
|
+
|
1093
|
+
This can be used in conjunction with the `@catch` decorator. The `@catch`
|
1094
|
+
decorator will execute a no-op task after all retries have been exhausted,
|
1095
|
+
ensuring that the flow execution can continue.
|
1152
1096
|
|
1153
1097
|
Parameters
|
1154
1098
|
----------
|
1155
|
-
|
1156
|
-
|
1157
|
-
|
1158
|
-
|
1159
|
-
options : Dict[str, Any], default {}
|
1160
|
-
Options passed to the card. The contents depend on the card type.
|
1161
|
-
timeout : int, default 45
|
1162
|
-
Interrupt reporting if it takes more than this many seconds.
|
1163
|
-
|
1164
|
-
|
1099
|
+
times : int, default 3
|
1100
|
+
Number of times to retry this task.
|
1101
|
+
minutes_between_retries : int, default 2
|
1102
|
+
Number of minutes between retries.
|
1165
1103
|
"""
|
1166
1104
|
...
|
1167
1105
|
|
1168
1106
|
@typing.overload
|
1169
|
-
def
|
1107
|
+
def retry(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
|
1170
1108
|
...
|
1171
1109
|
|
1172
1110
|
@typing.overload
|
1173
|
-
def
|
1111
|
+
def retry(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
|
1174
1112
|
...
|
1175
1113
|
|
1176
|
-
def
|
1114
|
+
def retry(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, times: int = 3, minutes_between_retries: int = 2):
|
1177
1115
|
"""
|
1178
|
-
|
1116
|
+
Specifies the number of times the task corresponding
|
1117
|
+
to a step needs to be retried.
|
1179
1118
|
|
1180
|
-
|
1119
|
+
This decorator is useful for handling transient errors, such as networking issues.
|
1120
|
+
If your task contains operations that can't be retried safely, e.g. database updates,
|
1121
|
+
it is advisable to annotate it with `@retry(times=0)`.
|
1122
|
+
|
1123
|
+
This can be used in conjunction with the `@catch` decorator. The `@catch`
|
1124
|
+
decorator will execute a no-op task after all retries have been exhausted,
|
1125
|
+
ensuring that the flow execution can continue.
|
1181
1126
|
|
1182
1127
|
Parameters
|
1183
1128
|
----------
|
1184
|
-
|
1185
|
-
|
1186
|
-
|
1187
|
-
|
1188
|
-
|
1189
|
-
|
1190
|
-
|
1191
|
-
|
1129
|
+
times : int, default 3
|
1130
|
+
Number of times to retry this task.
|
1131
|
+
minutes_between_retries : int, default 2
|
1132
|
+
Number of minutes between retries.
|
1133
|
+
"""
|
1134
|
+
...
|
1135
|
+
|
1136
|
+
@typing.overload
|
1137
|
+
def timeout(*, seconds: int = 0, minutes: int = 0, hours: int = 0) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
|
1138
|
+
"""
|
1139
|
+
Specifies a timeout for your step.
|
1140
|
+
|
1141
|
+
This decorator is useful if this step may hang indefinitely.
|
1192
1142
|
|
1143
|
+
This can be used in conjunction with the `@retry` decorator as well as the `@catch` decorator.
|
1144
|
+
A timeout is considered to be an exception thrown by the step. It will cause the step to be
|
1145
|
+
retried if needed and the exception will be caught by the `@catch` decorator, if present.
|
1146
|
+
|
1147
|
+
Note that all the values specified in parameters are added together so if you specify
|
1148
|
+
60 seconds and 1 hour, the decorator will have an effective timeout of 1 hour and 1 minute.
|
1193
1149
|
|
1150
|
+
Parameters
|
1151
|
+
----------
|
1152
|
+
seconds : int, default 0
|
1153
|
+
Number of seconds to wait prior to timing out.
|
1154
|
+
minutes : int, default 0
|
1155
|
+
Number of minutes to wait prior to timing out.
|
1156
|
+
hours : int, default 0
|
1157
|
+
Number of hours to wait prior to timing out.
|
1194
1158
|
"""
|
1195
1159
|
...
|
1196
1160
|
|
1197
1161
|
@typing.overload
|
1198
|
-
def
|
1162
|
+
def timeout(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
|
1163
|
+
...
|
1164
|
+
|
1165
|
+
@typing.overload
|
1166
|
+
def timeout(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
|
1167
|
+
...
|
1168
|
+
|
1169
|
+
def timeout(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, seconds: int = 0, minutes: int = 0, hours: int = 0):
|
1199
1170
|
"""
|
1200
|
-
Specifies
|
1171
|
+
Specifies a timeout for your step.
|
1172
|
+
|
1173
|
+
This decorator is useful if this step may hang indefinitely.
|
1174
|
+
|
1175
|
+
This can be used in conjunction with the `@retry` decorator as well as the `@catch` decorator.
|
1176
|
+
A timeout is considered to be an exception thrown by the step. It will cause the step to be
|
1177
|
+
retried if needed and the exception will be caught by the `@catch` decorator, if present.
|
1178
|
+
|
1179
|
+
Note that all the values specified in parameters are added together so if you specify
|
1180
|
+
60 seconds and 1 hour, the decorator will have an effective timeout of 1 hour and 1 minute.
|
1201
1181
|
|
1202
1182
|
Parameters
|
1203
1183
|
----------
|
1204
|
-
|
1205
|
-
Number of
|
1206
|
-
|
1207
|
-
|
1208
|
-
|
1209
|
-
|
1210
|
-
memory : int, default 4096
|
1211
|
-
Memory size (in MB) required for this step. If
|
1212
|
-
`@resources` is also present, the maximum value from all decorators is
|
1213
|
-
used.
|
1214
|
-
image : str, optional, default None
|
1215
|
-
Docker image to use when launching on AWS Batch. If not specified, and
|
1216
|
-
METAFLOW_BATCH_CONTAINER_IMAGE is specified, that image is used. If
|
1217
|
-
not, a default Docker image mapping to the current version of Python is used.
|
1218
|
-
queue : str, default METAFLOW_BATCH_JOB_QUEUE
|
1219
|
-
AWS Batch Job Queue to submit the job to.
|
1220
|
-
iam_role : str, default METAFLOW_ECS_S3_ACCESS_IAM_ROLE
|
1221
|
-
AWS IAM role that AWS Batch container uses to access AWS cloud resources.
|
1222
|
-
execution_role : str, default METAFLOW_ECS_FARGATE_EXECUTION_ROLE
|
1223
|
-
AWS IAM role that AWS Batch can use [to trigger AWS Fargate tasks]
|
1224
|
-
(https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html).
|
1225
|
-
shared_memory : int, optional, default None
|
1226
|
-
The value for the size (in MiB) of the /dev/shm volume for this step.
|
1227
|
-
This parameter maps to the `--shm-size` option in Docker.
|
1228
|
-
max_swap : int, optional, default None
|
1229
|
-
The total amount of swap memory (in MiB) a container can use for this
|
1230
|
-
step. This parameter is translated to the `--memory-swap` option in
|
1231
|
-
Docker where the value is the sum of the container memory plus the
|
1232
|
-
`max_swap` value.
|
1233
|
-
swappiness : int, optional, default None
|
1234
|
-
This allows you to tune memory swappiness behavior for this step.
|
1235
|
-
A swappiness value of 0 causes swapping not to happen unless absolutely
|
1236
|
-
necessary. A swappiness value of 100 causes pages to be swapped very
|
1237
|
-
aggressively. Accepted values are whole numbers between 0 and 100.
|
1238
|
-
use_tmpfs : bool, default False
|
1239
|
-
This enables an explicit tmpfs mount for this step. Note that tmpfs is
|
1240
|
-
not available on Fargate compute environments
|
1241
|
-
tmpfs_tempdir : bool, default True
|
1242
|
-
sets METAFLOW_TEMPDIR to tmpfs_path if set for this step.
|
1243
|
-
tmpfs_size : int, optional, default None
|
1244
|
-
The value for the size (in MiB) of the tmpfs mount for this step.
|
1245
|
-
This parameter maps to the `--tmpfs` option in Docker. Defaults to 50% of the
|
1246
|
-
memory allocated for this step.
|
1247
|
-
tmpfs_path : str, optional, default None
|
1248
|
-
Path to tmpfs mount for this step. Defaults to /metaflow_temp.
|
1249
|
-
inferentia : int, default 0
|
1250
|
-
Number of Inferentia chips required for this step.
|
1251
|
-
trainium : int, default None
|
1252
|
-
Alias for inferentia. Use only one of the two.
|
1253
|
-
efa : int, default 0
|
1254
|
-
Number of elastic fabric adapter network devices to attach to container
|
1255
|
-
ephemeral_storage : int, default None
|
1256
|
-
The total amount, in GiB, of ephemeral storage to set for the task, 21-200GiB.
|
1257
|
-
This is only relevant for Fargate compute environments
|
1258
|
-
log_driver: str, optional, default None
|
1259
|
-
The log driver to use for the Amazon ECS container.
|
1260
|
-
log_options: List[str], optional, default None
|
1261
|
-
List of strings containing options for the chosen log driver. The configurable values
|
1262
|
-
depend on the `log driver` chosen. Validation of these options is not supported yet.
|
1263
|
-
Example: [`awslogs-group:aws/batch/job`]
|
1184
|
+
seconds : int, default 0
|
1185
|
+
Number of seconds to wait prior to timing out.
|
1186
|
+
minutes : int, default 0
|
1187
|
+
Number of minutes to wait prior to timing out.
|
1188
|
+
hours : int, default 0
|
1189
|
+
Number of hours to wait prior to timing out.
|
1264
1190
|
"""
|
1265
1191
|
...
|
1266
1192
|
|
1267
1193
|
@typing.overload
|
1268
|
-
def
|
1194
|
+
def secrets(*, sources: typing.List[typing.Union[str, typing.Dict[str, typing.Any]]] = []) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
|
1195
|
+
"""
|
1196
|
+
Specifies secrets to be retrieved and injected as environment variables prior to
|
1197
|
+
the execution of a step.
|
1198
|
+
|
1199
|
+
Parameters
|
1200
|
+
----------
|
1201
|
+
sources : List[Union[str, Dict[str, Any]]], default: []
|
1202
|
+
List of secret specs, defining how the secrets are to be retrieved
|
1203
|
+
"""
|
1269
1204
|
...
|
1270
1205
|
|
1271
1206
|
@typing.overload
|
1272
|
-
def
|
1207
|
+
def secrets(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
|
1273
1208
|
...
|
1274
1209
|
|
1275
|
-
|
1210
|
+
@typing.overload
|
1211
|
+
def secrets(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
|
1212
|
+
...
|
1213
|
+
|
1214
|
+
def secrets(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, sources: typing.List[typing.Union[str, typing.Dict[str, typing.Any]]] = []):
|
1276
1215
|
"""
|
1277
|
-
Specifies
|
1216
|
+
Specifies secrets to be retrieved and injected as environment variables prior to
|
1217
|
+
the execution of a step.
|
1218
|
+
|
1219
|
+
Parameters
|
1220
|
+
----------
|
1221
|
+
sources : List[Union[str, Dict[str, Any]]], default: []
|
1222
|
+
List of secret specs, defining how the secrets are to be retrieved
|
1223
|
+
"""
|
1224
|
+
...
|
1225
|
+
|
1226
|
+
def kubernetes(*, cpu: int = 1, memory: int = 4096, disk: int = 10240, image: typing.Optional[str] = None, image_pull_policy: str = "KUBERNETES_IMAGE_PULL_POLICY", service_account: str = "METAFLOW_KUBERNETES_SERVICE_ACCOUNT", secrets: typing.Optional[typing.List[str]] = None, namespace: str = "METAFLOW_KUBERNETES_NAMESPACE", gpu: typing.Optional[int] = None, gpu_vendor: str = "KUBERNETES_GPU_VENDOR", tolerations: typing.List[str] = [], use_tmpfs: bool = False, tmpfs_tempdir: bool = True, tmpfs_size: typing.Optional[int] = None, tmpfs_path: typing.Optional[str] = "/metaflow_temp", persistent_volume_claims: typing.Optional[typing.Dict[str, str]] = None, shared_memory: typing.Optional[int] = None, port: typing.Optional[int] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
|
1227
|
+
"""
|
1228
|
+
Specifies that this step should execute on Kubernetes.
|
1278
1229
|
|
1279
1230
|
Parameters
|
1280
1231
|
----------
|
1281
1232
|
cpu : int, default 1
|
1282
1233
|
Number of CPUs required for this step. If `@resources` is
|
1283
1234
|
also present, the maximum value from all decorators is used.
|
1284
|
-
gpu : int, default 0
|
1285
|
-
Number of GPUs required for this step. If `@resources` is
|
1286
|
-
also present, the maximum value from all decorators is used.
|
1287
1235
|
memory : int, default 4096
|
1288
1236
|
Memory size (in MB) required for this step. If
|
1289
1237
|
`@resources` is also present, the maximum value from all decorators is
|
1290
1238
|
used.
|
1239
|
+
disk : int, default 10240
|
1240
|
+
Disk size (in MB) required for this step. If
|
1241
|
+
`@resources` is also present, the maximum value from all decorators is
|
1242
|
+
used.
|
1291
1243
|
image : str, optional, default None
|
1292
|
-
Docker image to use when launching on
|
1293
|
-
|
1244
|
+
Docker image to use when launching on Kubernetes. If not specified, and
|
1245
|
+
METAFLOW_KUBERNETES_CONTAINER_IMAGE is specified, that image is used. If
|
1294
1246
|
not, a default Docker image mapping to the current version of Python is used.
|
1295
|
-
|
1296
|
-
|
1297
|
-
|
1298
|
-
|
1299
|
-
|
1300
|
-
|
1301
|
-
|
1302
|
-
|
1303
|
-
|
1304
|
-
|
1305
|
-
|
1306
|
-
|
1307
|
-
|
1308
|
-
|
1309
|
-
|
1310
|
-
|
1311
|
-
|
1312
|
-
|
1313
|
-
necessary. A swappiness value of 100 causes pages to be swapped very
|
1314
|
-
aggressively. Accepted values are whole numbers between 0 and 100.
|
1247
|
+
image_pull_policy: str, default KUBERNETES_IMAGE_PULL_POLICY
|
1248
|
+
If given, the imagePullPolicy to be applied to the Docker image of the step.
|
1249
|
+
service_account : str, default METAFLOW_KUBERNETES_SERVICE_ACCOUNT
|
1250
|
+
Kubernetes service account to use when launching pod in Kubernetes.
|
1251
|
+
secrets : List[str], optional, default None
|
1252
|
+
Kubernetes secrets to use when launching pod in Kubernetes. These
|
1253
|
+
secrets are in addition to the ones defined in `METAFLOW_KUBERNETES_SECRETS`
|
1254
|
+
in Metaflow configuration.
|
1255
|
+
namespace : str, default METAFLOW_KUBERNETES_NAMESPACE
|
1256
|
+
Kubernetes namespace to use when launching pod in Kubernetes.
|
1257
|
+
gpu : int, optional, default None
|
1258
|
+
Number of GPUs required for this step. A value of zero implies that
|
1259
|
+
the scheduled node should not have GPUs.
|
1260
|
+
gpu_vendor : str, default KUBERNETES_GPU_VENDOR
|
1261
|
+
The vendor of the GPUs to be used for this step.
|
1262
|
+
tolerations : List[str], default []
|
1263
|
+
The default is extracted from METAFLOW_KUBERNETES_TOLERATIONS.
|
1264
|
+
Kubernetes tolerations to use when launching pod in Kubernetes.
|
1315
1265
|
use_tmpfs : bool, default False
|
1316
|
-
This enables an explicit tmpfs mount for this step.
|
1317
|
-
not available on Fargate compute environments
|
1266
|
+
This enables an explicit tmpfs mount for this step.
|
1318
1267
|
tmpfs_tempdir : bool, default True
|
1319
1268
|
sets METAFLOW_TEMPDIR to tmpfs_path if set for this step.
|
1320
|
-
tmpfs_size : int, optional, default None
|
1269
|
+
tmpfs_size : int, optional, default: None
|
1321
1270
|
The value for the size (in MiB) of the tmpfs mount for this step.
|
1322
1271
|
This parameter maps to the `--tmpfs` option in Docker. Defaults to 50% of the
|
1323
1272
|
memory allocated for this step.
|
1324
|
-
tmpfs_path : str, optional, default
|
1325
|
-
Path to tmpfs mount for this step.
|
1326
|
-
|
1327
|
-
|
1328
|
-
|
1329
|
-
|
1330
|
-
|
1331
|
-
|
1332
|
-
|
1333
|
-
The total amount, in GiB, of ephemeral storage to set for the task, 21-200GiB.
|
1334
|
-
This is only relevant for Fargate compute environments
|
1335
|
-
log_driver: str, optional, default None
|
1336
|
-
The log driver to use for the Amazon ECS container.
|
1337
|
-
log_options: List[str], optional, default None
|
1338
|
-
List of strings containing options for the chosen log driver. The configurable values
|
1339
|
-
depend on the `log driver` chosen. Validation of these options is not supported yet.
|
1340
|
-
Example: [`awslogs-group:aws/batch/job`]
|
1273
|
+
tmpfs_path : str, optional, default /metaflow_temp
|
1274
|
+
Path to tmpfs mount for this step.
|
1275
|
+
persistent_volume_claims : Dict[str, str], optional, default None
|
1276
|
+
A map (dictionary) of persistent volumes to be mounted to the pod for this step. The map is from persistent
|
1277
|
+
volumes to the path to which the volume is to be mounted, e.g., `{'pvc-name': '/path/to/mount/on'}`.
|
1278
|
+
shared_memory: int, optional
|
1279
|
+
+        Shared memory size (in MiB) required for this step
+    port: int, optional
+        Port number to specify in the Kubernetes job object
     """
     ...
 
 @typing.overload
-def
+def resources(*, cpu: int = 1, gpu: int = 0, disk: typing.Optional[int] = None, memory: int = 4096, shared_memory: typing.Optional[int] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
     """
-    Specifies
+    Specifies the resources needed when executing this step.
 
-
-
-
-
+    Use `@resources` to specify the resource requirements
+    independently of the specific compute layer (`@batch`, `@kubernetes`).
+
+    You can choose the compute layer on the command line by executing e.g.
+    ```
+    python myflow.py run --with batch
+    ```
+    or
+    ```
+    python myflow.py run --with kubernetes
+    ```
+    which executes the flow on the desired system using the
+    requirements specified in `@resources`.
 
     Parameters
     ----------
-
-
-
-
-
-
+    cpu : int, default 1
+        Number of CPUs required for this step.
+    gpu : int, default 0
+        Number of GPUs required for this step.
+    disk : int, optional, default None
+        Disk size (in MB) required for this step. Only applies on Kubernetes.
+    memory : int, default 4096
+        Memory size (in MB) required for this step.
+    shared_memory : int, optional, default None
+        The value for the size (in MiB) of the /dev/shm volume for this step.
+        This parameter maps to the `--shm-size` option in Docker.
     """
     ...
 
 @typing.overload
-def
+def resources(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
     ...
 
 @typing.overload
-def
-    ...
-
-def catch(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, var: typing.Optional[str] = None, print_exception: bool = True):
-    """
-    Specifies that the step will success under all circumstances.
-
-    The decorator will create an optional artifact, specified by `var`, which
-    contains the exception raised. You can use it to detect the presence
-    of errors, indicating that all happy-path artifacts produced by the step
-    are missing.
-
-    Parameters
-    ----------
-    var : str, optional, default None
-        Name of the artifact in which to store the caught exception.
-        If not specified, the exception is not stored.
-    print_exception : bool, default True
-        Determines whether or not the exception is printed to
-        stdout when caught.
-    """
+def resources(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
     ...
 
-
-def trigger_on_finish(*, flow: typing.Union[str, typing.Dict[str, str], None] = None, flows: typing.List[typing.Union[str, typing.Dict[str, str]]] = [], options: typing.Dict[str, typing.Any] = {}) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
+def resources(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, cpu: int = 1, gpu: int = 0, disk: typing.Optional[int] = None, memory: int = 4096, shared_memory: typing.Optional[int] = None):
     """
-    Specifies the
+    Specifies the resources needed when executing this step.
 
-
-
-    ```
-    or
-    ```
-    @trigger_on_finish(flows=['FooFlow', 'BarFlow'])
-    ```
-    This decorator respects the @project decorator and triggers the flow
-    when upstream runs within the same namespace complete successfully
+    Use `@resources` to specify the resource requirements
+    independently of the specific compute layer (`@batch`, `@kubernetes`).
 
-
-    by specifying the fully qualified project_flow_name.
+    You can choose the compute layer on the command line by executing e.g.
     ```
-
+    python myflow.py run --with batch
     ```
     or
     ```
-
-    ```
-
-    You can also specify just the project or project branch (other values will be
-    inferred from the current project or project branch):
-    ```
-    @trigger_on_finish(flow={"name": "FooFlow", "project": "my_project", "project_branch": "branch"})
+    python myflow.py run --with kubernetes
     ```
-
-
-    - `prod`
-    - `user.bob`
-    - `test.my_experiment`
-    - `prod.staging`
+    which executes the flow on the desired system using the
+    requirements specified in `@resources`.
 
     Parameters
     ----------
-
-
-
-
-
-
-
-
+    cpu : int, default 1
+        Number of CPUs required for this step.
+    gpu : int, default 0
+        Number of GPUs required for this step.
+    disk : int, optional, default None
+        Disk size (in MB) required for this step. Only applies on Kubernetes.
+    memory : int, default 4096
+        Memory size (in MB) required for this step.
+    shared_memory : int, optional, default None
+        The value for the size (in MiB) of the /dev/shm volume for this step.
+        This parameter maps to the `--shm-size` option in Docker.
     """
     ...
 
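The `resources` stubs above document a step-level decorator. For orientation, a minimal sketch of typical usage, assuming the standard `FlowSpec`/`step` imports from `metaflow`; the flow and step names are illustrative, not part of this package:

```python
from metaflow import FlowSpec, resources, step


class ResourceDemoFlow(FlowSpec):
    """Illustrative flow exercising the @resources stub documented above."""

    @resources(cpu=2, memory=8192, shared_memory=1024)
    @step
    def start(self):
        # The hints take effect once a compute layer is chosen, e.g.
        # `python resource_demo.py run --with batch` (or `--with kubernetes`).
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    ResourceDemoFlow()
```

As the docstring notes, the same flow can be retargeted between `@batch` and `@kubernetes` from the command line without code changes.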
 @typing.overload
-def
-    ...
-
-def trigger_on_finish(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, flow: typing.Union[str, typing.Dict[str, str], None] = None, flows: typing.List[typing.Union[str, typing.Dict[str, str]]] = [], options: typing.Dict[str, typing.Any] = {}):
+def environment(*, vars: typing.Dict[str, str] = {}) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
     """
-    Specifies
-
-    ```
-    @trigger_on_finish(flow='FooFlow')
-    ```
-    or
-    ```
-    @trigger_on_finish(flows=['FooFlow', 'BarFlow'])
-    ```
-    This decorator respects the @project decorator and triggers the flow
-    when upstream runs within the same namespace complete successfully
-
-    Additionally, you can specify project aware upstream flow dependencies
-    by specifying the fully qualified project_flow_name.
-    ```
-    @trigger_on_finish(flow='my_project.branch.my_branch.FooFlow')
-    ```
-    or
-    ```
-    @trigger_on_finish(flows=['my_project.branch.my_branch.FooFlow', 'BarFlow'])
-    ```
-
-    You can also specify just the project or project branch (other values will be
-    inferred from the current project or project branch):
-    ```
-    @trigger_on_finish(flow={"name": "FooFlow", "project": "my_project", "project_branch": "branch"})
-    ```
-
-    Note that `branch` is typically one of:
-    - `prod`
-    - `user.bob`
-    - `test.my_experiment`
-    - `prod.staging`
+    Specifies environment variables to be set prior to the execution of a step.
 
     Parameters
     ----------
-
-
-    flows : List[Union[str, Dict[str, str]]], default []
-        Upstream flow dependencies for this flow.
-    options : Dict[str, Any], default {}
-        Backend-specific configuration for tuning eventing behavior.
-
-
+    vars : Dict[str, str], default {}
+        Dictionary of environment variables to set.
     """
     ...
 
-
+@typing.overload
+def environment(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
+    ...
+
+@typing.overload
+def environment(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
+    ...
+
+def environment(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, vars: typing.Dict[str, str] = {}):
     """
-    Specifies
-
-    A project-specific namespace is created for all flows that
-    use the same `@project(name)`.
+    Specifies environment variables to be set prior to the execution of a step.
 
     Parameters
     ----------
-
-
-    projects that use the same production scheduler. The name may
-    contain only lowercase alphanumeric characters and underscores.
-
-
+    vars : Dict[str, str], default {}
+        Dictionary of environment variables to set.
     """
     ...
 
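The new `environment` overloads take a single `vars` mapping. A small usage sketch under the same assumptions as above (names illustrative):

```python
import os

from metaflow import FlowSpec, environment, step


class EnvDemoFlow(FlowSpec):
    """Illustrative flow exercising the @environment stub above."""

    @environment(vars={"APP_MODE": "nightly"})
    @step
    def start(self):
        # The variable is set before the step body executes.
        print("mode:", os.environ["APP_MODE"])
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    EnvDemoFlow()
```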
-
-def schedule(*, hourly: bool = False, daily: bool = True, weekly: bool = False, cron: typing.Optional[str] = None, timezone: typing.Optional[str] = None) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
+def airflow_external_task_sensor(*, timeout: int, poke_interval: int, mode: str, exponential_backoff: bool, pool: str, soft_fail: bool, name: str, description: str, external_dag_id: str, external_task_ids: typing.List[str], allowed_states: typing.List[str], failed_states: typing.List[str], execution_delta: "datetime.timedelta", check_existence: bool) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
     """
-
-
+    The `@airflow_external_task_sensor` decorator attaches a Airflow [ExternalTaskSensor](https://airflow.apache.org/docs/apache-airflow/stable/_api/airflow/sensors/external_task/index.html#airflow.sensors.external_task.ExternalTaskSensor) before the start step of the flow.
+    This decorator only works when a flow is scheduled on Airflow and is compiled using `airflow create`. More than one `@airflow_external_task_sensor` can be added as a flow decorators. Adding more than one decorator will ensure that `start` step starts only after all sensors finish.
 
     Parameters
     ----------
-
-
-
-
-
-
-
-
-
-
-
-
+    timeout : int
+        Time, in seconds before the task times out and fails. (Default: 3600)
+    poke_interval : int
+        Time in seconds that the job should wait in between each try. (Default: 60)
+    mode : str
+        How the sensor operates. Options are: { poke | reschedule }. (Default: "poke")
+    exponential_backoff : bool
+        allow progressive longer waits between pokes by using exponential backoff algorithm. (Default: True)
+    pool : str
+        the slot pool this task should run in,
+        slot pools are a way to limit concurrency for certain tasks. (Default:None)
+    soft_fail : bool
+        Set to true to mark the task as SKIPPED on failure. (Default: False)
+    name : str
+        Name of the sensor on Airflow
+    description : str
+        Description of sensor in the Airflow UI
+    external_dag_id : str
+        The dag_id that contains the task you want to wait for.
+    external_task_ids : List[str]
+        The list of task_ids that you want to wait for.
+        If None (default value) the sensor waits for the DAG. (Default: None)
+    allowed_states : List[str]
+        Iterable of allowed states, (Default: ['success'])
+    failed_states : List[str]
+        Iterable of failed or dis-allowed states. (Default: None)
+    execution_delta : datetime.timedelta
+        time difference with the previous execution to look at,
+        the default is the same logical date as the current task or DAG. (Default: None)
+    check_existence: bool
+        Set to True to check if the external task exists or check if
+        the DAG to wait for exists. (Default: True)
     """
     ...
 
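The sensor stub above declares every parameter as required even though its docstring documents defaults; the runtime decorator is assumed to fill those in. A sketch under that assumption, with a hypothetical upstream DAG and task id:

```python
from metaflow import FlowSpec, airflow_external_task_sensor, step


# `upstream_dag` and `final_task` are hypothetical identifiers; the other
# parameters are assumed to fall back to the documented defaults
# (timeout=3600, poke_interval=60, mode="poke", ...).
@airflow_external_task_sensor(
    name="wait_for_upstream",
    external_dag_id="upstream_dag",
    external_task_ids=["final_task"],
)
class SensorDemoFlow(FlowSpec):

    @step
    def start(self):
        # Per the docstring, this step begins only after the sensor
        # succeeds, once the flow is compiled with `airflow create`.
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    SensorDemoFlow()
```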
-
-def schedule(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
-    ...
-
-def schedule(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, hourly: bool = False, daily: bool = True, weekly: bool = False, cron: typing.Optional[str] = None, timezone: typing.Optional[str] = None):
+def project(*, name: str) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
     """
-    Specifies
-
+    Specifies what flows belong to the same project.
+
+    A project-specific namespace is created for all flows that
+    use the same `@project(name)`.
 
     Parameters
     ----------
-
-
-
-
-
-
-    cron : str, optional, default None
-        Run the workflow at [a custom Cron schedule](https://docs.aws.amazon.com/eventbridge/latest/userguide/scheduled-events.html#cron-expressions)
-        specified by this expression.
-    timezone : str, optional, default None
-        Timezone on which the schedule runs (default: None). Currently supported only for Argo workflows,
-        which accepts timezones in [IANA format](https://nodatime.org/TimeZones).
+    name : str
+        Project name. Make sure that the name is unique amongst all
+        projects that use the same production scheduler. The name may
+        contain only lowercase alphanumeric characters and underscores.
+
+
     """
     ...
 
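`project` is a flow-level decorator with a single required `name`, constrained by the docstring to lowercase alphanumerics and underscores. A minimal sketch (the project name is illustrative):

```python
from metaflow import FlowSpec, project, step


@project(name="demo_analytics")  # must be unique per production scheduler
class ProjectDemoFlow(FlowSpec):

    @step
    def start(self):
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    ProjectDemoFlow()
```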
@@ -1655,6 +1545,55 @@ def trigger(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, event: t
     """
     ...
 
+@typing.overload
+def conda_base(*, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
+    """
+    Specifies the Conda environment for all steps of the flow.
+
+    Use `@conda_base` to set common libraries required by all
+    steps and use `@conda` to specify step-specific additions.
+
+    Parameters
+    ----------
+    packages : Dict[str, str], default {}
+        Packages to use for this flow. The key is the name of the package
+        and the value is the version to use.
+    libraries : Dict[str, str], default {}
+        Supported for backward compatibility. When used with packages, packages will take precedence.
+    python : str, optional, default None
+        Version of Python to use, e.g. '3.7.4'. A default value of None implies
+        that the version used will correspond to the version of the Python interpreter used to start the run.
+    disabled : bool, default False
+        If set to True, disables Conda.
+    """
+    ...
+
+@typing.overload
+def conda_base(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
+    ...
+
+def conda_base(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False):
+    """
+    Specifies the Conda environment for all steps of the flow.
+
+    Use `@conda_base` to set common libraries required by all
+    steps and use `@conda` to specify step-specific additions.
+
+    Parameters
+    ----------
+    packages : Dict[str, str], default {}
+        Packages to use for this flow. The key is the name of the package
+        and the value is the version to use.
+    libraries : Dict[str, str], default {}
+        Supported for backward compatibility. When used with packages, packages will take precedence.
+    python : str, optional, default None
+        Version of Python to use, e.g. '3.7.4'. A default value of None implies
+        that the version used will correspond to the version of the Python interpreter used to start the run.
+    disabled : bool, default False
+        If set to True, disables Conda.
+    """
+    ...
+
 @typing.overload
 def pypi_base(*, packages: typing.Dict[str, str] = {}, python: typing.Optional[str] = None) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
     """
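The `conda_base` stubs added in this hunk apply one Conda environment to every step of a flow. A sketch assuming Conda is enabled at run time; the package pins are illustrative:

```python
from metaflow import FlowSpec, conda_base, step


@conda_base(packages={"pandas": "2.1.4"}, python="3.10.4")
class CondaDemoFlow(FlowSpec):

    @step
    def start(self):
        # Imported inside the step so it resolves from the flow-level
        # Conda environment rather than the launching interpreter.
        import pandas as pd

        self.shape = pd.DataFrame({"x": [1, 2, 3]}).shape
        self.next(self.end)

    @step
    def end(self):
        print(self.shape)


if __name__ == "__main__":
    CondaDemoFlow()
```

Such a flow is typically launched with `python conda_demo.py --environment=conda run` so that the decorator is honored.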
@@ -1694,48 +1633,6 @@ def pypi_base(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, packag
     """
     ...
 
-def airflow_external_task_sensor(*, timeout: int, poke_interval: int, mode: str, exponential_backoff: bool, pool: str, soft_fail: bool, name: str, description: str, external_dag_id: str, external_task_ids: typing.List[str], allowed_states: typing.List[str], failed_states: typing.List[str], execution_delta: "datetime.timedelta", check_existence: bool) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
-    """
-    The `@airflow_external_task_sensor` decorator attaches a Airflow [ExternalTaskSensor](https://airflow.apache.org/docs/apache-airflow/stable/_api/airflow/sensors/external_task/index.html#airflow.sensors.external_task.ExternalTaskSensor) before the start step of the flow.
-    This decorator only works when a flow is scheduled on Airflow and is compiled using `airflow create`. More than one `@airflow_external_task_sensor` can be added as a flow decorators. Adding more than one decorator will ensure that `start` step starts only after all sensors finish.
-
-    Parameters
-    ----------
-    timeout : int
-        Time, in seconds before the task times out and fails. (Default: 3600)
-    poke_interval : int
-        Time in seconds that the job should wait in between each try. (Default: 60)
-    mode : str
-        How the sensor operates. Options are: { poke | reschedule }. (Default: "poke")
-    exponential_backoff : bool
-        allow progressive longer waits between pokes by using exponential backoff algorithm. (Default: True)
-    pool : str
-        the slot pool this task should run in,
-        slot pools are a way to limit concurrency for certain tasks. (Default:None)
-    soft_fail : bool
-        Set to true to mark the task as SKIPPED on failure. (Default: False)
-    name : str
-        Name of the sensor on Airflow
-    description : str
-        Description of sensor in the Airflow UI
-    external_dag_id : str
-        The dag_id that contains the task you want to wait for.
-    external_task_ids : List[str]
-        The list of task_ids that you want to wait for.
-        If None (default value) the sensor waits for the DAG. (Default: None)
-    allowed_states : List[str]
-        Iterable of allowed states, (Default: ['success'])
-    failed_states : List[str]
-        Iterable of failed or dis-allowed states. (Default: None)
-    execution_delta : datetime.timedelta
-        time difference with the previous execution to look at,
-        the default is the same logical date as the current task or DAG. (Default: None)
-    check_existence: bool
-        Set to True to check if the external task exists or check if
-        the DAG to wait for exists. (Default: True)
-    """
-    ...
-
 def airflow_s3_key_sensor(*, timeout: int, poke_interval: int, mode: str, exponential_backoff: bool, pool: str, soft_fail: bool, name: str, description: str, bucket_key: typing.Union[str, typing.List[str]], bucket_name: str, wildcard_match: bool, aws_conn_id: str, verify: bool) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
     """
     The `@airflow_s3_key_sensor` decorator attaches a Airflow [S3KeySensor](https://airflow.apache.org/docs/apache-airflow-providers-amazon/stable/_api/airflow/providers/amazon/aws/sensors/s3/index.html#airflow.providers.amazon.aws.sensors.s3.S3KeySensor)
@@ -1779,51 +1676,154 @@ def airflow_s3_key_sensor(*, timeout: int, poke_interval: int, mode: str, expone
     ...
 
 @typing.overload
-def
+def schedule(*, hourly: bool = False, daily: bool = True, weekly: bool = False, cron: typing.Optional[str] = None, timezone: typing.Optional[str] = None) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
     """
-    Specifies the
+    Specifies the times when the flow should be run when running on a
+    production scheduler.
 
-
-
+    Parameters
+    ----------
+    hourly : bool, default False
+        Run the workflow hourly.
+    daily : bool, default True
+        Run the workflow daily.
+    weekly : bool, default False
+        Run the workflow weekly.
+    cron : str, optional, default None
+        Run the workflow at [a custom Cron schedule](https://docs.aws.amazon.com/eventbridge/latest/userguide/scheduled-events.html#cron-expressions)
+        specified by this expression.
+    timezone : str, optional, default None
+        Timezone on which the schedule runs (default: None). Currently supported only for Argo workflows,
+        which accepts timezones in [IANA format](https://nodatime.org/TimeZones).
+    """
+    ...
+
+@typing.overload
+def schedule(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
+    ...
+
+def schedule(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, hourly: bool = False, daily: bool = True, weekly: bool = False, cron: typing.Optional[str] = None, timezone: typing.Optional[str] = None):
+    """
+    Specifies the times when the flow should be run when running on a
+    production scheduler.
 
     Parameters
     ----------
-
-
-
-
-
-
-
-
-
-
+    hourly : bool, default False
+        Run the workflow hourly.
+    daily : bool, default True
+        Run the workflow daily.
+    weekly : bool, default False
+        Run the workflow weekly.
+    cron : str, optional, default None
+        Run the workflow at [a custom Cron schedule](https://docs.aws.amazon.com/eventbridge/latest/userguide/scheduled-events.html#cron-expressions)
+        specified by this expression.
+    timezone : str, optional, default None
+        Timezone on which the schedule runs (default: None). Currently supported only for Argo workflows,
+        which accepts timezones in [IANA format](https://nodatime.org/TimeZones).
     """
     ...
 
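`schedule` has no effect on local runs; it applies once the flow is deployed to a production scheduler. A sketch using the `cron` form; the expression, timezone, and deployment target are illustrative (the docstring links the expected cron syntax, which varies by scheduler):

```python
from metaflow import FlowSpec, schedule, step


# Illustrative: a daily 06:00 schedule. Takes effect on deployment
# (e.g. `python nightly_demo.py argo-workflows create`); the timezone
# option is documented as Argo-only.
@schedule(cron="0 6 * * *", timezone="America/Los_Angeles")
class NightlyDemoFlow(FlowSpec):

    @step
    def start(self):
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    NightlyDemoFlow()
```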
 @typing.overload
-def
+def trigger_on_finish(*, flow: typing.Union[str, typing.Dict[str, str], None] = None, flows: typing.List[typing.Union[str, typing.Dict[str, str]]] = [], options: typing.Dict[str, typing.Any] = {}) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
+    """
+    Specifies the flow(s) that this flow depends on.
+
+    ```
+    @trigger_on_finish(flow='FooFlow')
+    ```
+    or
+    ```
+    @trigger_on_finish(flows=['FooFlow', 'BarFlow'])
+    ```
+    This decorator respects the @project decorator and triggers the flow
+    when upstream runs within the same namespace complete successfully
+
+    Additionally, you can specify project aware upstream flow dependencies
+    by specifying the fully qualified project_flow_name.
+    ```
+    @trigger_on_finish(flow='my_project.branch.my_branch.FooFlow')
+    ```
+    or
+    ```
+    @trigger_on_finish(flows=['my_project.branch.my_branch.FooFlow', 'BarFlow'])
+    ```
+
+    You can also specify just the project or project branch (other values will be
+    inferred from the current project or project branch):
+    ```
+    @trigger_on_finish(flow={"name": "FooFlow", "project": "my_project", "project_branch": "branch"})
+    ```
+
+    Note that `branch` is typically one of:
+    - `prod`
+    - `user.bob`
+    - `test.my_experiment`
+    - `prod.staging`
+
+    Parameters
+    ----------
+    flow : Union[str, Dict[str, str]], optional, default None
+        Upstream flow dependency for this flow.
+    flows : List[Union[str, Dict[str, str]]], default []
+        Upstream flow dependencies for this flow.
+    options : Dict[str, Any], default {}
+        Backend-specific configuration for tuning eventing behavior.
+
+
+    """
     ...
 
-
+@typing.overload
+def trigger_on_finish(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
+    ...
+
+def trigger_on_finish(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, flow: typing.Union[str, typing.Dict[str, str], None] = None, flows: typing.List[typing.Union[str, typing.Dict[str, str]]] = [], options: typing.Dict[str, typing.Any] = {}):
     """
-    Specifies the
+    Specifies the flow(s) that this flow depends on.
 
-
-
+    ```
+    @trigger_on_finish(flow='FooFlow')
+    ```
+    or
+    ```
+    @trigger_on_finish(flows=['FooFlow', 'BarFlow'])
+    ```
+    This decorator respects the @project decorator and triggers the flow
+    when upstream runs within the same namespace complete successfully
+
+    Additionally, you can specify project aware upstream flow dependencies
+    by specifying the fully qualified project_flow_name.
+    ```
+    @trigger_on_finish(flow='my_project.branch.my_branch.FooFlow')
+    ```
+    or
+    ```
+    @trigger_on_finish(flows=['my_project.branch.my_branch.FooFlow', 'BarFlow'])
+    ```
+
+    You can also specify just the project or project branch (other values will be
+    inferred from the current project or project branch):
+    ```
+    @trigger_on_finish(flow={"name": "FooFlow", "project": "my_project", "project_branch": "branch"})
+    ```
+
+    Note that `branch` is typically one of:
+    - `prod`
+    - `user.bob`
+    - `test.my_experiment`
+    - `prod.staging`
 
     Parameters
     ----------
-
-
-
-
-
-
-
-
-    disabled : bool, default False
-        If set to True, disables Conda.
+    flow : Union[str, Dict[str, str]], optional, default None
+        Upstream flow dependency for this flow.
+    flows : List[Union[str, Dict[str, str]]], default []
+        Upstream flow dependencies for this flow.
+    options : Dict[str, Any], default {}
+        Backend-specific configuration for tuning eventing behavior.
+
+
     """
     ...
 
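Finally, `trigger_on_finish` is a flow-level decorator; a sketch mirroring the docstring's first example (`FooFlow` is the illustrative upstream flow):

```python
from metaflow import FlowSpec, step, trigger_on_finish


@trigger_on_finish(flow="FooFlow")  # or flows=['FooFlow', 'BarFlow']
class DownstreamDemoFlow(FlowSpec):

    @step
    def start(self):
        # Started automatically when a deployed run of FooFlow in the
        # same namespace completes successfully.
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    DownstreamDemoFlow()
```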