ob-metaflow-stubs 6.0.10.0__py2.py3-none-any.whl → 6.0.10.2rc0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ob-metaflow-stubs might be problematic. Click here for more details.

Files changed (262) hide show
  1. metaflow-stubs/__init__.pyi +907 -906
  2. metaflow-stubs/cards.pyi +2 -2
  3. metaflow-stubs/cli.pyi +2 -2
  4. metaflow-stubs/cli_components/__init__.pyi +2 -2
  5. metaflow-stubs/cli_components/utils.pyi +2 -2
  6. metaflow-stubs/client/__init__.pyi +2 -2
  7. metaflow-stubs/client/core.pyi +5 -5
  8. metaflow-stubs/client/filecache.pyi +2 -2
  9. metaflow-stubs/events.pyi +2 -2
  10. metaflow-stubs/exception.pyi +2 -2
  11. metaflow-stubs/flowspec.pyi +5 -5
  12. metaflow-stubs/generated_for.txt +1 -1
  13. metaflow-stubs/includefile.pyi +4 -4
  14. metaflow-stubs/meta_files.pyi +2 -2
  15. metaflow-stubs/metadata_provider/__init__.pyi +2 -2
  16. metaflow-stubs/metadata_provider/heartbeat.pyi +2 -2
  17. metaflow-stubs/metadata_provider/metadata.pyi +3 -3
  18. metaflow-stubs/metadata_provider/util.pyi +2 -2
  19. metaflow-stubs/metaflow_config.pyi +6 -2
  20. metaflow-stubs/metaflow_current.pyi +43 -43
  21. metaflow-stubs/metaflow_git.pyi +2 -2
  22. metaflow-stubs/mf_extensions/__init__.pyi +2 -2
  23. metaflow-stubs/mf_extensions/obcheckpoint/__init__.pyi +2 -2
  24. metaflow-stubs/mf_extensions/obcheckpoint/plugins/__init__.pyi +2 -2
  25. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/__init__.pyi +2 -2
  26. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/card_utils/__init__.pyi +2 -2
  27. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/card_utils/async_cards.pyi +2 -2
  28. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/card_utils/deco_injection_mixin.pyi +2 -2
  29. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/card_utils/extra_components.pyi +3 -3
  30. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/__init__.pyi +2 -2
  31. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/cards/__init__.pyi +2 -2
  32. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/cards/checkpoint_lister.pyi +4 -4
  33. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/cards/lineage_card.pyi +2 -2
  34. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/checkpoint_storage.pyi +4 -4
  35. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/constructors.pyi +2 -2
  36. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/core.pyi +4 -4
  37. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/decorator.pyi +6 -6
  38. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/exceptions.pyi +2 -2
  39. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/final_api.pyi +3 -3
  40. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/checkpoints/lineage.pyi +2 -2
  41. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/datastore/__init__.pyi +2 -2
  42. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/datastore/context.pyi +4 -4
  43. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/datastore/core.pyi +2 -2
  44. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/datastore/decorator.pyi +2 -2
  45. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/datastore/exceptions.pyi +2 -2
  46. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/datastore/task_utils.pyi +4 -4
  47. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/datastore/utils.pyi +2 -2
  48. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/datastructures.pyi +3 -3
  49. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/exceptions.pyi +2 -2
  50. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/hf_hub/__init__.pyi +2 -2
  51. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/hf_hub/decorator.pyi +3 -3
  52. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/modeling_utils/__init__.pyi +2 -2
  53. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/modeling_utils/core.pyi +3 -3
  54. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/modeling_utils/exceptions.pyi +2 -2
  55. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/modeling_utils/model_storage.pyi +4 -4
  56. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/utils/__init__.pyi +2 -2
  57. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/utils/flowspec_utils.pyi +2 -2
  58. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/utils/general.pyi +2 -2
  59. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/utils/identity_utils.pyi +3 -3
  60. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/utils/serialization_handler/__init__.pyi +2 -2
  61. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/utils/serialization_handler/base.pyi +2 -2
  62. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/utils/serialization_handler/tar.pyi +3 -3
  63. metaflow-stubs/mf_extensions/obcheckpoint/plugins/machine_learning_utilities/utils/tar_utils.pyi +3 -3
  64. metaflow-stubs/mf_extensions/outerbounds/__init__.pyi +2 -2
  65. metaflow-stubs/mf_extensions/outerbounds/plugins/__init__.pyi +2 -2
  66. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/__init__.pyi +2 -2
  67. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/__init__.pyi +3 -2
  68. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/_state_machine.pyi +2 -2
  69. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/_vendor/__init__.pyi +2 -2
  70. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/_vendor/spinner/__init__.pyi +2 -2
  71. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/_vendor/spinner/spinners.pyi +2 -2
  72. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/app_cli.pyi +3 -3
  73. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/app_config.pyi +3 -3
  74. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/capsule.pyi +5 -5
  75. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/click_importer.pyi +2 -2
  76. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/code_package/__init__.pyi +2 -2
  77. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/code_package/code_packager.pyi +2 -2
  78. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/config/__init__.pyi +2 -2
  79. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/config/cli_generator.pyi +2 -2
  80. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/config/config_utils.pyi +4 -4
  81. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/config/schema_export.pyi +2 -2
  82. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/config/typed_configs.pyi +3 -3
  83. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/config/unified_config.pyi +4 -4
  84. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/dependencies.pyi +4 -4
  85. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/deployer.pyi +12 -6
  86. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/experimental/__init__.pyi +2 -2
  87. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/perimeters.pyi +2 -2
  88. metaflow-stubs/mf_extensions/outerbounds/plugins/apps/core/utils.pyi +3 -3
  89. metaflow-stubs/mf_extensions/outerbounds/plugins/aws/__init__.pyi +2 -2
  90. metaflow-stubs/mf_extensions/outerbounds/plugins/aws/assume_role_decorator.pyi +2 -2
  91. metaflow-stubs/mf_extensions/outerbounds/plugins/card_utilities/__init__.pyi +2 -2
  92. metaflow-stubs/mf_extensions/outerbounds/plugins/card_utilities/async_cards.pyi +2 -2
  93. metaflow-stubs/mf_extensions/outerbounds/plugins/card_utilities/injector.pyi +2 -2
  94. metaflow-stubs/mf_extensions/outerbounds/plugins/checkpoint_datastores/__init__.pyi +2 -2
  95. metaflow-stubs/mf_extensions/outerbounds/plugins/checkpoint_datastores/coreweave.pyi +2 -2
  96. metaflow-stubs/mf_extensions/outerbounds/plugins/checkpoint_datastores/nebius.pyi +2 -2
  97. metaflow-stubs/mf_extensions/outerbounds/plugins/fast_bakery/__init__.pyi +2 -2
  98. metaflow-stubs/mf_extensions/outerbounds/plugins/fast_bakery/baker.pyi +5 -5
  99. metaflow-stubs/mf_extensions/outerbounds/plugins/fast_bakery/docker_environment.pyi +3 -3
  100. metaflow-stubs/mf_extensions/outerbounds/plugins/fast_bakery/fast_bakery.pyi +2 -2
  101. metaflow-stubs/mf_extensions/outerbounds/plugins/kubernetes/__init__.pyi +2 -2
  102. metaflow-stubs/mf_extensions/outerbounds/plugins/kubernetes/pod_killer.pyi +2 -2
  103. metaflow-stubs/mf_extensions/outerbounds/plugins/ollama/__init__.pyi +2 -2
  104. metaflow-stubs/mf_extensions/outerbounds/plugins/ollama/constants.pyi +2 -2
  105. metaflow-stubs/mf_extensions/outerbounds/plugins/ollama/exceptions.pyi +2 -2
  106. metaflow-stubs/mf_extensions/outerbounds/plugins/ollama/ollama.pyi +2 -2
  107. metaflow-stubs/mf_extensions/outerbounds/plugins/ollama/status_card.pyi +2 -2
  108. metaflow-stubs/mf_extensions/outerbounds/plugins/snowflake/__init__.pyi +2 -2
  109. metaflow-stubs/mf_extensions/outerbounds/plugins/snowflake/snowflake.pyi +2 -2
  110. metaflow-stubs/mf_extensions/outerbounds/profilers/__init__.pyi +2 -2
  111. metaflow-stubs/mf_extensions/outerbounds/profilers/gpu.pyi +2 -2
  112. metaflow-stubs/mf_extensions/outerbounds/remote_config.pyi +4 -4
  113. metaflow-stubs/mf_extensions/outerbounds/toplevel/__init__.pyi +2 -2
  114. metaflow-stubs/mf_extensions/outerbounds/toplevel/global_aliases_for_metaflow_package.pyi +3 -2
  115. metaflow-stubs/mf_extensions/outerbounds/toplevel/s3_proxy.pyi +2 -2
  116. metaflow-stubs/multicore_utils.pyi +2 -2
  117. metaflow-stubs/ob_internal.pyi +3 -2
  118. metaflow-stubs/packaging_sys/__init__.pyi +5 -5
  119. metaflow-stubs/packaging_sys/backend.pyi +4 -4
  120. metaflow-stubs/packaging_sys/distribution_support.pyi +5 -5
  121. metaflow-stubs/packaging_sys/tar_backend.pyi +6 -6
  122. metaflow-stubs/packaging_sys/utils.pyi +2 -2
  123. metaflow-stubs/packaging_sys/v1.pyi +3 -3
  124. metaflow-stubs/parameters.pyi +4 -4
  125. metaflow-stubs/plugins/__init__.pyi +10 -10
  126. metaflow-stubs/plugins/airflow/__init__.pyi +2 -2
  127. metaflow-stubs/plugins/airflow/airflow_utils.pyi +2 -2
  128. metaflow-stubs/plugins/airflow/exception.pyi +2 -2
  129. metaflow-stubs/plugins/airflow/sensors/__init__.pyi +2 -2
  130. metaflow-stubs/plugins/airflow/sensors/base_sensor.pyi +2 -2
  131. metaflow-stubs/plugins/airflow/sensors/external_task_sensor.pyi +2 -2
  132. metaflow-stubs/plugins/airflow/sensors/s3_sensor.pyi +2 -2
  133. metaflow-stubs/plugins/argo/__init__.pyi +2 -2
  134. metaflow-stubs/plugins/argo/argo_client.pyi +6 -4
  135. metaflow-stubs/plugins/argo/argo_events.pyi +2 -2
  136. metaflow-stubs/plugins/argo/argo_workflows.pyi +10 -3
  137. metaflow-stubs/plugins/argo/argo_workflows_decorator.pyi +2 -2
  138. metaflow-stubs/plugins/argo/argo_workflows_deployer.pyi +5 -5
  139. metaflow-stubs/plugins/argo/argo_workflows_deployer_objects.pyi +3 -3
  140. metaflow-stubs/plugins/argo/exit_hooks.pyi +3 -3
  141. metaflow-stubs/plugins/aws/__init__.pyi +2 -2
  142. metaflow-stubs/plugins/aws/aws_client.pyi +2 -2
  143. metaflow-stubs/plugins/aws/aws_utils.pyi +2 -2
  144. metaflow-stubs/plugins/aws/batch/__init__.pyi +2 -2
  145. metaflow-stubs/plugins/aws/batch/batch.pyi +4 -4
  146. metaflow-stubs/plugins/aws/batch/batch_client.pyi +2 -2
  147. metaflow-stubs/plugins/aws/batch/batch_decorator.pyi +2 -2
  148. metaflow-stubs/plugins/aws/secrets_manager/__init__.pyi +2 -2
  149. metaflow-stubs/plugins/aws/secrets_manager/aws_secrets_manager_secrets_provider.pyi +3 -3
  150. metaflow-stubs/plugins/aws/step_functions/__init__.pyi +2 -2
  151. metaflow-stubs/plugins/aws/step_functions/event_bridge_client.pyi +2 -2
  152. metaflow-stubs/plugins/aws/step_functions/schedule_decorator.pyi +2 -2
  153. metaflow-stubs/plugins/aws/step_functions/step_functions.pyi +3 -3
  154. metaflow-stubs/plugins/aws/step_functions/step_functions_client.pyi +2 -2
  155. metaflow-stubs/plugins/aws/step_functions/step_functions_deployer.pyi +8 -5
  156. metaflow-stubs/plugins/aws/step_functions/step_functions_deployer_objects.pyi +4 -4
  157. metaflow-stubs/plugins/azure/__init__.pyi +2 -2
  158. metaflow-stubs/plugins/azure/azure_credential.pyi +2 -2
  159. metaflow-stubs/plugins/azure/azure_exceptions.pyi +2 -2
  160. metaflow-stubs/plugins/azure/azure_secret_manager_secrets_provider.pyi +3 -3
  161. metaflow-stubs/plugins/azure/azure_utils.pyi +2 -2
  162. metaflow-stubs/plugins/azure/blob_service_client_factory.pyi +2 -2
  163. metaflow-stubs/plugins/azure/includefile_support.pyi +2 -2
  164. metaflow-stubs/plugins/cards/__init__.pyi +2 -2
  165. metaflow-stubs/plugins/cards/card_client.pyi +2 -2
  166. metaflow-stubs/plugins/cards/card_creator.pyi +2 -2
  167. metaflow-stubs/plugins/cards/card_datastore.pyi +2 -2
  168. metaflow-stubs/plugins/cards/card_decorator.pyi +3 -3
  169. metaflow-stubs/plugins/cards/card_modules/__init__.pyi +2 -2
  170. metaflow-stubs/plugins/cards/card_modules/basic.pyi +3 -3
  171. metaflow-stubs/plugins/cards/card_modules/card.pyi +2 -2
  172. metaflow-stubs/plugins/cards/card_modules/components.pyi +4 -4
  173. metaflow-stubs/plugins/cards/card_modules/convert_to_native_type.pyi +2 -2
  174. metaflow-stubs/plugins/cards/card_modules/renderer_tools.pyi +2 -2
  175. metaflow-stubs/plugins/cards/card_modules/test_cards.pyi +2 -2
  176. metaflow-stubs/plugins/cards/card_resolver.pyi +2 -2
  177. metaflow-stubs/plugins/cards/component_serializer.pyi +2 -2
  178. metaflow-stubs/plugins/cards/exception.pyi +2 -2
  179. metaflow-stubs/plugins/catch_decorator.pyi +2 -2
  180. metaflow-stubs/plugins/datatools/__init__.pyi +2 -2
  181. metaflow-stubs/plugins/datatools/local.pyi +2 -2
  182. metaflow-stubs/plugins/datatools/s3/__init__.pyi +2 -2
  183. metaflow-stubs/plugins/datatools/s3/s3.pyi +4 -4
  184. metaflow-stubs/plugins/datatools/s3/s3tail.pyi +2 -2
  185. metaflow-stubs/plugins/datatools/s3/s3util.pyi +2 -2
  186. metaflow-stubs/plugins/debug_logger.pyi +2 -2
  187. metaflow-stubs/plugins/debug_monitor.pyi +2 -2
  188. metaflow-stubs/plugins/environment_decorator.pyi +2 -2
  189. metaflow-stubs/plugins/events_decorator.pyi +2 -2
  190. metaflow-stubs/plugins/exit_hook/__init__.pyi +2 -2
  191. metaflow-stubs/plugins/exit_hook/exit_hook_decorator.pyi +2 -2
  192. metaflow-stubs/plugins/frameworks/__init__.pyi +2 -2
  193. metaflow-stubs/plugins/frameworks/pytorch.pyi +2 -2
  194. metaflow-stubs/plugins/gcp/__init__.pyi +2 -2
  195. metaflow-stubs/plugins/gcp/gcp_secret_manager_secrets_provider.pyi +3 -3
  196. metaflow-stubs/plugins/gcp/gs_exceptions.pyi +2 -2
  197. metaflow-stubs/plugins/gcp/gs_storage_client_factory.pyi +2 -2
  198. metaflow-stubs/plugins/gcp/gs_utils.pyi +2 -2
  199. metaflow-stubs/plugins/gcp/includefile_support.pyi +2 -2
  200. metaflow-stubs/plugins/kubernetes/__init__.pyi +2 -2
  201. metaflow-stubs/plugins/kubernetes/kube_utils.pyi +3 -3
  202. metaflow-stubs/plugins/kubernetes/kubernetes.pyi +2 -2
  203. metaflow-stubs/plugins/kubernetes/kubernetes_client.pyi +2 -2
  204. metaflow-stubs/plugins/kubernetes/kubernetes_decorator.pyi +2 -2
  205. metaflow-stubs/plugins/kubernetes/kubernetes_jobsets.pyi +2 -2
  206. metaflow-stubs/plugins/kubernetes/spot_monitor_sidecar.pyi +2 -2
  207. metaflow-stubs/plugins/ollama/__init__.pyi +3 -3
  208. metaflow-stubs/plugins/optuna/__init__.pyi +2 -2
  209. metaflow-stubs/plugins/parallel_decorator.pyi +2 -2
  210. metaflow-stubs/plugins/perimeters.pyi +2 -2
  211. metaflow-stubs/plugins/project_decorator.pyi +2 -2
  212. metaflow-stubs/plugins/pypi/__init__.pyi +2 -2
  213. metaflow-stubs/plugins/pypi/conda_decorator.pyi +2 -2
  214. metaflow-stubs/plugins/pypi/conda_environment.pyi +5 -5
  215. metaflow-stubs/plugins/pypi/parsers.pyi +2 -2
  216. metaflow-stubs/plugins/pypi/pypi_decorator.pyi +2 -2
  217. metaflow-stubs/plugins/pypi/pypi_environment.pyi +2 -2
  218. metaflow-stubs/plugins/pypi/utils.pyi +2 -2
  219. metaflow-stubs/plugins/resources_decorator.pyi +2 -2
  220. metaflow-stubs/plugins/retry_decorator.pyi +2 -2
  221. metaflow-stubs/plugins/secrets/__init__.pyi +3 -3
  222. metaflow-stubs/plugins/secrets/inline_secrets_provider.pyi +3 -3
  223. metaflow-stubs/plugins/secrets/secrets_decorator.pyi +2 -2
  224. metaflow-stubs/plugins/secrets/secrets_func.pyi +2 -2
  225. metaflow-stubs/plugins/secrets/secrets_spec.pyi +2 -2
  226. metaflow-stubs/plugins/secrets/utils.pyi +2 -2
  227. metaflow-stubs/plugins/snowflake/__init__.pyi +2 -2
  228. metaflow-stubs/plugins/storage_executor.pyi +2 -2
  229. metaflow-stubs/plugins/test_unbounded_foreach_decorator.pyi +3 -3
  230. metaflow-stubs/plugins/timeout_decorator.pyi +2 -2
  231. metaflow-stubs/plugins/torchtune/__init__.pyi +2 -2
  232. metaflow-stubs/plugins/uv/__init__.pyi +2 -2
  233. metaflow-stubs/plugins/uv/uv_environment.pyi +3 -3
  234. metaflow-stubs/profilers/__init__.pyi +2 -2
  235. metaflow-stubs/pylint_wrapper.pyi +2 -2
  236. metaflow-stubs/runner/__init__.pyi +2 -2
  237. metaflow-stubs/runner/deployer.pyi +7 -7
  238. metaflow-stubs/runner/deployer_impl.pyi +3 -3
  239. metaflow-stubs/runner/metaflow_runner.pyi +4 -4
  240. metaflow-stubs/runner/nbdeploy.pyi +2 -2
  241. metaflow-stubs/runner/nbrun.pyi +2 -2
  242. metaflow-stubs/runner/subprocess_manager.pyi +2 -2
  243. metaflow-stubs/runner/utils.pyi +3 -3
  244. metaflow-stubs/system/__init__.pyi +2 -2
  245. metaflow-stubs/system/system_logger.pyi +3 -3
  246. metaflow-stubs/system/system_monitor.pyi +2 -2
  247. metaflow-stubs/tagging_util.pyi +2 -2
  248. metaflow-stubs/tuple_util.pyi +2 -2
  249. metaflow-stubs/user_configs/__init__.pyi +2 -2
  250. metaflow-stubs/user_configs/config_options.pyi +4 -4
  251. metaflow-stubs/user_configs/config_parameters.pyi +6 -6
  252. metaflow-stubs/user_decorators/__init__.pyi +2 -2
  253. metaflow-stubs/user_decorators/common.pyi +2 -2
  254. metaflow-stubs/user_decorators/mutable_flow.pyi +5 -5
  255. metaflow-stubs/user_decorators/mutable_step.pyi +6 -6
  256. metaflow-stubs/user_decorators/user_flow_decorator.pyi +4 -4
  257. metaflow-stubs/user_decorators/user_step_decorator.pyi +6 -6
  258. {ob_metaflow_stubs-6.0.10.0.dist-info → ob_metaflow_stubs-6.0.10.2rc0.dist-info}/METADATA +1 -1
  259. ob_metaflow_stubs-6.0.10.2rc0.dist-info/RECORD +262 -0
  260. ob_metaflow_stubs-6.0.10.0.dist-info/RECORD +0 -262
  261. {ob_metaflow_stubs-6.0.10.0.dist-info → ob_metaflow_stubs-6.0.10.2rc0.dist-info}/WHEEL +0 -0
  262. {ob_metaflow_stubs-6.0.10.0.dist-info → ob_metaflow_stubs-6.0.10.2rc0.dist-info}/top_level.txt +0 -0
@@ -1,7 +1,7 @@
1
1
  ######################################################################################################
2
2
  # Auto-generated Metaflow stub file #
3
- # MF version: 2.18.2.1+obcheckpoint(0.2.4);ob(v1) #
4
- # Generated on 2025-09-08T21:00:14.553698 #
3
+ # MF version: 2.18.3.2+obcheckpoint(0.2.4);ob(v1) #
4
+ # Generated on 2025-09-09T23:55:12.839647 #
5
5
  ######################################################################################################
6
6
 
7
7
  from __future__ import annotations
@@ -39,10 +39,10 @@ from .user_decorators.user_step_decorator import UserStepDecorator as UserStepDe
39
39
  from .user_decorators.user_step_decorator import StepMutator as StepMutator
40
40
  from .user_decorators.user_step_decorator import user_step_decorator as user_step_decorator
41
41
  from .user_decorators.user_flow_decorator import FlowMutator as FlowMutator
42
+ from . import tuple_util as tuple_util
42
43
  from . import cards as cards
43
44
  from . import metaflow_git as metaflow_git
44
45
  from . import events as events
45
- from . import tuple_util as tuple_util
46
46
  from . import runner as runner
47
47
  from . import plugins as plugins
48
48
  from .mf_extensions.outerbounds.toplevel.global_aliases_for_metaflow_package import S3 as S3
@@ -83,6 +83,7 @@ from .mf_extensions.outerbounds.plugins.checkpoint_datastores.nebius import nebi
83
83
  from .mf_extensions.outerbounds.plugins.checkpoint_datastores.coreweave import coreweave_checkpoints as coreweave_checkpoints
84
84
  from .mf_extensions.outerbounds.plugins.aws.assume_role_decorator import assume_role as assume_role
85
85
  from .mf_extensions.outerbounds.plugins.apps.core.deployer import AppDeployer as AppDeployer
86
+ from .mf_extensions.outerbounds.plugins.apps.core.deployer import DeployedApp as DeployedApp
86
87
  from . import system as system
87
88
  from . import cli_components as cli_components
88
89
  from . import pylint_wrapper as pylint_wrapper
@@ -168,596 +169,550 @@ def step(f: typing.Union[typing.Callable[[FlowSpecDerived], None], typing.Callab
168
169
  ...
169
170
 
170
171
  @typing.overload
171
- def resources(*, cpu: int = 1, gpu: typing.Optional[int] = None, disk: typing.Optional[int] = None, memory: int = 4096, shared_memory: typing.Optional[int] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
172
+ def environment(*, vars: typing.Dict[str, str] = {}) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
172
173
  """
173
- Specifies the resources needed when executing this step.
174
-
175
- Use `@resources` to specify the resource requirements
176
- independently of the specific compute layer (`@batch`, `@kubernetes`).
177
-
178
- You can choose the compute layer on the command line by executing e.g.
179
- ```
180
- python myflow.py run --with batch
181
- ```
182
- or
183
- ```
184
- python myflow.py run --with kubernetes
185
- ```
186
- which executes the flow on the desired system using the
187
- requirements specified in `@resources`.
174
+ Specifies environment variables to be set prior to the execution of a step.
188
175
 
189
176
 
190
177
  Parameters
191
178
  ----------
192
- cpu : int, default 1
193
- Number of CPUs required for this step.
194
- gpu : int, optional, default None
195
- Number of GPUs required for this step.
196
- disk : int, optional, default None
197
- Disk size (in MB) required for this step. Only applies on Kubernetes.
198
- memory : int, default 4096
199
- Memory size (in MB) required for this step.
200
- shared_memory : int, optional, default None
201
- The value for the size (in MiB) of the /dev/shm volume for this step.
202
- This parameter maps to the `--shm-size` option in Docker.
179
+ vars : Dict[str, str], default {}
180
+ Dictionary of environment variables to set.
203
181
  """
204
182
  ...
205
183
 
206
184
  @typing.overload
207
- def resources(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
185
+ def environment(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
208
186
  ...
209
187
 
210
188
  @typing.overload
211
- def resources(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
189
+ def environment(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
212
190
  ...
213
191
 
214
- def resources(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, cpu: int = 1, gpu: typing.Optional[int] = None, disk: typing.Optional[int] = None, memory: int = 4096, shared_memory: typing.Optional[int] = None):
192
+ def environment(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, vars: typing.Dict[str, str] = {}):
215
193
  """
216
- Specifies the resources needed when executing this step.
217
-
218
- Use `@resources` to specify the resource requirements
219
- independently of the specific compute layer (`@batch`, `@kubernetes`).
220
-
221
- You can choose the compute layer on the command line by executing e.g.
222
- ```
223
- python myflow.py run --with batch
224
- ```
225
- or
226
- ```
227
- python myflow.py run --with kubernetes
228
- ```
229
- which executes the flow on the desired system using the
230
- requirements specified in `@resources`.
194
+ Specifies environment variables to be set prior to the execution of a step.
231
195
 
232
196
 
233
197
  Parameters
234
198
  ----------
235
- cpu : int, default 1
236
- Number of CPUs required for this step.
237
- gpu : int, optional, default None
238
- Number of GPUs required for this step.
239
- disk : int, optional, default None
240
- Disk size (in MB) required for this step. Only applies on Kubernetes.
241
- memory : int, default 4096
242
- Memory size (in MB) required for this step.
243
- shared_memory : int, optional, default None
244
- The value for the size (in MiB) of the /dev/shm volume for this step.
245
- This parameter maps to the `--shm-size` option in Docker.
246
- """
247
- ...
248
-
249
- @typing.overload
250
- def test_append_card(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
251
- """
252
- A simple decorator that demonstrates using CardDecoratorInjector
253
- to inject a card and render simple markdown content.
199
+ vars : Dict[str, str], default {}
200
+ Dictionary of environment variables to set.
254
201
  """
255
202
  ...
256
203
 
257
- @typing.overload
258
- def test_append_card(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
259
- ...
260
-
261
- def test_append_card(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
204
+ def nvidia(*, gpu: int, gpu_type: str, queue_timeout: int) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
262
205
  """
263
- A simple decorator that demonstrates using CardDecoratorInjector
264
- to inject a card and render simple markdown content.
206
+ Specifies that this step should execute on DGX cloud.
207
+
208
+
209
+ Parameters
210
+ ----------
211
+ gpu : int
212
+ Number of GPUs to use.
213
+ gpu_type : str
214
+ Type of Nvidia GPU to use.
215
+ queue_timeout : int
216
+ Time to keep the job in NVCF's queue.
265
217
  """
266
218
  ...
267
219
 
268
- @typing.overload
269
- def environment(*, vars: typing.Dict[str, str] = {}) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
220
+ def nvct(*, gpu: int, gpu_type: str) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
270
221
  """
271
- Specifies environment variables to be set prior to the execution of a step.
222
+ Specifies that this step should execute on DGX cloud.
272
223
 
273
224
 
274
225
  Parameters
275
226
  ----------
276
- vars : Dict[str, str], default {}
277
- Dictionary of environment variables to set.
227
+ gpu : int
228
+ Number of GPUs to use.
229
+ gpu_type : str
230
+ Type of Nvidia GPU to use.
278
231
  """
279
232
  ...
280
233
 
281
234
  @typing.overload
282
- def environment(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
235
+ def fast_bakery_internal(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
236
+ """
237
+ Internal decorator to support Fast bakery
238
+ """
283
239
  ...
284
240
 
285
241
  @typing.overload
286
- def environment(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
242
+ def fast_bakery_internal(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
287
243
  ...
288
244
 
289
- def environment(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, vars: typing.Dict[str, str] = {}):
245
+ def fast_bakery_internal(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
290
246
  """
291
- Specifies environment variables to be set prior to the execution of a step.
247
+ Internal decorator to support Fast bakery
248
+ """
249
+ ...
250
+
251
+ def s3_proxy(*, integration_name: typing.Optional[str] = None, write_mode: typing.Optional[str] = None, debug: typing.Optional[bool] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
252
+ """
253
+ S3 Proxy decorator for routing S3 requests through a local proxy service.
292
254
 
293
255
 
294
256
  Parameters
295
257
  ----------
296
- vars : Dict[str, str], default {}
297
- Dictionary of environment variables to set.
258
+ integration_name : str, optional
259
+ Name of the S3 proxy integration. If not specified, will use the only
260
+ available S3 proxy integration in the namespace (fails if multiple exist).
261
+ write_mode : str, optional
262
+ The desired behavior during write operations to target (origin) S3 bucket.
263
+ allowed options are:
264
+ "origin-and-cache" -> write to both the target S3 bucket and local object
265
+ storage
266
+ "origin" -> only write to the target S3 bucket
267
+ "cache" -> only write to the object storage service used for caching
268
+ debug : bool, optional
269
+ Enable debug logging for proxy operations.
298
270
  """
299
271
  ...
300
272
 
301
273
  @typing.overload
302
- def conda(*, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
274
+ def timeout(*, seconds: int = 0, minutes: int = 0, hours: int = 0) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
303
275
  """
304
- Specifies the Conda environment for the step.
276
+ Specifies a timeout for your step.
305
277
 
306
- Information in this decorator will augment any
307
- attributes set in the `@conda_base` flow-level decorator. Hence,
308
- you can use `@conda_base` to set packages required by all
309
- steps and use `@conda` to specify step-specific overrides.
278
+ This decorator is useful if this step may hang indefinitely.
279
+
280
+ This can be used in conjunction with the `@retry` decorator as well as the `@catch` decorator.
281
+ A timeout is considered to be an exception thrown by the step. It will cause the step to be
282
+ retried if needed and the exception will be caught by the `@catch` decorator, if present.
283
+
284
+ Note that all the values specified in parameters are added together so if you specify
285
+ 60 seconds and 1 hour, the decorator will have an effective timeout of 1 hour and 1 minute.
310
286
 
311
287
 
312
288
  Parameters
313
289
  ----------
314
- packages : Dict[str, str], default {}
315
- Packages to use for this step. The key is the name of the package
316
- and the value is the version to use.
317
- libraries : Dict[str, str], default {}
318
- Supported for backward compatibility. When used with packages, packages will take precedence.
319
- python : str, optional, default None
320
- Version of Python to use, e.g. '3.7.4'. A default value of None implies
321
- that the version used will correspond to the version of the Python interpreter used to start the run.
322
- disabled : bool, default False
323
- If set to True, disables @conda.
290
+ seconds : int, default 0
291
+ Number of seconds to wait prior to timing out.
292
+ minutes : int, default 0
293
+ Number of minutes to wait prior to timing out.
294
+ hours : int, default 0
295
+ Number of hours to wait prior to timing out.
324
296
  """
325
297
  ...
326
298
 
327
299
  @typing.overload
328
- def conda(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
300
+ def timeout(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
329
301
  ...
330
302
 
331
303
  @typing.overload
332
- def conda(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
304
+ def timeout(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
333
305
  ...
334
306
 
335
- def conda(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False):
307
+ def timeout(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, seconds: int = 0, minutes: int = 0, hours: int = 0):
336
308
  """
337
- Specifies the Conda environment for the step.
309
+ Specifies a timeout for your step.
338
310
 
339
- Information in this decorator will augment any
340
- attributes set in the `@conda_base` flow-level decorator. Hence,
341
- you can use `@conda_base` to set packages required by all
342
- steps and use `@conda` to specify step-specific overrides.
311
+ This decorator is useful if this step may hang indefinitely.
312
+
313
+ This can be used in conjunction with the `@retry` decorator as well as the `@catch` decorator.
314
+ A timeout is considered to be an exception thrown by the step. It will cause the step to be
315
+ retried if needed and the exception will be caught by the `@catch` decorator, if present.
316
+
317
+ Note that all the values specified in parameters are added together so if you specify
318
+ 60 seconds and 1 hour, the decorator will have an effective timeout of 1 hour and 1 minute.
343
319
 
344
320
 
345
321
  Parameters
346
322
  ----------
347
- packages : Dict[str, str], default {}
348
- Packages to use for this step. The key is the name of the package
349
- and the value is the version to use.
350
- libraries : Dict[str, str], default {}
351
- Supported for backward compatibility. When used with packages, packages will take precedence.
352
- python : str, optional, default None
353
- Version of Python to use, e.g. '3.7.4'. A default value of None implies
354
- that the version used will correspond to the version of the Python interpreter used to start the run.
355
- disabled : bool, default False
356
- If set to True, disables @conda.
323
+ seconds : int, default 0
324
+ Number of seconds to wait prior to timing out.
325
+ minutes : int, default 0
326
+ Number of minutes to wait prior to timing out.
327
+ hours : int, default 0
328
+ Number of hours to wait prior to timing out.
357
329
  """
358
330
  ...
359
331
 
360
332
  @typing.overload
361
- def retry(*, times: int = 3, minutes_between_retries: int = 2) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
333
+ def secrets(*, sources: typing.List[typing.Union[str, typing.Dict[str, typing.Any]]] = [], role: typing.Optional[str] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
362
334
  """
363
- Specifies the number of times the task corresponding
364
- to a step needs to be retried.
365
-
366
- This decorator is useful for handling transient errors, such as networking issues.
367
- If your task contains operations that can't be retried safely, e.g. database updates,
368
- it is advisable to annotate it with `@retry(times=0)`.
369
-
370
- This can be used in conjunction with the `@catch` decorator. The `@catch`
371
- decorator will execute a no-op task after all retries have been exhausted,
372
- ensuring that the flow execution can continue.
335
+ Specifies secrets to be retrieved and injected as environment variables prior to
336
+ the execution of a step.
373
337
 
374
338
 
375
339
  Parameters
376
340
  ----------
377
- times : int, default 3
378
- Number of times to retry this task.
379
- minutes_between_retries : int, default 2
380
- Number of minutes between retries.
341
+ sources : List[Union[str, Dict[str, Any]]], default: []
342
+ List of secret specs, defining how the secrets are to be retrieved
343
+ role : str, optional, default: None
344
+ Role to use for fetching secrets
381
345
  """
382
346
  ...
383
347
 
384
348
  @typing.overload
385
- def retry(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
349
+ def secrets(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
386
350
  ...
387
351
 
388
352
  @typing.overload
389
- def retry(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
353
+ def secrets(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
390
354
  ...
391
355
 
392
- def retry(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, times: int = 3, minutes_between_retries: int = 2):
356
+ def secrets(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, sources: typing.List[typing.Union[str, typing.Dict[str, typing.Any]]] = [], role: typing.Optional[str] = None):
393
357
  """
394
- Specifies the number of times the task corresponding
395
- to a step needs to be retried.
396
-
397
- This decorator is useful for handling transient errors, such as networking issues.
398
- If your task contains operations that can't be retried safely, e.g. database updates,
399
- it is advisable to annotate it with `@retry(times=0)`.
400
-
401
- This can be used in conjunction with the `@catch` decorator. The `@catch`
402
- decorator will execute a no-op task after all retries have been exhausted,
403
- ensuring that the flow execution can continue.
358
+ Specifies secrets to be retrieved and injected as environment variables prior to
359
+ the execution of a step.
404
360
 
405
361
 
406
362
  Parameters
407
363
  ----------
408
- times : int, default 3
409
- Number of times to retry this task.
410
- minutes_between_retries : int, default 2
411
- Number of minutes between retries.
364
+ sources : List[Union[str, Dict[str, Any]]], default: []
365
+ List of secret specs, defining how the secrets are to be retrieved
366
+ role : str, optional, default: None
367
+ Role to use for fetching secrets
412
368
  """
413
369
  ...
414
370
 
415
- def nvct(*, gpu: int, gpu_type: str) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
371
+ def huggingface_hub(*, temp_dir_root: typing.Optional[str] = None, load: typing.Union[typing.List[str], typing.List[typing.Tuple[typing.Dict, str]], typing.List[typing.Tuple[str, str]], typing.List[typing.Dict], None]) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
416
372
  """
417
- Specifies that this step should execute on DGX cloud.
418
-
373
+ Decorator that helps cache, version and store models/datasets from huggingface hub.
419
374
 
420
- Parameters
421
- ----------
422
- gpu : int
423
- Number of GPUs to use.
424
- gpu_type : str
425
- Type of Nvidia GPU to use.
426
- """
427
- ...
428
-
429
- def s3_proxy(*, integration_name: typing.Optional[str] = None, write_mode: typing.Optional[str] = None, debug: typing.Optional[bool] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
430
- """
431
- S3 Proxy decorator for routing S3 requests through a local proxy service.
375
+ > Examples
432
376
 
377
+ **Usage: creating references of models from huggingface that may be loaded in downstream steps**
378
+ ```python
379
+ @huggingface_hub
380
+ @step
381
+ def pull_model_from_huggingface(self):
382
+ # `current.huggingface_hub.snapshot_download` downloads the model from the Hugging Face Hub
383
+ # and saves it in the backend storage based on the model's `repo_id`. If there exists a model
384
+ # with the same `repo_id` in the backend storage, it will not download the model again. The return
385
+ # value of the function is a reference to the model in the backend storage.
386
+ # This reference can be used to load the model in the subsequent steps via `@model(load=["llama_model"])`
433
387
 
434
- Parameters
435
- ----------
436
- integration_name : str, optional
437
- Name of the S3 proxy integration. If not specified, will use the only
438
- available S3 proxy integration in the namespace (fails if multiple exist).
439
- write_mode : str, optional
440
- The desired behavior during write operations to target (origin) S3 bucket.
441
- allowed options are:
442
- "origin-and-cache" -> write to both the target S3 bucket and local object
443
- storage
444
- "origin" -> only write to the target S3 bucket
445
- "cache" -> only write to the object storage service used for caching
446
- debug : bool, optional
447
- Enable debug logging for proxy operations.
448
- """
449
- ...
450
-
451
- def ollama(*, models: list, backend: str, force_pull: bool, cache_update_policy: str, force_cache_update: bool, debug: bool, circuit_breaker_config: dict, timeout_config: dict) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
452
- """
453
- This decorator is used to run Ollama APIs as Metaflow task sidecars.
388
+ self.model_id = "mistralai/Mistral-7B-Instruct-v0.1"
389
+ self.llama_model = current.huggingface_hub.snapshot_download(
390
+ repo_id=self.model_id,
391
+ allow_patterns=["*.safetensors", "*.json", "tokenizer.*"],
392
+ )
393
+ self.next(self.train)
394
+ ```
454
395
 
455
- User code call
456
- --------------
457
- @ollama(
458
- models=[...],
459
- ...
460
- )
396
+ **Usage: loading models directly from huggingface hub or from cache (from metaflow's datastore)**
397
+ ```python
398
+ @huggingface_hub(load=["mistralai/Mistral-7B-Instruct-v0.1"])
399
+ @step
400
+ def pull_model_from_huggingface(self):
401
+ path_to_model = current.huggingface_hub.loaded["mistralai/Mistral-7B-Instruct-v0.1"]
402
+ ```
461
403
 
462
- Valid backend options
463
- ---------------------
464
- - 'local': Run as a separate process on the local task machine.
465
- - (TODO) 'managed': Outerbounds hosts and selects compute provider.
466
- - (TODO) 'remote': Spin up separate instance to serve Ollama models.
404
+ ```python
405
+ @huggingface_hub(load=[("mistralai/Mistral-7B-Instruct-v0.1", "/my-directory"), ("myorg/mistral-lora, "/my-lora-directory")])
406
+ @step
407
+ def finetune_model(self):
408
+ path_to_model = current.huggingface_hub.loaded["mistralai/Mistral-7B-Instruct-v0.1"]
409
+ # path_to_model will be /my-directory
410
+ ```
467
411
 
468
- Valid model options
469
- -------------------
470
- Any model here https://ollama.com/search, e.g. 'llama3.2', 'llama3.3'
412
+ ```python
413
+ # Takes all the arguments passed to `snapshot_download`
414
+ # except for `local_dir`
415
+ @huggingface_hub(load=[
416
+ {
417
+ "repo_id": "mistralai/Mistral-7B-Instruct-v0.1",
418
+ },
419
+ {
420
+ "repo_id": "myorg/mistral-lora",
421
+ "repo_type": "model",
422
+ },
423
+ ])
424
+ @step
425
+ def finetune_model(self):
426
+ path_to_model = current.huggingface_hub.loaded["mistralai/Mistral-7B-Instruct-v0.1"]
427
+ # path_to_model will be /my-directory
428
+ ```
471
429
 
472
430
 
473
431
  Parameters
474
432
  ----------
475
- models: list[str]
476
- List of Ollama containers running models in sidecars.
477
- backend: str
478
- Determines where and how to run the Ollama process.
479
- force_pull: bool
480
- Whether to run `ollama pull` no matter what, or first check the remote cache in Metaflow datastore for this model key.
481
- cache_update_policy: str
482
- Cache update policy: "auto", "force", or "never".
483
- force_cache_update: bool
484
- Simple override for "force" cache update policy.
485
- debug: bool
486
- Whether to turn on verbose debugging logs.
487
- circuit_breaker_config: dict
488
- Configuration for circuit breaker protection. Keys: failure_threshold, recovery_timeout, reset_timeout.
489
- timeout_config: dict
490
- Configuration for various operation timeouts. Keys: pull, stop, health_check, install, server_startup.
433
+ temp_dir_root : str, optional
434
+ The root directory that will hold the temporary directory where objects will be downloaded.
435
+
436
+ load: Union[List[str], List[Tuple[Dict, str]], List[Tuple[str, str]], List[Dict], None]
437
+ The list of repos (models/datasets) to load.
438
+
439
+ Loaded repos can be accessed via `current.huggingface_hub.loaded`. If load is set, then the following happens:
440
+
441
+ - If repo (model/dataset) is not found in the datastore:
442
+ - Downloads the repo from Hugging Face Hub to a temporary directory (or uses specified path) for local access
443
+ - Stores it in Metaflow's datastore (s3/gcs/azure etc.) with a unique name based on repo_type/repo_id
444
+ - All HF models loaded for a `@step` will be cached separately under flow/step/namespace.
445
+
446
+ - If repo is found in the datastore:
447
+ - Loads it directly from datastore to local path (can be temporary directory or specified path)
491
448
  """
492
449
  ...
493
450
 
494
451
  @typing.overload
495
- def secrets(*, sources: typing.List[typing.Union[str, typing.Dict[str, typing.Any]]] = [], role: typing.Optional[str] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
452
+ def coreweave_s3_proxy(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
496
453
  """
497
- Specifies secrets to be retrieved and injected as environment variables prior to
498
- the execution of a step.
499
-
500
-
501
- Parameters
502
- ----------
503
- sources : List[Union[str, Dict[str, Any]]], default: []
504
- List of secret specs, defining how the secrets are to be retrieved
505
- role : str, optional, default: None
506
- Role to use for fetching secrets
454
+ CoreWeave-specific S3 Proxy decorator for routing S3 requests through a local proxy service.
455
+ It exists to make it easier for users to know that this decorator should only be used with
456
+ a Neo Cloud like CoreWeave.
507
457
  """
508
458
  ...
509
459
 
510
460
  @typing.overload
511
- def secrets(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
461
+ def coreweave_s3_proxy(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
512
462
  ...
513
463
 
514
- @typing.overload
515
- def secrets(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
464
+ def coreweave_s3_proxy(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
465
+ """
466
+ CoreWeave-specific S3 Proxy decorator for routing S3 requests through a local proxy service.
467
+ It exists to make it easier for users to know that this decorator should only be used with
468
+ a Neo Cloud like CoreWeave.
469
+ """
516
470
  ...
517
471
 
518
- def secrets(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, sources: typing.List[typing.Union[str, typing.Dict[str, typing.Any]]] = [], role: typing.Optional[str] = None):
472
+ @typing.overload
473
+ def card(*, type: str = 'default', id: typing.Optional[str] = None, options: typing.Dict[str, typing.Any] = {}, timeout: int = 45) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
519
474
  """
520
- Specifies secrets to be retrieved and injected as environment variables prior to
521
- the execution of a step.
475
+ Creates a human-readable report, a Metaflow Card, after this step completes.
476
+
477
+ Note that you may add multiple `@card` decorators in a step with different parameters.
522
478
 
523
479
 
524
480
  Parameters
525
481
  ----------
526
- sources : List[Union[str, Dict[str, Any]]], default: []
527
- List of secret specs, defining how the secrets are to be retrieved
528
- role : str, optional, default: None
529
- Role to use for fetching secrets
482
+ type : str, default 'default'
483
+ Card type.
484
+ id : str, optional, default None
485
+ If multiple cards are present, use this id to identify this card.
486
+ options : Dict[str, Any], default {}
487
+ Options passed to the card. The contents depend on the card type.
488
+ timeout : int, default 45
489
+ Interrupt reporting if it takes more than this many seconds.
530
490
  """
531
491
  ...
532
492
 
533
- def nvidia(*, gpu: int, gpu_type: str, queue_timeout: int) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
493
+ @typing.overload
494
+ def card(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
495
+ ...
496
+
497
+ @typing.overload
498
+ def card(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
499
+ ...
500
+
501
+ def card(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, type: str = 'default', id: typing.Optional[str] = None, options: typing.Dict[str, typing.Any] = {}, timeout: int = 45):
534
502
  """
535
- Specifies that this step should execute on DGX cloud.
503
+ Creates a human-readable report, a Metaflow Card, after this step completes.
504
+
505
+ Note that you may add multiple `@card` decorators in a step with different parameters.
536
506
 
537
507
 
538
508
  Parameters
539
509
  ----------
540
- gpu : int
541
- Number of GPUs to use.
542
- gpu_type : str
543
- Type of Nvidia GPU to use.
544
- queue_timeout : int
545
- Time to keep the job in NVCF's queue.
510
+ type : str, default 'default'
511
+ Card type.
512
+ id : str, optional, default None
513
+ If multiple cards are present, use this id to identify this card.
514
+ options : Dict[str, Any], default {}
515
+ Options passed to the card. The contents depend on the card type.
516
+ timeout : int, default 45
517
+ Interrupt reporting if it takes more than this many seconds.
546
518
  """
547
519
  ...
548
520
 
549
521
  @typing.overload
550
- def checkpoint(*, load_policy: str = 'fresh', temp_dir_root: str = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
522
+ def model(*, load: typing.Union[typing.List[str], str, typing.List[typing.Tuple[str, typing.Optional[str]]]] = None, temp_dir_root: str = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
551
523
  """
552
- Enables checkpointing for a step.
524
+ Enables loading / saving of models within a step.
553
525
 
554
526
  > Examples
555
-
556
- - Saving Checkpoints
557
-
527
+ - Saving Models
558
528
  ```python
559
- @checkpoint
529
+ @model
560
530
  @step
561
531
  def train(self):
562
- model = create_model(self.parameters, checkpoint_path = None)
563
- for i in range(self.epochs):
564
- # some training logic
565
- loss = model.train(self.dataset)
566
- if i % 10 == 0:
567
- model.save(
568
- current.checkpoint.directory,
569
- )
570
- # saves the contents of the `current.checkpoint.directory` as a checkpoint
571
- # and returns a reference dictionary to the checkpoint saved in the datastore
572
- self.latest_checkpoint = current.checkpoint.save(
573
- name="epoch_checkpoint",
574
- metadata={
575
- "epoch": i,
576
- "loss": loss,
577
- }
578
- )
579
- ```
532
+ # current.model.save returns a dictionary reference to the model saved
533
+ self.my_model = current.model.save(
534
+ path_to_my_model,
535
+ label="my_model",
536
+ metadata={
537
+ "epochs": 10,
538
+ "batch-size": 32,
539
+ "learning-rate": 0.001,
540
+ }
541
+ )
542
+ self.next(self.test)
580
543
 
581
- - Using Loaded Checkpoints
544
+ @model(load="my_model")
545
+ @step
546
+ def test(self):
547
+ # `current.model.loaded` returns a dictionary of the loaded models
548
+ # where the key is the name of the artifact and the value is the path to the model
549
+ print(os.listdir(current.model.loaded["my_model"]))
550
+ self.next(self.end)
551
+ ```
582
552
 
553
+ - Loading models
583
554
  ```python
584
- @retry(times=3)
585
- @checkpoint
586
555
  @step
587
556
  def train(self):
588
- # Assume that the task has restarted and the previous attempt of the task
589
- # saved a checkpoint
590
- checkpoint_path = None
591
- if current.checkpoint.is_loaded: # Check if a checkpoint is loaded
592
- print("Loaded checkpoint from the previous attempt")
593
- checkpoint_path = current.checkpoint.directory
594
-
595
- model = create_model(self.parameters, checkpoint_path = checkpoint_path)
596
- for i in range(self.epochs):
597
- ...
557
+ # current.model.load returns the path to the model loaded
558
+ checkpoint_path = current.model.load(
559
+ self.checkpoint_key,
560
+ )
561
+ model_path = current.model.load(
562
+ self.model,
563
+ )
564
+ self.next(self.test)
598
565
  ```
599
566
 
600
567
 
601
568
  Parameters
602
569
  ----------
603
- load_policy : str, default: "fresh"
604
- The policy for loading the checkpoint. The following policies are supported:
605
- - "eager": Loads the the latest available checkpoint within the namespace.
606
- With this mode, the latest checkpoint written by any previous task (can be even a different run) of the step
607
- will be loaded at the start of the task.
608
- - "none": Do not load any checkpoint
609
- - "fresh": Loads the lastest checkpoint created within the running Task.
610
- This mode helps loading checkpoints across various retry attempts of the same task.
611
- With this mode, no checkpoint will be loaded at the start of a task but any checkpoints
612
- created within the task will be loaded when the task is retries execution on failure.
570
+ load : Union[List[str],str,List[Tuple[str,Union[str,None]]]], default: None
571
+ Artifact name/s referencing the models/checkpoints to load. Artifact names refer to the names of the instance variables set to `self`.
572
+ These artifact names give to `load` be reference objects or reference `key` string's from objects created by `current.checkpoint` / `current.model` / `current.huggingface_hub`.
573
+ If a list of tuples is provided, the first element is the artifact name and the second element is the path the artifact needs be unpacked on
574
+ the local filesystem. If the second element is None, the artifact will be unpacked in the current working directory.
575
+ If a string is provided, then the artifact corresponding to that name will be loaded in the current working directory.
613
576
 
614
577
  temp_dir_root : str, default: None
615
- The root directory under which `current.checkpoint.directory` will be created.
578
+ The root directory under which `current.model.loaded` will store loaded models
616
579
  """
617
580
  ...
618
581
 
619
582
  @typing.overload
620
- def checkpoint(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
583
+ def model(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
621
584
  ...
622
585
 
623
586
  @typing.overload
624
- def checkpoint(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
587
+ def model(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
625
588
  ...
626
589
 
627
- def checkpoint(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, load_policy: str = 'fresh', temp_dir_root: str = None):
590
+ def model(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, load: typing.Union[typing.List[str], str, typing.List[typing.Tuple[str, typing.Optional[str]]]] = None, temp_dir_root: str = None):
628
591
  """
629
- Enables checkpointing for a step.
592
+ Enables loading / saving of models within a step.
630
593
 
631
594
  > Examples
632
-
633
- - Saving Checkpoints
634
-
595
+ - Saving Models
635
596
  ```python
636
- @checkpoint
597
+ @model
637
598
  @step
638
599
  def train(self):
639
- model = create_model(self.parameters, checkpoint_path = None)
640
- for i in range(self.epochs):
641
- # some training logic
642
- loss = model.train(self.dataset)
643
- if i % 10 == 0:
644
- model.save(
645
- current.checkpoint.directory,
646
- )
647
- # saves the contents of the `current.checkpoint.directory` as a checkpoint
648
- # and returns a reference dictionary to the checkpoint saved in the datastore
649
- self.latest_checkpoint = current.checkpoint.save(
650
- name="epoch_checkpoint",
651
- metadata={
652
- "epoch": i,
653
- "loss": loss,
654
- }
655
- )
656
- ```
600
+ # current.model.save returns a dictionary reference to the model saved
601
+ self.my_model = current.model.save(
602
+ path_to_my_model,
603
+ label="my_model",
604
+ metadata={
605
+ "epochs": 10,
606
+ "batch-size": 32,
607
+ "learning-rate": 0.001,
608
+ }
609
+ )
610
+ self.next(self.test)
657
611
 
658
- - Using Loaded Checkpoints
612
+ @model(load="my_model")
613
+ @step
614
+ def test(self):
615
+ # `current.model.loaded` returns a dictionary of the loaded models
616
+ # where the key is the name of the artifact and the value is the path to the model
617
+ print(os.listdir(current.model.loaded["my_model"]))
618
+ self.next(self.end)
619
+ ```
659
620
 
621
+ - Loading models
660
622
  ```python
661
- @retry(times=3)
662
- @checkpoint
663
623
  @step
664
624
  def train(self):
665
- # Assume that the task has restarted and the previous attempt of the task
666
- # saved a checkpoint
667
- checkpoint_path = None
668
- if current.checkpoint.is_loaded: # Check if a checkpoint is loaded
669
- print("Loaded checkpoint from the previous attempt")
670
- checkpoint_path = current.checkpoint.directory
671
-
672
- model = create_model(self.parameters, checkpoint_path = checkpoint_path)
673
- for i in range(self.epochs):
674
- ...
625
+ # current.model.load returns the path to the model loaded
626
+ checkpoint_path = current.model.load(
627
+ self.checkpoint_key,
628
+ )
629
+ model_path = current.model.load(
630
+ self.model,
631
+ )
632
+ self.next(self.test)
675
633
  ```
676
634
 
677
635
 
678
636
  Parameters
679
637
  ----------
680
- load_policy : str, default: "fresh"
681
- The policy for loading the checkpoint. The following policies are supported:
682
- - "eager": Loads the the latest available checkpoint within the namespace.
683
- With this mode, the latest checkpoint written by any previous task (can be even a different run) of the step
684
- will be loaded at the start of the task.
685
- - "none": Do not load any checkpoint
686
- - "fresh": Loads the lastest checkpoint created within the running Task.
687
- This mode helps loading checkpoints across various retry attempts of the same task.
688
- With this mode, no checkpoint will be loaded at the start of a task but any checkpoints
689
- created within the task will be loaded when the task is retries execution on failure.
638
+ load : Union[List[str],str,List[Tuple[str,Union[str,None]]]], default: None
639
+ Artifact name/s referencing the models/checkpoints to load. Artifact names refer to the names of the instance variables set to `self`.
640
+ These artifact names give to `load` be reference objects or reference `key` string's from objects created by `current.checkpoint` / `current.model` / `current.huggingface_hub`.
641
+ If a list of tuples is provided, the first element is the artifact name and the second element is the path the artifact needs be unpacked on
642
+ the local filesystem. If the second element is None, the artifact will be unpacked in the current working directory.
643
+ If a string is provided, then the artifact corresponding to that name will be loaded in the current working directory.
690
644
 
691
645
  temp_dir_root : str, default: None
692
- The root directory under which `current.checkpoint.directory` will be created.
693
- """
694
- ...
695
-
696
- @typing.overload
697
- def fast_bakery_internal(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
698
- """
699
- Internal decorator to support Fast bakery
700
- """
701
- ...
702
-
703
- @typing.overload
704
- def fast_bakery_internal(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
705
- ...
706
-
707
- def fast_bakery_internal(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
708
- """
709
- Internal decorator to support Fast bakery
646
+ The root directory under which `current.model.loaded` will store loaded models
710
647
  """
711
648
  ...
712
649
 
713
- @typing.overload
714
- def pypi(*, packages: typing.Dict[str, str] = {}, python: typing.Optional[str] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
650
+ def vllm(*, model: str, backend: str, openai_api_server: bool, debug: bool, card_refresh_interval: int, max_retries: int, retry_alert_frequency: int, engine_args: dict) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
715
651
  """
716
- Specifies the PyPI packages for the step.
652
+ This decorator is used to run vllm APIs as Metaflow task sidecars.
717
653
 
718
- Information in this decorator will augment any
719
- attributes set in the `@pyi_base` flow-level decorator. Hence,
720
- you can use `@pypi_base` to set packages required by all
721
- steps and use `@pypi` to specify step-specific overrides.
654
+ User code call
655
+ --------------
656
+ @vllm(
657
+ model="...",
658
+ ...
659
+ )
660
+
661
+ Valid backend options
662
+ ---------------------
663
+ - 'local': Run as a separate process on the local task machine.
664
+
665
+ Valid model options
666
+ -------------------
667
+ Any HuggingFace model identifier, e.g. 'meta-llama/Llama-3.2-1B'
668
+
669
+ NOTE: vLLM's OpenAI-compatible server serves ONE model per server instance.
670
+ If you need multiple models, you must create multiple @vllm decorators.
722
671
 
723
672
 
724
673
  Parameters
725
674
  ----------
726
- packages : Dict[str, str], default: {}
727
- Packages to use for this step. The key is the name of the package
728
- and the value is the version to use.
729
- python : str, optional, default: None
730
- Version of Python to use, e.g. '3.7.4'. A default value of None implies
731
- that the version used will correspond to the version of the Python interpreter used to start the run.
675
+ model: str
676
+ HuggingFace model identifier to be served by vLLM.
677
+ backend: str
678
+ Determines where and how to run the vLLM process.
679
+ openai_api_server: bool
680
+ Whether to use OpenAI-compatible API server mode (subprocess) instead of native engine.
681
+ Default is False (uses native engine).
682
+ Set to True for backward compatibility with existing code.
683
+ debug: bool
684
+ Whether to turn on verbose debugging logs.
685
+ card_refresh_interval: int
686
+ Interval in seconds for refreshing the vLLM status card.
687
+ Only used when openai_api_server=True.
688
+ max_retries: int
689
+ Maximum number of retries checking for vLLM server startup.
690
+ Only used when openai_api_server=True.
691
+ retry_alert_frequency: int
692
+ Frequency of alert logs for vLLM server startup retries.
693
+ Only used when openai_api_server=True.
694
+ engine_args : dict
695
+ Additional keyword arguments to pass to the vLLM engine.
696
+ For example, `tensor_parallel_size=2`.
732
697
  """
733
698
  ...
734
699
 
735
700
  @typing.overload
736
- def pypi(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
701
+ def app_deploy(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
702
+ """
703
+ Decorator prototype for all step decorators. This function gets specialized
704
+ and imported for all decorators types by _import_plugin_decorators().
705
+ """
737
706
  ...
738
707
 
739
708
  @typing.overload
740
- def pypi(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
709
+ def app_deploy(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
741
710
  ...
742
711
 
743
- def pypi(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, packages: typing.Dict[str, str] = {}, python: typing.Optional[str] = None):
712
+ def app_deploy(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
744
713
  """
745
- Specifies the PyPI packages for the step.
746
-
747
- Information in this decorator will augment any
748
- attributes set in the `@pyi_base` flow-level decorator. Hence,
749
- you can use `@pypi_base` to set packages required by all
750
- steps and use `@pypi` to specify step-specific overrides.
751
-
752
-
753
- Parameters
754
- ----------
755
- packages : Dict[str, str], default: {}
756
- Packages to use for this step. The key is the name of the package
757
- and the value is the version to use.
758
- python : str, optional, default: None
759
- Version of Python to use, e.g. '3.7.4'. A default value of None implies
760
- that the version used will correspond to the version of the Python interpreter used to start the run.
714
+ Decorator prototype for all step decorators. This function gets specialized
715
+ and imported for all decorators types by _import_plugin_decorators().
761
716
  """
762
717
  ...
763
718
 
@@ -851,399 +806,440 @@ def kubernetes(*, cpu: int = 1, memory: int = 4096, disk: int = 10240, image: ty
851
806
  ...
852
807
 
853
808
  @typing.overload
854
- def nebius_s3_proxy(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
809
+ def conda(*, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
855
810
  """
856
- Nebius-specific S3 Proxy decorator for routing S3 requests through a local proxy service.
857
- It exists to make it easier for users to know that this decorator should only be used with
858
- a Neo Cloud like Nebius.
811
+ Specifies the Conda environment for the step.
812
+
813
+ Information in this decorator will augment any
814
+ attributes set in the `@conda_base` flow-level decorator. Hence,
815
+ you can use `@conda_base` to set packages required by all
816
+ steps and use `@conda` to specify step-specific overrides.
817
+
818
+
819
+ Parameters
820
+ ----------
821
+ packages : Dict[str, str], default {}
822
+ Packages to use for this step. The key is the name of the package
823
+ and the value is the version to use.
824
+ libraries : Dict[str, str], default {}
825
+ Supported for backward compatibility. When used with packages, packages will take precedence.
826
+ python : str, optional, default None
827
+ Version of Python to use, e.g. '3.7.4'. A default value of None implies
828
+ that the version used will correspond to the version of the Python interpreter used to start the run.
829
+ disabled : bool, default False
830
+ If set to True, disables @conda.
859
831
  """
860
832
  ...
861
833
 
862
834
  @typing.overload
863
- def nebius_s3_proxy(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
835
+ def conda(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
864
836
  ...
865
837
 
866
- def nebius_s3_proxy(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
838
+ @typing.overload
839
+ def conda(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
840
+ ...
841
+
842
+ def conda(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False):
867
843
  """
868
- Nebius-specific S3 Proxy decorator for routing S3 requests through a local proxy service.
869
- It exists to make it easier for users to know that this decorator should only be used with
870
- a Neo Cloud like Nebius.
844
+ Specifies the Conda environment for the step.
845
+
846
+ Information in this decorator will augment any
847
+ attributes set in the `@conda_base` flow-level decorator. Hence,
848
+ you can use `@conda_base` to set packages required by all
849
+ steps and use `@conda` to specify step-specific overrides.
850
+
851
+
852
+ Parameters
853
+ ----------
854
+ packages : Dict[str, str], default {}
855
+ Packages to use for this step. The key is the name of the package
856
+ and the value is the version to use.
857
+ libraries : Dict[str, str], default {}
858
+ Supported for backward compatibility. When used with packages, packages will take precedence.
859
+ python : str, optional, default None
860
+ Version of Python to use, e.g. '3.7.4'. A default value of None implies
861
+ that the version used will correspond to the version of the Python interpreter used to start the run.
862
+ disabled : bool, default False
863
+ If set to True, disables @conda.
871
864
  """
872
865
  ...
873
866
 
874
867
  @typing.overload
875
- def coreweave_s3_proxy(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
868
+ def pypi(*, packages: typing.Dict[str, str] = {}, python: typing.Optional[str] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
876
869
  """
877
- CoreWeave-specific S3 Proxy decorator for routing S3 requests through a local proxy service.
878
- It exists to make it easier for users to know that this decorator should only be used with
879
- a Neo Cloud like CoreWeave.
870
+ Specifies the PyPI packages for the step.
871
+
872
+ Information in this decorator will augment any
873
+ attributes set in the `@pyi_base` flow-level decorator. Hence,
874
+ you can use `@pypi_base` to set packages required by all
875
+ steps and use `@pypi` to specify step-specific overrides.
876
+
877
+
878
+ Parameters
879
+ ----------
880
+ packages : Dict[str, str], default: {}
881
+ Packages to use for this step. The key is the name of the package
882
+ and the value is the version to use.
883
+ python : str, optional, default: None
884
+ Version of Python to use, e.g. '3.7.4'. A default value of None implies
885
+ that the version used will correspond to the version of the Python interpreter used to start the run.
880
886
  """
881
887
  ...
882
888
 
883
889
  @typing.overload
884
- def coreweave_s3_proxy(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
890
+ def pypi(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
885
891
  ...
886
892
 
887
- def coreweave_s3_proxy(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
888
- """
889
- CoreWeave-specific S3 Proxy decorator for routing S3 requests through a local proxy service.
890
- It exists to make it easier for users to know that this decorator should only be used with
891
- a Neo Cloud like CoreWeave.
892
- """
893
+ @typing.overload
894
+ def pypi(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
893
895
  ...
894
896
 
895
- def huggingface_hub(*, temp_dir_root: typing.Optional[str] = None, load: typing.Union[typing.List[str], typing.List[typing.Tuple[typing.Dict, str]], typing.List[typing.Tuple[str, str]], typing.List[typing.Dict], None]) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
897
+ def pypi(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, packages: typing.Dict[str, str] = {}, python: typing.Optional[str] = None):
896
898
  """
897
- Decorator that helps cache, version and store models/datasets from huggingface hub.
898
-
899
- > Examples
900
-
901
- **Usage: creating references of models from huggingface that may be loaded in downstream steps**
902
- ```python
903
- @huggingface_hub
904
- @step
905
- def pull_model_from_huggingface(self):
906
- # `current.huggingface_hub.snapshot_download` downloads the model from the Hugging Face Hub
907
- # and saves it in the backend storage based on the model's `repo_id`. If there exists a model
908
- # with the same `repo_id` in the backend storage, it will not download the model again. The return
909
- # value of the function is a reference to the model in the backend storage.
910
- # This reference can be used to load the model in the subsequent steps via `@model(load=["llama_model"])`
911
-
912
- self.model_id = "mistralai/Mistral-7B-Instruct-v0.1"
913
- self.llama_model = current.huggingface_hub.snapshot_download(
914
- repo_id=self.model_id,
915
- allow_patterns=["*.safetensors", "*.json", "tokenizer.*"],
916
- )
917
- self.next(self.train)
918
- ```
919
-
920
- **Usage: loading models directly from huggingface hub or from cache (from metaflow's datastore)**
921
- ```python
922
- @huggingface_hub(load=["mistralai/Mistral-7B-Instruct-v0.1"])
923
- @step
924
- def pull_model_from_huggingface(self):
925
- path_to_model = current.huggingface_hub.loaded["mistralai/Mistral-7B-Instruct-v0.1"]
926
- ```
927
-
928
- ```python
929
- @huggingface_hub(load=[("mistralai/Mistral-7B-Instruct-v0.1", "/my-directory"), ("myorg/mistral-lora, "/my-lora-directory")])
930
- @step
931
- def finetune_model(self):
932
- path_to_model = current.huggingface_hub.loaded["mistralai/Mistral-7B-Instruct-v0.1"]
933
- # path_to_model will be /my-directory
934
- ```
899
+ Specifies the PyPI packages for the step.
935
900
 
936
- ```python
937
- # Takes all the arguments passed to `snapshot_download`
938
- # except for `local_dir`
939
- @huggingface_hub(load=[
940
- {
941
- "repo_id": "mistralai/Mistral-7B-Instruct-v0.1",
942
- },
943
- {
944
- "repo_id": "myorg/mistral-lora",
945
- "repo_type": "model",
946
- },
947
- ])
948
- @step
949
- def finetune_model(self):
950
- path_to_model = current.huggingface_hub.loaded["mistralai/Mistral-7B-Instruct-v0.1"]
951
- # path_to_model will be /my-directory
952
- ```
901
+ Information in this decorator will augment any
902
+ attributes set in the `@pyi_base` flow-level decorator. Hence,
903
+ you can use `@pypi_base` to set packages required by all
904
+ steps and use `@pypi` to specify step-specific overrides.
953
905
 
954
906
 
955
907
  Parameters
956
908
  ----------
957
- temp_dir_root : str, optional
958
- The root directory that will hold the temporary directory where objects will be downloaded.
959
-
960
- load: Union[List[str], List[Tuple[Dict, str]], List[Tuple[str, str]], List[Dict], None]
961
- The list of repos (models/datasets) to load.
962
-
963
- Loaded repos can be accessed via `current.huggingface_hub.loaded`. If load is set, then the following happens:
964
-
965
- - If repo (model/dataset) is not found in the datastore:
966
- - Downloads the repo from Hugging Face Hub to a temporary directory (or uses specified path) for local access
967
- - Stores it in Metaflow's datastore (s3/gcs/azure etc.) with a unique name based on repo_type/repo_id
968
- - All HF models loaded for a `@step` will be cached separately under flow/step/namespace.
969
-
970
- - If repo is found in the datastore:
971
- - Loads it directly from datastore to local path (can be temporary directory or specified path)
909
+ packages : Dict[str, str], default: {}
910
+ Packages to use for this step. The key is the name of the package
911
+ and the value is the version to use.
912
+ python : str, optional, default: None
913
+ Version of Python to use, e.g. '3.7.4'. A default value of None implies
914
+ that the version used will correspond to the version of the Python interpreter used to start the run.
972
915
  """
973
916
  ...
974
917
 
975
918
  @typing.overload
976
- def model(*, load: typing.Union[typing.List[str], str, typing.List[typing.Tuple[str, typing.Optional[str]]]] = None, temp_dir_root: str = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
919
+ def parallel(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
977
920
  """
978
- Enables loading / saving of models within a step.
979
-
980
- > Examples
981
- - Saving Models
982
- ```python
983
- @model
984
- @step
985
- def train(self):
986
- # current.model.save returns a dictionary reference to the model saved
987
- self.my_model = current.model.save(
988
- path_to_my_model,
989
- label="my_model",
990
- metadata={
991
- "epochs": 10,
992
- "batch-size": 32,
993
- "learning-rate": 0.001,
994
- }
995
- )
996
- self.next(self.test)
997
-
998
- @model(load="my_model")
999
- @step
1000
- def test(self):
1001
- # `current.model.loaded` returns a dictionary of the loaded models
1002
- # where the key is the name of the artifact and the value is the path to the model
1003
- print(os.listdir(current.model.loaded["my_model"]))
1004
- self.next(self.end)
1005
- ```
1006
-
1007
- - Loading models
1008
- ```python
1009
- @step
1010
- def train(self):
1011
- # current.model.load returns the path to the model loaded
1012
- checkpoint_path = current.model.load(
1013
- self.checkpoint_key,
1014
- )
1015
- model_path = current.model.load(
1016
- self.model,
1017
- )
1018
- self.next(self.test)
1019
- ```
1020
-
1021
-
1022
- Parameters
1023
- ----------
1024
- load : Union[List[str],str,List[Tuple[str,Union[str,None]]]], default: None
1025
- Artifact name/s referencing the models/checkpoints to load. Artifact names refer to the names of the instance variables set to `self`.
1026
- These artifact names give to `load` be reference objects or reference `key` string's from objects created by `current.checkpoint` / `current.model` / `current.huggingface_hub`.
1027
- If a list of tuples is provided, the first element is the artifact name and the second element is the path the artifact needs be unpacked on
1028
- the local filesystem. If the second element is None, the artifact will be unpacked in the current working directory.
1029
- If a string is provided, then the artifact corresponding to that name will be loaded in the current working directory.
1030
-
1031
- temp_dir_root : str, default: None
1032
- The root directory under which `current.model.loaded` will store loaded models
921
+ Decorator prototype for all step decorators. This function gets specialized
922
+ and imported for all decorators types by _import_plugin_decorators().
1033
923
  """
1034
924
  ...
1035
925
 
1036
926
  @typing.overload
1037
- def model(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
927
+ def parallel(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1038
928
  ...
1039
929
 
1040
- @typing.overload
1041
- def model(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
930
+ def parallel(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
931
+ """
932
+ Decorator prototype for all step decorators. This function gets specialized
933
+ and imported for all decorators types by _import_plugin_decorators().
934
+ """
1042
935
  ...
1043
936
 
1044
- def model(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, load: typing.Union[typing.List[str], str, typing.List[typing.Tuple[str, typing.Optional[str]]]] = None, temp_dir_root: str = None):
937
+ @typing.overload
938
+ def checkpoint(*, load_policy: str = 'fresh', temp_dir_root: str = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
1045
939
  """
1046
- Enables loading / saving of models within a step.
940
+ Enables checkpointing for a step.
1047
941
 
1048
942
  > Examples
1049
- - Saving Models
943
+
944
+ - Saving Checkpoints
945
+
1050
946
  ```python
1051
- @model
947
+ @checkpoint
1052
948
  @step
1053
949
  def train(self):
1054
- # current.model.save returns a dictionary reference to the model saved
1055
- self.my_model = current.model.save(
1056
- path_to_my_model,
1057
- label="my_model",
1058
- metadata={
1059
- "epochs": 10,
1060
- "batch-size": 32,
1061
- "learning-rate": 0.001,
1062
- }
1063
- )
1064
- self.next(self.test)
1065
-
1066
- @model(load="my_model")
1067
- @step
1068
- def test(self):
1069
- # `current.model.loaded` returns a dictionary of the loaded models
1070
- # where the key is the name of the artifact and the value is the path to the model
1071
- print(os.listdir(current.model.loaded["my_model"]))
1072
- self.next(self.end)
950
+ model = create_model(self.parameters, checkpoint_path = None)
951
+ for i in range(self.epochs):
952
+ # some training logic
953
+ loss = model.train(self.dataset)
954
+ if i % 10 == 0:
955
+ model.save(
956
+ current.checkpoint.directory,
957
+ )
958
+ # saves the contents of the `current.checkpoint.directory` as a checkpoint
959
+ # and returns a reference dictionary to the checkpoint saved in the datastore
960
+ self.latest_checkpoint = current.checkpoint.save(
961
+ name="epoch_checkpoint",
962
+ metadata={
963
+ "epoch": i,
964
+ "loss": loss,
965
+ }
966
+ )
1073
967
  ```
1074
968
 
1075
- - Loading models
969
+ - Using Loaded Checkpoints
970
+
1076
971
  ```python
972
+ @retry(times=3)
973
+ @checkpoint
1077
974
  @step
1078
975
  def train(self):
1079
- # current.model.load returns the path to the model loaded
1080
- checkpoint_path = current.model.load(
1081
- self.checkpoint_key,
1082
- )
1083
- model_path = current.model.load(
1084
- self.model,
1085
- )
1086
- self.next(self.test)
976
+ # Assume that the task has restarted and the previous attempt of the task
977
+ # saved a checkpoint
978
+ checkpoint_path = None
979
+ if current.checkpoint.is_loaded: # Check if a checkpoint is loaded
980
+ print("Loaded checkpoint from the previous attempt")
981
+ checkpoint_path = current.checkpoint.directory
982
+
983
+ model = create_model(self.parameters, checkpoint_path = checkpoint_path)
984
+ for i in range(self.epochs):
985
+ ...
1087
986
  ```
1088
987
 
1089
988
 
1090
989
  Parameters
1091
990
  ----------
1092
- load : Union[List[str],str,List[Tuple[str,Union[str,None]]]], default: None
1093
- Artifact name/s referencing the models/checkpoints to load. Artifact names refer to the names of the instance variables set to `self`.
1094
- These artifact names give to `load` be reference objects or reference `key` string's from objects created by `current.checkpoint` / `current.model` / `current.huggingface_hub`.
1095
- If a list of tuples is provided, the first element is the artifact name and the second element is the path the artifact needs be unpacked on
1096
- the local filesystem. If the second element is None, the artifact will be unpacked in the current working directory.
1097
- If a string is provided, then the artifact corresponding to that name will be loaded in the current working directory.
991
+ load_policy : str, default: "fresh"
992
+ The policy for loading the checkpoint. The following policies are supported:
993
+ - "eager": Loads the the latest available checkpoint within the namespace.
994
+ With this mode, the latest checkpoint written by any previous task (can be even a different run) of the step
995
+ will be loaded at the start of the task.
996
+ - "none": Do not load any checkpoint
997
+ - "fresh": Loads the lastest checkpoint created within the running Task.
998
+ This mode helps loading checkpoints across various retry attempts of the same task.
999
+ With this mode, no checkpoint will be loaded at the start of a task but any checkpoints
1000
+ created within the task will be loaded when the task is retries execution on failure.
1098
1001
 
1099
1002
  temp_dir_root : str, default: None
1100
- The root directory under which `current.model.loaded` will store loaded models
1003
+ The root directory under which `current.checkpoint.directory` will be created.
1101
1004
  """
1102
1005
  ...
1103
1006
 
1104
1007
  @typing.overload
1105
- def parallel(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
1106
- """
1107
- Decorator prototype for all step decorators. This function gets specialized
1108
- and imported for all decorators types by _import_plugin_decorators().
1109
- """
1008
+ def checkpoint(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
1110
1009
  ...
1111
1010
 
1112
1011
  @typing.overload
1113
- def parallel(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1012
+ def checkpoint(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1114
1013
  ...
1115
1014
 
1116
- def parallel(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
1015
+ def checkpoint(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, load_policy: str = 'fresh', temp_dir_root: str = None):
1117
1016
  """
1118
- Decorator prototype for all step decorators. This function gets specialized
1119
- and imported for all decorators types by _import_plugin_decorators().
1017
+ Enables checkpointing for a step.
1018
+
1019
+ > Examples
1020
+
1021
+ - Saving Checkpoints
1022
+
1023
+ ```python
1024
+ @checkpoint
1025
+ @step
1026
+ def train(self):
1027
+ model = create_model(self.parameters, checkpoint_path = None)
1028
+ for i in range(self.epochs):
1029
+ # some training logic
1030
+ loss = model.train(self.dataset)
1031
+ if i % 10 == 0:
1032
+ model.save(
1033
+ current.checkpoint.directory,
1034
+ )
1035
+ # saves the contents of the `current.checkpoint.directory` as a checkpoint
1036
+ # and returns a reference dictionary to the checkpoint saved in the datastore
1037
+ self.latest_checkpoint = current.checkpoint.save(
1038
+ name="epoch_checkpoint",
1039
+ metadata={
1040
+ "epoch": i,
1041
+ "loss": loss,
1042
+ }
1043
+ )
1044
+ ```
1045
+
1046
+ - Using Loaded Checkpoints
1047
+
1048
+ ```python
1049
+ @retry(times=3)
1050
+ @checkpoint
1051
+ @step
1052
+ def train(self):
1053
+ # Assume that the task has restarted and the previous attempt of the task
1054
+ # saved a checkpoint
1055
+ checkpoint_path = None
1056
+ if current.checkpoint.is_loaded: # Check if a checkpoint is loaded
1057
+ print("Loaded checkpoint from the previous attempt")
1058
+ checkpoint_path = current.checkpoint.directory
1059
+
1060
+ model = create_model(self.parameters, checkpoint_path = checkpoint_path)
1061
+ for i in range(self.epochs):
1062
+ ...
1063
+ ```
1064
+
1065
+
1066
+ Parameters
1067
+ ----------
1068
+ load_policy : str, default: "fresh"
1069
+ The policy for loading the checkpoint. The following policies are supported:
1070
+ - "eager": Loads the the latest available checkpoint within the namespace.
1071
+ With this mode, the latest checkpoint written by any previous task (can be even a different run) of the step
1072
+ will be loaded at the start of the task.
1073
+ - "none": Do not load any checkpoint
1074
+ - "fresh": Loads the lastest checkpoint created within the running Task.
1075
+ This mode helps loading checkpoints across various retry attempts of the same task.
1076
+ With this mode, no checkpoint will be loaded at the start of a task but any checkpoints
1077
+ created within the task will be loaded when the task retries execution on failure.
1078
+
1079
+ temp_dir_root : str, default: None
1080
+ The root directory under which `current.checkpoint.directory` will be created.
1120
1081
  """
1121
1082
  ...
1122
1083
 
1123
- @typing.overload
1124
- def timeout(*, seconds: int = 0, minutes: int = 0, hours: int = 0) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
1084
+ def ollama(*, models: list, backend: str, force_pull: bool, cache_update_policy: str, force_cache_update: bool, debug: bool, circuit_breaker_config: dict, timeout_config: dict) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
1125
1085
  """
1126
- Specifies a timeout for your step.
1086
+ This decorator is used to run Ollama APIs as Metaflow task sidecars.
1127
1087
 
1128
- This decorator is useful if this step may hang indefinitely.
1088
+ User code call
1089
+ --------------
1090
+ @ollama(
1091
+ models=[...],
1092
+ ...
1093
+ )
1129
1094
 
1130
- This can be used in conjunction with the `@retry` decorator as well as the `@catch` decorator.
1131
- A timeout is considered to be an exception thrown by the step. It will cause the step to be
1132
- retried if needed and the exception will be caught by the `@catch` decorator, if present.
1095
+ Valid backend options
1096
+ ---------------------
1097
+ - 'local': Run as a separate process on the local task machine.
1098
+ - (TODO) 'managed': Outerbounds hosts and selects compute provider.
1099
+ - (TODO) 'remote': Spin up separate instance to serve Ollama models.
1133
1100
 
1134
- Note that all the values specified in parameters are added together so if you specify
1135
- 60 seconds and 1 hour, the decorator will have an effective timeout of 1 hour and 1 minute.
1101
+ Valid model options
1102
+ -------------------
1103
+ Any model here https://ollama.com/search, e.g. 'llama3.2', 'llama3.3'
1136
1104
 
1137
1105
 
1138
1106
  Parameters
1139
1107
  ----------
1140
- seconds : int, default 0
1141
- Number of seconds to wait prior to timing out.
1142
- minutes : int, default 0
1143
- Number of minutes to wait prior to timing out.
1144
- hours : int, default 0
1145
- Number of hours to wait prior to timing out.
1108
+ models: list[str]
1109
+ List of Ollama containers running models in sidecars.
1110
+ backend: str
1111
+ Determines where and how to run the Ollama process.
1112
+ force_pull: bool
1113
+ Whether to run `ollama pull` no matter what, or first check the remote cache in Metaflow datastore for this model key.
1114
+ cache_update_policy: str
1115
+ Cache update policy: "auto", "force", or "never".
1116
+ force_cache_update: bool
1117
+ Simple override for "force" cache update policy.
1118
+ debug: bool
1119
+ Whether to turn on verbose debugging logs.
1120
+ circuit_breaker_config: dict
1121
+ Configuration for circuit breaker protection. Keys: failure_threshold, recovery_timeout, reset_timeout.
1122
+ timeout_config: dict
1123
+ Configuration for various operation timeouts. Keys: pull, stop, health_check, install, server_startup.
1146
1124
  """
1147
1125
  ...
1148
1126
 
1149
1127
  @typing.overload
1150
- def timeout(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
1128
+ def test_append_card(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
1129
+ """
1130
+ A simple decorator that demonstrates using CardDecoratorInjector
1131
+ to inject a card and render simple markdown content.
1132
+ """
1151
1133
  ...
1152
1134
 
1153
1135
  @typing.overload
1154
- def timeout(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1136
+ def test_append_card(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1155
1137
  ...
1156
1138
 
1157
- def timeout(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, seconds: int = 0, minutes: int = 0, hours: int = 0):
1139
+ def test_append_card(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
1158
1140
  """
1159
- Specifies a timeout for your step.
1160
-
1161
- This decorator is useful if this step may hang indefinitely.
1162
-
1163
- This can be used in conjunction with the `@retry` decorator as well as the `@catch` decorator.
1164
- A timeout is considered to be an exception thrown by the step. It will cause the step to be
1165
- retried if needed and the exception will be caught by the `@catch` decorator, if present.
1166
-
1167
- Note that all the values specified in parameters are added together so if you specify
1168
- 60 seconds and 1 hour, the decorator will have an effective timeout of 1 hour and 1 minute.
1169
-
1170
-
1171
- Parameters
1172
- ----------
1173
- seconds : int, default 0
1174
- Number of seconds to wait prior to timing out.
1175
- minutes : int, default 0
1176
- Number of minutes to wait prior to timing out.
1177
- hours : int, default 0
1178
- Number of hours to wait prior to timing out.
1141
+ A simple decorator that demonstrates using CardDecoratorInjector
1142
+ to inject a card and render simple markdown content.
1179
1143
  """
1180
1144
  ...
1181
1145
 
1182
1146
  @typing.overload
1183
- def card(*, type: str = 'default', id: typing.Optional[str] = None, options: typing.Dict[str, typing.Any] = {}, timeout: int = 45) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
1147
+ def nebius_s3_proxy(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
1184
1148
  """
1185
- Creates a human-readable report, a Metaflow Card, after this step completes.
1186
-
1187
- Note that you may add multiple `@card` decorators in a step with different parameters.
1188
-
1189
-
1190
- Parameters
1191
- ----------
1192
- type : str, default 'default'
1193
- Card type.
1194
- id : str, optional, default None
1195
- If multiple cards are present, use this id to identify this card.
1196
- options : Dict[str, Any], default {}
1197
- Options passed to the card. The contents depend on the card type.
1198
- timeout : int, default 45
1199
- Interrupt reporting if it takes more than this many seconds.
1149
+ Nebius-specific S3 Proxy decorator for routing S3 requests through a local proxy service.
1150
+ It exists to make it easier for users to know that this decorator should only be used with
1151
+ a Neo Cloud like Nebius.
1200
1152
  """
1201
1153
  ...
1202
1154
 
1203
1155
  @typing.overload
1204
- def card(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
1156
+ def nebius_s3_proxy(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1205
1157
  ...
1206
1158
 
1207
- @typing.overload
1208
- def card(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1159
+ def nebius_s3_proxy(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
1160
+ """
1161
+ Nebius-specific S3 Proxy decorator for routing S3 requests through a local proxy service.
1162
+ It exists to make it easier for users to know that this decorator should only be used with
1163
+ a Neo Cloud like Nebius.
1164
+ """
1209
1165
  ...
1210
1166
 
1211
- def card(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, type: str = 'default', id: typing.Optional[str] = None, options: typing.Dict[str, typing.Any] = {}, timeout: int = 45):
1167
+ @typing.overload
1168
+ def resources(*, cpu: int = 1, gpu: typing.Optional[int] = None, disk: typing.Optional[int] = None, memory: int = 4096, shared_memory: typing.Optional[int] = None) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
1212
1169
  """
1213
- Creates a human-readable report, a Metaflow Card, after this step completes.
1170
+ Specifies the resources needed when executing this step.
1214
1171
 
1215
- Note that you may add multiple `@card` decorators in a step with different parameters.
1172
+ Use `@resources` to specify the resource requirements
1173
+ independently of the specific compute layer (`@batch`, `@kubernetes`).
1174
+
1175
+ You can choose the compute layer on the command line by executing e.g.
1176
+ ```
1177
+ python myflow.py run --with batch
1178
+ ```
1179
+ or
1180
+ ```
1181
+ python myflow.py run --with kubernetes
1182
+ ```
1183
+ which executes the flow on the desired system using the
1184
+ requirements specified in `@resources`.
1216
1185
 
1217
1186
 
1218
1187
  Parameters
1219
1188
  ----------
1220
- type : str, default 'default'
1221
- Card type.
1222
- id : str, optional, default None
1223
- If multiple cards are present, use this id to identify this card.
1224
- options : Dict[str, Any], default {}
1225
- Options passed to the card. The contents depend on the card type.
1226
- timeout : int, default 45
1227
- Interrupt reporting if it takes more than this many seconds.
1189
+ cpu : int, default 1
1190
+ Number of CPUs required for this step.
1191
+ gpu : int, optional, default None
1192
+ Number of GPUs required for this step.
1193
+ disk : int, optional, default None
1194
+ Disk size (in MB) required for this step. Only applies on Kubernetes.
1195
+ memory : int, default 4096
1196
+ Memory size (in MB) required for this step.
1197
+ shared_memory : int, optional, default None
1198
+ The value for the size (in MiB) of the /dev/shm volume for this step.
1199
+ This parameter maps to the `--shm-size` option in Docker.
1228
1200
  """
1229
1201
  ...
1230
1202
 
1231
1203
  @typing.overload
1232
- def app_deploy(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
1233
- """
1234
- Decorator prototype for all step decorators. This function gets specialized
1235
- and imported for all decorators types by _import_plugin_decorators().
1236
- """
1204
+ def resources(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
1237
1205
  ...
1238
1206
 
1239
1207
  @typing.overload
1240
- def app_deploy(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1208
+ def resources(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1241
1209
  ...
1242
1210
 
1243
- def app_deploy(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None):
1211
+ def resources(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, cpu: int = 1, gpu: typing.Optional[int] = None, disk: typing.Optional[int] = None, memory: int = 4096, shared_memory: typing.Optional[int] = None):
1244
1212
  """
1245
- Decorator prototype for all step decorators. This function gets specialized
1246
- and imported for all decorators types by _import_plugin_decorators().
1213
+ Specifies the resources needed when executing this step.
1214
+
1215
+ Use `@resources` to specify the resource requirements
1216
+ independently of the specific compute layer (`@batch`, `@kubernetes`).
1217
+
1218
+ You can choose the compute layer on the command line by executing e.g.
1219
+ ```
1220
+ python myflow.py run --with batch
1221
+ ```
1222
+ or
1223
+ ```
1224
+ python myflow.py run --with kubernetes
1225
+ ```
1226
+ which executes the flow on the desired system using the
1227
+ requirements specified in `@resources`.
1228
+
1229
+
1230
+ Parameters
1231
+ ----------
1232
+ cpu : int, default 1
1233
+ Number of CPUs required for this step.
1234
+ gpu : int, optional, default None
1235
+ Number of GPUs required for this step.
1236
+ disk : int, optional, default None
1237
+ Disk size (in MB) required for this step. Only applies on Kubernetes.
1238
+ memory : int, default 4096
1239
+ Memory size (in MB) required for this step.
1240
+ shared_memory : int, optional, default None
1241
+ The value for the size (in MiB) of the /dev/shm volume for this step.
1242
+ This parameter maps to the `--shm-size` option in Docker.
1247
1243
  """
1248
1244
  ...
1249
1245
 
@@ -1298,53 +1294,150 @@ def catch(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], ty
1298
1294
  """
1299
1295
  ...
1300
1296
 
1301
- def vllm(*, model: str, backend: str, openai_api_server: bool, debug: bool, card_refresh_interval: int, max_retries: int, retry_alert_frequency: int, engine_args: dict) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
1297
+ @typing.overload
1298
+ def retry(*, times: int = 3, minutes_between_retries: int = 2) -> typing.Callable[[typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]], typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]]]:
1302
1299
  """
1303
- This decorator is used to run vllm APIs as Metaflow task sidecars.
1300
+ Specifies the number of times the task corresponding
1301
+ to a step needs to be retried.
1304
1302
 
1305
- User code call
1306
- --------------
1307
- @vllm(
1308
- model="...",
1309
- ...
1310
- )
1303
+ This decorator is useful for handling transient errors, such as networking issues.
1304
+ If your task contains operations that can't be retried safely, e.g. database updates,
1305
+ it is advisable to annotate it with `@retry(times=0)`.
1311
1306
 
1312
- Valid backend options
1313
- ---------------------
1314
- - 'local': Run as a separate process on the local task machine.
1307
+ This can be used in conjunction with the `@catch` decorator. The `@catch`
1308
+ decorator will execute a no-op task after all retries have been exhausted,
1309
+ ensuring that the flow execution can continue.
1315
1310
 
1316
- Valid model options
1317
- -------------------
1318
- Any HuggingFace model identifier, e.g. 'meta-llama/Llama-3.2-1B'
1319
1311
 
1320
- NOTE: vLLM's OpenAI-compatible server serves ONE model per server instance.
1321
- If you need multiple models, you must create multiple @vllm decorators.
1312
+ Parameters
1313
+ ----------
1314
+ times : int, default 3
1315
+ Number of times to retry this task.
1316
+ minutes_between_retries : int, default 2
1317
+ Number of minutes between retries.
1318
+ """
1319
+ ...
1320
+
1321
+ @typing.overload
1322
+ def retry(f: typing.Callable[[FlowSpecDerived, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, StepFlag], None]:
1323
+ ...
1324
+
1325
+ @typing.overload
1326
+ def retry(f: typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]) -> typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None]:
1327
+ ...
1328
+
1329
+ def retry(f: typing.Union[typing.Callable[[FlowSpecDerived, StepFlag], None], typing.Callable[[FlowSpecDerived, typing.Any, StepFlag], None], None] = None, *, times: int = 3, minutes_between_retries: int = 2):
1330
+ """
1331
+ Specifies the number of times the task corresponding
1332
+ to a step needs to be retried.
1333
+
1334
+ This decorator is useful for handling transient errors, such as networking issues.
1335
+ If your task contains operations that can't be retried safely, e.g. database updates,
1336
+ it is advisable to annotate it with `@retry(times=0)`.
1337
+
1338
+ This can be used in conjunction with the `@catch` decorator. The `@catch`
1339
+ decorator will execute a no-op task after all retries have been exhausted,
1340
+ ensuring that the flow execution can continue.
1322
1341
 
1323
1342
 
1324
1343
  Parameters
1325
1344
  ----------
1326
- model: str
1327
- HuggingFace model identifier to be served by vLLM.
1328
- backend: str
1329
- Determines where and how to run the vLLM process.
1330
- openai_api_server: bool
1331
- Whether to use OpenAI-compatible API server mode (subprocess) instead of native engine.
1332
- Default is False (uses native engine).
1333
- Set to True for backward compatibility with existing code.
1334
- debug: bool
1335
- Whether to turn on verbose debugging logs.
1336
- card_refresh_interval: int
1337
- Interval in seconds for refreshing the vLLM status card.
1338
- Only used when openai_api_server=True.
1339
- max_retries: int
1340
- Maximum number of retries checking for vLLM server startup.
1341
- Only used when openai_api_server=True.
1342
- retry_alert_frequency: int
1343
- Frequency of alert logs for vLLM server startup retries.
1344
- Only used when openai_api_server=True.
1345
- engine_args : dict
1346
- Additional keyword arguments to pass to the vLLM engine.
1347
- For example, `tensor_parallel_size=2`.
1345
+ times : int, default 3
1346
+ Number of times to retry this task.
1347
+ minutes_between_retries : int, default 2
1348
+ Number of minutes between retries.
1349
+ """
1350
+ ...
1351
+
1352
+ @typing.overload
1353
+ def pypi_base(*, packages: typing.Dict[str, str] = {}, python: typing.Optional[str] = None) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1354
+ """
1355
+ Specifies the PyPI packages for all steps of the flow.
1356
+
1357
+ Use `@pypi_base` to set common packages required by all
1358
+ steps and use `@pypi` to specify step-specific overrides.
1359
+
1360
+ Parameters
1361
+ ----------
1362
+ packages : Dict[str, str], default: {}
1363
+ Packages to use for this flow. The key is the name of the package
1364
+ and the value is the version to use.
1365
+ python : str, optional, default: None
1366
+ Version of Python to use, e.g. '3.7.4'. A default value of None implies
1367
+ that the version used will correspond to the version of the Python interpreter used to start the run.
1368
+ """
1369
+ ...
1370
+
1371
+ @typing.overload
1372
+ def pypi_base(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
1373
+ ...
1374
+
1375
+ def pypi_base(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, packages: typing.Dict[str, str] = {}, python: typing.Optional[str] = None):
1376
+ """
1377
+ Specifies the PyPI packages for all steps of the flow.
1378
+
1379
+ Use `@pypi_base` to set common packages required by all
1380
+ steps and use `@pypi` to specify step-specific overrides.
1381
+
1382
+ Parameters
1383
+ ----------
1384
+ packages : Dict[str, str], default: {}
1385
+ Packages to use for this flow. The key is the name of the package
1386
+ and the value is the version to use.
1387
+ python : str, optional, default: None
1388
+ Version of Python to use, e.g. '3.7.4'. A default value of None implies
1389
+ that the version used will correspond to the version of the Python interpreter used to start the run.
1390
+ """
1391
+ ...
1392
+
1393
+ @typing.overload
1394
+ def conda_base(*, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1395
+ """
1396
+ Specifies the Conda environment for all steps of the flow.
1397
+
1398
+ Use `@conda_base` to set common libraries required by all
1399
+ steps and use `@conda` to specify step-specific additions.
1400
+
1401
+
1402
+ Parameters
1403
+ ----------
1404
+ packages : Dict[str, str], default {}
1405
+ Packages to use for this flow. The key is the name of the package
1406
+ and the value is the version to use.
1407
+ libraries : Dict[str, str], default {}
1408
+ Supported for backward compatibility. When used with packages, packages will take precedence.
1409
+ python : str, optional, default None
1410
+ Version of Python to use, e.g. '3.7.4'. A default value of None implies
1411
+ that the version used will correspond to the version of the Python interpreter used to start the run.
1412
+ disabled : bool, default False
1413
+ If set to True, disables Conda.
1414
+ """
1415
+ ...
1416
+
1417
+ @typing.overload
1418
+ def conda_base(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
1419
+ ...
1420
+
1421
+ def conda_base(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False):
1422
+ """
1423
+ Specifies the Conda environment for all steps of the flow.
1424
+
1425
+ Use `@conda_base` to set common libraries required by all
1426
+ steps and use `@conda` to specify step-specific additions.
1427
+
1428
+
1429
+ Parameters
1430
+ ----------
1431
+ packages : Dict[str, str], default {}
1432
+ Packages to use for this flow. The key is the name of the package
1433
+ and the value is the version to use.
1434
+ libraries : Dict[str, str], default {}
1435
+ Supported for backward compatibility. When used with packages, packages will take precedence.
1436
+ python : str, optional, default None
1437
+ Version of Python to use, e.g. '3.7.4'. A default value of None implies
1438
+ that the version used will correspond to the version of the Python interpreter used to start the run.
1439
+ disabled : bool, default False
1440
+ If set to True, disables Conda.
1348
1441
  """
1349
1442
  ...
1350
1443
 
@@ -1420,75 +1513,24 @@ def trigger(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, event: t
1420
1513
  {'name':'bar', 'parameters':{'flow_param_2': 'event_field_2'}])
1421
1514
  ```
1422
1515
 
1423
- 'parameters' can also be a list of strings and tuples like so:
1424
- ```
1425
- @trigger(event={'name':'foo', 'parameters':['common_name', ('flow_param', 'event_field')]})
1426
- ```
1427
- This is equivalent to:
1428
- ```
1429
- @trigger(event={'name':'foo', 'parameters':{'common_name': 'common_name', 'flow_param': 'event_field'}})
1430
- ```
1431
-
1432
-
1433
- Parameters
1434
- ----------
1435
- event : Union[str, Dict[str, Any]], optional, default None
1436
- Event dependency for this flow.
1437
- events : List[Union[str, Dict[str, Any]]], default []
1438
- Events dependency for this flow.
1439
- options : Dict[str, Any], default {}
1440
- Backend-specific configuration for tuning eventing behavior.
1441
- """
1442
- ...
1443
-
1444
- @typing.overload
1445
- def schedule(*, hourly: bool = False, daily: bool = True, weekly: bool = False, cron: typing.Optional[str] = None, timezone: typing.Optional[str] = None) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1446
- """
1447
- Specifies the times when the flow should be run when running on a
1448
- production scheduler.
1449
-
1450
-
1451
- Parameters
1452
- ----------
1453
- hourly : bool, default False
1454
- Run the workflow hourly.
1455
- daily : bool, default True
1456
- Run the workflow daily.
1457
- weekly : bool, default False
1458
- Run the workflow weekly.
1459
- cron : str, optional, default None
1460
- Run the workflow at [a custom Cron schedule](https://docs.aws.amazon.com/eventbridge/latest/userguide/scheduled-events.html#cron-expressions)
1461
- specified by this expression.
1462
- timezone : str, optional, default None
1463
- Timezone on which the schedule runs (default: None). Currently supported only for Argo workflows,
1464
- which accepts timezones in [IANA format](https://nodatime.org/TimeZones).
1465
- """
1466
- ...
1467
-
1468
- @typing.overload
1469
- def schedule(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
1470
- ...
1471
-
1472
- def schedule(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, hourly: bool = False, daily: bool = True, weekly: bool = False, cron: typing.Optional[str] = None, timezone: typing.Optional[str] = None):
1473
- """
1474
- Specifies the times when the flow should be run when running on a
1475
- production scheduler.
1476
-
1516
+ 'parameters' can also be a list of strings and tuples like so:
1517
+ ```
1518
+ @trigger(event={'name':'foo', 'parameters':['common_name', ('flow_param', 'event_field')]})
1519
+ ```
1520
+ This is equivalent to:
1521
+ ```
1522
+ @trigger(event={'name':'foo', 'parameters':{'common_name': 'common_name', 'flow_param': 'event_field'}})
1523
+ ```
1524
+
1477
1525
 
1478
1526
  Parameters
1479
1527
  ----------
1480
- hourly : bool, default False
1481
- Run the workflow hourly.
1482
- daily : bool, default True
1483
- Run the workflow daily.
1484
- weekly : bool, default False
1485
- Run the workflow weekly.
1486
- cron : str, optional, default None
1487
- Run the workflow at [a custom Cron schedule](https://docs.aws.amazon.com/eventbridge/latest/userguide/scheduled-events.html#cron-expressions)
1488
- specified by this expression.
1489
- timezone : str, optional, default None
1490
- Timezone on which the schedule runs (default: None). Currently supported only for Argo workflows,
1491
- which accepts timezones in [IANA format](https://nodatime.org/TimeZones).
1528
+ event : Union[str, Dict[str, Any]], optional, default None
1529
+ Event dependency for this flow.
1530
+ events : List[Union[str, Dict[str, Any]]], default []
1531
+ Events dependency for this flow.
1532
+ options : Dict[str, Any], default {}
1533
+ Backend-specific configuration for tuning eventing behavior.
1492
1534
  """
1493
1535
  ...
1494
1536
 
@@ -1593,6 +1635,100 @@ def trigger_on_finish(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *
1593
1635
  """
1594
1636
  ...
1595
1637
 
1638
+ def airflow_external_task_sensor(*, timeout: int, poke_interval: int, mode: str, exponential_backoff: bool, pool: str, soft_fail: bool, name: str, description: str, external_dag_id: str, external_task_ids: typing.List[str], allowed_states: typing.List[str], failed_states: typing.List[str], execution_delta: "datetime.timedelta", check_existence: bool) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1639
+ """
1640
+ The `@airflow_external_task_sensor` decorator attaches a Airflow [ExternalTaskSensor](https://airflow.apache.org/docs/apache-airflow/stable/_api/airflow/sensors/external_task/index.html#airflow.sensors.external_task.ExternalTaskSensor) before the start step of the flow.
1641
+ This decorator only works when a flow is scheduled on Airflow and is compiled using `airflow create`. More than one `@airflow_external_task_sensor` can be added as a flow decorators. Adding more than one decorator will ensure that `start` step starts only after all sensors finish.
1642
+
1643
+
1644
+ Parameters
1645
+ ----------
1646
+ timeout : int
1647
+ Time, in seconds before the task times out and fails. (Default: 3600)
1648
+ poke_interval : int
1649
+ Time in seconds that the job should wait in between each try. (Default: 60)
1650
+ mode : str
1651
+ How the sensor operates. Options are: { poke | reschedule }. (Default: "poke")
1652
+ exponential_backoff : bool
1653
+ allow progressive longer waits between pokes by using exponential backoff algorithm. (Default: True)
1654
+ pool : str
1655
+ the slot pool this task should run in,
1656
+ slot pools are a way to limit concurrency for certain tasks. (Default:None)
1657
+ soft_fail : bool
1658
+ Set to true to mark the task as SKIPPED on failure. (Default: False)
1659
+ name : str
1660
+ Name of the sensor on Airflow
1661
+ description : str
1662
+ Description of sensor in the Airflow UI
1663
+ external_dag_id : str
1664
+ The dag_id that contains the task you want to wait for.
1665
+ external_task_ids : List[str]
1666
+ The list of task_ids that you want to wait for.
1667
+ If None (default value) the sensor waits for the DAG. (Default: None)
1668
+ allowed_states : List[str]
1669
+ Iterable of allowed states, (Default: ['success'])
1670
+ failed_states : List[str]
1671
+ Iterable of failed or dis-allowed states. (Default: None)
1672
+ execution_delta : datetime.timedelta
1673
+ time difference with the previous execution to look at,
1674
+ the default is the same logical date as the current task or DAG. (Default: None)
1675
+ check_existence: bool
1676
+ Set to True to check if the external task exists or check if
1677
+ the DAG to wait for exists. (Default: True)
1678
+ """
1679
+ ...
1680
+
1681
+ @typing.overload
1682
+ def schedule(*, hourly: bool = False, daily: bool = True, weekly: bool = False, cron: typing.Optional[str] = None, timezone: typing.Optional[str] = None) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1683
+ """
1684
+ Specifies the times when the flow should be run when running on a
1685
+ production scheduler.
1686
+
1687
+
1688
+ Parameters
1689
+ ----------
1690
+ hourly : bool, default False
1691
+ Run the workflow hourly.
1692
+ daily : bool, default True
1693
+ Run the workflow daily.
1694
+ weekly : bool, default False
1695
+ Run the workflow weekly.
1696
+ cron : str, optional, default None
1697
+ Run the workflow at [a custom Cron schedule](https://docs.aws.amazon.com/eventbridge/latest/userguide/scheduled-events.html#cron-expressions)
1698
+ specified by this expression.
1699
+ timezone : str, optional, default None
1700
+ Timezone on which the schedule runs (default: None). Currently supported only for Argo workflows,
1701
+ which accepts timezones in [IANA format](https://nodatime.org/TimeZones).
1702
+ """
1703
+ ...
1704
+
1705
+ @typing.overload
1706
+ def schedule(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
1707
+ ...
1708
+
1709
+ def schedule(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, hourly: bool = False, daily: bool = True, weekly: bool = False, cron: typing.Optional[str] = None, timezone: typing.Optional[str] = None):
1710
+ """
1711
+ Specifies the times when the flow should be run when running on a
1712
+ production scheduler.
1713
+
1714
+
1715
+ Parameters
1716
+ ----------
1717
+ hourly : bool, default False
1718
+ Run the workflow hourly.
1719
+ daily : bool, default True
1720
+ Run the workflow daily.
1721
+ weekly : bool, default False
1722
+ Run the workflow weekly.
1723
+ cron : str, optional, default None
1724
+ Run the workflow at [a custom Cron schedule](https://docs.aws.amazon.com/eventbridge/latest/userguide/scheduled-events.html#cron-expressions)
1725
+ specified by this expression.
1726
+ timezone : str, optional, default None
1727
+ Timezone on which the schedule runs (default: None). Currently supported only for Argo workflows,
1728
+ which accepts timezones in [IANA format](https://nodatime.org/TimeZones).
1729
+ """
1730
+ ...
1731
+
1596
1732
  def with_artifact_store(f: typing.Optional[typing.Type[FlowSpecDerived]] = None):
1597
1733
  """
1598
1734
  Allows setting external datastores to save data for the
@@ -1707,47 +1843,6 @@ def with_artifact_store(f: typing.Optional[typing.Type[FlowSpecDerived]] = None)
1707
1843
  """
1708
1844
  ...
1709
1845
 
1710
- @typing.overload
1711
- def pypi_base(*, packages: typing.Dict[str, str] = {}, python: typing.Optional[str] = None) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1712
- """
1713
- Specifies the PyPI packages for all steps of the flow.
1714
-
1715
- Use `@pypi_base` to set common packages required by all
1716
- steps and use `@pypi` to specify step-specific overrides.
1717
-
1718
- Parameters
1719
- ----------
1720
- packages : Dict[str, str], default: {}
1721
- Packages to use for this flow. The key is the name of the package
1722
- and the value is the version to use.
1723
- python : str, optional, default: None
1724
- Version of Python to use, e.g. '3.7.4'. A default value of None implies
1725
- that the version used will correspond to the version of the Python interpreter used to start the run.
1726
- """
1727
- ...
1728
-
1729
- @typing.overload
1730
- def pypi_base(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
1731
- ...
1732
-
1733
- def pypi_base(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, packages: typing.Dict[str, str] = {}, python: typing.Optional[str] = None):
1734
- """
1735
- Specifies the PyPI packages for all steps of the flow.
1736
-
1737
- Use `@pypi_base` to set common packages required by all
1738
- steps and use `@pypi` to specify step-specific overrides.
1739
-
1740
- Parameters
1741
- ----------
1742
- packages : Dict[str, str], default: {}
1743
- Packages to use for this flow. The key is the name of the package
1744
- and the value is the version to use.
1745
- python : str, optional, default: None
1746
- Version of Python to use, e.g. '3.7.4'. A default value of None implies
1747
- that the version used will correspond to the version of the Python interpreter used to start the run.
1748
- """
1749
- ...
1750
-
1751
1846
  def airflow_s3_key_sensor(*, timeout: int, poke_interval: int, mode: str, exponential_backoff: bool, pool: str, soft_fail: bool, name: str, description: str, bucket_key: typing.Union[str, typing.List[str]], bucket_name: str, wildcard_match: bool, aws_conn_id: str, verify: bool) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1752
1847
  """
1753
1848
  The `@airflow_s3_key_sensor` decorator attaches a Airflow [S3KeySensor](https://airflow.apache.org/docs/apache-airflow-providers-amazon/stable/_api/airflow/providers/amazon/aws/sensors/s3/index.html#airflow.providers.amazon.aws.sensors.s3.S3KeySensor)
@@ -1791,100 +1886,6 @@ def airflow_s3_key_sensor(*, timeout: int, poke_interval: int, mode: str, expone
1791
1886
  """
1792
1887
  ...
1793
1888
 
1794
- @typing.overload
1795
- def conda_base(*, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1796
- """
1797
- Specifies the Conda environment for all steps of the flow.
1798
-
1799
- Use `@conda_base` to set common libraries required by all
1800
- steps and use `@conda` to specify step-specific additions.
1801
-
1802
-
1803
- Parameters
1804
- ----------
1805
- packages : Dict[str, str], default {}
1806
- Packages to use for this flow. The key is the name of the package
1807
- and the value is the version to use.
1808
- libraries : Dict[str, str], default {}
1809
- Supported for backward compatibility. When used with packages, packages will take precedence.
1810
- python : str, optional, default None
1811
- Version of Python to use, e.g. '3.7.4'. A default value of None implies
1812
- that the version used will correspond to the version of the Python interpreter used to start the run.
1813
- disabled : bool, default False
1814
- If set to True, disables Conda.
1815
- """
1816
- ...
1817
-
1818
- @typing.overload
1819
- def conda_base(f: typing.Type[FlowSpecDerived]) -> typing.Type[FlowSpecDerived]:
1820
- ...
1821
-
1822
- def conda_base(f: typing.Optional[typing.Type[FlowSpecDerived]] = None, *, packages: typing.Dict[str, str] = {}, libraries: typing.Dict[str, str] = {}, python: typing.Optional[str] = None, disabled: bool = False):
1823
- """
1824
- Specifies the Conda environment for all steps of the flow.
1825
-
1826
- Use `@conda_base` to set common libraries required by all
1827
- steps and use `@conda` to specify step-specific additions.
1828
-
1829
-
1830
- Parameters
1831
- ----------
1832
- packages : Dict[str, str], default {}
1833
- Packages to use for this flow. The key is the name of the package
1834
- and the value is the version to use.
1835
- libraries : Dict[str, str], default {}
1836
- Supported for backward compatibility. When used with packages, packages will take precedence.
1837
- python : str, optional, default None
1838
- Version of Python to use, e.g. '3.7.4'. A default value of None implies
1839
- that the version used will correspond to the version of the Python interpreter used to start the run.
1840
- disabled : bool, default False
1841
- If set to True, disables Conda.
1842
- """
1843
- ...
1844
-
1845
- def airflow_external_task_sensor(*, timeout: int, poke_interval: int, mode: str, exponential_backoff: bool, pool: str, soft_fail: bool, name: str, description: str, external_dag_id: str, external_task_ids: typing.List[str], allowed_states: typing.List[str], failed_states: typing.List[str], execution_delta: "datetime.timedelta", check_existence: bool) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1846
- """
1847
- The `@airflow_external_task_sensor` decorator attaches a Airflow [ExternalTaskSensor](https://airflow.apache.org/docs/apache-airflow/stable/_api/airflow/sensors/external_task/index.html#airflow.sensors.external_task.ExternalTaskSensor) before the start step of the flow.
1848
- This decorator only works when a flow is scheduled on Airflow and is compiled using `airflow create`. More than one `@airflow_external_task_sensor` can be added as a flow decorators. Adding more than one decorator will ensure that `start` step starts only after all sensors finish.
1849
-
1850
-
1851
- Parameters
1852
- ----------
1853
- timeout : int
1854
- Time, in seconds before the task times out and fails. (Default: 3600)
1855
- poke_interval : int
1856
- Time in seconds that the job should wait in between each try. (Default: 60)
1857
- mode : str
1858
- How the sensor operates. Options are: { poke | reschedule }. (Default: "poke")
1859
- exponential_backoff : bool
1860
- allow progressive longer waits between pokes by using exponential backoff algorithm. (Default: True)
1861
- pool : str
1862
- the slot pool this task should run in,
1863
- slot pools are a way to limit concurrency for certain tasks. (Default:None)
1864
- soft_fail : bool
1865
- Set to true to mark the task as SKIPPED on failure. (Default: False)
1866
- name : str
1867
- Name of the sensor on Airflow
1868
- description : str
1869
- Description of sensor in the Airflow UI
1870
- external_dag_id : str
1871
- The dag_id that contains the task you want to wait for.
1872
- external_task_ids : List[str]
1873
- The list of task_ids that you want to wait for.
1874
- If None (default value) the sensor waits for the DAG. (Default: None)
1875
- allowed_states : List[str]
1876
- Iterable of allowed states, (Default: ['success'])
1877
- failed_states : List[str]
1878
- Iterable of failed or dis-allowed states. (Default: None)
1879
- execution_delta : datetime.timedelta
1880
- time difference with the previous execution to look at,
1881
- the default is the same logical date as the current task or DAG. (Default: None)
1882
- check_existence: bool
1883
- Set to True to check if the external task exists or check if
1884
- the DAG to wait for exists. (Default: True)
1885
- """
1886
- ...
1887
-
1888
1889
  def project(*, name: str, branch: typing.Optional[str] = None, production: bool = False) -> typing.Callable[[typing.Type[FlowSpecDerived]], typing.Type[FlowSpecDerived]]:
1889
1890
  """
1890
1891
  Specifies what flows belong to the same project.