flowyml-1.7.2-py3-none-any.whl → flowyml-1.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126)
  1. flowyml/assets/base.py +15 -0
  2. flowyml/assets/metrics.py +5 -0
  3. flowyml/cli/main.py +709 -0
  4. flowyml/cli/stack_cli.py +138 -25
  5. flowyml/core/__init__.py +17 -0
  6. flowyml/core/executor.py +161 -26
  7. flowyml/core/image_builder.py +129 -0
  8. flowyml/core/log_streamer.py +227 -0
  9. flowyml/core/orchestrator.py +22 -2
  10. flowyml/core/pipeline.py +34 -10
  11. flowyml/core/routing.py +558 -0
  12. flowyml/core/step.py +9 -1
  13. flowyml/core/step_grouping.py +49 -35
  14. flowyml/core/types.py +407 -0
  15. flowyml/monitoring/alerts.py +10 -0
  16. flowyml/monitoring/notifications.py +104 -25
  17. flowyml/monitoring/slack_blocks.py +323 -0
  18. flowyml/plugins/__init__.py +251 -0
  19. flowyml/plugins/alerters/__init__.py +1 -0
  20. flowyml/plugins/alerters/slack.py +168 -0
  21. flowyml/plugins/base.py +752 -0
  22. flowyml/plugins/config.py +478 -0
  23. flowyml/plugins/deployers/__init__.py +22 -0
  24. flowyml/plugins/deployers/gcp_cloud_run.py +200 -0
  25. flowyml/plugins/deployers/sagemaker.py +306 -0
  26. flowyml/plugins/deployers/vertex.py +290 -0
  27. flowyml/plugins/integration.py +369 -0
  28. flowyml/plugins/manager.py +510 -0
  29. flowyml/plugins/model_registries/__init__.py +22 -0
  30. flowyml/plugins/model_registries/mlflow.py +159 -0
  31. flowyml/plugins/model_registries/sagemaker.py +489 -0
  32. flowyml/plugins/model_registries/vertex.py +386 -0
  33. flowyml/plugins/orchestrators/__init__.py +13 -0
  34. flowyml/plugins/orchestrators/sagemaker.py +443 -0
  35. flowyml/plugins/orchestrators/vertex_ai.py +461 -0
  36. flowyml/plugins/registries/__init__.py +13 -0
  37. flowyml/plugins/registries/ecr.py +321 -0
  38. flowyml/plugins/registries/gcr.py +313 -0
  39. flowyml/plugins/registry.py +454 -0
  40. flowyml/plugins/stack.py +494 -0
  41. flowyml/plugins/stack_config.py +537 -0
  42. flowyml/plugins/stores/__init__.py +13 -0
  43. flowyml/plugins/stores/gcs.py +460 -0
  44. flowyml/plugins/stores/s3.py +453 -0
  45. flowyml/plugins/trackers/__init__.py +11 -0
  46. flowyml/plugins/trackers/mlflow.py +316 -0
  47. flowyml/plugins/validators/__init__.py +3 -0
  48. flowyml/plugins/validators/deepchecks.py +119 -0
  49. flowyml/registry/__init__.py +2 -1
  50. flowyml/registry/model_environment.py +109 -0
  51. flowyml/registry/model_registry.py +241 -96
  52. flowyml/serving/__init__.py +17 -0
  53. flowyml/serving/model_server.py +628 -0
  54. flowyml/stacks/__init__.py +60 -0
  55. flowyml/stacks/aws.py +93 -0
  56. flowyml/stacks/base.py +62 -0
  57. flowyml/stacks/components.py +12 -0
  58. flowyml/stacks/gcp.py +44 -9
  59. flowyml/stacks/plugins.py +115 -0
  60. flowyml/stacks/registry.py +2 -1
  61. flowyml/storage/sql.py +401 -12
  62. flowyml/tracking/experiment.py +8 -5
  63. flowyml/ui/backend/Dockerfile +87 -16
  64. flowyml/ui/backend/auth.py +12 -2
  65. flowyml/ui/backend/main.py +149 -5
  66. flowyml/ui/backend/routers/ai_context.py +226 -0
  67. flowyml/ui/backend/routers/assets.py +23 -4
  68. flowyml/ui/backend/routers/auth.py +96 -0
  69. flowyml/ui/backend/routers/deployments.py +660 -0
  70. flowyml/ui/backend/routers/model_explorer.py +597 -0
  71. flowyml/ui/backend/routers/plugins.py +103 -51
  72. flowyml/ui/backend/routers/projects.py +91 -8
  73. flowyml/ui/backend/routers/runs.py +20 -1
  74. flowyml/ui/backend/routers/schedules.py +22 -17
  75. flowyml/ui/backend/routers/templates.py +319 -0
  76. flowyml/ui/backend/routers/websocket.py +2 -2
  77. flowyml/ui/frontend/Dockerfile +55 -6
  78. flowyml/ui/frontend/dist/assets/index-B5AsPTSz.css +1 -0
  79. flowyml/ui/frontend/dist/assets/index-dFbZ8wD8.js +753 -0
  80. flowyml/ui/frontend/dist/index.html +2 -2
  81. flowyml/ui/frontend/dist/logo.png +0 -0
  82. flowyml/ui/frontend/nginx.conf +65 -4
  83. flowyml/ui/frontend/package-lock.json +1404 -74
  84. flowyml/ui/frontend/package.json +3 -0
  85. flowyml/ui/frontend/public/logo.png +0 -0
  86. flowyml/ui/frontend/src/App.jsx +10 -7
  87. flowyml/ui/frontend/src/app/auth/Login.jsx +90 -0
  88. flowyml/ui/frontend/src/app/dashboard/page.jsx +8 -8
  89. flowyml/ui/frontend/src/app/deployments/page.jsx +786 -0
  90. flowyml/ui/frontend/src/app/model-explorer/page.jsx +1031 -0
  91. flowyml/ui/frontend/src/app/pipelines/page.jsx +12 -2
  92. flowyml/ui/frontend/src/app/projects/[projectId]/_components/ProjectExperimentsList.jsx +19 -6
  93. flowyml/ui/frontend/src/app/runs/[runId]/page.jsx +36 -24
  94. flowyml/ui/frontend/src/app/runs/page.jsx +8 -2
  95. flowyml/ui/frontend/src/app/settings/page.jsx +267 -253
  96. flowyml/ui/frontend/src/components/AssetDetailsPanel.jsx +29 -7
  97. flowyml/ui/frontend/src/components/Layout.jsx +6 -0
  98. flowyml/ui/frontend/src/components/PipelineGraph.jsx +79 -29
  99. flowyml/ui/frontend/src/components/RunDetailsPanel.jsx +36 -6
  100. flowyml/ui/frontend/src/components/RunMetaPanel.jsx +113 -0
  101. flowyml/ui/frontend/src/components/ai/AIAssistantButton.jsx +71 -0
  102. flowyml/ui/frontend/src/components/ai/AIAssistantPanel.jsx +420 -0
  103. flowyml/ui/frontend/src/components/header/Header.jsx +22 -0
  104. flowyml/ui/frontend/src/components/plugins/PluginManager.jsx +4 -4
  105. flowyml/ui/frontend/src/components/plugins/{ZenMLIntegration.jsx → StackImport.jsx} +38 -12
  106. flowyml/ui/frontend/src/components/sidebar/Sidebar.jsx +36 -13
  107. flowyml/ui/frontend/src/contexts/AIAssistantContext.jsx +245 -0
  108. flowyml/ui/frontend/src/contexts/AuthContext.jsx +108 -0
  109. flowyml/ui/frontend/src/hooks/useAIContext.js +156 -0
  110. flowyml/ui/frontend/src/hooks/useWebGPU.js +54 -0
  111. flowyml/ui/frontend/src/layouts/MainLayout.jsx +6 -0
  112. flowyml/ui/frontend/src/router/index.jsx +47 -20
  113. flowyml/ui/frontend/src/services/pluginService.js +3 -1
  114. flowyml/ui/server_manager.py +5 -5
  115. flowyml/ui/utils.py +157 -39
  116. flowyml/utils/config.py +37 -15
  117. flowyml/utils/model_introspection.py +123 -0
  118. flowyml/utils/observability.py +30 -0
  119. flowyml-1.8.0.dist-info/METADATA +174 -0
  120. {flowyml-1.7.2.dist-info → flowyml-1.8.0.dist-info}/RECORD +123 -65
  121. {flowyml-1.7.2.dist-info → flowyml-1.8.0.dist-info}/WHEEL +1 -1
  122. flowyml/ui/frontend/dist/assets/index-B40RsQDq.css +0 -1
  123. flowyml/ui/frontend/dist/assets/index-CjI0zKCn.js +0 -685
  124. flowyml-1.7.2.dist-info/METADATA +0 -477
  125. {flowyml-1.7.2.dist-info → flowyml-1.8.0.dist-info}/entry_points.txt +0 -0
  126. {flowyml-1.7.2.dist-info → flowyml-1.8.0.dist-info}/licenses/LICENSE +0 -0
flowyml/plugins/orchestrators/vertex_ai.py (new file)
@@ -0,0 +1,461 @@
+"""Vertex AI Orchestrator - Native FlowyML Plugin.
+
+This is a native FlowyML implementation for Google Cloud Vertex AI Pipelines,
+without requiring any external framework dependencies.
+
+Usage:
+    from flowyml.plugins import get_plugin
+
+    orchestrator = get_plugin("vertex_ai",
+        project="my-gcp-project",
+        location="us-central1"
+    )
+
+    # Run a pipeline
+    orchestrator.run_pipeline(my_pipeline, run_id="run-001")
+"""
+
+import logging
+from typing import Any
+
+from flowyml.plugins.base import OrchestratorPlugin, PluginMetadata, PluginType
+
+logger = logging.getLogger(__name__)
+
+
+class VertexAIOrchestrator(OrchestratorPlugin):
+    """Native Vertex AI Pipelines orchestrator for FlowyML.
+
+    This orchestrator integrates directly with Vertex AI Pipelines
+    without any intermediate framework.
+
+    Args:
+        project: GCP project ID.
+        location: GCP region (e.g., "us-central1").
+        staging_bucket: GCS bucket for staging artifacts.
+        service_account: Service account email for pipeline execution.
+        network: VPC network for pipeline execution.
+
+    Example:
+        orchestrator = VertexAIOrchestrator(
+            project="my-gcp-project",
+            location="us-central1",
+            staging_bucket="gs://my-staging-bucket"
+        )
+
+        result = orchestrator.run_pipeline(
+            pipeline=my_pipeline,
+            run_id="training-run-001"
+        )
+    """
+
+    METADATA = PluginMetadata(
+        name="vertex_ai",
+        description="Google Cloud Vertex AI Pipelines orchestration",
+        plugin_type=PluginType.ORCHESTRATOR,
+        version="1.0.0",
+        author="FlowyML",
+        packages=["google-cloud-aiplatform>=1.25", "kfp>=2.0"],
+        documentation_url="https://cloud.google.com/vertex-ai/docs/pipelines",
+        tags=["orchestrator", "gcp", "vertex-ai", "cloud"],
+    )
+
+    def __init__(
+        self,
+        project: str,
+        location: str,
+        staging_bucket: str = None,
+        service_account: str = None,
+        network: str = None,
+        **kwargs,
+    ):
+        """Initialize the Vertex AI orchestrator."""
+        super().__init__(
+            name=kwargs.pop("name", "vertex_ai"),
+            project=project,
+            location=location,
+            staging_bucket=staging_bucket,
+            service_account=service_account,
+            network=network,
+            **kwargs,
+        )
+
+        self._project = project
+        self._location = location
+        self._staging_bucket = staging_bucket
+        self._aiplatform = None
+
+    def initialize(self) -> None:
+        """Initialize Vertex AI connection."""
+        try:
+            from google.cloud import aiplatform
+
+            aiplatform.init(
+                project=self._project,
+                location=self._location,
+                staging_bucket=self._staging_bucket,
+            )
+
+            self._aiplatform = aiplatform
+            self._is_initialized = True
+            logger.info(f"Vertex AI orchestrator initialized: {self._project}/{self._location}")
+
+        except ImportError:
+            raise ImportError(
+                "google-cloud-aiplatform is not installed. " "Run: flowyml plugin install vertex_ai",
+            )
+
+    def _ensure_initialized(self) -> None:
+        """Ensure Vertex AI is initialized."""
+        if not self._is_initialized:
+            self.initialize()
+
+    def run_pipeline(
+        self,
+        pipeline: Any,
+        run_id: str,
+        context: dict[str, Any] = None,
+        parameters: dict[str, Any] = None,
+        enable_caching: bool = True,
+        **kwargs,
+    ) -> Any:
+        """Run a pipeline on Vertex AI.
+
+        Args:
+            pipeline: The pipeline to run. Can be:
+                - A compiled KFP pipeline (JSON/YAML path)
+                - A FlowyML pipeline object
+            run_id: Unique identifier for this run.
+            context: Optional context dictionary.
+            parameters: Pipeline parameters.
+            enable_caching: Whether to enable step caching.
+            **kwargs: Additional Vertex AI-specific arguments.
+
+        Returns:
+            PipelineJob object.
+        """
+        self._ensure_initialized()
+
+        # Handle different pipeline types
+        if isinstance(pipeline, str):
+            # Assume it's a path to compiled pipeline
+            template_path = pipeline
+        elif hasattr(pipeline, "to_vertex_pipeline"):
+            # FlowyML pipeline with Vertex conversion
+            template_path = pipeline.to_vertex_pipeline()
+        elif callable(pipeline):
+            # KFP pipeline function - compile it
+            template_path = self._compile_kfp_pipeline(pipeline, run_id)
+        else:
+            raise ValueError(
+                f"Unsupported pipeline type: {type(pipeline)}. "
+                "Provide a path to compiled pipeline or a KFP pipeline function.",
+            )
+
+        # Create and run the pipeline job
+        job = self._aiplatform.PipelineJob(
+            display_name=run_id,
+            template_path=template_path,
+            pipeline_root=self._staging_bucket,
+            parameter_values=parameters or {},
+            enable_caching=enable_caching,
+        )
+
+        # Configure service account if provided
+        service_account = self._config.get("service_account")
+        network = self._config.get("network")
+
+        job.run(
+            service_account=service_account,
+            network=network,
+            sync=False,  # Run asynchronously
+        )
+
+        logger.info(f"Started Vertex AI pipeline job: {job.display_name}")
+        logger.info(f"Job resource name: {job.resource_name}")
+
+        return job
+
+    def _compile_kfp_pipeline(self, pipeline_func: Any, run_id: str) -> str:
+        """Compile a KFP pipeline function to a template.
+
+        Args:
+            pipeline_func: KFP pipeline function.
+            run_id: Run ID for naming the compiled file.
+
+        Returns:
+            Path to compiled pipeline template.
+        """
+        try:
+            from kfp import compiler
+            import tempfile
+            import os
+
+            # Compile to a temporary file
+            temp_dir = tempfile.mkdtemp()
+            template_path = os.path.join(temp_dir, f"{run_id}_pipeline.yaml")
+
+            compiler.Compiler().compile(
+                pipeline_func=pipeline_func,
+                package_path=template_path,
+            )
+
+            return template_path
+
+        except ImportError:
+            raise ImportError(
+                "kfp is not installed. Run: flowyml plugin install vertex_ai",
+            )
+
+    def get_run_status(self, run_id: str) -> str:
+        """Get the status of a pipeline run.
+
+        Args:
+            run_id: The run identifier (job resource name).
+
+        Returns:
+            Run status string.
+        """
+        self._ensure_initialized()
+
+        try:
+            job = self._aiplatform.PipelineJob.get(run_id)
+            return job.state.name
+        except Exception as e:
+            logger.error(f"Failed to get run status: {e}")
+            return "unknown"
+
+    def cancel_run(self, run_id: str) -> bool:
+        """Cancel a running pipeline.
+
+        Args:
+            run_id: The run identifier (job resource name).
+
+        Returns:
+            True if cancellation was successful.
+        """
+        self._ensure_initialized()
+
+        try:
+            job = self._aiplatform.PipelineJob.get(run_id)
+            job.cancel()
+            logger.info(f"Cancelled pipeline job: {run_id}")
+            return True
+        except Exception as e:
+            logger.error(f"Failed to cancel run: {e}")
+            return False
+
+    def list_runs(self, pipeline_name: str = None, limit: int = 100) -> list[dict]:
+        """List pipeline runs.
+
+        Args:
+            pipeline_name: Optional filter by pipeline name.
+            limit: Maximum number of runs to return.
+
+        Returns:
+            List of run dictionaries.
+        """
+        self._ensure_initialized()
+
+        try:
+            filter_str = None
+            if pipeline_name:
+                filter_str = f'display_name="{pipeline_name}"'
+
+            jobs = self._aiplatform.PipelineJob.list(
+                filter=filter_str,
+            )
+
+            runs = []
+            for job in jobs[:limit]:
+                runs.append(
+                    {
+                        "run_id": job.resource_name,
+                        "display_name": job.display_name,
+                        "state": job.state.name,
+                        "create_time": str(job.create_time),
+                        "start_time": str(job.start_time) if job.start_time else None,
+                        "end_time": str(job.end_time) if job.end_time else None,
+                    },
+                )
+
+            return runs
+
+        except Exception as e:
+            logger.error(f"Failed to list runs: {e}")
+            return []
+
+    def wait_for_completion(self, job: Any, timeout: int = 3600) -> str:
+        """Wait for a pipeline job to complete.
+
+        Args:
+            job: PipelineJob object from run_pipeline.
+            timeout: Maximum wait time in seconds.
+
+        Returns:
+            Final job state.
+        """
+        self._ensure_initialized()
+
+        job.wait()
+        return job.state.name
+
+    def run_with_routing(
+        self,
+        pipeline: Any,
+        run_id: str,
+        stack_name: str = None,
+        context: dict[str, Any] = None,
+        parameters: dict[str, Any] = None,
+        **kwargs,
+    ) -> Any:
+        """Run a pipeline with type-based artifact routing.
+
+        This method integrates with FlowyML's type-based routing system,
+        ensuring that Model, Dataset, Metrics, and Parameters artifacts
+        are automatically routed to the configured infrastructure.
+
+        Args:
+            pipeline: The pipeline to run.
+            run_id: Unique identifier for this run.
+            stack_name: Stack to use for routing (uses active stack if None).
+            context: Optional context dictionary.
+            parameters: Pipeline parameters.
+            **kwargs: Additional arguments.
+
+        Returns:
+            PipelineJob object with routing metadata.
+        """
+        self._ensure_initialized()
+
+        # Get routing configuration
+        routing_config = self._get_routing_config(stack_name)
+
+        # Inject routing configuration into pipeline context
+        enriched_context = context or {}
+        enriched_context["__flowyml_routing__"] = {
+            "run_id": run_id,
+            "stack": stack_name or "default",
+            "routing_rules": routing_config,
+            "artifact_store": self._config.get("artifact_store_uri"),
+            "model_registry": self._config.get("model_registry"),
+            "experiment_tracker": self._config.get("experiment_tracker"),
+        }
+
+        # Add routing parameters to pipeline
+        enriched_params = parameters or {}
+        enriched_params["__run_id__"] = run_id
+
+        # Run the pipeline
+        job = self.run_pipeline(
+            pipeline=pipeline,
+            run_id=run_id,
+            context=enriched_context,
+            parameters=enriched_params,
+            **kwargs,
+        )
+
+        logger.info(f"Started type-aware pipeline: {run_id}")
+        logger.info(f"Routing config: stack={stack_name or 'active'}")
+
+        return job
+
+    def _get_routing_config(self, stack_name: str = None) -> dict:
+        """Get routing configuration for a stack.
+
+        Args:
+            stack_name: Stack name (uses active stack if None).
+
+        Returns:
+            Dictionary of routing rules.
+        """
+        try:
+            from flowyml.plugins.stack_config import get_stack_manager
+
+            manager = get_stack_manager()
+            stack = manager.get_stack(stack_name) if stack_name else manager.get_active_stack()
+
+            if stack and stack.artifact_routing:
+                return {
+                    "Model": stack.artifact_routing.model.to_dict() if stack.artifact_routing.model else {},
+                    "Dataset": stack.artifact_routing.dataset.to_dict() if stack.artifact_routing.dataset else {},
+                    "Metrics": stack.artifact_routing.metrics.to_dict() if stack.artifact_routing.metrics else {},
+                    "Parameters": stack.artifact_routing.parameters.to_dict()
+                    if stack.artifact_routing.parameters
+                    else {},
+                }
+        except ImportError:
+            logger.debug("Stack config not available for routing")
+        except Exception as e:
+            logger.warning(f"Failed to get routing config: {e}")
+
+        return {}
+
+    def configure_model_deployment(
+        self,
+        model_uri: str,
+        endpoint_name: str,
+        machine_type: str = "n1-standard-4",
+        min_replica_count: int = 1,
+        max_replica_count: int = 1,
+        accelerator_type: str = None,
+        accelerator_count: int = 0,
+    ) -> str:
+        """Deploy a model to a Vertex AI endpoint.
+
+        This method can be used after pipeline completion to deploy
+        registered models to serving endpoints.
+
+        Args:
+            model_uri: URI to the model in Vertex AI Model Registry.
+            endpoint_name: Name for the endpoint.
+            machine_type: Compute machine type.
+            min_replica_count: Minimum replicas.
+            max_replica_count: Maximum replicas.
+            accelerator_type: GPU type if needed.
+            accelerator_count: Number of GPUs.
+
+        Returns:
+            Endpoint URI.
+        """
+        self._ensure_initialized()
+
+        try:
+            # Get or create endpoint
+            endpoints = self._aiplatform.Endpoint.list(
+                filter=f'display_name="{endpoint_name}"',
+            )
+
+            if endpoints:
+                endpoint = endpoints[0]
+                logger.info(f"Using existing endpoint: {endpoint_name}")
+            else:
+                endpoint = self._aiplatform.Endpoint.create(
+                    display_name=endpoint_name,
+                )
+                logger.info(f"Created new endpoint: {endpoint_name}")
+
+            # Get model
+            model = self._aiplatform.Model(model_uri)
+
+            # Deploy
+            machine_config = {"machine_type": machine_type}
+            if accelerator_type and accelerator_count > 0:
+                machine_config["accelerator_type"] = accelerator_type
+                machine_config["accelerator_count"] = accelerator_count
+
+            model.deploy(
+                endpoint=endpoint,
+                deployed_model_display_name=f"{endpoint_name}-model",
+                min_replica_count=min_replica_count,
+                max_replica_count=max_replica_count,
+                **machine_config,
+            )
+
+            endpoint_uri = endpoint.resource_name
+            logger.info(f"Model deployed to endpoint: {endpoint_uri}")
+            return endpoint_uri
+
+        except Exception as e:
+            logger.error(f"Failed to deploy model: {e}")
+            raise
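
For orientation, the module docstring and the methods above suggest the following end-to-end usage pattern. This is a hedged sketch based only on what the file shows, not additional documented API: the project ID, bucket, template path, stack name, and the "epochs" parameter are placeholders, and it assumes get_plugin forwards extra keyword arguments such as staging_bucket to the orchestrator's constructor.

# Hypothetical usage sketch based on vertex_ai.py above; names are placeholders.
from flowyml.plugins import get_plugin

orchestrator = get_plugin(
    "vertex_ai",
    project="my-gcp-project",                 # placeholder GCP project
    location="us-central1",
    staging_bucket="gs://my-staging-bucket",  # assumed to be forwarded to __init__
)

# run_pipeline accepts a compiled KFP template path, a callable KFP pipeline
# function, or a FlowyML pipeline exposing to_vertex_pipeline().
job = orchestrator.run_pipeline(
    pipeline="training_pipeline.yaml",        # placeholder compiled template
    run_id="training-run-001",
    parameters={"epochs": 10},                # placeholder pipeline parameter
)

# The job is submitted asynchronously; poll, wait, or cancel via its resource name.
print(orchestrator.get_run_status(job.resource_name))
final_state = orchestrator.wait_for_completion(job)

# Alternatively, route Model/Dataset/Metrics/Parameters artifacts through a
# FlowyML stack ("gcp-prod" is hypothetical; omit stack_name to use the active stack).
routed_job = orchestrator.run_with_routing(
    pipeline="training_pipeline.yaml",
    run_id="training-run-002",
    stack_name="gcp-prod",
)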
flowyml/plugins/registries/__init__.py (new file)
@@ -0,0 +1,13 @@
+"""FlowyML Container Registry Plugins."""
+
+try:
+    from flowyml.plugins.registries.gcr import GCRRegistry
+except ImportError:
+    GCRRegistry = None
+
+try:
+    from flowyml.plugins.registries.ecr import ECRRegistry
+except ImportError:
+    ECRRegistry = None
+
+__all__ = ["GCRRegistry", "ECRRegistry"]
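
The guarded imports above let the registries package import cleanly even when a cloud SDK is missing, with the class name bound to None instead. A minimal caller-side sketch of the resulting availability check (the error message and printout are illustrative, not from the package):

# Hypothetical availability check enabled by the optional imports above.
from flowyml.plugins.registries import ECRRegistry, GCRRegistry

if GCRRegistry is None:
    # gcr.py failed to import, most likely because the GCP client
    # libraries are not installed in this environment.
    raise RuntimeError("GCR registry support is unavailable in this environment")

available = [name for name, cls in {"gcr": GCRRegistry, "ecr": ECRRegistry}.items() if cls is not None]
print(f"Available container registries: {available}")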