flowyml 1.5.0__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
flowyml/core/pipeline.py CHANGED
@@ -134,17 +134,53 @@ class Pipeline:
134
134
  >>> pipeline = Pipeline("my_pipeline", context=ctx)
135
135
  >>> pipeline.add_step(train)
136
136
  >>> result = pipeline.run()
137
+
138
+ # With project_name, automatically creates/attaches to project
139
+ >>> pipeline = Pipeline("my_pipeline", context=ctx, project_name="ml_project")
140
+
141
+ # With version parameter, automatically creates VersionedPipeline
142
+ >>> pipeline = Pipeline("my_pipeline", context=ctx, version="v1.0.1", project_name="ml_project")
137
143
  """
138
144
 
145
def __new__(
    cls,
    name: str,
    version: str | None = None,
    project_name: str | None = None,
    project: str | None = None,  # For backward compatibility
    **kwargs,
):
    """Allocate a Pipeline, or build a VersionedPipeline when a version is given.

    Passing ``version`` switches construction over to
    ``flowyml.core.versioning.VersionedPipeline``; otherwise a plain
    Pipeline instance is allocated as usual.
    """
    if version is None:
        # No version requested — ordinary allocation path.
        return super().__new__(cls)

    # Version supplied: delegate the whole construction to VersionedPipeline.
    from flowyml.core.versioning import VersionedPipeline

    vp_kwargs = dict(kwargs)
    # Prefer the modern `project_name` spelling; fall back to legacy `project`.
    if project_name:
        vp_kwargs["project_name"] = project_name
    elif project:
        vp_kwargs["project"] = project
    return VersionedPipeline(name=name, version=version, **vp_kwargs)
169
+
139
170
  def __init__(
140
171
  self,
141
172
  name: str,
142
173
  context: Context | None = None,
143
174
  executor: Executor | None = None,
144
175
  enable_cache: bool = True,
176
+ enable_checkpointing: bool | None = None, # None means use config default
177
+ enable_experiment_tracking: bool | None = None, # None means use config default (True)
145
178
  cache_dir: str | None = None,
146
179
  stack: Any | None = None, # Stack instance
147
- project: str | None = None, # Project name to attach to
180
+ project: str | None = None, # Project name to attach to (deprecated, use project_name)
181
+ project_name: str | None = None, # Project name to attach to (creates if doesn't exist)
182
+ version: str | None = None, # If provided, VersionedPipeline is created via __new__
183
+ **kwargs,
148
184
  ):
149
185
  """Initialize pipeline.
150
186
 
@@ -153,13 +189,35 @@ class Pipeline:
153
189
  context: Optional context for parameter injection
154
190
  executor: Optional executor (defaults to LocalExecutor)
155
191
  enable_cache: Whether to enable caching
192
+ enable_checkpointing: Whether to enable checkpointing (defaults to config setting, True by default)
193
+ enable_experiment_tracking: Whether to enable automatic experiment tracking (defaults to config.auto_log_metrics, True by default)
156
194
  cache_dir: Optional directory for cache
157
195
  stack: Optional stack instance to run on
158
- project: Optional project name to attach this pipeline to.
196
+ project: Optional project name to attach this pipeline to (deprecated, use project_name)
197
+ project_name: Optional project name to attach this pipeline to.
198
+ If the project doesn't exist, it will be created automatically.
199
+ version: Optional version string. If provided, a VersionedPipeline
200
+ instance will be created instead of a regular Pipeline.
201
+ **kwargs: Additional keyword arguments passed to the pipeline.
202
+ instance is automatically created instead of a regular Pipeline.
159
203
  """
204
+ from flowyml.utils.config import get_config
205
+
160
206
  self.name = name
161
207
  self.context = context or Context()
162
208
  self.enable_cache = enable_cache
209
+
210
+ # Set checkpointing (use config default if not specified)
211
+ config = get_config()
212
+ self.enable_checkpointing = (
213
+ enable_checkpointing if enable_checkpointing is not None else config.enable_checkpointing
214
+ )
215
+
216
+ # Set experiment tracking (use config default if not specified, default: True)
217
+ # Can be set via enable_experiment_tracking parameter or defaults to config.auto_log_metrics
218
+ self.enable_experiment_tracking = (
219
+ enable_experiment_tracking if enable_experiment_tracking is not None else config.auto_log_metrics
220
+ )
163
221
  self.stack = None # Will be assigned via _apply_stack
164
222
  self._stack_locked = stack is not None
165
223
  self._provided_executor = executor
@@ -182,23 +240,28 @@ class Pipeline:
182
240
 
183
241
  # Initialize components from stack or defaults
184
242
  self.executor = executor or LocalExecutor()
185
- # Metadata store for UI integration
243
+ # Metadata store for UI integration - use same store as UI
186
244
  from flowyml.storage.metadata import SQLiteMetadataStore
245
+ from flowyml.utils.config import get_config
187
246
 
188
- self.metadata_store = SQLiteMetadataStore()
247
+ config = get_config()
248
+ # Use the same metadata database path as the UI to ensure visibility
249
+ self.metadata_store = SQLiteMetadataStore(db_path=str(config.metadata_db))
189
250
 
190
251
  if stack:
191
252
  self._apply_stack(stack, locked=True)
192
253
 
193
254
  # Handle Project Attachment
194
- if project:
255
+ # Support both project_name (preferred) and project (for backward compatibility)
256
+ project_to_use = project_name or project
257
+ if project_to_use:
195
258
  from flowyml.core.project import ProjectManager
196
259
 
197
260
  manager = ProjectManager()
198
261
  # Get or create project
199
- proj = manager.get_project(project)
262
+ proj = manager.get_project(project_to_use)
200
263
  if not proj:
201
- proj = manager.create_project(project)
264
+ proj = manager.create_project(project_to_use)
202
265
 
203
266
  # Configure pipeline with project settings
204
267
  self.runs_dir = proj.runs_dir
@@ -209,9 +272,15 @@ class Pipeline:
209
272
  proj.metadata["pipelines"].append(name)
210
273
  proj._save_metadata()
211
274
 
275
+ # Store project name for later use (e.g., in _save_run)
276
+ self.project_name = project_to_use
277
+ else:
278
+ self.project_name = None
279
+
212
280
  # State
213
281
  self._built = False
214
282
  self.step_groups: list[Any] = [] # Will hold StepGroup objects
283
+ self.control_flows: list[Any] = [] # Store conditional control flows (If, Switch, etc.)
215
284
 
216
285
  def _apply_stack(self, stack: Any | None, locked: bool) -> None:
217
286
  """Attach a stack and update executors/metadata."""
@@ -238,6 +307,32 @@ class Pipeline:
238
307
  self._built = False
239
308
  return self
240
309
 
310
def add_control_flow(self, control_flow: Any) -> "Pipeline":
    """Register a conditional control flow on the pipeline.

    Args:
        control_flow: Control flow object (If, Switch, etc.)

    Returns:
        Self for chaining

    Example:
        ```python
        from flowyml import If

        pipeline.add_control_flow(
            If(
                condition=lambda ctx: ctx.steps["evaluate_model"].outputs["accuracy"] > 0.9,
                then_step=deploy_model,
                else_step=retrain_model,
            )
        )
        ```
    """
    # Invalidate the cached DAG so the next build() picks up the new flow.
    self._built = False
    self.control_flows.append(control_flow)
    return self
335
+
241
336
  def build(self) -> None:
242
337
  """Build the execution DAG."""
243
338
  if self._built:
@@ -277,9 +372,11 @@ class Pipeline:
277
372
  inputs: dict[str, Any] | None = None,
278
373
  debug: bool = False,
279
374
  stack: Any | None = None, # Stack override
375
+ orchestrator: Any | None = None, # Orchestrator override (takes precedence over stack orchestrator)
280
376
  resources: Any | None = None, # ResourceConfig
281
377
  docker_config: Any | None = None, # DockerConfig
282
378
  context: dict[str, Any] | None = None, # Context vars override
379
+ auto_start_ui: bool = True, # Auto-start UI server
283
380
  **kwargs,
284
381
  ) -> PipelineResult:
285
382
  """Execute the pipeline.
@@ -287,19 +384,76 @@ class Pipeline:
287
384
  Args:
288
385
  inputs: Optional input data for the pipeline
289
386
  debug: Enable debug mode with detailed logging
290
- stack: Stack override (uses self.stack if not provided)
387
+ stack: Stack override (uses self.stack or active stack if not provided)
388
+ orchestrator: Orchestrator override (takes precedence over stack orchestrator)
291
389
  resources: Resource configuration for execution
292
390
  docker_config: Docker configuration for containerized execution
293
391
  context: Context variables override
392
+ auto_start_ui: Automatically start UI server if not running and display URL
294
393
  **kwargs: Additional arguments passed to the orchestrator
295
394
 
395
+ Note:
396
+ The orchestrator is determined in this priority order:
397
+ 1. Explicit `orchestrator` parameter (if provided)
398
+ 2. Stack's orchestrator (if stack is set/active)
399
+ 3. Default LocalOrchestrator
400
+
401
+ When using a stack (e.g., GCPStack), the stack's orchestrator is automatically
402
+ used unless explicitly overridden. This is the recommended approach for
403
+ production deployments.
404
+
296
405
  Returns:
297
406
  PipelineResult with outputs and execution info
298
407
  """
299
408
  import uuid
300
409
  from flowyml.core.orchestrator import LocalOrchestrator
410
+ from flowyml.core.checkpoint import PipelineCheckpoint
411
+ from flowyml.utils.config import get_config
301
412
 
302
- run_id = str(uuid.uuid4())
413
+ # Generate or use provided run_id
414
+ run_id = kwargs.pop("run_id", None) or str(uuid.uuid4())
415
+
416
+ # Initialize checkpointing if enabled
417
+ if self.enable_checkpointing:
418
+ config = get_config()
419
+ checkpoint = PipelineCheckpoint(
420
+ run_id=run_id,
421
+ checkpoint_dir=str(config.checkpoint_dir),
422
+ )
423
+
424
+ # Check if we should resume from checkpoint
425
+ if checkpoint.exists():
426
+ checkpoint_data = checkpoint.load()
427
+ completed_steps = checkpoint_data.get("completed_steps", [])
428
+ if completed_steps:
429
+ # Auto-resume: use checkpoint state
430
+ if hasattr(self, "_display") and self._display:
431
+ self._display.console.print(
432
+ f"[yellow]📦 Resuming from checkpoint: {len(completed_steps)} steps already completed[/yellow]",
433
+ )
434
+ # Store checkpoint info for orchestrator
435
+ self._checkpoint = checkpoint
436
+ self._resume_from_checkpoint = True
437
+ self._completed_steps_from_checkpoint = set(completed_steps)
438
+ else:
439
+ self._checkpoint = checkpoint
440
+ self._resume_from_checkpoint = False
441
+ self._completed_steps_from_checkpoint = set()
442
+ else:
443
+ self._checkpoint = checkpoint
444
+ self._resume_from_checkpoint = False
445
+ self._completed_steps_from_checkpoint = set()
446
+ else:
447
+ self._checkpoint = None
448
+ self._resume_from_checkpoint = False
449
+ self._completed_steps_from_checkpoint = set()
450
+
451
+ # Auto-start UI server if requested
452
+ ui_url = None
453
+ run_url = None
454
+ ui_start_failed = False
455
+ if auto_start_ui:
456
+ ui_url, run_url, ui_start_failed = self._ensure_ui_server(run_id)
303
457
 
304
458
  # Determine stack for this run
305
459
  if stack is not None:
@@ -316,9 +470,12 @@ class Pipeline:
316
470
  self._apply_stack(active_stack, locked=False)
317
471
 
318
472
  # Determine orchestrator
319
- orchestrator = getattr(self.stack, "orchestrator", None) if self.stack else None
473
+ # Priority: 1) Explicit orchestrator parameter, 2) Stack orchestrator, 3) Default LocalOrchestrator
320
474
  if orchestrator is None:
321
- orchestrator = LocalOrchestrator()
475
+ # Use orchestrator from stack if available
476
+ orchestrator = getattr(self.stack, "orchestrator", None) if self.stack else None
477
+ if orchestrator is None:
478
+ orchestrator = LocalOrchestrator()
322
479
 
323
480
  # Update context with provided values
324
481
  if context:
@@ -331,6 +488,28 @@ class Pipeline:
331
488
  resource_config = self._coerce_resource_config(resources)
332
489
  docker_cfg = self._coerce_docker_config(docker_config)
333
490
 
491
+ # Initialize display system for beautiful CLI output
492
+ display = None
493
+ try:
494
+ from flowyml.core.display import PipelineDisplay
495
+
496
+ display = PipelineDisplay(
497
+ pipeline_name=self.name,
498
+ steps=self.steps,
499
+ dag=self.dag,
500
+ verbose=True,
501
+ ui_url=ui_url, # Pass UI URL for prominent display at start
502
+ run_url=run_url, # Pass run-specific URL for clickable link
503
+ )
504
+ display.show_header()
505
+ display.show_execution_start()
506
+ except Exception:
507
+ # Silently fail if display system not available
508
+ pass
509
+
510
+ # Store display on pipeline for orchestrator to use
511
+ self._display = display
512
+
334
513
  # Run the pipeline via orchestrator
335
514
  result = orchestrator.run_pipeline(
336
515
  self,
@@ -342,6 +521,10 @@ class Pipeline:
342
521
  **kwargs,
343
522
  )
344
523
 
524
+ # Show summary (only if result is a PipelineResult, not a string)
525
+ if display and not isinstance(result, str):
526
+ display.show_summary(result, ui_url=ui_url, run_url=run_url)
527
+
345
528
  # If result is just a job ID (remote execution), wrap it in a basic result
346
529
  if isinstance(result, str):
347
530
  # Create a submitted result wrapper
@@ -352,6 +535,10 @@ class Pipeline:
352
535
  self._save_pipeline_definition()
353
536
  return wrapper
354
537
 
538
+ # Ensure result has configs attached (in case orchestrator didn't do it)
539
+ if hasattr(result, "attach_configs") and not hasattr(result, "resource_config"):
540
+ result.attach_configs(resource_config, docker_cfg)
541
+
355
542
  return result
356
543
 
357
544
  def to_definition(self) -> dict:
@@ -368,6 +555,7 @@ class Pipeline:
368
555
  "outputs": step.outputs,
369
556
  "source_code": step.source_code,
370
557
  "tags": step.tags,
558
+ "execution_group": step.execution_group,
371
559
  }
372
560
  for step in self.steps
373
561
  ],
@@ -395,6 +583,163 @@ class Pipeline:
395
583
  # Don't fail the run if definition saving fails
396
584
  print(f"Warning: Failed to save pipeline definition: {e}")
397
585
 
586
+ def _ensure_ui_server(self, run_id: str) -> tuple[str | None, str | None, bool]:
587
+ """Ensure UI server is running, start it if needed, or show guidance.
588
+
589
+ Args:
590
+ run_id: The run ID for generating the run URL
591
+
592
+ Returns:
593
+ Tuple of (ui_url, run_url, start_failed)
594
+ - ui_url: Base URL of the UI server if running
595
+ - run_url: URL to view this specific run if server is running
596
+ - start_failed: True if we tried to start and failed (show guidance)
597
+ """
598
+ import subprocess
599
+ import sys
600
+ import time
601
+ from pathlib import Path
602
+
603
+ try:
604
+ from flowyml.ui.utils import is_ui_running, get_ui_host_port
605
+ except ImportError:
606
+ return None, None, False
607
+
608
+ host, port = get_ui_host_port()
609
+ url = f"http://{host}:{port}"
610
+
611
+ # Check if already running
612
+ if is_ui_running(host, port):
613
+ return url, f"{url}/runs/{run_id}", False
614
+
615
+ # Try to start the UI server as a background subprocess
616
+ try:
617
+ # Check if uvicorn is available
618
+ try:
619
+ import uvicorn # noqa: F401
620
+ except ImportError:
621
+ # uvicorn not installed, show guidance but don't fail
622
+ self._show_ui_guidance(host, port, reason="missing_deps")
623
+ return None, None, True
624
+
625
+ # Start uvicorn as a background process
626
+ cmd = [
627
+ sys.executable,
628
+ "-m",
629
+ "uvicorn",
630
+ "flowyml.ui.backend.main:app",
631
+ "--host",
632
+ host,
633
+ "--port",
634
+ str(port),
635
+ "--log-level",
636
+ "warning",
637
+ ]
638
+
639
+ # Start as detached background process
640
+ if sys.platform == "win32":
641
+ process = subprocess.Popen(
642
+ cmd,
643
+ stdout=subprocess.DEVNULL,
644
+ stderr=subprocess.DEVNULL,
645
+ creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.DETACHED_PROCESS,
646
+ )
647
+ else:
648
+ process = subprocess.Popen(
649
+ cmd,
650
+ stdout=subprocess.DEVNULL,
651
+ stderr=subprocess.DEVNULL,
652
+ start_new_session=True,
653
+ )
654
+
655
+ # Wait for server to start (up to 8 seconds)
656
+ started = False
657
+ for _ in range(80):
658
+ time.sleep(0.1)
659
+ if is_ui_running(host, port):
660
+ started = True
661
+ break
662
+
663
+ if started:
664
+ # Save PID for later stop command
665
+ pid_file = Path.home() / ".flowyml" / "ui_server.pid"
666
+ pid_file.parent.mkdir(parents=True, exist_ok=True)
667
+ pid_file.write_text(f"{process.pid}\n{host}\n{port}")
668
+
669
+ return url, f"{url}/runs/{run_id}", False
670
+ else:
671
+ # Server didn't start, kill the process and show guidance
672
+ process.terminate()
673
+ self._show_ui_guidance(host, port, reason="start_failed")
674
+ return None, None, True
675
+
676
+ except Exception:
677
+ # Show guidance on failure
678
+ self._show_ui_guidance(host, port, reason="error")
679
+ return None, None, True
680
+
681
+ def _show_ui_guidance(self, host: str, port: int, reason: str = "not_running") -> None:
682
+ """Show a helpful message guiding the user to start the UI server.
683
+
684
+ Args:
685
+ host: Host the server should run on
686
+ port: Port the server should run on
687
+ reason: Why we're showing guidance (not_running, missing_deps, start_failed, error)
688
+ """
689
+ try:
690
+ from rich.console import Console
691
+ from rich.panel import Panel
692
+ from rich.text import Text
693
+ from rich import box
694
+
695
+ console = Console()
696
+
697
+ content = Text()
698
+ content.append("💡 ", style="yellow")
699
+ content.append("Want to see your pipeline run in a live dashboard?\n\n", style="bold")
700
+
701
+ if reason == "missing_deps":
702
+ content.append("UI dependencies not installed. ", style="dim")
703
+ content.append("Install with:\n", style="")
704
+ content.append(" pip install uvicorn fastapi\n\n", style="bold cyan")
705
+
706
+ content.append("Start the dashboard with:\n", style="")
707
+ content.append(" flowyml go", style="bold green")
708
+
709
+ if port != 8080:
710
+ content.append(f" --port {port}", style="bold green")
711
+
712
+ content.append("\n\n", style="")
713
+ content.append("Then run your pipeline again to see it in the UI!", style="dim")
714
+
715
+ console.print()
716
+ console.print(
717
+ Panel(
718
+ content,
719
+ title="[bold cyan]🌐 Dashboard Available[/bold cyan]",
720
+ border_style="yellow",
721
+ box=box.ROUNDED,
722
+ ),
723
+ )
724
+ console.print()
725
+
726
+ except ImportError:
727
+ # Fallback to simple print
728
+ print()
729
+ print("=" * 60)
730
+ print("💡 Want to see your pipeline run in a live dashboard?")
731
+ print()
732
+ if reason == "missing_deps":
733
+ print(" UI dependencies not installed. Install with:")
734
+ print(" pip install uvicorn fastapi")
735
+ print()
736
+ print(" Start the dashboard with:")
737
+ print(" flowyml go" + (f" --port {port}" if port != 8080 else ""))
738
+ print()
739
+ print(" Then run your pipeline again to see it in the UI!")
740
+ print("=" * 60)
741
+ print()
742
+
398
743
  def _coerce_resource_config(self, resources: Any | None):
399
744
  """Convert resources input to ResourceConfig if necessary."""
400
745
  if resources is None:
@@ -425,6 +770,92 @@ class Pipeline:
425
770
  return DockerConfig(**docker_config)
426
771
  return docker_config
427
772
 
773
def _log_experiment_metrics(self, result: PipelineResult) -> None:
    """Automatically log Metrics to experiment tracking.

    Extracts Metrics objects from pipeline outputs and logs them along with
    context parameters to the experiment tracking system.

    This is called automatically after each pipeline run if experiment tracking is enabled.
    """
    from flowyml.utils.config import get_config
    from flowyml.assets.metrics import Metrics

    config = get_config()

    # Per-pipeline flag takes precedence; otherwise fall back to the global
    # config default (auto_log_metrics, True when unset).
    enabled = getattr(self, "enable_experiment_tracking", None)
    if enabled is None:
        enabled = getattr(config, "auto_log_metrics", True)
    if not enabled:
        return

    def _flatten(metrics_obj):
        # Prefer the accessor; fall back to raw data, then an empty dict.
        return metrics_obj.get_all_metrics() or metrics_obj.data or {}

    # Collect every metric reachable from the pipeline outputs.
    all_metrics = {}
    for output_name, output_value in result.outputs.items():
        if isinstance(output_value, Metrics):
            # Outputs literally named "metrics" keep their bare metric keys;
            # everything else is prefixed with the output name to avoid clashes.
            plain = output_name == "metrics" or output_name.endswith("/metrics")
            for key, value in _flatten(output_value).items():
                all_metrics[key if plain else f"{output_name}.{key}"] = value
        elif isinstance(output_value, dict):
            # One level deep: dict values that are Metrics objects.
            for key, val in output_value.items():
                if isinstance(val, Metrics):
                    for mkey, mval in _flatten(val).items():
                        all_metrics[f"{key}.{mkey}"] = mval

    # Snapshot context parameters alongside the metrics.
    context_params = self.context.to_dict() if self.context else {}

    # Nothing to record — skip the tracking round-trip entirely.
    if not (all_metrics or context_params):
        return

    try:
        from flowyml.tracking.experiment import Experiment
        from flowyml.tracking.runs import Run

        # One experiment per pipeline, keyed by pipeline name.
        experiment = Experiment(
            name=self.name,
            description=f"Auto-tracked experiment for pipeline: {self.name}",
        )
        experiment.log_run(
            run_id=result.run_id,
            metrics=all_metrics,
            parameters=context_params,
        )

        # Also create/update a Run object for compatibility.
        run = Run(
            run_id=result.run_id,
            pipeline_name=self.name,
            parameters=context_params,
        )
        if all_metrics:
            run.log_metrics(all_metrics)
        run.complete(status="success" if result.success else "failed")

    except Exception as e:
        # Never fail the pipeline because experiment logging broke.
        import warnings

        warnings.warn(f"Failed to log experiment metrics: {e}", stacklevel=2)
428
859
  def _save_run(self, result: PipelineResult) -> None:
429
860
  """Save run results to disk and metadata database."""
430
861
  # Save to JSON file
@@ -467,6 +898,7 @@ class Pipeline:
467
898
  "inputs": step.inputs,
468
899
  "outputs": step.outputs,
469
900
  "tags": step.tags,
901
+ "execution_group": step.execution_group,
470
902
  "resources": step.resources.to_dict() if hasattr(step.resources, "to_dict") else step.resources,
471
903
  }
472
904
 
@@ -489,9 +921,13 @@ class Pipeline:
489
921
  if hasattr(result.docker_config, "to_dict")
490
922
  else result.docker_config,
491
923
  "remote_job_id": result.remote_job_id,
924
+ "project": getattr(self, "project_name", None), # Include project for stats tracking
492
925
  }
493
926
  self.metadata_store.save_run(result.run_id, metadata)
494
927
 
928
+ # Automatic experiment tracking: Extract Metrics and log to experiments
929
+ self._log_experiment_metrics(result)
930
+
495
931
  # Save artifacts and metrics
496
932
  for step_name, step_result in result.step_results.items():
497
933
  if step_result.success and step_result.output is not None:
flowyml/core/project.py CHANGED
@@ -107,6 +107,9 @@ class Project:
107
107
  # Use project metadata store
108
108
  pipeline.metadata_store = self.metadata_store
109
109
 
110
+ # Set project name on pipeline for stats tracking
111
+ pipeline.project_name = self.name
112
+
110
113
  # Register pipeline
111
114
  if name not in self.metadata["pipelines"]:
112
115
  self.metadata["pipelines"].append(name)
@@ -161,7 +164,7 @@ class Project:
161
164
 
162
165
def get_stats(self) -> dict[str, Any]:
    """Return aggregate statistics for this project.

    Combines store-level run statistics (scoped to this project) with the
    project name and the count of registered pipelines.
    """
    stats = self.metadata_store.get_statistics(project=self.name)
    stats.update(
        project_name=self.name,
        pipelines=len(self.metadata["pipelines"]),
    )
    return stats
  return stats