ai-pipeline-core 0.2.6__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. ai_pipeline_core/__init__.py +78 -125
  2. ai_pipeline_core/deployment/__init__.py +34 -0
  3. ai_pipeline_core/deployment/base.py +861 -0
  4. ai_pipeline_core/deployment/contract.py +80 -0
  5. ai_pipeline_core/deployment/deploy.py +561 -0
  6. ai_pipeline_core/deployment/helpers.py +97 -0
  7. ai_pipeline_core/deployment/progress.py +126 -0
  8. ai_pipeline_core/deployment/remote.py +116 -0
  9. ai_pipeline_core/docs_generator/__init__.py +54 -0
  10. ai_pipeline_core/docs_generator/__main__.py +5 -0
  11. ai_pipeline_core/docs_generator/cli.py +196 -0
  12. ai_pipeline_core/docs_generator/extractor.py +324 -0
  13. ai_pipeline_core/docs_generator/guide_builder.py +644 -0
  14. ai_pipeline_core/docs_generator/trimmer.py +35 -0
  15. ai_pipeline_core/docs_generator/validator.py +114 -0
  16. ai_pipeline_core/document_store/__init__.py +13 -0
  17. ai_pipeline_core/document_store/_summary.py +9 -0
  18. ai_pipeline_core/document_store/_summary_worker.py +170 -0
  19. ai_pipeline_core/document_store/clickhouse.py +492 -0
  20. ai_pipeline_core/document_store/factory.py +38 -0
  21. ai_pipeline_core/document_store/local.py +312 -0
  22. ai_pipeline_core/document_store/memory.py +85 -0
  23. ai_pipeline_core/document_store/protocol.py +68 -0
  24. ai_pipeline_core/documents/__init__.py +12 -14
  25. ai_pipeline_core/documents/_context_vars.py +85 -0
  26. ai_pipeline_core/documents/_hashing.py +52 -0
  27. ai_pipeline_core/documents/attachment.py +85 -0
  28. ai_pipeline_core/documents/context.py +128 -0
  29. ai_pipeline_core/documents/document.py +318 -1434
  30. ai_pipeline_core/documents/mime_type.py +37 -82
  31. ai_pipeline_core/documents/utils.py +4 -12
  32. ai_pipeline_core/exceptions.py +10 -62
  33. ai_pipeline_core/images/__init__.py +309 -0
  34. ai_pipeline_core/images/_processing.py +151 -0
  35. ai_pipeline_core/llm/__init__.py +6 -4
  36. ai_pipeline_core/llm/ai_messages.py +130 -81
  37. ai_pipeline_core/llm/client.py +327 -193
  38. ai_pipeline_core/llm/model_options.py +14 -86
  39. ai_pipeline_core/llm/model_response.py +60 -103
  40. ai_pipeline_core/llm/model_types.py +16 -34
  41. ai_pipeline_core/logging/__init__.py +2 -7
  42. ai_pipeline_core/logging/logging.yml +1 -1
  43. ai_pipeline_core/logging/logging_config.py +27 -37
  44. ai_pipeline_core/logging/logging_mixin.py +15 -41
  45. ai_pipeline_core/observability/__init__.py +32 -0
  46. ai_pipeline_core/observability/_debug/__init__.py +30 -0
  47. ai_pipeline_core/observability/_debug/_auto_summary.py +94 -0
  48. ai_pipeline_core/observability/_debug/_config.py +95 -0
  49. ai_pipeline_core/observability/_debug/_content.py +764 -0
  50. ai_pipeline_core/observability/_debug/_processor.py +98 -0
  51. ai_pipeline_core/observability/_debug/_summary.py +312 -0
  52. ai_pipeline_core/observability/_debug/_types.py +75 -0
  53. ai_pipeline_core/observability/_debug/_writer.py +843 -0
  54. ai_pipeline_core/observability/_document_tracking.py +146 -0
  55. ai_pipeline_core/observability/_initialization.py +194 -0
  56. ai_pipeline_core/observability/_logging_bridge.py +57 -0
  57. ai_pipeline_core/observability/_summary.py +81 -0
  58. ai_pipeline_core/observability/_tracking/__init__.py +6 -0
  59. ai_pipeline_core/observability/_tracking/_client.py +178 -0
  60. ai_pipeline_core/observability/_tracking/_internal.py +28 -0
  61. ai_pipeline_core/observability/_tracking/_models.py +138 -0
  62. ai_pipeline_core/observability/_tracking/_processor.py +158 -0
  63. ai_pipeline_core/observability/_tracking/_service.py +311 -0
  64. ai_pipeline_core/observability/_tracking/_writer.py +229 -0
  65. ai_pipeline_core/{tracing.py → observability/tracing.py} +139 -283
  66. ai_pipeline_core/pipeline/__init__.py +10 -0
  67. ai_pipeline_core/pipeline/decorators.py +915 -0
  68. ai_pipeline_core/pipeline/options.py +16 -0
  69. ai_pipeline_core/prompt_manager.py +16 -102
  70. ai_pipeline_core/settings.py +26 -31
  71. ai_pipeline_core/testing.py +9 -0
  72. ai_pipeline_core-0.4.1.dist-info/METADATA +807 -0
  73. ai_pipeline_core-0.4.1.dist-info/RECORD +76 -0
  74. {ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.4.1.dist-info}/WHEEL +1 -1
  75. ai_pipeline_core/documents/document_list.py +0 -420
  76. ai_pipeline_core/documents/flow_document.py +0 -112
  77. ai_pipeline_core/documents/task_document.py +0 -117
  78. ai_pipeline_core/documents/temporary_document.py +0 -74
  79. ai_pipeline_core/flow/__init__.py +0 -9
  80. ai_pipeline_core/flow/config.py +0 -483
  81. ai_pipeline_core/flow/options.py +0 -75
  82. ai_pipeline_core/pipeline.py +0 -718
  83. ai_pipeline_core/prefect.py +0 -63
  84. ai_pipeline_core/simple_runner/__init__.py +0 -14
  85. ai_pipeline_core/simple_runner/cli.py +0 -254
  86. ai_pipeline_core/simple_runner/simple_runner.py +0 -247
  87. ai_pipeline_core/storage/__init__.py +0 -8
  88. ai_pipeline_core/storage/storage.py +0 -628
  89. ai_pipeline_core/utils/__init__.py +0 -8
  90. ai_pipeline_core/utils/deploy.py +0 -373
  91. ai_pipeline_core/utils/remote_deployment.py +0 -269
  92. ai_pipeline_core-0.2.6.dist-info/METADATA +0 -500
  93. ai_pipeline_core-0.2.6.dist-info/RECORD +0 -41
  94. {ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.4.1.dist-info}/licenses/LICENSE +0 -0
ai_pipeline_core/prefect.py (deleted)
@@ -1,63 +0,0 @@
- """Prefect core features for pipeline orchestration.
-
- This module provides clean re-exports of essential Prefect functionality.
-
- IMPORTANT: You should NEVER use the `task` and `flow` decorators directly
- unless it is 100% impossible to use `pipeline_task` and `pipeline_flow`.
- The standard Prefect decorators are exported here only for extremely
- limited edge cases where the pipeline decorators cannot be used.
-
- Always prefer:
-     >>> from ai_pipeline_core import pipeline_task, pipeline_flow
-     >>>
-     >>> @pipeline_task
-     >>> async def my_task(...): ...
-     >>>
-     >>> @pipeline_flow
-     >>> async def my_flow(...): ...
-
- The `task` and `flow` decorators should only be used when:
-     - You absolutely cannot convert to async (pipeline decorators require async)
-     - You have a very specific Prefect integration that conflicts with tracing
-     - You are writing test utilities or infrastructure code
-
- Exported components:
-     task: Prefect task decorator (AVOID - use pipeline_task instead).
-     flow: Prefect flow decorator (AVOID - use pipeline_flow instead).
-     disable_run_logger: Context manager to suppress Prefect logging.
-     prefect_test_harness: Test harness for unit testing flows/tasks.
-
- Testing utilities (use as fixtures):
-     The disable_run_logger and prefect_test_harness should be used as
-     pytest fixtures as shown in tests/conftest.py:
-
-     >>> @pytest.fixture(autouse=True, scope="session")
-     >>> def prefect_test_fixture():
-     ...     with prefect_test_harness():
-     ...         yield
-     >>>
-     >>> @pytest.fixture(autouse=True)
-     >>> def disable_prefect_logging():
-     ...     with disable_run_logger():
-     ...         yield
-
- Note:
-     The pipeline_task and pipeline_flow decorators from
-     ai_pipeline_core.pipeline provide async-only execution with
-     integrated LMNR tracing and are the standard for this library.
- """
-
- from prefect import deploy, flow, serve, task
- from prefect.logging import disable_run_logger
- from prefect.testing.utilities import prefect_test_harness
- from prefect.types.entrypoint import EntrypointType
-
- __all__ = [
-     "task",
-     "flow",
-     "disable_run_logger",
-     "prefect_test_harness",
-     "serve",
-     "deploy",
-     "EntrypointType",
- ]
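
The removed docstring above prescribes the pipeline decorators as the default. A minimal sketch of that pattern, assuming the 0.4.1 top-level exports keep the `pipeline_task`/`pipeline_flow` names shown in the removed docstring (decorator parameters beyond bare usage are not confirmed by this diff):

    from ai_pipeline_core import pipeline_flow, pipeline_task


    @pipeline_task
    async def summarize(text: str) -> str:
        # Pipeline tasks must be async; per the removed docstring, the
        # decorator wraps the function as a Prefect task with LMNR tracing.
        return text[:100]


    @pipeline_flow
    async def summarize_flow(text: str) -> str:
        # Flows compose tasks; awaiting the task runs it inside the flow.
        return await summarize(text)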
ai_pipeline_core/simple_runner/__init__.py (deleted)
@@ -1,14 +0,0 @@
- """Simple pipeline execution for local development.
-
- Utilities for running AI pipelines locally without full Prefect orchestration.
- """
-
- from .cli import run_cli
- from .simple_runner import FlowSequence, run_pipeline, run_pipelines
-
- __all__ = [
-     "run_cli",
-     "run_pipeline",
-     "run_pipelines",
-     "FlowSequence",
- ]
ai_pipeline_core/simple_runner/cli.py (deleted)
@@ -1,254 +0,0 @@
- """Command-line interface for simple pipeline execution."""
-
- import asyncio
- import os
- import sys
- from contextlib import ExitStack
- from pathlib import Path
- from typing import Callable, Type, TypeVar, cast
-
- from lmnr import Laminar
- from pydantic import ValidationError
- from pydantic_settings import CliPositionalArg, SettingsConfigDict
-
- from ai_pipeline_core.documents import DocumentList
- from ai_pipeline_core.flow.options import FlowOptions
- from ai_pipeline_core.logging import get_pipeline_logger, setup_logging
- from ai_pipeline_core.prefect import disable_run_logger, prefect_test_harness
- from ai_pipeline_core.settings import settings
-
- from .simple_runner import FlowSequence, run_pipelines
-
- logger = get_pipeline_logger(__name__)
-
- TOptions = TypeVar("TOptions", bound=FlowOptions)
- """Type variable for FlowOptions subclasses used in CLI."""
-
- InitializerFunc = Callable[[FlowOptions], tuple[str, DocumentList]] | None
- """Function type for custom pipeline initialization.
-
- Initializers can create initial documents or setup project state
- before flow execution begins.
-
- Args:
-     FlowOptions: Parsed CLI options
-
- Returns:
-     Tuple of (project_name, initial_documents) or None
- """
-
-
- def _initialize_environment() -> None:
-     """Initialize logging and observability systems.
-
-     Sets up the pipeline logging configuration and attempts to
-     initialize LMNR (Laminar) for distributed tracing. Failures
-     in LMNR initialization are logged but don't stop execution.
-
-     Side effects:
-         - Configures Python logging system
-         - Initializes Laminar SDK if API key is available
-         - Logs initialization status
-
-     Note:
-         Called automatically by run_cli before parsing arguments.
-     """
-     setup_logging()
-     try:
-         Laminar.initialize()
-         logger.info("LMNR tracing initialized.")
-     except Exception as e:
-         logger.warning(f"Failed to initialize LMNR tracing: {e}")
-
-
- def _running_under_pytest() -> bool:
-     """Check if code is running under pytest.
-
-     Detects pytest execution context to determine whether test
-     fixtures will provide necessary contexts (like Prefect test
-     harness). This prevents duplicate context setup.
-
-     Returns:
-         True if running under pytest, False otherwise.
-
-     Detection methods:
-         - PYTEST_CURRENT_TEST environment variable (set by pytest)
-         - 'pytest' module in sys.modules (imported by test runner)
-
-     Note:
-         Used to avoid setting up test harness when pytest fixtures
-         already provide it.
-     """
-     return "PYTEST_CURRENT_TEST" in os.environ or "pytest" in sys.modules
-
-
- def run_cli(
-     *,
-     flows: FlowSequence,
-     options_cls: Type[TOptions],
-     initializer: InitializerFunc = None,
-     trace_name: str | None = None,
- ) -> None:
-     """Execute pipeline flows from command-line arguments.
-
-     Environment setup:
-         - Initializes logging system
-         - Sets up LMNR tracing (if API key configured)
-         - Creates Prefect test harness (if no API key and not in pytest)
-         - Manages context stack for proper cleanup
-
-     Raises:
-         ValueError: If project name is empty after initialization.
-
-     Example:
-         >>> # In __main__.py
-         >>> from ai_pipeline_core import simple_runner
-         >>> from .flows import AnalysisFlow, SummaryFlow
-         >>> from .config import AnalysisOptions
-         >>>
-         >>> if __name__ == "__main__":
-         ...     simple_runner.run_cli(
-         ...         flows=[AnalysisFlow, SummaryFlow],
-         ...         options_cls=AnalysisOptions,
-         ...         trace_name="document-analysis"
-         ...     )
-
-     Command line:
-         $ python -m my_module ./output --temperature 0.5 --model gpt-5
-         $ python -m my_module ./output --start 2  # Skip first flow
-
-     Note:
-         - Field names are converted to kebab-case for CLI (max_tokens → --max-tokens)
-         - Boolean fields become flags (--verbose/--no-verbose)
-         - Field descriptions from Pydantic become help text
-         - Type hints are enforced during parsing
-         - Validation errors show helpful messages with field names
-         - Includes hints for common error types (numbers, ranges)
-         - Exits with status 1 on error
-         - Shows --help when no arguments provided
-     """
-     # Check if no arguments provided before initialization
-     if len(sys.argv) == 1:
-         # Add --help to show usage when run without arguments
-         sys.argv.append("--help")
-
-     _initialize_environment()
-
-     class _RunnerOptions(  # type: ignore[reportRedeclaration]
-         options_cls,
-         cli_parse_args=True,
-         cli_kebab_case=True,
-         cli_exit_on_error=True,  # Let it exit normally on error
-         cli_prog_name="ai-pipeline",
-         cli_use_class_docs_for_groups=True,
-     ):
-         """Internal options class combining user options with CLI arguments.
-
-         Dynamically created class that inherits from user's options_cls
-         and adds standard CLI arguments for pipeline execution.
-         """
-
-         working_directory: CliPositionalArg[Path]
-         project_name: str | None = None
-         start: int = 1
-         end: int | None = None
-
-         model_config = SettingsConfigDict(frozen=True, extra="ignore")
-
-     try:
-         opts = cast(FlowOptions, _RunnerOptions())  # type: ignore[reportCallIssue]
-     except ValidationError as e:
-         print("\nError: Invalid command line arguments\n", file=sys.stderr)
-         for error in e.errors():
-             field = " -> ".join(str(loc) for loc in error["loc"])
-             msg = error["msg"]
-             value = error.get("input", "")
-
-             # Format the field name nicely (convert from snake_case to kebab-case for CLI)
-             cli_field = field.replace("_", "-")
-
-             print(f" --{cli_field}: {msg}", file=sys.stderr)
-             if value:
-                 print(f" Provided value: '{value}'", file=sys.stderr)
-
-             # Add helpful hints for common errors
-             if error["type"] == "float_parsing":
-                 print(" Hint: Please provide a valid number (e.g., 0.7)", file=sys.stderr)
-             elif error["type"] == "int_parsing":
-                 print(" Hint: Please provide a valid integer (e.g., 10)", file=sys.stderr)
-             elif error["type"] == "literal_error":
-                 ctx = error.get("ctx", {})
-                 expected = ctx.get("expected", "valid options")
-                 print(f" Hint: Valid options are: {expected}", file=sys.stderr)
-             elif error["type"] in [
-                 "less_than_equal",
-                 "greater_than_equal",
-                 "less_than",
-                 "greater_than",
-             ]:
-                 ctx = error.get("ctx", {})
-                 if "le" in ctx:
-                     print(f" Hint: Value must be ≤ {ctx['le']}", file=sys.stderr)
-                 elif "ge" in ctx:
-                     print(f" Hint: Value must be ≥ {ctx['ge']}", file=sys.stderr)
-                 elif "lt" in ctx:
-                     print(f" Hint: Value must be < {ctx['lt']}", file=sys.stderr)
-                 elif "gt" in ctx:
-                     print(f" Hint: Value must be > {ctx['gt']}", file=sys.stderr)
-
-         print("\nRun with --help to see all available options\n", file=sys.stderr)
-         sys.exit(1)
-
-     wd: Path = cast(Path, getattr(opts, "working_directory"))
-     wd.mkdir(parents=True, exist_ok=True)
-
-     # Get project name from options or use directory basename
-     project_name = getattr(opts, "project_name", None)
-     if not project_name:  # None or empty string
-         project_name = wd.name
-
-     # Ensure project_name is not empty
-     if not project_name:
-         raise ValueError("Project name cannot be empty")
-
-     # Use initializer if provided, otherwise use defaults
-     initial_documents = DocumentList([])
-     if initializer:
-         init_result = initializer(opts)
-         # Always expect tuple format from initializer
-         _, initial_documents = init_result  # Ignore project name from initializer
-
-     # Save initial documents if starting from first step
-     if getattr(opts, "start", 1) == 1 and initial_documents and flows:
-         # Get config from the first flow
-         first_flow_config = getattr(flows[0], "config", None)
-         if first_flow_config:
-             asyncio.run(
-                 first_flow_config.save_documents(
-                     str(wd), initial_documents, validate_output_type=False
-                 )
-             )
-
-     # Setup context stack with optional test harness and tracing
-     with ExitStack() as stack:
-         if trace_name:
-             stack.enter_context(
-                 Laminar.start_as_current_span(
-                     name=f"{trace_name}-{project_name}", input=[opts.model_dump_json()]
-                 )
-             )
-
-         if not settings.prefect_api_key and not _running_under_pytest():
-             stack.enter_context(prefect_test_harness())
-             stack.enter_context(disable_run_logger())
-
-         asyncio.run(
-             run_pipelines(
-                 project_name=project_name,
-                 output_dir=wd,
-                 flows=flows,
-                 flow_options=opts,
-                 start_step=getattr(opts, "start", 1),
-                 end_step=getattr(opts, "end", None),
-             )
-         )
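
For reference, a sketch of how this removed `run_cli` entrypoint was typically wired up, reconstructed from its own docstring; `AnalysisFlow`, `SummaryFlow`, and `AnalysisOptions` are illustrative user-defined names, not part of the library:

    # __main__.py of a project still on 0.2.6 (illustrative names)
    from ai_pipeline_core import simple_runner
    from ai_pipeline_core.flow.options import FlowOptions

    from .flows import AnalysisFlow, SummaryFlow  # user-defined @pipeline_flow functions


    class AnalysisOptions(FlowOptions):
        # Pydantic fields surface as kebab-case CLI flags (--temperature)
        temperature: float = 0.7


    if __name__ == "__main__":
        simple_runner.run_cli(
            flows=[AnalysisFlow, SummaryFlow],
            options_cls=AnalysisOptions,
            trace_name="document-analysis",
        )

Invocation then follows the docstring examples: `python -m my_module ./output --temperature 0.5`, or `python -m my_module ./output --start 2` to skip the first flow.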
ai_pipeline_core/simple_runner/simple_runner.py (deleted)
@@ -1,247 +0,0 @@
- """Simple pipeline runner for local flow execution.
-
- This module provides the core functionality for running AI pipeline flows
- locally without full Prefect orchestration. It handles document I/O,
- flow sequencing, and error management.
-
- Key components:
-     - Document I/O from/to filesystem directories via FlowConfig
-     - Single and multi-flow execution
-     - Automatic document validation and passing between flows
-     - Step-based execution control (start/end steps)
-
- Directory structure:
-     working_dir/
-     ├── inputdocument/ # Documents of type InputDocument (lowercase)
-     │   ├── file1.txt
-     │   └── file1.txt.description.md # Optional description
-     └── outputdocument/ # Documents of type OutputDocument (lowercase)
-         └── result.json
-
- Example:
-     >>> from ai_pipeline_core import simple_runner
-     >>>
-     >>> # Run single flow
-     >>> results = await simple_runner.run_pipeline(
-     ...     flow_func=MyFlow,
-     ...     config=MyConfig,
-     ...     project_name="test",
-     ...     output_dir=Path("./output"),
-     ...     flow_options=options
-     ... )
-
- Note:
-     Document directories are organized by document type names (lowercase)
-     for consistent structure and easy access.
- """
-
- from pathlib import Path
- from typing import Any, Callable, Sequence
-
- from ai_pipeline_core.documents import DocumentList
- from ai_pipeline_core.flow.options import FlowOptions
- from ai_pipeline_core.logging import get_pipeline_logger
-
- logger = get_pipeline_logger(__name__)
-
- FlowSequence = Sequence[Callable[..., Any]]
- """Type alias for a sequence of flow functions."""
-
-
- async def run_pipeline(
-     flow_func: Callable[..., Any],
-     project_name: str,
-     output_dir: Path,
-     flow_options: FlowOptions,
-     flow_name: str | None = None,
- ) -> DocumentList:
-     """Execute a single pipeline flow with document I/O.
-
-     Runs a flow function with automatic document loading, validation,
-     and saving. The flow receives input documents from the filesystem
-     and saves its output for subsequent flows.
-
-     The execution proceeds through these steps:
-     1. Load input documents from output_dir subdirectories
-     2. Validate input documents against flow's config requirements
-     3. Execute flow function with documents and options
-     4. Validate output documents match config.OUTPUT_DOCUMENT_TYPE
-     5. Save output documents to output_dir subdirectories
-
-     Args:
-         flow_func: Async flow function decorated with @pipeline_flow.
-             Must accept (project_name, documents, flow_options).
-             The flow must have a config attribute set by @pipeline_flow.
-
-         project_name: Name of the project/pipeline for logging and tracking.
-
-         output_dir: Directory for loading input and saving output documents.
-             Document subdirectories are created as needed.
-
-         flow_options: Configuration options passed to the flow function.
-             Can be FlowOptions or any subclass.
-
-         flow_name: Optional display name for logging. If None, uses
-             flow_func.name or flow_func.__name__.
-
-     Returns:
-         DocumentList containing the flow's output documents.
-
-     Raises:
-         RuntimeError: If required input documents are missing or if
-             flow doesn't have a config attribute.
-
-     Example:
-         >>> from my_flows import AnalysisFlow
-         >>>
-         >>> results = await run_pipeline(
-         ...     flow_func=AnalysisFlow,
-         ...     project_name="analysis_001",
-         ...     output_dir=Path("./results"),
-         ...     flow_options=FlowOptions(temperature=0.7)
-         ... )
-         >>> print(f"Generated {len(results)} documents")
-
-     Note:
-         - Flow must be async (decorated with @pipeline_flow with config)
-         - Input documents are loaded based on flow's config.INPUT_DOCUMENT_TYPES
-         - Output is validated against config.OUTPUT_DOCUMENT_TYPE
-         - All I/O is logged for debugging
-     """
-     if flow_name is None:
-         # For Prefect Flow objects, use their name attribute
-         # For regular functions, fall back to __name__
-         flow_name = getattr(flow_func, "name", None) or getattr(flow_func, "__name__", "flow")
-
-     logger.info(f"Running Flow: {flow_name}")
-
-     # Get config from the flow function (attached by @pipeline_flow decorator)
-     config = getattr(flow_func, "config", None)
-     if config is None:
-         raise RuntimeError(
-             f"Flow {flow_name} does not have a config attribute. "
-             "Ensure it's decorated with @pipeline_flow(config=YourConfig)"
-         )
-
-     # Load input documents using FlowConfig's new async method
-     input_documents = await config.load_documents(str(output_dir))
-
-     if not config.has_input_documents(input_documents):
-         raise RuntimeError(f"Missing input documents for flow {flow_name}")
-
-     result_documents = await flow_func(project_name, input_documents, flow_options)
-
-     config.validate_output_documents(result_documents)
-
-     # Save output documents using FlowConfig's new async method
-     await config.save_documents(str(output_dir), result_documents)
-
-     logger.info(f"Completed Flow: {flow_name}")
-
-     return result_documents
-
-
- async def run_pipelines(
-     project_name: str,
-     output_dir: Path,
-     flows: FlowSequence,
-     flow_options: FlowOptions,
-     start_step: int = 1,
-     end_step: int | None = None,
- ) -> None:
-     """Execute multiple pipeline flows in sequence.
-
-     Runs a series of flows where each flow's output becomes the input
-     for the next flow. Supports partial execution with start/end steps
-     for debugging and resuming failed pipelines.
-
-     Execution proceeds by:
-     1. Validating step indices
-     2. For each flow in range [start_step, end_step]:
-        a. Loading input documents from output_dir
-        b. Executing flow with documents
-        c. Saving output documents to output_dir
-        d. Output becomes input for next flow
-     3. Logging progress and any failures
-
-     Steps are 1-based for user convenience. Step 1 is the first flow,
-     Step N is the Nth flow. Use start_step > 1 to skip initial flows
-     and end_step < N to stop early.
-
-     Args:
-         project_name: Name of the overall pipeline/project.
-         output_dir: Directory for document I/O between flows.
-             Shared by all flows in the sequence.
-         flows: Sequence of flow functions to execute in order.
-             Must all be async functions decorated with @pipeline_flow
-             with a config parameter.
-         flow_options: Options passed to all flows in the sequence.
-             Individual flows can use different fields.
-         start_step: First flow to execute (1-based index).
-             Default 1 starts from the beginning.
-         end_step: Last flow to execute (1-based index).
-             None runs through the last flow.
-
-     Raises:
-         ValueError: If start_step or end_step are out of range.
-         RuntimeError: If any flow doesn't have a config attribute.
-
-     Example:
-         >>> # Run full pipeline
-         >>> await run_pipelines(
-         ...     project_name="analysis",
-         ...     output_dir=Path("./work"),
-         ...     flows=[ExtractFlow, AnalyzeFlow, SummarizeFlow],
-         ...     flow_options=options
-         ... )
-         >>>
-         >>> # Run only steps 2-3 (skip extraction)
-         >>> await run_pipelines(
-         ...     ...,
-         ...     start_step=2,
-         ...     end_step=3
-         ... )
-
-     Note:
-         - Each flow must be decorated with @pipeline_flow(config=...)
-         - Each flow's output must match the next flow's input types
-         - Failed flows stop the entire pipeline
-         - Progress is logged with step numbers for debugging
-         - Documents persist in output_dir between runs
-     """
-     num_steps = len(flows)
-     start_index = start_step - 1
-     end_index = (end_step if end_step is not None else num_steps) - 1
-
-     if (
-         not (0 <= start_index < num_steps)
-         or not (0 <= end_index < num_steps)
-         or start_index > end_index
-     ):
-         raise ValueError("Invalid start/end steps.")
-
-     logger.info(f"Starting pipeline '{project_name}' (Steps {start_step} to {end_index + 1})")
-
-     for i in range(start_index, end_index + 1):
-         flow_func = flows[i]
-         # For Prefect Flow objects, use their name attribute; for functions, use __name__
-         flow_name = getattr(flow_func, "name", None) or getattr(
-             flow_func, "__name__", f"flow_{i + 1}"
-         )
-
-         logger.info(f"--- [Step {i + 1}/{num_steps}] Running Flow: {flow_name} ---")
-
-         try:
-             await run_pipeline(
-                 flow_func=flow_func,
-                 project_name=project_name,
-                 output_dir=output_dir,
-                 flow_options=flow_options,
-                 flow_name=f"[Step {i + 1}/{num_steps}] {flow_name}",
-             )
-
-         except Exception as e:
-             logger.error(
-                 f"--- [Step {i + 1}/{num_steps}] Flow {flow_name} Failed: {e} ---", exc_info=True
-             )
-             raise
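
A sketch of partial execution with this removed runner, assembled from the docstrings above: resuming at step 2 reuses the documents that step 1 previously saved under the shared output directory. `ExtractFlow`, `AnalyzeFlow`, and `SummarizeFlow` are placeholders, and constructing `FlowOptions()` with defaults is an assumption:

    import asyncio
    from pathlib import Path

    from ai_pipeline_core.flow.options import FlowOptions
    from ai_pipeline_core.simple_runner import run_pipelines

    from my_flows import AnalyzeFlow, ExtractFlow, SummarizeFlow  # placeholder flows

    # Steps are 1-based: start_step=2 skips ExtractFlow and loads its
    # saved output from ./work; end_step=3 stops after SummarizeFlow.
    asyncio.run(
        run_pipelines(
            project_name="analysis",
            output_dir=Path("./work"),
            flows=[ExtractFlow, AnalyzeFlow, SummarizeFlow],
            flow_options=FlowOptions(),
            start_step=2,
            end_step=3,
        )
    )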
ai_pipeline_core/storage/__init__.py (deleted)
@@ -1,8 +0,0 @@
- """Storage module for ai_pipeline_core.
-
- @public
- """
-
- from ai_pipeline_core.storage.storage import ObjectInfo, RetryPolicy, Storage
-
- __all__ = ["Storage", "ObjectInfo", "RetryPolicy"]