vibe-aigc 0.6.3__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vibe_aigc/__init__.py +46 -4
- vibe_aigc/composer_general.py +408 -1
- vibe_aigc/discovery.py +107 -2
- vibe_aigc/knowledge.py +512 -24
- vibe_aigc/llm.py +305 -62
- vibe_aigc/models.py +98 -1
- vibe_aigc/pipeline.py +565 -0
- vibe_aigc/planner.py +145 -0
- vibe_aigc/tools.py +32 -0
- vibe_aigc/tools_audio.py +746 -0
- vibe_aigc/tools_comfyui.py +708 -3
- vibe_aigc/tools_utility.py +997 -0
- vibe_aigc/tools_video.py +799 -0
- vibe_aigc/tools_vision.py +1187 -0
- vibe_aigc/vibe_backend.py +11 -1
- vibe_aigc/vlm_feedback.py +186 -7
- {vibe_aigc-0.6.3.dist-info → vibe_aigc-0.7.0.dist-info}/METADATA +29 -1
- {vibe_aigc-0.6.3.dist-info → vibe_aigc-0.7.0.dist-info}/RECORD +22 -17
- {vibe_aigc-0.6.3.dist-info → vibe_aigc-0.7.0.dist-info}/WHEEL +0 -0
- {vibe_aigc-0.6.3.dist-info → vibe_aigc-0.7.0.dist-info}/entry_points.txt +0 -0
- {vibe_aigc-0.6.3.dist-info → vibe_aigc-0.7.0.dist-info}/licenses/LICENSE +0 -0
- {vibe_aigc-0.6.3.dist-info → vibe_aigc-0.7.0.dist-info}/top_level.txt +0 -0
vibe_aigc/pipeline.py
ADDED
|
@@ -0,0 +1,565 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Pipeline: Workflow Chaining for Multi-Step Content Generation.
|
|
3
|
+
|
|
4
|
+
Enables chaining of tools into sequential pipelines where the output
|
|
5
|
+
of one step becomes the input for the next. This is the operational
|
|
6
|
+
layer that executes decomposed workflows from MetaPlanner.
|
|
7
|
+
|
|
8
|
+
Example:
|
|
9
|
+
pipeline = Pipeline([
|
|
10
|
+
PipelineStep(tool="image_generation", config={"width": 768}),
|
|
11
|
+
PipelineStep(tool="upscale", config={"scale": 2}),
|
|
12
|
+
PipelineStep(tool="video_generation", config={"frames": 33})
|
|
13
|
+
])
|
|
14
|
+
result = await pipeline.execute({"prompt": "samurai in rain"})
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import asyncio
import logging
import time
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Dict, List, Optional, Any, Callable, Union, TYPE_CHECKING
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING:
|
|
25
|
+
from .tools import ToolRegistry, BaseTool
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class PipelineStatus(str, Enum):
    """Status of pipeline execution.

    Subclasses ``str`` so members compare equal to their plain string
    values (e.g. ``PipelineStatus.FAILED == "failed"``) and serialize
    cleanly via ``.value``.
    """
    PENDING = "pending"      # not yet started
    RUNNING = "running"      # execution in progress
    COMPLETED = "completed"  # finished; also used for skipped steps
    FAILED = "failed"        # a step failed and halted the pipeline
    PAUSED = "paused"        # NOTE(review): never set in this module — presumably reserved for suspend/resume
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
@dataclass
class PipelineStep:
    """
    A single step in a pipeline.

    Attributes:
        tool: Tool name (resolved from registry) or BaseTool instance
        config: Configuration merged into this step's inputs (highest priority)
        name: Optional human-readable name; derived from the tool when omitted
        condition: Optional predicate over the merged step inputs; when it
            returns a falsy value the step is skipped and inputs pass through
        on_error: Error handling strategy ('fail', 'skip', 'retry')
        max_retries: Number of retries if on_error='retry'
        output_key: Key to store this step's output in accumulated results
    """
    tool: Union[str, 'BaseTool']
    config: Dict[str, Any] = field(default_factory=dict)
    name: Optional[str] = None
    # Typed with typing.Callable (not the builtin `callable`) so static
    # checkers understand the annotation. The predicate receives the merged
    # input dict and returns a truthy value when the step should run.
    condition: Optional[Callable[[Dict[str, Any]], bool]] = None
    on_error: str = "fail"  # 'fail', 'skip', 'retry'
    max_retries: int = 3
    output_key: Optional[str] = None

    def __post_init__(self) -> None:
        """Derive a readable step name when none was supplied explicitly."""
        if self.name is not None:
            return
        if isinstance(self.tool, str):
            self.name = self.tool
            return
        # Tool instances may carry a spec object exposing a `name` attribute.
        # The original code briefly assigned the spec object itself to
        # self.name before replacing it; read the name directly instead.
        spec = getattr(self.tool, 'spec', None)
        self.name = spec.name if spec else "unknown_step"
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
@dataclass
class StepResult:
    """Outcome of one pipeline step: status, output, timing and retry count."""
    step_name: str
    step_index: int
    status: PipelineStatus
    output: Any = None
    error: Optional[str] = None
    duration: float = 0.0
    retries: int = 0
    started_at: Optional[str] = None
    completed_at: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, collapsing the status enum to its value."""
        payload = dict(
            step_name=self.step_name,
            step_index=self.step_index,
            status=self.status.value,
            output=self.output,
            error=self.error,
            duration=self.duration,
            retries=self.retries,
            started_at=self.started_at,
            completed_at=self.completed_at,
        )
        return payload
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
@dataclass
class PipelineResult:
    """Aggregate outcome of a pipeline run.

    Carries the overall status, the last step's output, every per-step
    result, and timing/metadata for the whole execution.
    """
    status: PipelineStatus
    final_output: Any
    step_results: List[StepResult]
    total_duration: float
    started_at: str
    completed_at: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, serializing each step result in turn."""
        serialized_steps = [step.to_dict() for step in self.step_results]
        return dict(
            status=self.status.value,
            final_output=self.final_output,
            step_results=serialized_steps,
            total_duration=self.total_duration,
            started_at=self.started_at,
            completed_at=self.completed_at,
            metadata=self.metadata,
        )

    @property
    def success(self) -> bool:
        """True when the whole pipeline completed without a fatal failure."""
        return self.status == PipelineStatus.COMPLETED

    def get_step_output(self, step_name_or_index: Union[str, int]) -> Optional[Any]:
        """Look up one step's output by index or by step name.

        Returns None for an out-of-range index or an unknown name.
        """
        results = self.step_results
        if isinstance(step_name_or_index, int):
            in_range = 0 <= step_name_or_index < len(results)
            return results[step_name_or_index].output if in_range else None
        matches = (r.output for r in results if r.step_name == step_name_or_index)
        return next(matches, None)
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
class Pipeline:
    """
    Chains tools into sequential execution pipelines.

    The Pipeline class implements workflow chaining where:
    1. Each step receives the merged output of previous steps + its config
    2. Output keys allow accumulating results across steps
    3. Conditions can skip steps dynamically
    4. Error handling is configurable per step

    Example:
        # Create pipeline with tool names (resolved from registry)
        pipeline = Pipeline([
            PipelineStep(tool="image_generate", config={"size": "768x768"}),
            PipelineStep(tool="upscale", config={"scale": 2}),
            PipelineStep(tool="video_generate", config={"frames": 33})
        ], tool_registry=registry)

        result = await pipeline.execute({"prompt": "samurai in rain"})

        # Or create with tool instances directly
        pipeline = Pipeline([
            PipelineStep(tool=ImageGenerationTool(), config={"size": "1024x1024"}),
            PipelineStep(tool=UpscaleTool(), config={"scale": 4})
        ])
    """

    def __init__(
        self,
        steps: List[PipelineStep],
        tool_registry: Optional['ToolRegistry'] = None,
        name: Optional[str] = None,
        description: Optional[str] = None
    ):
        """
        Initialize a pipeline.

        Args:
            steps: List of PipelineStep objects defining the chain
            tool_registry: Registry for resolving tool names to instances
            name: Optional name for this pipeline
            description: Optional description of what this pipeline does
        """
        self.steps = steps
        self.tool_registry = tool_registry
        self.name = name or f"pipeline_{len(steps)}_steps"
        self.description = description
        # Resolved tool instances cached per step index so registry lookup
        # (including alias resolution) happens at most once per step.
        self._resolved_tools: Dict[int, 'BaseTool'] = {}

    def _resolve_tool(self, step: PipelineStep, step_index: int) -> 'BaseTool':
        """Resolve a step's tool to an instance, caching by step index.

        Raises:
            ValueError: If the tool is given by name but no registry is
                available, or neither the name nor its known aliases exist
                in the registry.
        """
        if step_index in self._resolved_tools:
            return self._resolved_tools[step_index]

        tool = step.tool

        # Already a tool instance: cache and use it directly.
        if not isinstance(tool, str):
            self._resolved_tools[step_index] = tool
            return tool

        if not self.tool_registry:
            raise ValueError(
                f"Step {step_index} uses tool name '{tool}' but no tool_registry provided. "
                "Either pass tool instances directly or provide a tool_registry."
            )

        resolved = self.tool_registry.get(tool)
        if not resolved:
            # Fall back to common aliases (e.g. planner-emitted generic names).
            aliases = {
                "image_generation": "image_generate",
                "video_generation": "video_generate",
                "audio_generation": "audio_generate",
                "text_generation": "llm_generate",
                "upscale": "image_upscale",
            }
            aliased = aliases.get(tool)
            if aliased:
                resolved = self.tool_registry.get(aliased)

        if not resolved:
            available = [t.name for t in self.tool_registry.list_tools()]
            raise ValueError(
                f"Tool '{tool}' not found in registry. Available tools: {available}"
            )

        self._resolved_tools[step_index] = resolved
        return resolved

    def _merge_inputs(
        self,
        accumulated: Dict[str, Any],
        step_config: Dict[str, Any],
        prev_output: Any
    ) -> Dict[str, Any]:
        """
        Merge accumulated results, previous output, and step config into inputs.

        Priority (highest to lowest):
        1. Step config (explicit configuration)
        2. Previous step output (chained results)
        3. Accumulated results (all previous outputs)
        """
        result = dict(accumulated)

        # Dict outputs merge key-by-key; anything else is carried under a
        # reserved key so the next tool can still access it.
        if isinstance(prev_output, dict):
            result.update(prev_output)
        elif prev_output is not None:
            result["_previous_output"] = prev_output

        # Step config has highest priority.
        result.update(step_config)

        return result

    async def _execute_step(
        self,
        step: PipelineStep,
        step_index: int,
        inputs: Dict[str, Any],
        context: Optional[Dict[str, Any]] = None
    ) -> StepResult:
        """Execute a single pipeline step with retry logic.

        Returns a StepResult; a skipped step (condition falsy, or failure
        with on_error='skip') reports COMPLETED with inputs passed through.
        """
        started_at = datetime.now().isoformat()
        start_time = time.time()
        retries = 0
        last_error = None

        # Check condition; a falsy result skips the step and passes inputs
        # through. A raising condition is logged and treated as "run".
        if step.condition:
            try:
                should_run = step.condition(inputs)
                if not should_run:
                    return StepResult(
                        step_name=step.name,
                        step_index=step_index,
                        status=PipelineStatus.COMPLETED,
                        output=inputs,  # Pass through unchanged
                        duration=0,
                        started_at=started_at,
                        completed_at=datetime.now().isoformat()
                    )
            except Exception as e:
                logger.warning(f"Condition check failed for step {step.name}: {e}")

        # Resolve tool; a resolution failure fails the step immediately.
        try:
            tool = self._resolve_tool(step, step_index)
        except ValueError as e:
            return StepResult(
                step_name=step.name,
                step_index=step_index,
                status=PipelineStatus.FAILED,
                error=str(e),
                duration=time.time() - start_time,
                started_at=started_at,
                completed_at=datetime.now().isoformat()
            )

        # Execute with retries (only 'retry' strategy gets multiple attempts).
        max_attempts = step.max_retries + 1 if step.on_error == "retry" else 1

        for attempt in range(max_attempts):
            try:
                result = await tool.execute(inputs, context)

                if result.success:
                    output = result.output
                    # Namespace the output under the configured key so it is
                    # stored in the accumulated results under that name.
                    # FIX: previously only dict outputs were wrapped, so
                    # scalar outputs were silently dropped from accumulation
                    # despite output_key's documented purpose.
                    if step.output_key:
                        output = {step.output_key: output}

                    return StepResult(
                        step_name=step.name,
                        step_index=step_index,
                        status=PipelineStatus.COMPLETED,
                        output=output,
                        duration=time.time() - start_time,
                        retries=retries,
                        started_at=started_at,
                        completed_at=datetime.now().isoformat()
                    )
                else:
                    last_error = result.error
                    retries += 1

            except Exception as e:
                last_error = str(e)
                retries += 1
                logger.warning(f"Step {step.name} attempt {attempt + 1} failed: {e}")

            # Exponential backoff between retries (0.5s, 1s, 2s, ...).
            if attempt < max_attempts - 1:
                await asyncio.sleep(0.5 * (2 ** attempt))

        # All attempts failed; 'skip' degrades to pass-through.
        if step.on_error == "skip":
            return StepResult(
                step_name=step.name,
                step_index=step_index,
                status=PipelineStatus.COMPLETED,
                output=inputs,  # Pass through unchanged
                error=f"Skipped after error: {last_error}",
                duration=time.time() - start_time,
                retries=retries,
                started_at=started_at,
                completed_at=datetime.now().isoformat()
            )

        return StepResult(
            step_name=step.name,
            step_index=step_index,
            status=PipelineStatus.FAILED,
            error=last_error,
            duration=time.time() - start_time,
            retries=retries,
            started_at=started_at,
            completed_at=datetime.now().isoformat()
        )

    async def execute(
        self,
        initial_input: Dict[str, Any],
        context: Optional[Dict[str, Any]] = None
    ) -> PipelineResult:
        """
        Execute the pipeline with the given initial input.

        Args:
            initial_input: Initial input dict (e.g., {"prompt": "..."})
            context: Optional execution context passed to all tools

        Returns:
            PipelineResult with final output and all step results; FAILED
            as soon as any step fails (with failure details in metadata).
        """
        started_at = datetime.now().isoformat()
        start_time = time.time()

        step_results: List[StepResult] = []
        accumulated: Dict[str, Any] = dict(initial_input)
        current_output: Any = initial_input

        # Copy the caller's context so the bookkeeping keys added below do
        # not leak back into the caller's dict (original code mutated it).
        exec_context = dict(context) if context else {}
        exec_context["pipeline_name"] = self.name
        exec_context["total_steps"] = len(self.steps)

        for i, step in enumerate(self.steps):
            exec_context["current_step"] = i
            exec_context["step_name"] = step.name

            # Merge inputs for this step.
            step_inputs = self._merge_inputs(accumulated, step.config, current_output)

            logger.info(f"Executing pipeline step {i + 1}/{len(self.steps)}: {step.name}")

            step_result = await self._execute_step(step, i, step_inputs, exec_context)
            step_results.append(step_result)

            # A failed step aborts the whole pipeline.
            if step_result.status == PipelineStatus.FAILED:
                return PipelineResult(
                    status=PipelineStatus.FAILED,
                    final_output=None,
                    step_results=step_results,
                    total_duration=time.time() - start_time,
                    started_at=started_at,
                    completed_at=datetime.now().isoformat(),
                    metadata={
                        "failed_step": i,
                        "failed_step_name": step.name,
                        "error": step_result.error
                    }
                )

            # Accumulate output; only dict outputs merge into the shared state.
            current_output = step_result.output
            if isinstance(current_output, dict):
                accumulated.update(current_output)

        return PipelineResult(
            status=PipelineStatus.COMPLETED,
            final_output=current_output,
            step_results=step_results,
            total_duration=time.time() - start_time,
            started_at=started_at,
            completed_at=datetime.now().isoformat(),
            metadata={"steps_executed": len(step_results)}
        )

    def add_step(self, step: PipelineStep) -> 'Pipeline':
        """Add a step to the pipeline. Returns self for chaining."""
        self.steps.append(step)
        return self

    def __len__(self) -> int:
        """Number of steps in the pipeline."""
        return len(self.steps)

    def __repr__(self) -> str:
        step_names = [s.name for s in self.steps]
        return f"Pipeline({self.name}, steps={step_names})"
|
|
448
|
+
|
|
449
|
+
|
|
450
|
+
class PipelineBuilder:
    """
    Fluent builder for creating pipelines.

    Example:
        pipeline = (PipelineBuilder("image_to_video")
            .add("image_generate", size="1024x1024")
            .add("upscale", scale=2)
            .add("video_generate", frames=33)
            .build(registry))
    """

    def __init__(self, name: str, description: Optional[str] = None):
        self.name = name
        self.description = description
        # Steps accumulate here until build() is called.
        self._steps: List[PipelineStep] = []

    def add(
        self,
        tool: Union[str, 'BaseTool'],
        name: Optional[str] = None,
        on_error: str = "fail",
        condition: Optional[callable] = None,
        output_key: Optional[str] = None,
        **config
    ) -> 'PipelineBuilder':
        """Append one step; extra keyword arguments become the step config."""
        self._steps.append(
            PipelineStep(
                tool=tool,
                config=config,
                name=name,
                on_error=on_error,
                condition=condition,
                output_key=output_key,
            )
        )
        return self

    def add_conditional(
        self,
        tool: Union[str, 'BaseTool'],
        condition: callable,
        **config
    ) -> 'PipelineBuilder':
        """Append a step guarded by *condition* (runs only when it returns True)."""
        return self.add(tool, condition=condition, **config)

    def build(self, tool_registry: Optional['ToolRegistry'] = None) -> Pipeline:
        """Materialize the accumulated steps into a Pipeline."""
        return Pipeline(
            steps=self._steps,
            tool_registry=tool_registry,
            name=self.name,
            description=self.description,
        )
|
|
505
|
+
|
|
506
|
+
|
|
507
|
+
# Convenience factory functions
|
|
508
|
+
|
|
509
|
+
def create_image_pipeline(
    tool_registry: 'ToolRegistry',
    upscale: bool = False,
    upscale_factor: int = 2
) -> Pipeline:
    """Build the standard image pipeline: generate, then optionally upscale.

    The upscale step uses on_error='skip' so a failed upscale still yields
    the base generated image.
    """
    generate = PipelineStep(
        tool="image_generate",
        config={"size": "1024x1024"},
        name="generate_image"
    )
    steps = [generate]

    if upscale:
        enhance = PipelineStep(
            tool="image_upscale",
            config={"scale": upscale_factor},
            name="upscale_image",
            on_error="skip"  # Continue without upscaling if it fails
        )
        steps.append(enhance)

    return Pipeline(
        steps=steps,
        tool_registry=tool_registry,
        name="image_pipeline",
        description="Generate and optionally upscale images"
    )
|
|
537
|
+
|
|
538
|
+
|
|
539
|
+
def create_video_pipeline(
    tool_registry: 'ToolRegistry',
    generate_first_frame: bool = True,
    frames: int = 33
) -> Pipeline:
    """Build the standard video pipeline.

    Optionally generates a first frame image before the video step.
    """
    steps: List[PipelineStep] = []

    if generate_first_frame:
        first_frame = PipelineStep(
            tool="image_generate",
            config={"size": "768x768"},
            name="generate_first_frame"
        )
        steps.append(first_frame)

    video = PipelineStep(
        tool="video_generate",
        config={"frames": frames},
        name="generate_video"
    )
    steps.append(video)

    return Pipeline(
        steps=steps,
        tool_registry=tool_registry,
        name="video_pipeline",
        description="Generate video from prompt (optionally with generated first frame)"
    )
|