devsper 2.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devsper/__init__.py +14 -0
- devsper/agents/a2a/__init__.py +27 -0
- devsper/agents/a2a/client.py +126 -0
- devsper/agents/a2a/discovery.py +24 -0
- devsper/agents/a2a/server.py +128 -0
- devsper/agents/a2a/tool_adapter.py +68 -0
- devsper/agents/a2a/types.py +49 -0
- devsper/agents/agent.py +602 -0
- devsper/agents/critic.py +80 -0
- devsper/agents/message_bus.py +124 -0
- devsper/agents/roles.py +181 -0
- devsper/agents/run_agent.py +78 -0
- devsper/analytics/__init__.py +5 -0
- devsper/analytics/tool_analytics.py +78 -0
- devsper/audit/__init__.py +5 -0
- devsper/audit/logger.py +214 -0
- devsper/bus/__init__.py +29 -0
- devsper/bus/backends/__init__.py +5 -0
- devsper/bus/backends/base.py +38 -0
- devsper/bus/backends/memory.py +55 -0
- devsper/bus/backends/redis.py +146 -0
- devsper/bus/message.py +56 -0
- devsper/bus/schema_version.py +3 -0
- devsper/bus/topics.py +19 -0
- devsper/cache/__init__.py +6 -0
- devsper/cache/embedding_index.py +98 -0
- devsper/cache/hashing.py +24 -0
- devsper/cache/store.py +153 -0
- devsper/cache/task_cache.py +191 -0
- devsper/cli/__init__.py +6 -0
- devsper/cli/commands/reg.py +733 -0
- devsper/cli/github_oauth.py +157 -0
- devsper/cli/init.py +637 -0
- devsper/cli/main.py +2956 -0
- devsper/cli/run_progress.py +103 -0
- devsper/cli/ui/__init__.py +65 -0
- devsper/cli/ui/components.py +94 -0
- devsper/cli/ui/errors.py +104 -0
- devsper/cli/ui/logging.py +120 -0
- devsper/cli/ui/onboarding.py +102 -0
- devsper/cli/ui/progress.py +43 -0
- devsper/cli/ui/run_view.py +308 -0
- devsper/cli/ui/theme.py +40 -0
- devsper/cluster/__init__.py +29 -0
- devsper/cluster/election.py +84 -0
- devsper/cluster/local.py +97 -0
- devsper/cluster/node_info.py +77 -0
- devsper/cluster/registry.py +71 -0
- devsper/cluster/router.py +117 -0
- devsper/cluster/state_backend.py +105 -0
- devsper/compliance/__init__.py +5 -0
- devsper/compliance/pii.py +147 -0
- devsper/config/__init__.py +52 -0
- devsper/config/config_loader.py +121 -0
- devsper/config/defaults.py +77 -0
- devsper/config/resolver.py +342 -0
- devsper/config/schema.py +237 -0
- devsper/credentials/__init__.py +19 -0
- devsper/credentials/cli.py +197 -0
- devsper/credentials/migration.py +124 -0
- devsper/credentials/store.py +142 -0
- devsper/dashboard/__init__.py +9 -0
- devsper/dashboard/dashboard.py +87 -0
- devsper/dev/__init__.py +25 -0
- devsper/dev/builder.py +195 -0
- devsper/dev/debugger.py +95 -0
- devsper/dev/repo_index.py +138 -0
- devsper/dev/sandbox.py +203 -0
- devsper/dev/scaffold.py +122 -0
- devsper/embeddings/__init__.py +5 -0
- devsper/embeddings/service.py +36 -0
- devsper/explainability/__init__.py +14 -0
- devsper/explainability/decision_tree.py +104 -0
- devsper/explainability/rationale.py +38 -0
- devsper/explainability/simulation.py +56 -0
- devsper/hitl/__init__.py +13 -0
- devsper/hitl/approval.py +160 -0
- devsper/hitl/escalation.py +95 -0
- devsper/intelligence/__init__.py +9 -0
- devsper/intelligence/adaptation.py +88 -0
- devsper/intelligence/analysis/__init__.py +19 -0
- devsper/intelligence/analysis/analyzer.py +71 -0
- devsper/intelligence/analysis/cost_estimator.py +66 -0
- devsper/intelligence/analysis/formatter.py +103 -0
- devsper/intelligence/analysis/run_report.py +402 -0
- devsper/intelligence/learning_engine.py +92 -0
- devsper/intelligence/strategies/__init__.py +23 -0
- devsper/intelligence/strategies/base.py +14 -0
- devsper/intelligence/strategies/code_analysis_strategy.py +33 -0
- devsper/intelligence/strategies/data_science_strategy.py +33 -0
- devsper/intelligence/strategies/document_pipeline_strategy.py +33 -0
- devsper/intelligence/strategies/experiment_strategy.py +33 -0
- devsper/intelligence/strategies/research_strategy.py +34 -0
- devsper/intelligence/strategy_selector.py +84 -0
- devsper/intelligence/synthesis.py +132 -0
- devsper/intelligence/task_optimizer.py +92 -0
- devsper/knowledge/__init__.py +5 -0
- devsper/knowledge/extractor.py +204 -0
- devsper/knowledge/knowledge_graph.py +184 -0
- devsper/knowledge/query.py +285 -0
- devsper/memory/__init__.py +35 -0
- devsper/memory/consolidation.py +138 -0
- devsper/memory/embeddings.py +60 -0
- devsper/memory/memory_index.py +97 -0
- devsper/memory/memory_router.py +62 -0
- devsper/memory/memory_store.py +221 -0
- devsper/memory/memory_types.py +54 -0
- devsper/memory/namespaces.py +45 -0
- devsper/memory/scoring.py +77 -0
- devsper/memory/summarizer.py +52 -0
- devsper/nodes/__init__.py +5 -0
- devsper/nodes/controller.py +449 -0
- devsper/nodes/rpc.py +127 -0
- devsper/nodes/single.py +161 -0
- devsper/nodes/worker.py +506 -0
- devsper/orchestration/__init__.py +19 -0
- devsper/orchestration/meta_planner.py +239 -0
- devsper/orchestration/priority_queue.py +61 -0
- devsper/plugins/__init__.py +19 -0
- devsper/plugins/marketplace/__init__.py +0 -0
- devsper/plugins/plugin_loader.py +70 -0
- devsper/plugins/plugin_registry.py +34 -0
- devsper/plugins/registry.py +83 -0
- devsper/protocols/__init__.py +6 -0
- devsper/providers/__init__.py +17 -0
- devsper/providers/anthropic.py +84 -0
- devsper/providers/base.py +75 -0
- devsper/providers/complexity_router.py +94 -0
- devsper/providers/gemini.py +36 -0
- devsper/providers/github.py +180 -0
- devsper/providers/model_router.py +40 -0
- devsper/providers/openai.py +105 -0
- devsper/providers/router/__init__.py +21 -0
- devsper/providers/router/backends/__init__.py +19 -0
- devsper/providers/router/backends/anthropic_backend.py +111 -0
- devsper/providers/router/backends/custom_backend.py +138 -0
- devsper/providers/router/backends/gemini_backend.py +89 -0
- devsper/providers/router/backends/github_backend.py +165 -0
- devsper/providers/router/backends/ollama_backend.py +104 -0
- devsper/providers/router/backends/openai_backend.py +142 -0
- devsper/providers/router/backends/vllm_backend.py +35 -0
- devsper/providers/router/base.py +60 -0
- devsper/providers/router/factory.py +92 -0
- devsper/providers/router/legacy.py +101 -0
- devsper/providers/router/router.py +135 -0
- devsper/reasoning/__init__.py +12 -0
- devsper/reasoning/graph.py +59 -0
- devsper/reasoning/nodes.py +20 -0
- devsper/reasoning/store.py +67 -0
- devsper/runtime/__init__.py +12 -0
- devsper/runtime/health.py +88 -0
- devsper/runtime/replay.py +53 -0
- devsper/runtime/replay_engine.py +142 -0
- devsper/runtime/run_history.py +204 -0
- devsper/runtime/telemetry.py +116 -0
- devsper/runtime/visualize.py +58 -0
- devsper/sandbox/__init__.py +13 -0
- devsper/sandbox/sandbox.py +161 -0
- devsper/swarm/checkpointer.py +65 -0
- devsper/swarm/executor.py +558 -0
- devsper/swarm/map_reduce.py +44 -0
- devsper/swarm/planner.py +197 -0
- devsper/swarm/prefetcher.py +91 -0
- devsper/swarm/scheduler.py +153 -0
- devsper/swarm/speculation.py +47 -0
- devsper/swarm/swarm.py +562 -0
- devsper/tools/__init__.py +33 -0
- devsper/tools/base.py +29 -0
- devsper/tools/code_intelligence/__init__.py +13 -0
- devsper/tools/code_intelligence/api_surface_extractor.py +73 -0
- devsper/tools/code_intelligence/architecture_analyzer.py +65 -0
- devsper/tools/code_intelligence/codebase_indexer.py +71 -0
- devsper/tools/code_intelligence/dependency_graph_builder.py +67 -0
- devsper/tools/code_intelligence/design_pattern_detector.py +62 -0
- devsper/tools/code_intelligence/large_function_detector.py +68 -0
- devsper/tools/code_intelligence/module_responsibility_mapper.py +56 -0
- devsper/tools/code_intelligence/parallel_codebase_analysis.py +44 -0
- devsper/tools/code_intelligence/refactor_candidate_detector.py +81 -0
- devsper/tools/code_intelligence/repository_semantic_index.py +61 -0
- devsper/tools/code_intelligence/test_coverage_estimator.py +62 -0
- devsper/tools/coding/__init__.py +12 -0
- devsper/tools/coding/analyze_code_complexity.py +48 -0
- devsper/tools/coding/dependency_analyzer.py +42 -0
- devsper/tools/coding/extract_functions.py +38 -0
- devsper/tools/coding/format_python.py +50 -0
- devsper/tools/coding/generate_docstrings.py +40 -0
- devsper/tools/coding/generate_unit_tests.py +42 -0
- devsper/tools/coding/lint_python.py +51 -0
- devsper/tools/coding/refactor_function.py +41 -0
- devsper/tools/coding/repo_structure_map.py +54 -0
- devsper/tools/coding/run_python.py +53 -0
- devsper/tools/data/__init__.py +12 -0
- devsper/tools/data/column_type_detection.py +64 -0
- devsper/tools/data/csv_summary.py +52 -0
- devsper/tools/data/dataframe_filter.py +51 -0
- devsper/tools/data/dataframe_groupby.py +47 -0
- devsper/tools/data/dataframe_stats.py +38 -0
- devsper/tools/data/dataset_sampling.py +55 -0
- devsper/tools/data/dataset_schema.py +45 -0
- devsper/tools/data/json_pretty_print.py +37 -0
- devsper/tools/data/json_query.py +46 -0
- devsper/tools/data/missing_value_report.py +47 -0
- devsper/tools/data_science/__init__.py +13 -0
- devsper/tools/data_science/correlation_heatmap.py +72 -0
- devsper/tools/data_science/dataset_bias_detector.py +49 -0
- devsper/tools/data_science/dataset_distribution_report.py +64 -0
- devsper/tools/data_science/dataset_drift_detector.py +64 -0
- devsper/tools/data_science/dataset_outlier_detector.py +65 -0
- devsper/tools/data_science/dataset_profile.py +76 -0
- devsper/tools/data_science/distributed_dataset_processor.py +54 -0
- devsper/tools/data_science/feature_engineering_suggestions.py +69 -0
- devsper/tools/data_science/feature_importance_estimator.py +82 -0
- devsper/tools/data_science/model_input_validator.py +59 -0
- devsper/tools/data_science/time_series_analyzer.py +57 -0
- devsper/tools/documents/__init__.py +11 -0
- devsper/tools/documents/_docproc.py +56 -0
- devsper/tools/documents/document_to_markdown.py +29 -0
- devsper/tools/documents/extract_document_images.py +39 -0
- devsper/tools/documents/extract_document_text.py +29 -0
- devsper/tools/documents/extract_equations.py +36 -0
- devsper/tools/documents/extract_tables.py +47 -0
- devsper/tools/documents/summarize_document.py +42 -0
- devsper/tools/documents/write_latex_document.py +133 -0
- devsper/tools/documents/write_markdown_document.py +89 -0
- devsper/tools/documents/write_word_document.py +149 -0
- devsper/tools/experiments/__init__.py +13 -0
- devsper/tools/experiments/bootstrap_estimator.py +54 -0
- devsper/tools/experiments/experiment_report_generator.py +50 -0
- devsper/tools/experiments/experiment_tracker.py +36 -0
- devsper/tools/experiments/grid_search_runner.py +50 -0
- devsper/tools/experiments/model_benchmark_runner.py +45 -0
- devsper/tools/experiments/monte_carlo_experiment.py +38 -0
- devsper/tools/experiments/parameter_sweep_runner.py +51 -0
- devsper/tools/experiments/result_comparator.py +58 -0
- devsper/tools/experiments/simulation_runner.py +43 -0
- devsper/tools/experiments/statistical_significance_test.py +56 -0
- devsper/tools/experiments/swarm_map_reduce.py +42 -0
- devsper/tools/filesystem/__init__.py +12 -0
- devsper/tools/filesystem/append_file.py +42 -0
- devsper/tools/filesystem/file_hash.py +40 -0
- devsper/tools/filesystem/file_line_count.py +36 -0
- devsper/tools/filesystem/file_metadata.py +38 -0
- devsper/tools/filesystem/file_preview.py +55 -0
- devsper/tools/filesystem/find_large_files.py +50 -0
- devsper/tools/filesystem/list_directory.py +39 -0
- devsper/tools/filesystem/read_file.py +35 -0
- devsper/tools/filesystem/search_files.py +60 -0
- devsper/tools/filesystem/write_file.py +41 -0
- devsper/tools/flagship/__init__.py +15 -0
- devsper/tools/flagship/distributed_document_analysis.py +77 -0
- devsper/tools/flagship/docproc_corpus_pipeline.py +91 -0
- devsper/tools/flagship/repository_semantic_map.py +99 -0
- devsper/tools/flagship/research_graph_builder.py +111 -0
- devsper/tools/flagship/swarm_experiment_runner.py +86 -0
- devsper/tools/knowledge/__init__.py +10 -0
- devsper/tools/knowledge/citation_graph_builder.py +69 -0
- devsper/tools/knowledge/concept_frequency_analyzer.py +74 -0
- devsper/tools/knowledge/corpus_builder.py +66 -0
- devsper/tools/knowledge/cross_document_entity_linker.py +71 -0
- devsper/tools/knowledge/document_corpus_summary.py +68 -0
- devsper/tools/knowledge/document_topic_extractor.py +58 -0
- devsper/tools/knowledge/knowledge_graph_extractor.py +58 -0
- devsper/tools/knowledge/timeline_extractor.py +59 -0
- devsper/tools/math/__init__.py +12 -0
- devsper/tools/math/calculate_expression.py +52 -0
- devsper/tools/math/correlation.py +44 -0
- devsper/tools/math/distribution_summary.py +39 -0
- devsper/tools/math/histogram.py +53 -0
- devsper/tools/math/linear_regression.py +47 -0
- devsper/tools/math/matrix_multiply.py +38 -0
- devsper/tools/math/mean_std.py +35 -0
- devsper/tools/math/monte_carlo_simulation.py +43 -0
- devsper/tools/math/polynomial_fit.py +40 -0
- devsper/tools/math/random_sample.py +36 -0
- devsper/tools/mcp/__init__.py +23 -0
- devsper/tools/mcp/adapter.py +53 -0
- devsper/tools/mcp/client.py +235 -0
- devsper/tools/mcp/discovery.py +53 -0
- devsper/tools/memory/__init__.py +16 -0
- devsper/tools/memory/delete_memory.py +25 -0
- devsper/tools/memory/list_memory.py +34 -0
- devsper/tools/memory/search_memory.py +36 -0
- devsper/tools/memory/store_memory.py +47 -0
- devsper/tools/memory/summarize_memory.py +41 -0
- devsper/tools/memory/tag_memory.py +47 -0
- devsper/tools/pipelines.py +92 -0
- devsper/tools/registry.py +39 -0
- devsper/tools/research/__init__.py +12 -0
- devsper/tools/research/arxiv_download.py +55 -0
- devsper/tools/research/arxiv_search.py +58 -0
- devsper/tools/research/citation_extractor.py +35 -0
- devsper/tools/research/duckduckgo_search.py +42 -0
- devsper/tools/research/paper_metadata_extractor.py +45 -0
- devsper/tools/research/paper_summarizer.py +41 -0
- devsper/tools/research/research_question_generator.py +39 -0
- devsper/tools/research/topic_cluster.py +46 -0
- devsper/tools/research/web_search.py +47 -0
- devsper/tools/research/wikipedia_lookup.py +50 -0
- devsper/tools/research_advanced/__init__.py +14 -0
- devsper/tools/research_advanced/citation_context_extractor.py +60 -0
- devsper/tools/research_advanced/literature_review_generator.py +79 -0
- devsper/tools/research_advanced/methodology_extractor.py +58 -0
- devsper/tools/research_advanced/paper_contribution_extractor.py +50 -0
- devsper/tools/research_advanced/paper_dataset_identifier.py +49 -0
- devsper/tools/research_advanced/paper_method_comparator.py +62 -0
- devsper/tools/research_advanced/paper_similarity_search.py +69 -0
- devsper/tools/research_advanced/paper_trend_analyzer.py +69 -0
- devsper/tools/research_advanced/parallel_document_analyzer.py +56 -0
- devsper/tools/research_advanced/research_gap_finder.py +71 -0
- devsper/tools/research_advanced/research_topic_mapper.py +69 -0
- devsper/tools/research_advanced/swarm_literature_review.py +58 -0
- devsper/tools/scoring/__init__.py +52 -0
- devsper/tools/scoring/report.py +44 -0
- devsper/tools/scoring/scorer.py +39 -0
- devsper/tools/scoring/selector.py +61 -0
- devsper/tools/scoring/store.py +267 -0
- devsper/tools/selector.py +130 -0
- devsper/tools/system/__init__.py +12 -0
- devsper/tools/system/cpu_usage.py +22 -0
- devsper/tools/system/disk_usage.py +35 -0
- devsper/tools/system/environment_variables.py +29 -0
- devsper/tools/system/memory_usage.py +23 -0
- devsper/tools/system/pip_install.py +44 -0
- devsper/tools/system/pip_search.py +29 -0
- devsper/tools/system/process_list.py +34 -0
- devsper/tools/system/python_package_list.py +40 -0
- devsper/tools/system/run_shell_command.py +51 -0
- devsper/tools/system/system_info.py +26 -0
- devsper/tools/tool_runner.py +122 -0
- devsper/tui/__init__.py +5 -0
- devsper/tui/activity_feed_view.py +73 -0
- devsper/tui/adaptive_tasks_view.py +75 -0
- devsper/tui/agent_role_view.py +35 -0
- devsper/tui/app.py +395 -0
- devsper/tui/dashboard_screen.py +290 -0
- devsper/tui/dev_view.py +99 -0
- devsper/tui/inject_screen.py +73 -0
- devsper/tui/knowledge_graph_view.py +46 -0
- devsper/tui/layout.py +43 -0
- devsper/tui/logs_view.py +83 -0
- devsper/tui/memory_view.py +58 -0
- devsper/tui/performance_view.py +33 -0
- devsper/tui/reasoning_graph_view.py +39 -0
- devsper/tui/results_view.py +139 -0
- devsper/tui/swarm_view.py +37 -0
- devsper/tui/task_detail_screen.py +55 -0
- devsper/tui/task_view.py +103 -0
- devsper/types/event.py +97 -0
- devsper/types/exceptions.py +21 -0
- devsper/types/swarm.py +41 -0
- devsper/types/task.py +80 -0
- devsper/upgrade/__init__.py +21 -0
- devsper/upgrade/changelog.py +124 -0
- devsper/upgrade/cli.py +145 -0
- devsper/upgrade/installer.py +103 -0
- devsper/upgrade/notifier.py +52 -0
- devsper/upgrade/version_check.py +121 -0
- devsper/utils/event_logger.py +88 -0
- devsper/utils/http.py +43 -0
- devsper/utils/models.py +54 -0
- devsper/visualization/__init__.py +5 -0
- devsper/visualization/dag_export.py +67 -0
- devsper/workflow/__init__.py +18 -0
- devsper/workflow/conditions.py +157 -0
- devsper/workflow/context.py +108 -0
- devsper/workflow/loader.py +156 -0
- devsper/workflow/resolver.py +109 -0
- devsper/workflow/runner.py +562 -0
- devsper/workflow/schema.py +63 -0
- devsper/workflow/validator.py +128 -0
- devsper-2.1.6.dist-info/METADATA +346 -0
- devsper-2.1.6.dist-info/RECORD +375 -0
- devsper-2.1.6.dist-info/WHEEL +4 -0
- devsper-2.1.6.dist-info/entry_points.txt +3 -0
- devsper-2.1.6.dist-info/licenses/LICENSE +639 -0
|
@@ -0,0 +1,562 @@
|
|
|
1
|
+
"""Pipeline engine: typed outputs, branching, dependencies, retries."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import json
|
|
5
|
+
import re
|
|
6
|
+
import time
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from devsper.types.task import Task
|
|
11
|
+
from devsper.types.event import Event, events
|
|
12
|
+
from devsper.workflow.conditions import evaluate_condition
|
|
13
|
+
from devsper.workflow.context import StepResult, WorkflowContext, WorkflowTemplateError
|
|
14
|
+
from devsper.workflow.resolver import WorkflowCycleError, build_execution_order
|
|
15
|
+
from devsper.workflow.schema import OutputField, WorkflowDefinition, WorkflowStep
|
|
16
|
+
from devsper.utils.models import resolve_model
|
|
17
|
+
from devsper.utils.event_logger import EventLog
|
|
18
|
+
from datetime import datetime, timezone
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class WorkflowStepError(Exception):
    """A workflow step failed permanently — e.g. structured output could not
    be parsed after exhausting all retries."""
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class ParseResult:
    """Outcome of parsing and validating an LLM response against an output schema."""

    # True when the response parsed as JSON and every schema field validated.
    success: bool
    # Coerced field values keyed by schema field name; None on failure.
    data: dict[str, Any] | None
    # Human-readable failure summary; None on success.
    error: str | None
    # Names of required schema fields absent from the response.
    missing_fields: list[str]
    # "<field>: expected <type>" entries for values of the wrong JSON type.
    type_errors: list[str]
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _validate_inputs(workflow: WorkflowDefinition, inputs: dict[str, Any]) -> None:
|
|
36
|
+
"""Validate runtime inputs against workflow.inputs schema. Raises ValueError on failure."""
|
|
37
|
+
for field in workflow.inputs:
|
|
38
|
+
if field.required and field.name not in inputs:
|
|
39
|
+
raise ValueError(
|
|
40
|
+
f"Required input {field.name!r} not provided. Required: {[f.name for f in workflow.inputs if f.required]}"
|
|
41
|
+
)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def _coerce_value(val: Any, type_name: str) -> Any:
|
|
45
|
+
"""Coerce value to output_schema type."""
|
|
46
|
+
if type_name == "str":
|
|
47
|
+
return str(val) if val is not None else ""
|
|
48
|
+
if type_name == "int":
|
|
49
|
+
return int(val) if val is not None else 0
|
|
50
|
+
if type_name == "float":
|
|
51
|
+
return float(val) if val is not None else 0.0
|
|
52
|
+
if type_name == "bool":
|
|
53
|
+
if isinstance(val, bool):
|
|
54
|
+
return val
|
|
55
|
+
return str(val).lower() in ("true", "1", "yes")
|
|
56
|
+
if type_name == "list":
|
|
57
|
+
return list(val) if isinstance(val, (list, tuple)) else [val]
|
|
58
|
+
if type_name == "dict":
|
|
59
|
+
return dict(val) if isinstance(val, dict) else {}
|
|
60
|
+
return val
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _strip_markdown_json(raw: str) -> str:
|
|
64
|
+
"""Strip markdown code fences (```json ... ```) if present."""
|
|
65
|
+
stripped = raw.strip()
|
|
66
|
+
if stripped.startswith("```"):
|
|
67
|
+
first = stripped.find("\n")
|
|
68
|
+
if first != -1 and "json" in stripped[:first].lower():
|
|
69
|
+
stripped = stripped[first + 1 :]
|
|
70
|
+
if stripped.endswith("```"):
|
|
71
|
+
stripped = stripped[:-3].rstrip()
|
|
72
|
+
return stripped
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def try_parse_structured(
    raw: str, schema: list[OutputField]
) -> ParseResult:
    """Parse *raw* as a JSON object and validate it against *schema*.

    On success, ``data`` holds one coerced value per schema field; on failure,
    ``error``, ``missing_fields`` and ``type_errors`` describe what went wrong.
    """

    def _failure(message: str, missing=None, bad_types=None) -> ParseResult:
        # Single exit point for every failure shape.
        return ParseResult(
            success=False,
            data=None,
            error=message,
            missing_fields=missing or [],
            type_errors=bad_types or [],
        )

    cleaned = _strip_markdown_json(raw)
    try:
        parsed = json.loads(cleaned)
    except json.JSONDecodeError as exc:
        return _failure(str(exc))
    if not isinstance(parsed, dict):
        return _failure("Root is not a JSON object")

    # Map schema type names to the isinstance check they require. A float
    # field tolerates ints (JSON has a single number type); bool values pass
    # the int check because bool subclasses int, matching the original chain.
    checks = {
        "str": str,
        "int": int,
        "float": (int, float),
        "bool": bool,
        "list": list,
        "dict": dict,
    }
    absent: list[str] = []
    wrong_types: list[str] = []
    for field in schema:
        if field.name not in parsed:
            if field.required:
                absent.append(field.name)
            continue
        expected = checks.get(field.type)
        if expected is not None and not isinstance(parsed[field.name], expected):
            wrong_types.append(f"{field.name}: expected {field.type}")

    if absent or wrong_types:
        parts = []
        if absent:
            parts.append(f"missing: {', '.join(absent)}")
        if wrong_types:
            parts.append(f"type errors: {'; '.join(wrong_types)}")
        return _failure("; ".join(parts), missing=absent, bad_types=wrong_types)

    coerced: dict[str, Any] = {}
    for field in schema:
        value = parsed.get(field.name)
        if value is not None:
            coerced[field.name] = _coerce_value(value, field.type)
        elif field.required:
            # Key present but null: substitute the type's empty value
            # (a missing-and-required key already failed above).
            coerced[field.name] = _coerce_value(None, field.type)
        else:
            coerced[field.name] = None
    return ParseResult(
        success=True,
        data=coerced,
        error=None,
        missing_fields=[],
        type_errors=[],
    )
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def _format_schema(output_schema: list[OutputField]) -> str:
|
|
153
|
+
"""Format output_schema for inclusion in correction prompt."""
|
|
154
|
+
lines = []
|
|
155
|
+
for f in output_schema:
|
|
156
|
+
req = "required" if f.required else "optional"
|
|
157
|
+
lines.append(f" {f.name} ({f.type}, {req})")
|
|
158
|
+
return "\n".join(lines) if lines else "{}"
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def _parse_structured_output(
    raw_result: str, output_schema: list[OutputField]
) -> dict[str, Any]:
    """Parse *raw_result* against *output_schema* and return the coerced dict.

    Thin raising wrapper around ``try_parse_structured``.

    Raises:
        ValueError: If the result is not valid JSON matching the schema.
    """
    outcome = try_parse_structured(raw_result, output_schema)
    if outcome.success:
        return outcome.data or {}
    raise ValueError(outcome.error or "Parse failed")
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def _run_single_step_sync(
    step: WorkflowStep,
    context: WorkflowContext,
    worker_model: str,
    worker_count: int,
    memory_router: Any,
    use_tools: bool,
    event_log: Any,
    task_description_override: str | None = None,
) -> StepResult:
    """Run one step synchronously: resolve its task template, execute it via a
    single-task scheduler/executor pair, and optionally parse structured output.

    Called from async code in a worker thread. If ``task_description_override``
    is set (e.g. a correction prompt from the self-correction loop), it is used
    verbatim instead of resolving ``step.task`` against the context.

    Returns:
        A StepResult. Failures (template resolution errors, unparseable
        structured output) are reported via ``StepResult.error`` — this
        function does not raise on step failure.
    """
    # Imported locally rather than at module level — NOTE(review): presumably
    # to avoid circular imports between workflow and agents/swarm; confirm.
    from devsper.agents.agent import Agent
    from devsper.reasoning.store import ReasoningStore
    from devsper.swarm.executor import Executor
    from devsper.swarm.scheduler import Scheduler

    start = time.perf_counter()
    if task_description_override is not None:
        resolved_task = task_description_override
    else:
        try:
            resolved_task = context.resolve_template(step.task)
        except WorkflowTemplateError as e:
            # A bad template fails only this step, not the whole run.
            return StepResult(
                step_id=step.id,
                raw_result="",
                structured=None,
                skipped=False,
                error=str(e),
                duration_seconds=time.perf_counter() - start,
            )

    if step.output_schema:
        # Steer the model toward a bare JSON object with the schema's fields.
        field_names = ", ".join(f.name for f in step.output_schema)
        resolved_task += (
            f"\n\nRespond ONLY with a JSON object with these fields: {field_names}. "
            "Do not include any other text."
        )

    # Per-step model override falls back to the workflow's worker model.
    model = resolve_model(step.model or worker_model, "analysis")
    task = Task(
        id=step.id,
        description=resolved_task,
        dependencies=[],
        role=step.role,
    )
    scheduler = Scheduler()
    scheduler.add_tasks([task])
    reasoning_store = ReasoningStore()
    agent = Agent(
        model_name=model,
        event_log=event_log,
        memory_router=memory_router,
        store_result_to_memory=False,
        use_tools=use_tools,
        reasoning_store=reasoning_store,
        user_task=resolved_task[:200],  # only the first 200 chars of the prompt
    )
    executor = Executor(
        scheduler=scheduler,
        agent=agent,
        worker_count=min(worker_count, 1),  # one task, so one worker suffices
        event_log=event_log,
    )
    executor.run_sync()
    results = scheduler.get_results()
    raw_result = results.get(step.id, "")

    structured: dict[str, Any] | None = None
    if step.output_schema and raw_result:
        try:
            structured = _parse_structured_output(raw_result, step.output_schema)
        except (ValueError, json.JSONDecodeError) as e:
            # Parse failure is reported, not raised; the correction loop in
            # _run_step_with_correction decides whether to retry.
            return StepResult(
                step_id=step.id,
                raw_result=raw_result,
                structured=None,
                skipped=False,
                error=f"Failed to parse output_schema: {e}",
                duration_seconds=time.perf_counter() - start,
            )

    return StepResult(
        step_id=step.id,
        raw_result=raw_result or "",
        structured=structured,
        skipped=False,
        error=None,
        duration_seconds=time.perf_counter() - start,
    )
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
async def _run_step_with_correction(
    step: WorkflowStep,
    context: WorkflowContext,
    worker_model: str,
    worker_count: int,
    memory_router: Any,
    use_tools: bool,
    event_log: Any,
    loop: asyncio.AbstractEventLoop,
) -> StepResult:
    """Run a step with structured-output self-correction.

    Each attempt executes the step in a worker thread. When the step declares
    an ``output_schema`` and the response fails to parse, the next attempt's
    prompt is augmented with a correction block describing the parse error.
    Steps without an output schema fall back to plain retry-with-backoff.

    Raises:
        WorkflowStepError: If structured output still fails to parse after
            ``step.retry + 1`` attempts.
    """
    start = time.perf_counter()
    try:
        resolved_task = context.resolve_template(step.task)
    except WorkflowTemplateError as e:
        # Template resolution failure is not retryable; surface it at once.
        return StepResult(
            step_id=step.id,
            raw_result="",
            structured=None,
            skipped=False,
            error=str(e),
            duration_seconds=time.perf_counter() - start,
        )

    if step.output_schema:
        # First attempt: ask for a bare JSON object with the schema's fields.
        field_names = ", ".join(f.name for f in step.output_schema)
        task_prompt = (
            resolved_task
            + f"\n\nRespond ONLY with a JSON object with these fields: {field_names}. "
            "Do not include any other text."
        )
    else:
        task_prompt = resolved_task

    last_error: str | None = None
    for attempt in range(step.retry + 1):
        # task_prompt is passed as the description override so the correction
        # block (appended below on parse failure) reaches the model next time.
        result = await loop.run_in_executor(
            None,
            _run_single_step_sync,
            step,
            context,
            worker_model,
            worker_count,
            memory_router,
            use_tools,
            event_log,
            task_prompt,
        )
        if result.error and not step.output_schema:
            # Unstructured step failed: exponential backoff (1s, 2s, 4s, ...).
            last_error = result.error
            if attempt < step.retry:
                await asyncio.sleep(2**attempt)
                continue
        if not step.output_schema:
            # No schema means no correction loop; return success or the
            # final failed result as-is.
            return result

        parse_result = try_parse_structured(result.raw_result or "", step.output_schema)
        if parse_result.success and parse_result.data is not None:
            return StepResult(
                step_id=step.id,
                raw_result=result.raw_result or "",
                structured=parse_result.data,
                skipped=False,
                error=None,
                duration_seconds=time.perf_counter() - start,
            )

        # Parse failed: record a correction event, then rebuild the prompt
        # with an explanation of exactly what was wrong.
        if event_log:
            event_log.append_event(
                Event(
                    timestamp=datetime.now(timezone.utc),
                    type=events.TASK_STRUCTURED_OUTPUT_CORRECTED,
                    payload={
                        "task_id": step.id,
                        "step_id": step.id,
                        "attempt": attempt + 1,
                        "error_summary": parse_result.error,
                    },
                )
            )
        correction_context = f"""
Your previous response could not be parsed. Error: {parse_result.error or 'Unknown'}

Required JSON schema:
{_format_schema(step.output_schema)}

Common mistakes:
- Extra text before/after the JSON
- Missing required fields: {parse_result.missing_fields}
- Wrong types: {parse_result.type_errors}

Please respond ONLY with valid JSON matching the schema above.
"""
        task_prompt = f"{resolved_task}\n\n{correction_context}"
        last_error = parse_result.error

    # Every attempt produced unparseable structured output.
    raise WorkflowStepError(
        f"Step {step.id} failed structured output after {step.retry + 1} attempts: {last_error}"
    )
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
async def _run_step_with_retry(
    step: WorkflowStep,
    context: WorkflowContext,
    worker_model: str,
    worker_count: int,
    memory_router: Any,
    use_tools: bool,
    event_log: Any,
    loop: asyncio.AbstractEventLoop,
) -> StepResult:
    """Run a single step, retrying failures with exponential backoff.

    The synchronous step executor runs in the default thread-pool executor so
    the event loop stays responsive. A step is attempted ``step.retry + 1``
    times; between attempts the coroutine sleeps ``2 ** attempt`` seconds.
    On the final attempt, an errored result is returned as-is and a raised
    exception is converted into a failed ``StepResult`` rather than
    propagating to the caller.
    """

    def _failed(message: str | None) -> StepResult:
        # Uniform shape for an unrecoverable attempt: empty output, error set.
        return StepResult(
            step_id=step.id,
            raw_result="",
            structured=None,
            skipped=False,
            error=message,
            duration_seconds=0.0,
        )

    failure_reason: str | None = None
    for attempt_index in range(step.retry + 1):
        final_try = attempt_index == step.retry
        try:
            outcome = await loop.run_in_executor(
                None,
                _run_single_step_sync,
                step,
                context,
                worker_model,
                worker_count,
                memory_router,
                use_tools,
                event_log,
            )
        except Exception as exc:
            failure_reason = str(exc)
            if final_try:
                return _failed(failure_reason)
            await asyncio.sleep(2**attempt_index)
            continue
        # A soft error (no exception, but result.error set) is retried too,
        # except on the last attempt where the errored result is surfaced.
        if outcome.error and not final_try:
            failure_reason = outcome.error
            await asyncio.sleep(2**attempt_index)
            continue
        return outcome
    # Defensive fallback; the loop above always returns on the final attempt.
    return _failed(failure_reason or "Unknown")
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
class WorkflowRunner:
    """Executes a WorkflowDefinition: waves run sequentially, steps within a
    wave run concurrently on a fresh event loop."""

    def run(
        self,
        workflow: WorkflowDefinition,
        inputs: dict[str, Any],
        worker_model: str = "mock",
        worker_count: int = 4,
        memory_router: Any = None,
        use_tools: bool = False,
        event_log: Any = None,
    ) -> WorkflowContext:
        """Execute workflow: validate inputs, run waves in order, steps in parallel within wave."""
        _validate_inputs(workflow, inputs)
        context = WorkflowContext(inputs)
        # build_execution_order returns "waves": lists of steps whose
        # dependencies are all satisfied by earlier waves.
        waves = build_execution_order(workflow.steps)
        for wave in waves:
            # Evaluate conditions and collect steps to run vs skip
            to_run: list[WorkflowStep] = []
            for step in wave:
                # Skip if any dependency was skipped
                # NOTE(review): a dependency missing from context.steps is
                # treated as skipped via the defaulted StepResult — by wave
                # ordering every dep should already be recorded; confirm.
                if any(
                    context.steps.get(dep, StepResult(step_id=dep, raw_result="", skipped=True)).skipped
                    for dep in step.depends_on
                ):
                    # Skip transitively: record a skipped result so downstream
                    # steps see this one as skipped too.
                    context.record(
                        step.id,
                        StepResult(
                            step_id=step.id,
                            raw_result="",
                            structured=None,
                            skipped=True,
                            error="Dependency was skipped",
                            duration_seconds=0.0,
                        ),
                    )
                    continue
                if step.if_:
                    try:
                        # A false condition skips the step without marking an error.
                        if not evaluate_condition(step.if_.expression, context):
                            context.record(
                                step.id,
                                StepResult(
                                    step_id=step.id,
                                    raw_result="",
                                    structured=None,
                                    skipped=True,
                                    error=None,
                                    duration_seconds=0.0,
                                ),
                            )
                            continue
                    except Exception:
                        # Conservative: skip on condition error
                        context.record(
                            step.id,
                            StepResult(
                                step_id=step.id,
                                raw_result="",
                                structured=None,
                                skipped=True,
                                error="Condition evaluation failed",
                                duration_seconds=0.0,
                            ),
                        )
                        continue
                to_run.append(step)

            if not to_run:
                continue

            # NOTE(review): when no event_log is supplied, a fresh EventLog is
            # created per wave, so events are not aggregated across waves —
            # confirm this is intentional.
            log = event_log or EventLog()

            async def _run_one(step: WorkflowStep) -> StepResult:
                loop = asyncio.get_running_loop()
                if step.output_schema:
                    # Structured-output steps go through the self-correction
                    # path; a terminal WorkflowStepError becomes a failed result.
                    try:
                        return await _run_step_with_correction(
                            step,
                            context,
                            worker_model,
                            worker_count,
                            memory_router,
                            use_tools,
                            log,
                            loop,
                        )
                    except WorkflowStepError as e:
                        return StepResult(
                            step_id=step.id,
                            raw_result="",
                            structured=None,
                            skipped=False,
                            error=str(e),
                            duration_seconds=0.0,
                        )
                return await _run_step_with_retry(
                    step,
                    context,
                    worker_model,
                    worker_count,
                    memory_router,
                    use_tools,
                    log,
                    loop,
                )

            async def _run_wave() -> list[StepResult]:
                # Run every step of the wave concurrently; gather preserves
                # the order of to_run in the returned results.
                return await asyncio.gather(
                    *[_run_one(step) for step in to_run]
                )

            # NOTE(review): asyncio.run() starts a new event loop per wave and
            # raises RuntimeError if run() is called from within an already
            # running loop — confirm all callers are synchronous.
            results = asyncio.run(_run_wave())
            for r in results:
                context.record(r.step_id, r)
        return context
|
|
533
|
+
|
|
534
|
+
|
|
535
|
+
def run_workflow(
    steps: list[str],
    worker_model: str = "mock",
    worker_count: int = 2,
    event_log=None,
    memory_router=None,
    use_tools: bool = False,
) -> dict[str, str]:
    """
    Legacy: run a workflow from a list of task strings (sequential).

    Each task string is converted into a step via the legacy loader and
    executed with WorkflowRunner; the result is flattened back to the old
    ``task_id -> raw_result`` mapping.
    For new workflows use WorkflowRunner with WorkflowDefinition.

    Args:
        steps: Ordered list of task prompt strings.
        worker_model: Model identifier passed through to the runner.
        worker_count: Number of workers passed through to the runner.
        event_log: Optional event log forwarded to the runner.
        memory_router: Optional memory router forwarded to the runner.
        use_tools: Whether steps may use tools.

    Returns:
        Mapping of step id to that step's raw result string.
    """
    # Local import to avoid a circular dependency at module load time.
    # (The previously present WorkflowDefinition import was unused and removed.)
    from devsper.workflow.loader import _workflow_from_legacy_steps

    definition = _workflow_from_legacy_steps(steps)
    runner = WorkflowRunner()
    ctx = runner.run(
        definition,
        inputs={},
        worker_model=worker_model,
        worker_count=worker_count,
        memory_router=memory_router,
        use_tools=use_tools,
        event_log=event_log,
    )
    # Return task_id -> result for backward compat
    return {sr.step_id: sr.raw_result for sr in ctx.steps.values()}
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
"""Pydantic models for the workflow DSL."""
|
|
2
|
+
|
|
3
|
+
from typing import Literal
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel, ConfigDict, Field, model_validator
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class OutputField(BaseModel):
    """One named, typed field of a step's structured output (also reused to
    declare workflow-level inputs)."""

    name: str  # field name as it appears in the structured output / inputs dict
    type: Literal["str", "int", "float", "bool", "list", "dict"]  # declared value type, as a string tag
    description: str | None = None  # optional human-readable hint for the field
    required: bool = True  # presumably optional fields may be omitted by the agent — confirm against the parser
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class StepCondition(BaseModel):
    """Evaluated as: steps.<step_id>.<field> <op> <value>."""

    # Full condition expression as a single string; evaluated against prior
    # step results at runtime, e.g. "steps.classify.category == 'technical'".
    expression: str  # e.g. "steps.classify.category == 'technical'"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class WorkflowStep(BaseModel):
    """A single unit of work in a workflow DSL definition."""

    id: str  # unique within workflow, used for references
    task: str  # prompt/task description (supports {var} interpolation)
    depends_on: list[str] = []  # step ids this step waits for
    # Serialized key is "if" (Python keyword), hence the alias.
    if_: StepCondition | None = Field(None, alias="if")  # skip step if false
    output_schema: list[OutputField] = []  # if set, agent must return structured JSON
    role: str | None = None  # presumably selects an agent role/persona — confirm against the runner
    model: str | None = None  # override worker model for this step
    retry: int = 0  # number of retries on failure
    timeout_seconds: int | None = None  # per-step timeout; None means no limit — TODO confirm enforcement

    # Allow constructing with the Python field name ("if_") as well as the alias ("if").
    model_config = ConfigDict(populate_by_name=True)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class WorkflowDefinition(BaseModel):
    """Top-level workflow spec: metadata, the ordered step list, and the
    inputs callers must supply at runtime."""

    name: str
    description: str | None = None
    version: str = "1.0"
    steps: list[WorkflowStep]
    inputs: list[OutputField] = []  # required inputs passed at runtime

    @model_validator(mode="after")
    def validate_step_ids_unique(self) -> "WorkflowDefinition":
        # Reject the first repeated step id, scanning in declaration order.
        seen: set[str] = set()
        for step in self.steps:
            if step.id in seen:
                raise ValueError(f"Duplicate step id: {step.id}")
            seen.add(step.id)
        return self

    @model_validator(mode="after")
    def validate_depends_on_references(self) -> "WorkflowDefinition":
        # Every depends_on entry must name a step declared in this workflow.
        known_ids = {step.id for step in self.steps}
        for step in self.steps:
            for dep in step.depends_on:
                if dep in known_ids:
                    continue
                raise ValueError(
                    f"Step {step.id!r} depends_on unknown step {dep!r}. "
                    f"Valid ids: {sorted(known_ids)}"
                )
        return self
|