devsper 2.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devsper/__init__.py +14 -0
- devsper/agents/a2a/__init__.py +27 -0
- devsper/agents/a2a/client.py +126 -0
- devsper/agents/a2a/discovery.py +24 -0
- devsper/agents/a2a/server.py +128 -0
- devsper/agents/a2a/tool_adapter.py +68 -0
- devsper/agents/a2a/types.py +49 -0
- devsper/agents/agent.py +602 -0
- devsper/agents/critic.py +80 -0
- devsper/agents/message_bus.py +124 -0
- devsper/agents/roles.py +181 -0
- devsper/agents/run_agent.py +78 -0
- devsper/analytics/__init__.py +5 -0
- devsper/analytics/tool_analytics.py +78 -0
- devsper/audit/__init__.py +5 -0
- devsper/audit/logger.py +214 -0
- devsper/bus/__init__.py +29 -0
- devsper/bus/backends/__init__.py +5 -0
- devsper/bus/backends/base.py +38 -0
- devsper/bus/backends/memory.py +55 -0
- devsper/bus/backends/redis.py +146 -0
- devsper/bus/message.py +56 -0
- devsper/bus/schema_version.py +3 -0
- devsper/bus/topics.py +19 -0
- devsper/cache/__init__.py +6 -0
- devsper/cache/embedding_index.py +98 -0
- devsper/cache/hashing.py +24 -0
- devsper/cache/store.py +153 -0
- devsper/cache/task_cache.py +191 -0
- devsper/cli/__init__.py +6 -0
- devsper/cli/commands/reg.py +733 -0
- devsper/cli/github_oauth.py +157 -0
- devsper/cli/init.py +637 -0
- devsper/cli/main.py +2956 -0
- devsper/cli/run_progress.py +103 -0
- devsper/cli/ui/__init__.py +65 -0
- devsper/cli/ui/components.py +94 -0
- devsper/cli/ui/errors.py +104 -0
- devsper/cli/ui/logging.py +120 -0
- devsper/cli/ui/onboarding.py +102 -0
- devsper/cli/ui/progress.py +43 -0
- devsper/cli/ui/run_view.py +308 -0
- devsper/cli/ui/theme.py +40 -0
- devsper/cluster/__init__.py +29 -0
- devsper/cluster/election.py +84 -0
- devsper/cluster/local.py +97 -0
- devsper/cluster/node_info.py +77 -0
- devsper/cluster/registry.py +71 -0
- devsper/cluster/router.py +117 -0
- devsper/cluster/state_backend.py +105 -0
- devsper/compliance/__init__.py +5 -0
- devsper/compliance/pii.py +147 -0
- devsper/config/__init__.py +52 -0
- devsper/config/config_loader.py +121 -0
- devsper/config/defaults.py +77 -0
- devsper/config/resolver.py +342 -0
- devsper/config/schema.py +237 -0
- devsper/credentials/__init__.py +19 -0
- devsper/credentials/cli.py +197 -0
- devsper/credentials/migration.py +124 -0
- devsper/credentials/store.py +142 -0
- devsper/dashboard/__init__.py +9 -0
- devsper/dashboard/dashboard.py +87 -0
- devsper/dev/__init__.py +25 -0
- devsper/dev/builder.py +195 -0
- devsper/dev/debugger.py +95 -0
- devsper/dev/repo_index.py +138 -0
- devsper/dev/sandbox.py +203 -0
- devsper/dev/scaffold.py +122 -0
- devsper/embeddings/__init__.py +5 -0
- devsper/embeddings/service.py +36 -0
- devsper/explainability/__init__.py +14 -0
- devsper/explainability/decision_tree.py +104 -0
- devsper/explainability/rationale.py +38 -0
- devsper/explainability/simulation.py +56 -0
- devsper/hitl/__init__.py +13 -0
- devsper/hitl/approval.py +160 -0
- devsper/hitl/escalation.py +95 -0
- devsper/intelligence/__init__.py +9 -0
- devsper/intelligence/adaptation.py +88 -0
- devsper/intelligence/analysis/__init__.py +19 -0
- devsper/intelligence/analysis/analyzer.py +71 -0
- devsper/intelligence/analysis/cost_estimator.py +66 -0
- devsper/intelligence/analysis/formatter.py +103 -0
- devsper/intelligence/analysis/run_report.py +402 -0
- devsper/intelligence/learning_engine.py +92 -0
- devsper/intelligence/strategies/__init__.py +23 -0
- devsper/intelligence/strategies/base.py +14 -0
- devsper/intelligence/strategies/code_analysis_strategy.py +33 -0
- devsper/intelligence/strategies/data_science_strategy.py +33 -0
- devsper/intelligence/strategies/document_pipeline_strategy.py +33 -0
- devsper/intelligence/strategies/experiment_strategy.py +33 -0
- devsper/intelligence/strategies/research_strategy.py +34 -0
- devsper/intelligence/strategy_selector.py +84 -0
- devsper/intelligence/synthesis.py +132 -0
- devsper/intelligence/task_optimizer.py +92 -0
- devsper/knowledge/__init__.py +5 -0
- devsper/knowledge/extractor.py +204 -0
- devsper/knowledge/knowledge_graph.py +184 -0
- devsper/knowledge/query.py +285 -0
- devsper/memory/__init__.py +35 -0
- devsper/memory/consolidation.py +138 -0
- devsper/memory/embeddings.py +60 -0
- devsper/memory/memory_index.py +97 -0
- devsper/memory/memory_router.py +62 -0
- devsper/memory/memory_store.py +221 -0
- devsper/memory/memory_types.py +54 -0
- devsper/memory/namespaces.py +45 -0
- devsper/memory/scoring.py +77 -0
- devsper/memory/summarizer.py +52 -0
- devsper/nodes/__init__.py +5 -0
- devsper/nodes/controller.py +449 -0
- devsper/nodes/rpc.py +127 -0
- devsper/nodes/single.py +161 -0
- devsper/nodes/worker.py +506 -0
- devsper/orchestration/__init__.py +19 -0
- devsper/orchestration/meta_planner.py +239 -0
- devsper/orchestration/priority_queue.py +61 -0
- devsper/plugins/__init__.py +19 -0
- devsper/plugins/marketplace/__init__.py +0 -0
- devsper/plugins/plugin_loader.py +70 -0
- devsper/plugins/plugin_registry.py +34 -0
- devsper/plugins/registry.py +83 -0
- devsper/protocols/__init__.py +6 -0
- devsper/providers/__init__.py +17 -0
- devsper/providers/anthropic.py +84 -0
- devsper/providers/base.py +75 -0
- devsper/providers/complexity_router.py +94 -0
- devsper/providers/gemini.py +36 -0
- devsper/providers/github.py +180 -0
- devsper/providers/model_router.py +40 -0
- devsper/providers/openai.py +105 -0
- devsper/providers/router/__init__.py +21 -0
- devsper/providers/router/backends/__init__.py +19 -0
- devsper/providers/router/backends/anthropic_backend.py +111 -0
- devsper/providers/router/backends/custom_backend.py +138 -0
- devsper/providers/router/backends/gemini_backend.py +89 -0
- devsper/providers/router/backends/github_backend.py +165 -0
- devsper/providers/router/backends/ollama_backend.py +104 -0
- devsper/providers/router/backends/openai_backend.py +142 -0
- devsper/providers/router/backends/vllm_backend.py +35 -0
- devsper/providers/router/base.py +60 -0
- devsper/providers/router/factory.py +92 -0
- devsper/providers/router/legacy.py +101 -0
- devsper/providers/router/router.py +135 -0
- devsper/reasoning/__init__.py +12 -0
- devsper/reasoning/graph.py +59 -0
- devsper/reasoning/nodes.py +20 -0
- devsper/reasoning/store.py +67 -0
- devsper/runtime/__init__.py +12 -0
- devsper/runtime/health.py +88 -0
- devsper/runtime/replay.py +53 -0
- devsper/runtime/replay_engine.py +142 -0
- devsper/runtime/run_history.py +204 -0
- devsper/runtime/telemetry.py +116 -0
- devsper/runtime/visualize.py +58 -0
- devsper/sandbox/__init__.py +13 -0
- devsper/sandbox/sandbox.py +161 -0
- devsper/swarm/checkpointer.py +65 -0
- devsper/swarm/executor.py +558 -0
- devsper/swarm/map_reduce.py +44 -0
- devsper/swarm/planner.py +197 -0
- devsper/swarm/prefetcher.py +91 -0
- devsper/swarm/scheduler.py +153 -0
- devsper/swarm/speculation.py +47 -0
- devsper/swarm/swarm.py +562 -0
- devsper/tools/__init__.py +33 -0
- devsper/tools/base.py +29 -0
- devsper/tools/code_intelligence/__init__.py +13 -0
- devsper/tools/code_intelligence/api_surface_extractor.py +73 -0
- devsper/tools/code_intelligence/architecture_analyzer.py +65 -0
- devsper/tools/code_intelligence/codebase_indexer.py +71 -0
- devsper/tools/code_intelligence/dependency_graph_builder.py +67 -0
- devsper/tools/code_intelligence/design_pattern_detector.py +62 -0
- devsper/tools/code_intelligence/large_function_detector.py +68 -0
- devsper/tools/code_intelligence/module_responsibility_mapper.py +56 -0
- devsper/tools/code_intelligence/parallel_codebase_analysis.py +44 -0
- devsper/tools/code_intelligence/refactor_candidate_detector.py +81 -0
- devsper/tools/code_intelligence/repository_semantic_index.py +61 -0
- devsper/tools/code_intelligence/test_coverage_estimator.py +62 -0
- devsper/tools/coding/__init__.py +12 -0
- devsper/tools/coding/analyze_code_complexity.py +48 -0
- devsper/tools/coding/dependency_analyzer.py +42 -0
- devsper/tools/coding/extract_functions.py +38 -0
- devsper/tools/coding/format_python.py +50 -0
- devsper/tools/coding/generate_docstrings.py +40 -0
- devsper/tools/coding/generate_unit_tests.py +42 -0
- devsper/tools/coding/lint_python.py +51 -0
- devsper/tools/coding/refactor_function.py +41 -0
- devsper/tools/coding/repo_structure_map.py +54 -0
- devsper/tools/coding/run_python.py +53 -0
- devsper/tools/data/__init__.py +12 -0
- devsper/tools/data/column_type_detection.py +64 -0
- devsper/tools/data/csv_summary.py +52 -0
- devsper/tools/data/dataframe_filter.py +51 -0
- devsper/tools/data/dataframe_groupby.py +47 -0
- devsper/tools/data/dataframe_stats.py +38 -0
- devsper/tools/data/dataset_sampling.py +55 -0
- devsper/tools/data/dataset_schema.py +45 -0
- devsper/tools/data/json_pretty_print.py +37 -0
- devsper/tools/data/json_query.py +46 -0
- devsper/tools/data/missing_value_report.py +47 -0
- devsper/tools/data_science/__init__.py +13 -0
- devsper/tools/data_science/correlation_heatmap.py +72 -0
- devsper/tools/data_science/dataset_bias_detector.py +49 -0
- devsper/tools/data_science/dataset_distribution_report.py +64 -0
- devsper/tools/data_science/dataset_drift_detector.py +64 -0
- devsper/tools/data_science/dataset_outlier_detector.py +65 -0
- devsper/tools/data_science/dataset_profile.py +76 -0
- devsper/tools/data_science/distributed_dataset_processor.py +54 -0
- devsper/tools/data_science/feature_engineering_suggestions.py +69 -0
- devsper/tools/data_science/feature_importance_estimator.py +82 -0
- devsper/tools/data_science/model_input_validator.py +59 -0
- devsper/tools/data_science/time_series_analyzer.py +57 -0
- devsper/tools/documents/__init__.py +11 -0
- devsper/tools/documents/_docproc.py +56 -0
- devsper/tools/documents/document_to_markdown.py +29 -0
- devsper/tools/documents/extract_document_images.py +39 -0
- devsper/tools/documents/extract_document_text.py +29 -0
- devsper/tools/documents/extract_equations.py +36 -0
- devsper/tools/documents/extract_tables.py +47 -0
- devsper/tools/documents/summarize_document.py +42 -0
- devsper/tools/documents/write_latex_document.py +133 -0
- devsper/tools/documents/write_markdown_document.py +89 -0
- devsper/tools/documents/write_word_document.py +149 -0
- devsper/tools/experiments/__init__.py +13 -0
- devsper/tools/experiments/bootstrap_estimator.py +54 -0
- devsper/tools/experiments/experiment_report_generator.py +50 -0
- devsper/tools/experiments/experiment_tracker.py +36 -0
- devsper/tools/experiments/grid_search_runner.py +50 -0
- devsper/tools/experiments/model_benchmark_runner.py +45 -0
- devsper/tools/experiments/monte_carlo_experiment.py +38 -0
- devsper/tools/experiments/parameter_sweep_runner.py +51 -0
- devsper/tools/experiments/result_comparator.py +58 -0
- devsper/tools/experiments/simulation_runner.py +43 -0
- devsper/tools/experiments/statistical_significance_test.py +56 -0
- devsper/tools/experiments/swarm_map_reduce.py +42 -0
- devsper/tools/filesystem/__init__.py +12 -0
- devsper/tools/filesystem/append_file.py +42 -0
- devsper/tools/filesystem/file_hash.py +40 -0
- devsper/tools/filesystem/file_line_count.py +36 -0
- devsper/tools/filesystem/file_metadata.py +38 -0
- devsper/tools/filesystem/file_preview.py +55 -0
- devsper/tools/filesystem/find_large_files.py +50 -0
- devsper/tools/filesystem/list_directory.py +39 -0
- devsper/tools/filesystem/read_file.py +35 -0
- devsper/tools/filesystem/search_files.py +60 -0
- devsper/tools/filesystem/write_file.py +41 -0
- devsper/tools/flagship/__init__.py +15 -0
- devsper/tools/flagship/distributed_document_analysis.py +77 -0
- devsper/tools/flagship/docproc_corpus_pipeline.py +91 -0
- devsper/tools/flagship/repository_semantic_map.py +99 -0
- devsper/tools/flagship/research_graph_builder.py +111 -0
- devsper/tools/flagship/swarm_experiment_runner.py +86 -0
- devsper/tools/knowledge/__init__.py +10 -0
- devsper/tools/knowledge/citation_graph_builder.py +69 -0
- devsper/tools/knowledge/concept_frequency_analyzer.py +74 -0
- devsper/tools/knowledge/corpus_builder.py +66 -0
- devsper/tools/knowledge/cross_document_entity_linker.py +71 -0
- devsper/tools/knowledge/document_corpus_summary.py +68 -0
- devsper/tools/knowledge/document_topic_extractor.py +58 -0
- devsper/tools/knowledge/knowledge_graph_extractor.py +58 -0
- devsper/tools/knowledge/timeline_extractor.py +59 -0
- devsper/tools/math/__init__.py +12 -0
- devsper/tools/math/calculate_expression.py +52 -0
- devsper/tools/math/correlation.py +44 -0
- devsper/tools/math/distribution_summary.py +39 -0
- devsper/tools/math/histogram.py +53 -0
- devsper/tools/math/linear_regression.py +47 -0
- devsper/tools/math/matrix_multiply.py +38 -0
- devsper/tools/math/mean_std.py +35 -0
- devsper/tools/math/monte_carlo_simulation.py +43 -0
- devsper/tools/math/polynomial_fit.py +40 -0
- devsper/tools/math/random_sample.py +36 -0
- devsper/tools/mcp/__init__.py +23 -0
- devsper/tools/mcp/adapter.py +53 -0
- devsper/tools/mcp/client.py +235 -0
- devsper/tools/mcp/discovery.py +53 -0
- devsper/tools/memory/__init__.py +16 -0
- devsper/tools/memory/delete_memory.py +25 -0
- devsper/tools/memory/list_memory.py +34 -0
- devsper/tools/memory/search_memory.py +36 -0
- devsper/tools/memory/store_memory.py +47 -0
- devsper/tools/memory/summarize_memory.py +41 -0
- devsper/tools/memory/tag_memory.py +47 -0
- devsper/tools/pipelines.py +92 -0
- devsper/tools/registry.py +39 -0
- devsper/tools/research/__init__.py +12 -0
- devsper/tools/research/arxiv_download.py +55 -0
- devsper/tools/research/arxiv_search.py +58 -0
- devsper/tools/research/citation_extractor.py +35 -0
- devsper/tools/research/duckduckgo_search.py +42 -0
- devsper/tools/research/paper_metadata_extractor.py +45 -0
- devsper/tools/research/paper_summarizer.py +41 -0
- devsper/tools/research/research_question_generator.py +39 -0
- devsper/tools/research/topic_cluster.py +46 -0
- devsper/tools/research/web_search.py +47 -0
- devsper/tools/research/wikipedia_lookup.py +50 -0
- devsper/tools/research_advanced/__init__.py +14 -0
- devsper/tools/research_advanced/citation_context_extractor.py +60 -0
- devsper/tools/research_advanced/literature_review_generator.py +79 -0
- devsper/tools/research_advanced/methodology_extractor.py +58 -0
- devsper/tools/research_advanced/paper_contribution_extractor.py +50 -0
- devsper/tools/research_advanced/paper_dataset_identifier.py +49 -0
- devsper/tools/research_advanced/paper_method_comparator.py +62 -0
- devsper/tools/research_advanced/paper_similarity_search.py +69 -0
- devsper/tools/research_advanced/paper_trend_analyzer.py +69 -0
- devsper/tools/research_advanced/parallel_document_analyzer.py +56 -0
- devsper/tools/research_advanced/research_gap_finder.py +71 -0
- devsper/tools/research_advanced/research_topic_mapper.py +69 -0
- devsper/tools/research_advanced/swarm_literature_review.py +58 -0
- devsper/tools/scoring/__init__.py +52 -0
- devsper/tools/scoring/report.py +44 -0
- devsper/tools/scoring/scorer.py +39 -0
- devsper/tools/scoring/selector.py +61 -0
- devsper/tools/scoring/store.py +267 -0
- devsper/tools/selector.py +130 -0
- devsper/tools/system/__init__.py +12 -0
- devsper/tools/system/cpu_usage.py +22 -0
- devsper/tools/system/disk_usage.py +35 -0
- devsper/tools/system/environment_variables.py +29 -0
- devsper/tools/system/memory_usage.py +23 -0
- devsper/tools/system/pip_install.py +44 -0
- devsper/tools/system/pip_search.py +29 -0
- devsper/tools/system/process_list.py +34 -0
- devsper/tools/system/python_package_list.py +40 -0
- devsper/tools/system/run_shell_command.py +51 -0
- devsper/tools/system/system_info.py +26 -0
- devsper/tools/tool_runner.py +122 -0
- devsper/tui/__init__.py +5 -0
- devsper/tui/activity_feed_view.py +73 -0
- devsper/tui/adaptive_tasks_view.py +75 -0
- devsper/tui/agent_role_view.py +35 -0
- devsper/tui/app.py +395 -0
- devsper/tui/dashboard_screen.py +290 -0
- devsper/tui/dev_view.py +99 -0
- devsper/tui/inject_screen.py +73 -0
- devsper/tui/knowledge_graph_view.py +46 -0
- devsper/tui/layout.py +43 -0
- devsper/tui/logs_view.py +83 -0
- devsper/tui/memory_view.py +58 -0
- devsper/tui/performance_view.py +33 -0
- devsper/tui/reasoning_graph_view.py +39 -0
- devsper/tui/results_view.py +139 -0
- devsper/tui/swarm_view.py +37 -0
- devsper/tui/task_detail_screen.py +55 -0
- devsper/tui/task_view.py +103 -0
- devsper/types/event.py +97 -0
- devsper/types/exceptions.py +21 -0
- devsper/types/swarm.py +41 -0
- devsper/types/task.py +80 -0
- devsper/upgrade/__init__.py +21 -0
- devsper/upgrade/changelog.py +124 -0
- devsper/upgrade/cli.py +145 -0
- devsper/upgrade/installer.py +103 -0
- devsper/upgrade/notifier.py +52 -0
- devsper/upgrade/version_check.py +121 -0
- devsper/utils/event_logger.py +88 -0
- devsper/utils/http.py +43 -0
- devsper/utils/models.py +54 -0
- devsper/visualization/__init__.py +5 -0
- devsper/visualization/dag_export.py +67 -0
- devsper/workflow/__init__.py +18 -0
- devsper/workflow/conditions.py +157 -0
- devsper/workflow/context.py +108 -0
- devsper/workflow/loader.py +156 -0
- devsper/workflow/resolver.py +109 -0
- devsper/workflow/runner.py +562 -0
- devsper/workflow/schema.py +63 -0
- devsper/workflow/validator.py +128 -0
- devsper-2.1.6.dist-info/METADATA +346 -0
- devsper-2.1.6.dist-info/RECORD +375 -0
- devsper-2.1.6.dist-info/WHEEL +4 -0
- devsper-2.1.6.dist-info/entry_points.txt +3 -0
- devsper-2.1.6.dist-info/licenses/LICENSE +639 -0
|
@@ -0,0 +1,402 @@
|
|
|
1
|
+
"""
|
|
2
|
+
RunReport and TaskSummary dataclasses; build from event log and DAG.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import os
|
|
9
|
+
from dataclasses import dataclass
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
from devsper.types.event import Event, events
|
|
13
|
+
from devsper.types.task import TaskStatus
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass
class TaskSummary:
    """Per-task summary row embedded in a RunReport.

    Built by build_report_from_events from the run's event log and persisted
    DAG; captures timing, tool usage, and failure detail for a single task.
    """

    task_id: str  # unique task identifier (DAG node id / event payload "task_id")
    description: str  # human-readable description from the DAG node, falling back to the id
    role: str | None  # agent role, when known (the event-log builder leaves this None)
    status: TaskStatus  # final status derived from task_started/completed/failed events
    duration_seconds: float  # wall-clock seconds between task_started and task_completed/failed (0.0 if unknown)
    tools_used: list[str]  # tool names from tool_called events, in call order
    tool_failures: list[str]  # tools recorded as failing for this task
    tokens_used: int | None  # token count when available (not populated by the event-log builder)
    retry_count: int  # number of retries recorded for the task
    error: str | None  # error message from the task_failed payload, else None
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@dataclass
class RunReport:
    """Aggregate report for one swarm run, reconstructed from events and DAG.

    Produced by build_report_from_events; later schema versions appended
    optional fields with defaults (see the v1.6 / v1.7 sections) so older
    serialized reports stay loadable.
    """

    run_id: str  # run identifier; also the event-log file stem
    root_task: str  # user's original task text (truncated to 500 chars), from swarm_started
    strategy: str  # execution strategy name ("unknown" when not recorded)
    started_at: str  # ISO timestamp: swarm_started, else the first event
    finished_at: str  # ISO timestamp: swarm_finished, else the last event
    total_duration_seconds: float  # wall clock from first to last event (never negative)
    total_tasks: int  # number of TaskSummary rows
    completed_tasks: int  # tasks whose final status is COMPLETED
    failed_tasks: int  # tasks whose final status is FAILED
    skipped_tasks: int  # total - completed - failed, clamped at 0
    tasks: list[TaskSummary]  # per-task rows, in order of first appearance in events then DAG
    critical_path: list[str]  # task ids on the longest-duration dependency chain
    bottleneck_task_id: str | None  # slowest single task on the critical path
    tools_called: int  # total tool invocations across all tasks
    tool_success_rate: float  # percentage (0-100); 100.0 when no tools were called
    estimated_cost_usd: float | None  # CostEstimator.estimate output
    models_used: list[str]  # model identifiers (not populated by the event-log builder)
    peak_parallelism: int  # max number of tasks with overlapping running windows
    plain_english_analysis: str | None  # optional narrative summary (None from the event-log builder)
    # v1.6
    model_tier_breakdown: dict[str, int] | None = None  # simple/medium/complex counts
    estimated_cost_without_routing: float | None = None  # hypothetical cost without tier routing (currently a placeholder)
    theoretical_sequential_duration: float | None = None  # sum of every task's duration
    actual_duration: float | None = None  # mirrors total_duration_seconds
    parallelism_efficiency: float | None = None  # actual / theoretical sequential duration
    # v1.7
    tasks_critiqued: int = 0  # count of task_critiqued events
    tasks_retried_by_critic: int = 0  # critiques whose payload requested a retry
    avg_critique_score: float | None = None  # mean of recorded critique scores, when any
    prefetch_hit_rate: float | None = None  # prefetch hits / (hits + misses), when any were seen
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def _find_log_path(events_dir: str | Path, run_id: str) -> str | None:
|
|
65
|
+
"""Find events JSONL file for run_id. Returns path or None."""
|
|
66
|
+
path = Path(events_dir)
|
|
67
|
+
if not path.is_dir():
|
|
68
|
+
return None
|
|
69
|
+
candidate = path / f"{run_id}.jsonl"
|
|
70
|
+
if candidate.is_file():
|
|
71
|
+
return str(candidate)
|
|
72
|
+
for f in path.glob("*.jsonl"):
|
|
73
|
+
if f.stem == run_id:
|
|
74
|
+
return str(f)
|
|
75
|
+
return None
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _load_events(log_path: str) -> list[Event]:
    """Parse a JSONL event log into a list of Event objects.

    Blank lines and lines that fail Event validation are skipped silently;
    a missing file yields an empty list.
    """
    if not os.path.isfile(log_path):
        return []
    parsed: list[Event] = []
    with open(log_path, "r", encoding="utf-8") as fh:
        for raw in fh:
            record = raw.strip()
            if not record:
                continue
            # Best-effort parse: a corrupt line must not abort the whole load.
            try:
                event = Event.model_validate_json(record)
            except Exception:
                continue
            parsed.append(event)
    return parsed
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def _load_dag(events_dir: str | Path, run_id: str) -> tuple[list[dict], list[tuple[str, str]]]:
|
|
95
|
+
"""Load DAG from events_dir / {run_id}_dag.json. Returns (nodes, edges)."""
|
|
96
|
+
path = Path(events_dir) / f"{run_id}_dag.json"
|
|
97
|
+
if not path.is_file():
|
|
98
|
+
return [], []
|
|
99
|
+
with open(path, "r", encoding="utf-8") as f:
|
|
100
|
+
data = json.load(f)
|
|
101
|
+
nodes = data.get("nodes", [])
|
|
102
|
+
edges = [tuple(e) for e in data.get("edges", [])]
|
|
103
|
+
return nodes, edges
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def _critical_path_and_bottleneck(
|
|
107
|
+
task_ids: list[str],
|
|
108
|
+
edges: list[tuple[str, str]],
|
|
109
|
+
duration_by_task: dict[str, float],
|
|
110
|
+
) -> tuple[list[str], str | None]:
|
|
111
|
+
"""
|
|
112
|
+
Compute critical path (longest dependency chain by duration) and bottleneck task.
|
|
113
|
+
edges: (from_id, to_id) meaning from is dependency of to.
|
|
114
|
+
"""
|
|
115
|
+
if not task_ids:
|
|
116
|
+
return [], None
|
|
117
|
+
pred = {tid: [] for tid in task_ids}
|
|
118
|
+
succ = {tid: [] for tid in task_ids}
|
|
119
|
+
for a, b in edges:
|
|
120
|
+
if a in pred and b in succ:
|
|
121
|
+
pred[b].append(a)
|
|
122
|
+
succ[a].append(b)
|
|
123
|
+
# Topological order (roots first)
|
|
124
|
+
in_degree = {tid: len(pred[tid]) for tid in task_ids}
|
|
125
|
+
order = []
|
|
126
|
+
stack = [tid for tid in task_ids if in_degree[tid] == 0]
|
|
127
|
+
while stack:
|
|
128
|
+
n = stack.pop()
|
|
129
|
+
order.append(n)
|
|
130
|
+
for s in succ[n]:
|
|
131
|
+
in_degree[s] -= 1
|
|
132
|
+
if in_degree[s] == 0:
|
|
133
|
+
stack.append(s)
|
|
134
|
+
# Longest path from any root to each node (earliest finish)
|
|
135
|
+
dist = {tid: duration_by_task.get(tid, 0.0) for tid in task_ids}
|
|
136
|
+
for n in order:
|
|
137
|
+
for p in pred[n]:
|
|
138
|
+
dist[n] = max(dist[n], dist[p] + duration_by_task.get(n, 0.0))
|
|
139
|
+
# Longest path value and a leaf on that path
|
|
140
|
+
max_dist = max(dist.values()) if dist else 0.0
|
|
141
|
+
leaf = next((tid for tid in order if dist[tid] == max_dist), None)
|
|
142
|
+
if not leaf:
|
|
143
|
+
return list(order), None
|
|
144
|
+
# Backtrack to get critical path (path from root to leaf with max total duration)
|
|
145
|
+
path = []
|
|
146
|
+
cur = leaf
|
|
147
|
+
while cur is not None:
|
|
148
|
+
path.append(cur)
|
|
149
|
+
best_pred = None
|
|
150
|
+
best_val = -1.0
|
|
151
|
+
for p in pred[cur]:
|
|
152
|
+
v = dist.get(p, 0.0)
|
|
153
|
+
if v >= best_val:
|
|
154
|
+
best_val = v
|
|
155
|
+
best_pred = p
|
|
156
|
+
cur = best_pred
|
|
157
|
+
path.reverse()
|
|
158
|
+
# Bottleneck: task on critical path with highest single-task duration
|
|
159
|
+
bottleneck = None
|
|
160
|
+
bottleneck_dur = -1.0
|
|
161
|
+
for tid in path:
|
|
162
|
+
d = duration_by_task.get(tid, 0.0)
|
|
163
|
+
if d > bottleneck_dur:
|
|
164
|
+
bottleneck_dur = d
|
|
165
|
+
bottleneck = tid
|
|
166
|
+
return path, bottleneck
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def build_report_from_events(run_id: str, events_dir: str | Path) -> RunReport:
|
|
170
|
+
"""
|
|
171
|
+
Load events from {events_dir}/{run_id}.jsonl (or matching .jsonl), reconstruct
|
|
172
|
+
timeline, compute critical path and bottleneck, peak parallelism, and cost estimate.
|
|
173
|
+
"""
|
|
174
|
+
events_dir = Path(events_dir)
|
|
175
|
+
log_path = _find_log_path(events_dir, run_id)
|
|
176
|
+
if not log_path:
|
|
177
|
+
raise FileNotFoundError(f"No event log found for run_id: {run_id!r} (looked in {events_dir})")
|
|
178
|
+
evs = _load_events(log_path)
|
|
179
|
+
if not evs:
|
|
180
|
+
raise ValueError(f"Empty event log for run_id: {run_id!r}")
|
|
181
|
+
|
|
182
|
+
evs.sort(key=lambda e: e.timestamp)
|
|
183
|
+
nodes, edges = _load_dag(events_dir, run_id)
|
|
184
|
+
node_by_id = {n["id"]: n for n in nodes}
|
|
185
|
+
# Timeline: TASK_STARTED -> TASK_COMPLETED / TASK_FAILED
|
|
186
|
+
task_started: dict[str, float] = {}
|
|
187
|
+
task_ended: dict[str, float] = {}
|
|
188
|
+
task_status: dict[str, str] = {}
|
|
189
|
+
task_error: dict[str, str] = {}
|
|
190
|
+
tools_by_task: dict[str, list[str]] = {}
|
|
191
|
+
tool_failures_by_task: dict[str, list[str]] = {}
|
|
192
|
+
task_tier: dict[str, str] = {} # v1.6: task_id -> simple|medium|complex
|
|
193
|
+
root_task = ""
|
|
194
|
+
started_ts: str | None = None
|
|
195
|
+
finished_ts: str | None = None
|
|
196
|
+
strategy = ""
|
|
197
|
+
# v1.7
|
|
198
|
+
tasks_critiqued = 0
|
|
199
|
+
tasks_retried_by_critic = 0
|
|
200
|
+
critique_scores: list[float] = []
|
|
201
|
+
prefetch_hits = 0
|
|
202
|
+
prefetch_misses = 0
|
|
203
|
+
|
|
204
|
+
for e in evs:
|
|
205
|
+
typ = e.type.value if hasattr(e.type, "value") else str(e.type)
|
|
206
|
+
payload = e.payload or {}
|
|
207
|
+
task_id = (payload.get("task_id") or "").strip()
|
|
208
|
+
ts = e.timestamp
|
|
209
|
+
ts_str = ts.isoformat() if hasattr(ts, "isoformat") else str(ts)
|
|
210
|
+
|
|
211
|
+
if typ == "swarm_started":
|
|
212
|
+
root_task = (payload.get("user_task") or "")[:500]
|
|
213
|
+
if not started_ts:
|
|
214
|
+
started_ts = ts_str
|
|
215
|
+
elif typ == "task_started" and task_id:
|
|
216
|
+
task_started[task_id] = ts.timestamp() if hasattr(ts, "timestamp") else 0.0
|
|
217
|
+
elif typ == "task_completed" and task_id:
|
|
218
|
+
task_ended[task_id] = ts.timestamp() if hasattr(ts, "timestamp") else 0.0
|
|
219
|
+
task_status[task_id] = "completed"
|
|
220
|
+
elif typ == "task_failed" and task_id:
|
|
221
|
+
task_ended[task_id] = ts.timestamp() if hasattr(ts, "timestamp") else 0.0
|
|
222
|
+
task_status[task_id] = "failed"
|
|
223
|
+
task_error[task_id] = payload.get("error") or "Unknown error"
|
|
224
|
+
elif typ == "tool_called" and task_id:
|
|
225
|
+
tool = payload.get("tool") or "unknown"
|
|
226
|
+
tools_by_task.setdefault(task_id, []).append(tool)
|
|
227
|
+
elif typ == "task_model_selected" and task_id:
|
|
228
|
+
task_tier[task_id] = payload.get("tier") or "medium"
|
|
229
|
+
elif typ == "task_critiqued" and task_id:
|
|
230
|
+
tasks_critiqued += 1
|
|
231
|
+
score = payload.get("score")
|
|
232
|
+
if score is not None:
|
|
233
|
+
critique_scores.append(float(score))
|
|
234
|
+
if payload.get("retry_requested"):
|
|
235
|
+
tasks_retried_by_critic += 1
|
|
236
|
+
elif typ == "prefetch_hit":
|
|
237
|
+
prefetch_hits += 1
|
|
238
|
+
elif typ == "prefetch_miss":
|
|
239
|
+
prefetch_misses += 1
|
|
240
|
+
elif typ == "swarm_finished":
|
|
241
|
+
finished_ts = ts_str
|
|
242
|
+
|
|
243
|
+
if not started_ts:
|
|
244
|
+
started_ts = evs[0].timestamp.isoformat() if evs else ""
|
|
245
|
+
if not finished_ts:
|
|
246
|
+
finished_ts = evs[-1].timestamp.isoformat() if evs else ""
|
|
247
|
+
|
|
248
|
+
# All task ids from DAG + any from events not in DAG
|
|
249
|
+
all_task_ids = set(node_by_id.keys()) | set(task_started.keys()) | set(task_ended.keys())
|
|
250
|
+
duration_by_task: dict[str, float] = {}
|
|
251
|
+
for tid in all_task_ids:
|
|
252
|
+
start = task_started.get(tid)
|
|
253
|
+
end = task_ended.get(tid)
|
|
254
|
+
if start is not None and end is not None:
|
|
255
|
+
duration_by_task[tid] = max(0.0, end - start)
|
|
256
|
+
else:
|
|
257
|
+
duration_by_task[tid] = 0.0
|
|
258
|
+
|
|
259
|
+
critical_path, bottleneck_task_id = _critical_path_and_bottleneck(
|
|
260
|
+
list(all_task_ids), edges, duration_by_task
|
|
261
|
+
)
|
|
262
|
+
|
|
263
|
+
# Peak parallelism: max overlapping RUNNING windows
|
|
264
|
+
intervals: list[tuple[float, float]] = []
|
|
265
|
+
for tid in all_task_ids:
|
|
266
|
+
s = task_started.get(tid)
|
|
267
|
+
e = task_ended.get(tid)
|
|
268
|
+
if s is not None and e is not None:
|
|
269
|
+
intervals.append((s, e))
|
|
270
|
+
peak_parallelism = 0
|
|
271
|
+
if intervals:
|
|
272
|
+
points = []
|
|
273
|
+
for a, b in intervals:
|
|
274
|
+
points.append((a, 1))
|
|
275
|
+
points.append((b, -1))
|
|
276
|
+
points.sort(key=lambda x: (x[0], -x[1]))
|
|
277
|
+
cur = 0
|
|
278
|
+
for _t, delta in points:
|
|
279
|
+
cur += delta
|
|
280
|
+
peak_parallelism = max(peak_parallelism, cur)
|
|
281
|
+
|
|
282
|
+
# Build TaskSummary list (order: by first appearance in events / DAG)
|
|
283
|
+
seen = set()
|
|
284
|
+
task_order = []
|
|
285
|
+
for e in evs:
|
|
286
|
+
p = e.payload or {}
|
|
287
|
+
tid = (p.get("task_id") or "").strip()
|
|
288
|
+
if tid and tid not in seen and (tid in node_by_id or tid in task_status):
|
|
289
|
+
seen.add(tid)
|
|
290
|
+
task_order.append(tid)
|
|
291
|
+
for nid in node_by_id:
|
|
292
|
+
if nid not in seen:
|
|
293
|
+
task_order.append(nid)
|
|
294
|
+
seen.add(nid)
|
|
295
|
+
|
|
296
|
+
status_enum = {
|
|
297
|
+
"completed": TaskStatus.COMPLETED,
|
|
298
|
+
"failed": TaskStatus.FAILED,
|
|
299
|
+
"running": TaskStatus.RUNNING,
|
|
300
|
+
"pending": TaskStatus.PENDING,
|
|
301
|
+
}
|
|
302
|
+
tasks_list: list[TaskSummary] = []
|
|
303
|
+
for tid in task_order:
|
|
304
|
+
desc = (node_by_id.get(tid) or {}).get("description") or tid
|
|
305
|
+
st = task_status.get(tid, "pending")
|
|
306
|
+
if tid not in task_started and tid not in task_ended and tid in node_by_id:
|
|
307
|
+
st = "skipped"
|
|
308
|
+
status = status_enum.get(st, TaskStatus.PENDING)
|
|
309
|
+
dur = duration_by_task.get(tid, 0.0)
|
|
310
|
+
tools = list(tools_by_task.get(tid, []))
|
|
311
|
+
tool_failures = list(tool_failures_by_task.get(tid, []))
|
|
312
|
+
tasks_list.append(
|
|
313
|
+
TaskSummary(
|
|
314
|
+
task_id=tid,
|
|
315
|
+
description=desc,
|
|
316
|
+
role=None,
|
|
317
|
+
status=status,
|
|
318
|
+
duration_seconds=dur,
|
|
319
|
+
tools_used=tools,
|
|
320
|
+
tool_failures=tool_failures,
|
|
321
|
+
tokens_used=None,
|
|
322
|
+
retry_count=0,
|
|
323
|
+
error=task_error.get(tid),
|
|
324
|
+
)
|
|
325
|
+
)
|
|
326
|
+
|
|
327
|
+
completed_tasks = sum(1 for t in tasks_list if t.status == TaskStatus.COMPLETED)
|
|
328
|
+
failed_tasks = sum(1 for t in tasks_list if t.status == TaskStatus.FAILED)
|
|
329
|
+
total_tasks = len(tasks_list)
|
|
330
|
+
skipped_tasks = max(0, total_tasks - completed_tasks - failed_tasks)
|
|
331
|
+
|
|
332
|
+
tools_called = sum(len(t.tools_used) for t in tasks_list)
|
|
333
|
+
tool_fail_count = sum(len(t.tool_failures) for t in tasks_list)
|
|
334
|
+
tool_success_rate = (
|
|
335
|
+
(100.0 * (tools_called - tool_fail_count) / tools_called)
|
|
336
|
+
if tools_called else 100.0
|
|
337
|
+
)
|
|
338
|
+
|
|
339
|
+
total_duration_seconds = 0.0
|
|
340
|
+
if evs:
|
|
341
|
+
t0 = evs[0].timestamp
|
|
342
|
+
t1 = evs[-1].timestamp
|
|
343
|
+
total_duration_seconds = (t1 - t0).total_seconds() if hasattr(t0, "__sub__") else 0.0
|
|
344
|
+
|
|
345
|
+
# Cost estimate
|
|
346
|
+
from devsper.intelligence.analysis.cost_estimator import CostEstimator
|
|
347
|
+
estimated_cost_usd = CostEstimator.estimate(tasks_list, [])
|
|
348
|
+
|
|
349
|
+
# v1.6: model tier breakdown
|
|
350
|
+
model_tier_breakdown: dict[str, int] = {}
|
|
351
|
+
for tid in task_tier:
|
|
352
|
+
t = task_tier[tid]
|
|
353
|
+
model_tier_breakdown[t] = model_tier_breakdown.get(t, 0) + 1
|
|
354
|
+
|
|
355
|
+
# v1.6: theoretical sequential duration and parallelism efficiency
|
|
356
|
+
theoretical_sequential_duration = sum(duration_by_task.values())
|
|
357
|
+
actual_duration = total_duration_seconds
|
|
358
|
+
parallelism_efficiency = (
|
|
359
|
+
(actual_duration / theoretical_sequential_duration)
|
|
360
|
+
if theoretical_sequential_duration and theoretical_sequential_duration > 0
|
|
361
|
+
else None
|
|
362
|
+
)
|
|
363
|
+
|
|
364
|
+
# v1.6: estimated cost without routing (placeholder; full impl would need per-task model)
|
|
365
|
+
estimated_cost_without_routing = None
|
|
366
|
+
|
|
367
|
+
return RunReport(
|
|
368
|
+
run_id=run_id,
|
|
369
|
+
root_task=root_task or "unknown",
|
|
370
|
+
strategy=strategy or "unknown",
|
|
371
|
+
started_at=started_ts or "",
|
|
372
|
+
finished_at=finished_ts or "",
|
|
373
|
+
total_duration_seconds=max(0.0, total_duration_seconds),
|
|
374
|
+
total_tasks=total_tasks,
|
|
375
|
+
completed_tasks=completed_tasks,
|
|
376
|
+
failed_tasks=failed_tasks,
|
|
377
|
+
skipped_tasks=skipped_tasks,
|
|
378
|
+
tasks=tasks_list,
|
|
379
|
+
critical_path=critical_path,
|
|
380
|
+
bottleneck_task_id=bottleneck_task_id,
|
|
381
|
+
tools_called=tools_called,
|
|
382
|
+
tool_success_rate=tool_success_rate,
|
|
383
|
+
estimated_cost_usd=estimated_cost_usd,
|
|
384
|
+
models_used=[],
|
|
385
|
+
peak_parallelism=peak_parallelism,
|
|
386
|
+
plain_english_analysis=None,
|
|
387
|
+
model_tier_breakdown=model_tier_breakdown or None,
|
|
388
|
+
estimated_cost_without_routing=estimated_cost_without_routing,
|
|
389
|
+
theoretical_sequential_duration=theoretical_sequential_duration if theoretical_sequential_duration else None,
|
|
390
|
+
actual_duration=actual_duration,
|
|
391
|
+
parallelism_efficiency=parallelism_efficiency,
|
|
392
|
+
tasks_critiqued=tasks_critiqued,
|
|
393
|
+
tasks_retried_by_critic=tasks_retried_by_critic,
|
|
394
|
+
avg_critique_score=(
|
|
395
|
+
sum(critique_scores) / len(critique_scores) if critique_scores else None
|
|
396
|
+
),
|
|
397
|
+
prefetch_hit_rate=(
|
|
398
|
+
prefetch_hits / (prefetch_hits + prefetch_misses)
|
|
399
|
+
if (prefetch_hits + prefetch_misses) > 0
|
|
400
|
+
else None
|
|
401
|
+
),
|
|
402
|
+
)
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Learning engine: analyze previous swarm runs (telemetry + memory) to improve planning.
|
|
3
|
+
|
|
4
|
+
Detects: which tasks fail, slow tools, patterns. Can adjust planner prompts or suggest optimizations.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from devsper.runtime.telemetry import collect_telemetry
|
|
11
|
+
from devsper.memory.memory_store import get_default_store
|
|
12
|
+
from devsper.memory.memory_types import MemoryType
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class LearningEngine:
|
|
16
|
+
"""
|
|
17
|
+
Analyze past runs via telemetry and memory to suggest improvements:
|
|
18
|
+
failing tasks, slow tools, planner prompt adjustments.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
def __init__(self, events_folder: str = ".devsper/events", memory_store=None) -> None:
|
|
22
|
+
self.events_folder = events_folder
|
|
23
|
+
self.memory_store = memory_store or get_default_store()
|
|
24
|
+
|
|
25
|
+
def analyze_telemetry(self, log_path: str | None = None) -> dict:
|
|
26
|
+
"""
|
|
27
|
+
Collect telemetry from an event log. If log_path is None, use latest from events_folder.
|
|
28
|
+
Returns dict with tasks_completed, tasks_failed, avg_task_duration_seconds, etc.
|
|
29
|
+
"""
|
|
30
|
+
if log_path is None:
|
|
31
|
+
log_path = self._latest_log_path()
|
|
32
|
+
if not log_path or not os.path.exists(log_path):
|
|
33
|
+
return {
|
|
34
|
+
"tasks_completed": 0,
|
|
35
|
+
"tasks_failed": 0,
|
|
36
|
+
"avg_task_duration_seconds": 0.0,
|
|
37
|
+
"avg_agent_latency_seconds": 0.0,
|
|
38
|
+
"max_concurrency": 0,
|
|
39
|
+
"task_success_rate": 0.0,
|
|
40
|
+
}
|
|
41
|
+
return collect_telemetry(log_path)
|
|
42
|
+
|
|
43
|
+
def _latest_log_path(self) -> str | None:
|
|
44
|
+
"""Return path to most recent events jsonl in events_folder."""
|
|
45
|
+
if not os.path.isdir(self.events_folder):
|
|
46
|
+
return None
|
|
47
|
+
files = list(Path(self.events_folder).glob("events_*.jsonl"))
|
|
48
|
+
if not files:
|
|
49
|
+
return None
|
|
50
|
+
files.sort(key=lambda p: p.stat().st_mtime, reverse=True)
|
|
51
|
+
return str(files[0])
|
|
52
|
+
|
|
53
|
+
def get_failure_patterns(self, log_path: str | None = None) -> list[str]:
|
|
54
|
+
"""
|
|
55
|
+
Return list of suggested failure patterns (e.g. "high failure rate", "slow tasks").
|
|
56
|
+
"""
|
|
57
|
+
tele = self.analyze_telemetry(log_path)
|
|
58
|
+
patterns = []
|
|
59
|
+
if tele.get("tasks_failed", 0) > 0 and tele.get("task_success_rate", 1) < 0.8:
|
|
60
|
+
patterns.append("high_failure_rate")
|
|
61
|
+
if tele.get("avg_task_duration_seconds", 0) > 60:
|
|
62
|
+
patterns.append("slow_tasks")
|
|
63
|
+
if tele.get("avg_agent_latency_seconds", 0) > 30:
|
|
64
|
+
patterns.append("slow_agent_latency")
|
|
65
|
+
return patterns
|
|
66
|
+
|
|
67
|
+
def get_planner_suggestions(self, log_path: str | None = None) -> str:
|
|
68
|
+
"""
|
|
69
|
+
Return a string of suggestions to add to planner context (e.g. "Avoid very long tasks").
|
|
70
|
+
"""
|
|
71
|
+
patterns = self.get_failure_patterns(log_path)
|
|
72
|
+
if not patterns:
|
|
73
|
+
return ""
|
|
74
|
+
suggestions = []
|
|
75
|
+
if "high_failure_rate" in patterns:
|
|
76
|
+
suggestions.append("Consider breaking tasks into smaller steps to reduce failures.")
|
|
77
|
+
if "slow_tasks" in patterns:
|
|
78
|
+
suggestions.append("Previous runs had long-running tasks; consider shorter subtasks.")
|
|
79
|
+
if "slow_agent_latency" in patterns:
|
|
80
|
+
suggestions.append("Agent latency was high; consider fewer tool-heavy steps.")
|
|
81
|
+
return " ".join(suggestions)
|
|
82
|
+
|
|
83
|
+
def summarize_memory_for_learning(self, limit: int = 50) -> dict[str, int]:
|
|
84
|
+
"""
|
|
85
|
+
Summarize stored memory by type (for learning dashboard).
|
|
86
|
+
"""
|
|
87
|
+
records = self.memory_store.list_memory(limit=limit)
|
|
88
|
+
by_type: dict[str, int] = {}
|
|
89
|
+
for r in records:
|
|
90
|
+
k = r.memory_type.value
|
|
91
|
+
by_type[k] = by_type.get(k, 0) + 1
|
|
92
|
+
return by_type
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
"""Strategy-based planning: map ExecutionStrategy to DAG-producing strategies."""
|
|
2
|
+
|
|
3
|
+
from devsper.intelligence.strategy_selector import ExecutionStrategy
|
|
4
|
+
from devsper.intelligence.strategies.base import Strategy
|
|
5
|
+
from devsper.intelligence.strategies.code_analysis_strategy import CodeAnalysisStrategy
|
|
6
|
+
from devsper.intelligence.strategies.data_science_strategy import DataScienceStrategy
|
|
7
|
+
from devsper.intelligence.strategies.document_pipeline_strategy import DocumentPipelineStrategy
|
|
8
|
+
from devsper.intelligence.strategies.experiment_strategy import ExperimentStrategy
|
|
9
|
+
from devsper.intelligence.strategies.research_strategy import ResearchStrategy
|
|
10
|
+
|
|
11
|
+
# Maps each specialized ExecutionStrategy to the Strategy class that expands
# it into a task DAG. Enum values without an entry (e.g. GENERAL) have no
# template pipeline; get_strategy_for returns None for them so callers can
# fall back to the LLM planner.
STRATEGY_REGISTRY: dict[ExecutionStrategy, type[Strategy]] = {
    ExecutionStrategy.RESEARCH: ResearchStrategy,
    ExecutionStrategy.CODE_ANALYSIS: CodeAnalysisStrategy,
    ExecutionStrategy.DATA_ANALYSIS: DataScienceStrategy,
    ExecutionStrategy.DOCUMENT: DocumentPipelineStrategy,
    ExecutionStrategy.EXPERIMENT: ExperimentStrategy,
}
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def get_strategy_for(strategy_enum: ExecutionStrategy) -> Strategy | None:
    """Instantiate the strategy registered for *strategy_enum*.

    Returns None when the enum has no registry entry (e.g. GENERAL),
    signalling that the caller should use the default planning path.
    """
    strategy_cls = STRATEGY_REGISTRY.get(strategy_enum)
    if strategy_cls is None:
        return None
    return strategy_cls()
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"""Base strategy: produce a DAG of tasks from a root task."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
|
|
5
|
+
from devsper.types.task import Task
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class Strategy(ABC):
    """Abstract planner that expands a root task into a dependency DAG.

    Concrete subclasses encode domain-specific pipelines; the scheduler
    consumes the returned tasks' dependency lists as graph edges.
    """

    @abstractmethod
    def plan(self, root_task: Task) -> list[Task]:
        """Expand *root_task* into subtasks with dependencies.

        An empty list means no template applies and the caller should
        fall back to the LLM planner.
        """
        ...
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Code analysis strategy: DAG for repository analysis."""
|
|
2
|
+
|
|
3
|
+
import secrets
|
|
4
|
+
|
|
5
|
+
from devsper.types.task import Task
|
|
6
|
+
|
|
7
|
+
from devsper.intelligence.strategies.base import Strategy
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _short_id() -> str:
|
|
11
|
+
return secrets.token_hex(4)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class CodeAnalysisStrategy(Strategy):
    """Code/repo pipeline: index -> structure -> dependencies -> report."""

    def plan(self, root_task: Task) -> list[Task]:
        """Expand *root_task* into a strictly sequential four-step DAG."""
        pipeline = [
            ("index", "Index the codebase and identify main modules and entry points."),
            ("structure", "Analyze project structure and layout."),
            ("dependencies", "Map dependencies and external imports."),
            ("report", "Produce an architecture and code analysis report."),
        ]
        planned: list[Task] = []
        previous_id: str | None = None
        for position, (_label, step_text) in enumerate(pipeline):
            current_id = _short_id()
            # Only the first step carries the user's full root description.
            if position == 0:
                full_text = f"{root_task.description}\n\nStep: {step_text}"
            else:
                full_text = step_text
            deps = [previous_id] if previous_id is not None else []
            planned.append(Task(id=current_id, description=full_text, dependencies=deps))
            previous_id = current_id
        return planned
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Data science strategy: DAG for dataset workflow."""
|
|
2
|
+
|
|
3
|
+
import secrets
|
|
4
|
+
|
|
5
|
+
from devsper.types.task import Task
|
|
6
|
+
|
|
7
|
+
from devsper.intelligence.strategies.base import Strategy
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _short_id() -> str:
|
|
11
|
+
return secrets.token_hex(4)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class DataScienceStrategy(Strategy):
    """Data science pipeline: load -> profile -> analyze -> visualize."""

    def plan(self, root_task: Task) -> list[Task]:
        """Expand *root_task* into a strictly sequential four-step DAG."""
        pipeline = [
            ("load", "Load and validate the dataset."),
            ("profile", "Generate data profile and basic statistics."),
            ("analyze", "Run analysis and identify patterns or models."),
            ("visualize", "Create visualizations and summary reports."),
        ]
        planned: list[Task] = []
        previous_id: str | None = None
        for position, (_label, step_text) in enumerate(pipeline):
            current_id = _short_id()
            # Only the first step carries the user's full root description.
            if position == 0:
                full_text = f"{root_task.description}\n\nStep: {step_text}"
            else:
                full_text = step_text
            deps = [previous_id] if previous_id is not None else []
            planned.append(Task(id=current_id, description=full_text, dependencies=deps))
            previous_id = current_id
        return planned
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Document pipeline strategy: DAG for document processing."""
|
|
2
|
+
|
|
3
|
+
import secrets
|
|
4
|
+
|
|
5
|
+
from devsper.types.task import Task
|
|
6
|
+
|
|
7
|
+
from devsper.intelligence.strategies.base import Strategy
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _short_id() -> str:
|
|
11
|
+
return secrets.token_hex(4)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class DocumentPipelineStrategy(Strategy):
    """Document pipeline: ingest -> extract -> link -> report."""

    def plan(self, root_task: Task) -> list[Task]:
        """Expand *root_task* into a strictly sequential four-step DAG."""
        pipeline = [
            ("ingest", "Ingest documents and extract raw text/metadata."),
            ("extract", "Extract entities, topics, and structure from documents."),
            ("link", "Link entities and build cross-document references."),
            ("report", "Produce a document intelligence report."),
        ]
        planned: list[Task] = []
        previous_id: str | None = None
        for position, (_label, step_text) in enumerate(pipeline):
            current_id = _short_id()
            # Only the first step carries the user's full root description.
            if position == 0:
                full_text = f"{root_task.description}\n\nStep: {step_text}"
            else:
                full_text = step_text
            deps = [previous_id] if previous_id is not None else []
            planned.append(Task(id=current_id, description=full_text, dependencies=deps))
            previous_id = current_id
        return planned
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Experiment strategy: DAG for experiments (setup -> run -> compare -> report)."""
|
|
2
|
+
|
|
3
|
+
import secrets
|
|
4
|
+
|
|
5
|
+
from devsper.types.task import Task
|
|
6
|
+
|
|
7
|
+
from devsper.intelligence.strategies.base import Strategy
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _short_id() -> str:
|
|
11
|
+
return secrets.token_hex(4)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class ExperimentStrategy(Strategy):
    """Experiment pipeline: setup -> run -> compare -> report."""

    def plan(self, root_task: Task) -> list[Task]:
        """Expand *root_task* into a strictly sequential four-step DAG."""
        pipeline = [
            ("setup", "Set up experiment parameters and environment."),
            ("run", "Run the experiment or parameter sweep."),
            ("compare", "Compare results and run statistical checks."),
            ("report", "Generate experiment report and recommendations."),
        ]
        planned: list[Task] = []
        previous_id: str | None = None
        for position, (_label, step_text) in enumerate(pipeline):
            current_id = _short_id()
            # Only the first step carries the user's full root description.
            if position == 0:
                full_text = f"{root_task.description}\n\nStep: {step_text}"
            else:
                full_text = step_text
            deps = [previous_id] if previous_id is not None else []
            planned.append(Task(id=current_id, description=full_text, dependencies=deps))
            previous_id = current_id
        return planned
|