devsper 2.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devsper/__init__.py +14 -0
- devsper/agents/a2a/__init__.py +27 -0
- devsper/agents/a2a/client.py +126 -0
- devsper/agents/a2a/discovery.py +24 -0
- devsper/agents/a2a/server.py +128 -0
- devsper/agents/a2a/tool_adapter.py +68 -0
- devsper/agents/a2a/types.py +49 -0
- devsper/agents/agent.py +602 -0
- devsper/agents/critic.py +80 -0
- devsper/agents/message_bus.py +124 -0
- devsper/agents/roles.py +181 -0
- devsper/agents/run_agent.py +78 -0
- devsper/analytics/__init__.py +5 -0
- devsper/analytics/tool_analytics.py +78 -0
- devsper/audit/__init__.py +5 -0
- devsper/audit/logger.py +214 -0
- devsper/bus/__init__.py +29 -0
- devsper/bus/backends/__init__.py +5 -0
- devsper/bus/backends/base.py +38 -0
- devsper/bus/backends/memory.py +55 -0
- devsper/bus/backends/redis.py +146 -0
- devsper/bus/message.py +56 -0
- devsper/bus/schema_version.py +3 -0
- devsper/bus/topics.py +19 -0
- devsper/cache/__init__.py +6 -0
- devsper/cache/embedding_index.py +98 -0
- devsper/cache/hashing.py +24 -0
- devsper/cache/store.py +153 -0
- devsper/cache/task_cache.py +191 -0
- devsper/cli/__init__.py +6 -0
- devsper/cli/commands/reg.py +733 -0
- devsper/cli/github_oauth.py +157 -0
- devsper/cli/init.py +637 -0
- devsper/cli/main.py +2956 -0
- devsper/cli/run_progress.py +103 -0
- devsper/cli/ui/__init__.py +65 -0
- devsper/cli/ui/components.py +94 -0
- devsper/cli/ui/errors.py +104 -0
- devsper/cli/ui/logging.py +120 -0
- devsper/cli/ui/onboarding.py +102 -0
- devsper/cli/ui/progress.py +43 -0
- devsper/cli/ui/run_view.py +308 -0
- devsper/cli/ui/theme.py +40 -0
- devsper/cluster/__init__.py +29 -0
- devsper/cluster/election.py +84 -0
- devsper/cluster/local.py +97 -0
- devsper/cluster/node_info.py +77 -0
- devsper/cluster/registry.py +71 -0
- devsper/cluster/router.py +117 -0
- devsper/cluster/state_backend.py +105 -0
- devsper/compliance/__init__.py +5 -0
- devsper/compliance/pii.py +147 -0
- devsper/config/__init__.py +52 -0
- devsper/config/config_loader.py +121 -0
- devsper/config/defaults.py +77 -0
- devsper/config/resolver.py +342 -0
- devsper/config/schema.py +237 -0
- devsper/credentials/__init__.py +19 -0
- devsper/credentials/cli.py +197 -0
- devsper/credentials/migration.py +124 -0
- devsper/credentials/store.py +142 -0
- devsper/dashboard/__init__.py +9 -0
- devsper/dashboard/dashboard.py +87 -0
- devsper/dev/__init__.py +25 -0
- devsper/dev/builder.py +195 -0
- devsper/dev/debugger.py +95 -0
- devsper/dev/repo_index.py +138 -0
- devsper/dev/sandbox.py +203 -0
- devsper/dev/scaffold.py +122 -0
- devsper/embeddings/__init__.py +5 -0
- devsper/embeddings/service.py +36 -0
- devsper/explainability/__init__.py +14 -0
- devsper/explainability/decision_tree.py +104 -0
- devsper/explainability/rationale.py +38 -0
- devsper/explainability/simulation.py +56 -0
- devsper/hitl/__init__.py +13 -0
- devsper/hitl/approval.py +160 -0
- devsper/hitl/escalation.py +95 -0
- devsper/intelligence/__init__.py +9 -0
- devsper/intelligence/adaptation.py +88 -0
- devsper/intelligence/analysis/__init__.py +19 -0
- devsper/intelligence/analysis/analyzer.py +71 -0
- devsper/intelligence/analysis/cost_estimator.py +66 -0
- devsper/intelligence/analysis/formatter.py +103 -0
- devsper/intelligence/analysis/run_report.py +402 -0
- devsper/intelligence/learning_engine.py +92 -0
- devsper/intelligence/strategies/__init__.py +23 -0
- devsper/intelligence/strategies/base.py +14 -0
- devsper/intelligence/strategies/code_analysis_strategy.py +33 -0
- devsper/intelligence/strategies/data_science_strategy.py +33 -0
- devsper/intelligence/strategies/document_pipeline_strategy.py +33 -0
- devsper/intelligence/strategies/experiment_strategy.py +33 -0
- devsper/intelligence/strategies/research_strategy.py +34 -0
- devsper/intelligence/strategy_selector.py +84 -0
- devsper/intelligence/synthesis.py +132 -0
- devsper/intelligence/task_optimizer.py +92 -0
- devsper/knowledge/__init__.py +5 -0
- devsper/knowledge/extractor.py +204 -0
- devsper/knowledge/knowledge_graph.py +184 -0
- devsper/knowledge/query.py +285 -0
- devsper/memory/__init__.py +35 -0
- devsper/memory/consolidation.py +138 -0
- devsper/memory/embeddings.py +60 -0
- devsper/memory/memory_index.py +97 -0
- devsper/memory/memory_router.py +62 -0
- devsper/memory/memory_store.py +221 -0
- devsper/memory/memory_types.py +54 -0
- devsper/memory/namespaces.py +45 -0
- devsper/memory/scoring.py +77 -0
- devsper/memory/summarizer.py +52 -0
- devsper/nodes/__init__.py +5 -0
- devsper/nodes/controller.py +449 -0
- devsper/nodes/rpc.py +127 -0
- devsper/nodes/single.py +161 -0
- devsper/nodes/worker.py +506 -0
- devsper/orchestration/__init__.py +19 -0
- devsper/orchestration/meta_planner.py +239 -0
- devsper/orchestration/priority_queue.py +61 -0
- devsper/plugins/__init__.py +19 -0
- devsper/plugins/marketplace/__init__.py +0 -0
- devsper/plugins/plugin_loader.py +70 -0
- devsper/plugins/plugin_registry.py +34 -0
- devsper/plugins/registry.py +83 -0
- devsper/protocols/__init__.py +6 -0
- devsper/providers/__init__.py +17 -0
- devsper/providers/anthropic.py +84 -0
- devsper/providers/base.py +75 -0
- devsper/providers/complexity_router.py +94 -0
- devsper/providers/gemini.py +36 -0
- devsper/providers/github.py +180 -0
- devsper/providers/model_router.py +40 -0
- devsper/providers/openai.py +105 -0
- devsper/providers/router/__init__.py +21 -0
- devsper/providers/router/backends/__init__.py +19 -0
- devsper/providers/router/backends/anthropic_backend.py +111 -0
- devsper/providers/router/backends/custom_backend.py +138 -0
- devsper/providers/router/backends/gemini_backend.py +89 -0
- devsper/providers/router/backends/github_backend.py +165 -0
- devsper/providers/router/backends/ollama_backend.py +104 -0
- devsper/providers/router/backends/openai_backend.py +142 -0
- devsper/providers/router/backends/vllm_backend.py +35 -0
- devsper/providers/router/base.py +60 -0
- devsper/providers/router/factory.py +92 -0
- devsper/providers/router/legacy.py +101 -0
- devsper/providers/router/router.py +135 -0
- devsper/reasoning/__init__.py +12 -0
- devsper/reasoning/graph.py +59 -0
- devsper/reasoning/nodes.py +20 -0
- devsper/reasoning/store.py +67 -0
- devsper/runtime/__init__.py +12 -0
- devsper/runtime/health.py +88 -0
- devsper/runtime/replay.py +53 -0
- devsper/runtime/replay_engine.py +142 -0
- devsper/runtime/run_history.py +204 -0
- devsper/runtime/telemetry.py +116 -0
- devsper/runtime/visualize.py +58 -0
- devsper/sandbox/__init__.py +13 -0
- devsper/sandbox/sandbox.py +161 -0
- devsper/swarm/checkpointer.py +65 -0
- devsper/swarm/executor.py +558 -0
- devsper/swarm/map_reduce.py +44 -0
- devsper/swarm/planner.py +197 -0
- devsper/swarm/prefetcher.py +91 -0
- devsper/swarm/scheduler.py +153 -0
- devsper/swarm/speculation.py +47 -0
- devsper/swarm/swarm.py +562 -0
- devsper/tools/__init__.py +33 -0
- devsper/tools/base.py +29 -0
- devsper/tools/code_intelligence/__init__.py +13 -0
- devsper/tools/code_intelligence/api_surface_extractor.py +73 -0
- devsper/tools/code_intelligence/architecture_analyzer.py +65 -0
- devsper/tools/code_intelligence/codebase_indexer.py +71 -0
- devsper/tools/code_intelligence/dependency_graph_builder.py +67 -0
- devsper/tools/code_intelligence/design_pattern_detector.py +62 -0
- devsper/tools/code_intelligence/large_function_detector.py +68 -0
- devsper/tools/code_intelligence/module_responsibility_mapper.py +56 -0
- devsper/tools/code_intelligence/parallel_codebase_analysis.py +44 -0
- devsper/tools/code_intelligence/refactor_candidate_detector.py +81 -0
- devsper/tools/code_intelligence/repository_semantic_index.py +61 -0
- devsper/tools/code_intelligence/test_coverage_estimator.py +62 -0
- devsper/tools/coding/__init__.py +12 -0
- devsper/tools/coding/analyze_code_complexity.py +48 -0
- devsper/tools/coding/dependency_analyzer.py +42 -0
- devsper/tools/coding/extract_functions.py +38 -0
- devsper/tools/coding/format_python.py +50 -0
- devsper/tools/coding/generate_docstrings.py +40 -0
- devsper/tools/coding/generate_unit_tests.py +42 -0
- devsper/tools/coding/lint_python.py +51 -0
- devsper/tools/coding/refactor_function.py +41 -0
- devsper/tools/coding/repo_structure_map.py +54 -0
- devsper/tools/coding/run_python.py +53 -0
- devsper/tools/data/__init__.py +12 -0
- devsper/tools/data/column_type_detection.py +64 -0
- devsper/tools/data/csv_summary.py +52 -0
- devsper/tools/data/dataframe_filter.py +51 -0
- devsper/tools/data/dataframe_groupby.py +47 -0
- devsper/tools/data/dataframe_stats.py +38 -0
- devsper/tools/data/dataset_sampling.py +55 -0
- devsper/tools/data/dataset_schema.py +45 -0
- devsper/tools/data/json_pretty_print.py +37 -0
- devsper/tools/data/json_query.py +46 -0
- devsper/tools/data/missing_value_report.py +47 -0
- devsper/tools/data_science/__init__.py +13 -0
- devsper/tools/data_science/correlation_heatmap.py +72 -0
- devsper/tools/data_science/dataset_bias_detector.py +49 -0
- devsper/tools/data_science/dataset_distribution_report.py +64 -0
- devsper/tools/data_science/dataset_drift_detector.py +64 -0
- devsper/tools/data_science/dataset_outlier_detector.py +65 -0
- devsper/tools/data_science/dataset_profile.py +76 -0
- devsper/tools/data_science/distributed_dataset_processor.py +54 -0
- devsper/tools/data_science/feature_engineering_suggestions.py +69 -0
- devsper/tools/data_science/feature_importance_estimator.py +82 -0
- devsper/tools/data_science/model_input_validator.py +59 -0
- devsper/tools/data_science/time_series_analyzer.py +57 -0
- devsper/tools/documents/__init__.py +11 -0
- devsper/tools/documents/_docproc.py +56 -0
- devsper/tools/documents/document_to_markdown.py +29 -0
- devsper/tools/documents/extract_document_images.py +39 -0
- devsper/tools/documents/extract_document_text.py +29 -0
- devsper/tools/documents/extract_equations.py +36 -0
- devsper/tools/documents/extract_tables.py +47 -0
- devsper/tools/documents/summarize_document.py +42 -0
- devsper/tools/documents/write_latex_document.py +133 -0
- devsper/tools/documents/write_markdown_document.py +89 -0
- devsper/tools/documents/write_word_document.py +149 -0
- devsper/tools/experiments/__init__.py +13 -0
- devsper/tools/experiments/bootstrap_estimator.py +54 -0
- devsper/tools/experiments/experiment_report_generator.py +50 -0
- devsper/tools/experiments/experiment_tracker.py +36 -0
- devsper/tools/experiments/grid_search_runner.py +50 -0
- devsper/tools/experiments/model_benchmark_runner.py +45 -0
- devsper/tools/experiments/monte_carlo_experiment.py +38 -0
- devsper/tools/experiments/parameter_sweep_runner.py +51 -0
- devsper/tools/experiments/result_comparator.py +58 -0
- devsper/tools/experiments/simulation_runner.py +43 -0
- devsper/tools/experiments/statistical_significance_test.py +56 -0
- devsper/tools/experiments/swarm_map_reduce.py +42 -0
- devsper/tools/filesystem/__init__.py +12 -0
- devsper/tools/filesystem/append_file.py +42 -0
- devsper/tools/filesystem/file_hash.py +40 -0
- devsper/tools/filesystem/file_line_count.py +36 -0
- devsper/tools/filesystem/file_metadata.py +38 -0
- devsper/tools/filesystem/file_preview.py +55 -0
- devsper/tools/filesystem/find_large_files.py +50 -0
- devsper/tools/filesystem/list_directory.py +39 -0
- devsper/tools/filesystem/read_file.py +35 -0
- devsper/tools/filesystem/search_files.py +60 -0
- devsper/tools/filesystem/write_file.py +41 -0
- devsper/tools/flagship/__init__.py +15 -0
- devsper/tools/flagship/distributed_document_analysis.py +77 -0
- devsper/tools/flagship/docproc_corpus_pipeline.py +91 -0
- devsper/tools/flagship/repository_semantic_map.py +99 -0
- devsper/tools/flagship/research_graph_builder.py +111 -0
- devsper/tools/flagship/swarm_experiment_runner.py +86 -0
- devsper/tools/knowledge/__init__.py +10 -0
- devsper/tools/knowledge/citation_graph_builder.py +69 -0
- devsper/tools/knowledge/concept_frequency_analyzer.py +74 -0
- devsper/tools/knowledge/corpus_builder.py +66 -0
- devsper/tools/knowledge/cross_document_entity_linker.py +71 -0
- devsper/tools/knowledge/document_corpus_summary.py +68 -0
- devsper/tools/knowledge/document_topic_extractor.py +58 -0
- devsper/tools/knowledge/knowledge_graph_extractor.py +58 -0
- devsper/tools/knowledge/timeline_extractor.py +59 -0
- devsper/tools/math/__init__.py +12 -0
- devsper/tools/math/calculate_expression.py +52 -0
- devsper/tools/math/correlation.py +44 -0
- devsper/tools/math/distribution_summary.py +39 -0
- devsper/tools/math/histogram.py +53 -0
- devsper/tools/math/linear_regression.py +47 -0
- devsper/tools/math/matrix_multiply.py +38 -0
- devsper/tools/math/mean_std.py +35 -0
- devsper/tools/math/monte_carlo_simulation.py +43 -0
- devsper/tools/math/polynomial_fit.py +40 -0
- devsper/tools/math/random_sample.py +36 -0
- devsper/tools/mcp/__init__.py +23 -0
- devsper/tools/mcp/adapter.py +53 -0
- devsper/tools/mcp/client.py +235 -0
- devsper/tools/mcp/discovery.py +53 -0
- devsper/tools/memory/__init__.py +16 -0
- devsper/tools/memory/delete_memory.py +25 -0
- devsper/tools/memory/list_memory.py +34 -0
- devsper/tools/memory/search_memory.py +36 -0
- devsper/tools/memory/store_memory.py +47 -0
- devsper/tools/memory/summarize_memory.py +41 -0
- devsper/tools/memory/tag_memory.py +47 -0
- devsper/tools/pipelines.py +92 -0
- devsper/tools/registry.py +39 -0
- devsper/tools/research/__init__.py +12 -0
- devsper/tools/research/arxiv_download.py +55 -0
- devsper/tools/research/arxiv_search.py +58 -0
- devsper/tools/research/citation_extractor.py +35 -0
- devsper/tools/research/duckduckgo_search.py +42 -0
- devsper/tools/research/paper_metadata_extractor.py +45 -0
- devsper/tools/research/paper_summarizer.py +41 -0
- devsper/tools/research/research_question_generator.py +39 -0
- devsper/tools/research/topic_cluster.py +46 -0
- devsper/tools/research/web_search.py +47 -0
- devsper/tools/research/wikipedia_lookup.py +50 -0
- devsper/tools/research_advanced/__init__.py +14 -0
- devsper/tools/research_advanced/citation_context_extractor.py +60 -0
- devsper/tools/research_advanced/literature_review_generator.py +79 -0
- devsper/tools/research_advanced/methodology_extractor.py +58 -0
- devsper/tools/research_advanced/paper_contribution_extractor.py +50 -0
- devsper/tools/research_advanced/paper_dataset_identifier.py +49 -0
- devsper/tools/research_advanced/paper_method_comparator.py +62 -0
- devsper/tools/research_advanced/paper_similarity_search.py +69 -0
- devsper/tools/research_advanced/paper_trend_analyzer.py +69 -0
- devsper/tools/research_advanced/parallel_document_analyzer.py +56 -0
- devsper/tools/research_advanced/research_gap_finder.py +71 -0
- devsper/tools/research_advanced/research_topic_mapper.py +69 -0
- devsper/tools/research_advanced/swarm_literature_review.py +58 -0
- devsper/tools/scoring/__init__.py +52 -0
- devsper/tools/scoring/report.py +44 -0
- devsper/tools/scoring/scorer.py +39 -0
- devsper/tools/scoring/selector.py +61 -0
- devsper/tools/scoring/store.py +267 -0
- devsper/tools/selector.py +130 -0
- devsper/tools/system/__init__.py +12 -0
- devsper/tools/system/cpu_usage.py +22 -0
- devsper/tools/system/disk_usage.py +35 -0
- devsper/tools/system/environment_variables.py +29 -0
- devsper/tools/system/memory_usage.py +23 -0
- devsper/tools/system/pip_install.py +44 -0
- devsper/tools/system/pip_search.py +29 -0
- devsper/tools/system/process_list.py +34 -0
- devsper/tools/system/python_package_list.py +40 -0
- devsper/tools/system/run_shell_command.py +51 -0
- devsper/tools/system/system_info.py +26 -0
- devsper/tools/tool_runner.py +122 -0
- devsper/tui/__init__.py +5 -0
- devsper/tui/activity_feed_view.py +73 -0
- devsper/tui/adaptive_tasks_view.py +75 -0
- devsper/tui/agent_role_view.py +35 -0
- devsper/tui/app.py +395 -0
- devsper/tui/dashboard_screen.py +290 -0
- devsper/tui/dev_view.py +99 -0
- devsper/tui/inject_screen.py +73 -0
- devsper/tui/knowledge_graph_view.py +46 -0
- devsper/tui/layout.py +43 -0
- devsper/tui/logs_view.py +83 -0
- devsper/tui/memory_view.py +58 -0
- devsper/tui/performance_view.py +33 -0
- devsper/tui/reasoning_graph_view.py +39 -0
- devsper/tui/results_view.py +139 -0
- devsper/tui/swarm_view.py +37 -0
- devsper/tui/task_detail_screen.py +55 -0
- devsper/tui/task_view.py +103 -0
- devsper/types/event.py +97 -0
- devsper/types/exceptions.py +21 -0
- devsper/types/swarm.py +41 -0
- devsper/types/task.py +80 -0
- devsper/upgrade/__init__.py +21 -0
- devsper/upgrade/changelog.py +124 -0
- devsper/upgrade/cli.py +145 -0
- devsper/upgrade/installer.py +103 -0
- devsper/upgrade/notifier.py +52 -0
- devsper/upgrade/version_check.py +121 -0
- devsper/utils/event_logger.py +88 -0
- devsper/utils/http.py +43 -0
- devsper/utils/models.py +54 -0
- devsper/visualization/__init__.py +5 -0
- devsper/visualization/dag_export.py +67 -0
- devsper/workflow/__init__.py +18 -0
- devsper/workflow/conditions.py +157 -0
- devsper/workflow/context.py +108 -0
- devsper/workflow/loader.py +156 -0
- devsper/workflow/resolver.py +109 -0
- devsper/workflow/runner.py +562 -0
- devsper/workflow/schema.py +63 -0
- devsper/workflow/validator.py +128 -0
- devsper-2.1.6.dist-info/METADATA +346 -0
- devsper-2.1.6.dist-info/RECORD +375 -0
- devsper-2.1.6.dist-info/WHEEL +4 -0
- devsper-2.1.6.dist-info/entry_points.txt +3 -0
- devsper-2.1.6.dist-info/licenses/LICENSE +639 -0
devsper/cli/main.py
ADDED
|
@@ -0,0 +1,2956 @@
|
|
|
1
|
+
"""
|
|
2
|
+
devsper CLI: run, tui, research, analyze, memory, init, doctor, build.
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
devsper run "analyze diffusion models"
|
|
6
|
+
devsper build "fastapi todo app"
|
|
7
|
+
devsper init
|
|
8
|
+
devsper doctor
|
|
9
|
+
devsper tui
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import argparse
|
|
13
|
+
import json
|
|
14
|
+
import os
|
|
15
|
+
import subprocess
|
|
16
|
+
import sys
|
|
17
|
+
import threading
|
|
18
|
+
import time
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _load_project_dotenv() -> None:
|
|
23
|
+
"""Load .env from the project directory (where devsper.toml lives) so API keys are available."""
|
|
24
|
+
try:
|
|
25
|
+
from dotenv import load_dotenv
|
|
26
|
+
from devsper.config.config_loader import project_config_paths
|
|
27
|
+
|
|
28
|
+
for p in project_config_paths():
|
|
29
|
+
if p.is_file():
|
|
30
|
+
load_dotenv(p.parent / ".env")
|
|
31
|
+
break
|
|
32
|
+
except Exception:
|
|
33
|
+
pass
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _project_root() -> Path:
|
|
37
|
+
"""Project root (examples/ parent) for running example scripts."""
|
|
38
|
+
return Path(__file__).resolve().parent.parent.parent
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _run_example(script_path: Path, *args: str) -> int:
    """Execute an example script in a child interpreter and return its exit code.

    The project root is prepended to PYTHONPATH so the script can import the
    devsper package, and the child process runs with the root as its cwd.
    """
    root = _project_root()
    child_env = dict(os.environ)
    child_env["PYTHONPATH"] = os.pathsep.join(
        [str(root), child_env.get("PYTHONPATH", "")]
    )
    completed = subprocess.run(
        [sys.executable, str(script_path), *args],
        cwd=str(root),
        env=child_env,
    )
    return completed.returncode
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _run_swarm(args: object) -> int:
    """Run a swarm for the task carried in *args* and display progress/results.

    Display mode selection:
      * live rich view when stdout is a TTY and neither --quiet nor --plain
        was given;
      * otherwise a one-line status ticker on stderr (suppressed by --quiet);
      * final results as raw JSON on stdout when --json-output was given,
        else as truncated per-task text.

    The swarm runs on a worker thread so the UI can poll the event log
    concurrently. Always returns 0.
    """
    from devsper.config import get_config
    from devsper.utils.event_logger import EventLog
    from devsper.swarm.swarm import Swarm
    from devsper.memory.memory_router import MemoryRouter
    from devsper.memory.memory_store import get_default_store
    from devsper.memory.memory_index import MemoryIndex

    task = getattr(args, "task", "Summarize swarm intelligence in one paragraph.")
    quiet = getattr(args, "quiet", False)
    summary_only = getattr(args, "summary", False)
    json_output = getattr(args, "json_output", False)
    plain = getattr(args, "plain", False) or not sys.stdout.isatty()
    use_live_view = not quiet and not plain and sys.stdout.isatty()

    cfg = get_config()
    event_log = EventLog(events_folder_path=cfg.events_dir)
    log_path = getattr(event_log, "log_path", None)
    memory_router = MemoryRouter(
        store=get_default_store(),
        index=MemoryIndex(get_default_store()),
        top_k=5,
    )
    workers = getattr(cfg.swarm, "workers", 2)
    swarm = Swarm(
        worker_count=workers,
        worker_model=cfg.worker_model,
        planner_model=cfg.planner_model,
        event_log=event_log,
        memory_router=memory_router,
        use_tools=True,
    )
    results_holder: list[dict] = []
    run_id = getattr(event_log, "run_id", "") or ""

    def _print_results(results: dict) -> None:
        # Shared plain renderer (was duplicated in two branches): each task's
        # result, truncated to 2000 characters with a "..." marker.
        from devsper.cli.ui import console

        for task_id, result in results.items():
            console.print(f"--- {task_id} ---")
            console.print((result or "")[:2000])
            if (result or "") and len(result) > 2000:
                console.print("...")

    hitl_resolver = None
    if (
        getattr(getattr(cfg, "hitl", None), "enabled", False)
        and sys.stdout.isatty()
        and not plain
    ):
        from devsper.hitl.approval import ApprovalStore

        _store = ApprovalStore(getattr(cfg, "data_dir", ".devsper"))

        def _prompt_resolver(approval, policy):  # sync, runs in thread
            # Interactive y/n gate for results the HITL policy flags.
            # Any failure (no TTY, rich missing, store error) counts as rejection.
            try:
                from devsper.cli.ui import console

                task_desc = (getattr(approval.task, "description", "") or "")[:60]
                console.print()
                console.print("[hive.warning]Approval required[/]")
                console.print(f" Task: {task_desc}...")
                console.print(f" Trigger: {getattr(approval.trigger, 'type', '?')}")
                preview = (approval.proposed_result or "")[:200]
                if preview:
                    console.print(f" Result preview: {preview}...")
                from rich.prompt import Prompt

                choice = Prompt.ask(
                    "Approve this result? [y/n]", choices=["y", "n"], default="y"
                )
                approved = choice.lower() == "y"
                _store.resolve(approval.request_id, approved, "")
                return approved
            except Exception:
                return False

        hitl_resolver = _prompt_resolver

    def _run() -> None:
        # Worker-thread body: the single swarm run; result lands in the holder.
        results_holder.append(swarm.run(task, hitl_resolver=hitl_resolver))

    thread = threading.Thread(target=_run, daemon=False)
    thread.start()

    if use_live_view:
        try:
            from devsper.cli.ui import run_live_view, print_run_summary

            state = run_live_view(
                log_path=log_path,
                run_id=run_id,
                worker_count=workers,
                stop_check=lambda: not thread.is_alive(),
            )
            thread.join()
            results = results_holder[0] if results_holder else {}
            if json_output:
                # json is imported at module level; no local re-import needed.
                out = {
                    "run_id": state.run_id_short,
                    "tasks": len(state.tasks),
                    "results": results,
                }
                print(json.dumps(out))
            else:
                print_run_summary(state, results, summary_only=summary_only)
        except Exception:
            # Live view failed (e.g. rich unavailable): wait for the run to
            # finish, then fall back to plain truncated output.
            thread.join()
            results = results_holder[0] if results_holder else {}
            _print_results(results)
    else:
        if not quiet:
            from devsper.cli.run_progress import read_run_status

            last_status = ""
            while thread.is_alive():
                status, running = read_run_status(log_path, worker_count=workers)
                line = status
                if len(running) > 1:
                    line += f" (parallel: {len(running)} tasks)"
                if line != last_status:
                    # \r rewrites the ticker line in place on stderr.
                    sys.stderr.write("\r " + line.ljust(70))
                    sys.stderr.flush()
                    last_status = line
                time.sleep(0.3)
            sys.stderr.write("\n")
            sys.stderr.flush()
        thread.join()
        results = results_holder[0] if results_holder else {}
        if json_output:
            print(json.dumps(results))
        else:
            _print_results(results)
    return 0
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def _run_meta(
    mega_task: str, max_swarms: int | None = None, budget: float | None = None
) -> int:
    """Decompose *mega_task* into sub-swarms, execute them, print the synthesis.

    SLA breaches, when any occurred, are listed after the synthesis.
    Always returns 0.
    """
    import asyncio

    from devsper.config import get_config
    from devsper.orchestration import MetaPlanner

    model_name = getattr(get_config().models, "planner", "mock")
    outcome = asyncio.run(
        MetaPlanner(model_name=model_name).run(
            mega_task, max_swarms=max_swarms, budget_usd=budget
        )
    )

    from devsper.cli.ui import console

    console.print(outcome.final_synthesis)
    if outcome.sla_breaches:
        breach_lines = [f"{b.swarm_id}: {b.breach_type}" for b in outcome.sla_breaches]
        console.print("\n[hive.warning]SLA breaches:[/]", breach_lines)
    return 0
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def _run_meta_plan(mega_task: str) -> int:
    """Print the planner's SubSwarmSpec decomposition of *mega_task*; run nothing."""
    import asyncio

    from devsper.config import get_config
    from devsper.orchestration import MetaPlanner

    model_name = getattr(get_config().models, "planner", "mock")
    specs = asyncio.run(MetaPlanner(model_name=model_name).decompose(mega_task))
    try:
        from rich.console import Console
        from rich.table import Table

        table = Table(title="SubSwarmSpecs")
        # Column name plus per-column rich options.
        for header, opts in (
            ("swarm_id", {"style": "cyan"}),
            ("root_task", {"style": "green", "max_width": 50}),
            ("priority", {}),
            ("workers", {}),
            ("depends_on", {}),
        ):
            table.add_column(header, **opts)
        for spec in specs:
            table.add_row(
                spec.swarm_id,
                (spec.root_task or "")[:50],
                str(spec.priority),
                str(spec.worker_count),
                ",".join(spec.depends_on) or "-",
            )
        Console().print(table)
    except ImportError:
        # rich unavailable: plain two-line dump per spec.
        from devsper.cli.ui import console

        for spec in specs:
            console.print(
                f" {spec.swarm_id}: priority={spec.priority} workers={spec.worker_count} deps={spec.depends_on}"
            )
            console.print(f" task: {(spec.root_task or '')[:80]}")
    return 0
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
def _run_approvals_list() -> int:
    """Print pending approval requests as a table (plain lines when rich is absent)."""
    from devsper.config import get_config
    from devsper.hitl.approval import ApprovalStore

    pending = ApprovalStore(get_config().data_dir).list_pending()
    try:
        from rich.console import Console
        from rich.table import Table

        table = Table(title="Pending approvals")
        table.add_column("request_id", style="cyan")
        table.add_column("task", style="green", max_width=40)
        table.add_column("trigger", style="yellow")
        table.add_column("created")
        table.add_column("expires")
        table.add_column("status")
        for req in pending:
            table.add_row(
                req.request_id[:12],
                (getattr(req.task, "description", "") or "")[:40],
                str(req.trigger.type),
                req.created_at[:19],
                req.expires_at[:19],
                req.status,
            )
        Console().print(table)
    except ImportError:
        # rich unavailable: one compact line per request.
        from devsper.cli.ui import console

        for req in pending:
            console.print(
                f" {req.request_id} {getattr(req.task, 'description', '')[:50]} {req.trigger.type} {req.status}"
            )
    return 0
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
def _run_approvals_show(request_id: str) -> int:
    """Print every field of one approval request; return 1 when it is unknown."""
    from devsper.config import get_config
    from devsper.hitl.approval import ApprovalStore

    req = ApprovalStore(get_config().data_dir).get(request_id)
    if req is None:
        from devsper.cli.ui import err_console

        err_console.print(f"No approval request found: {request_id}")
        return 1

    from devsper.cli.ui import console

    console.print("Request ID:", req.request_id)
    console.print("Task:", getattr(req.task, "description", ""))
    # Proposed result is capped at 500 chars to keep the output readable.
    console.print("Proposed result:", (req.proposed_result or "")[:500])
    console.print("Trigger:", req.trigger.type, req.trigger.threshold)
    console.print("Created:", req.created_at, "Expires:", req.expires_at)
    console.print("Status:", req.status)
    if req.reviewer_notes:
        console.print("Notes:", req.reviewer_notes)
    return 0
|
|
325
|
+
|
|
326
|
+
|
|
327
|
+
def _run_approvals_approve(request_id: str, notes: str = "") -> int:
    """Resolve the approval request *request_id* as approved, with optional notes."""
    from devsper.config import get_config
    from devsper.hitl.approval import ApprovalStore

    ApprovalStore(get_config().data_dir).resolve(request_id, approved=True, notes=notes)
    from devsper.cli.ui import console

    console.print(f"[hive.success]Approved[/] {request_id}")
    return 0
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
def _run_approvals_reject(request_id: str, notes: str = "") -> int:
    """Resolve the approval request *request_id* as rejected, with optional notes."""
    from devsper.config import get_config
    from devsper.hitl.approval import ApprovalStore

    ApprovalStore(get_config().data_dir).resolve(request_id, approved=False, notes=notes)
    from devsper.cli.ui import console

    console.print(f"[hive.error]Rejected[/] {request_id}")
    return 0
|
|
351
|
+
|
|
352
|
+
|
|
353
|
+
def _run_approvals_watch() -> int:
    """Continuously display pending approvals, refreshing every 10 seconds.

    Uses a rich Live table when rich is installed; otherwise prints plain
    lines. Runs until interrupted; Ctrl+C now exits cleanly with status 0
    instead of dumping a KeyboardInterrupt traceback. The redundant local
    ``import time`` was removed (time is imported at module level).
    """
    from devsper.config import get_config
    from devsper.hitl.approval import ApprovalStore

    cfg = get_config()
    store = ApprovalStore(cfg.data_dir)
    try:
        try:
            from rich.console import Console
            from rich.live import Live
            from rich.table import Table
        except ImportError:
            # Plain-text fallback: dump the pending queue every 10 seconds.
            while True:
                pending = store.list_pending()
                for r in pending:
                    print(r.request_id, getattr(r.task, "description", "")[:40], r.status)
                time.sleep(10)

        c = Console()

        def make_table():
            # Build a fresh snapshot table of the pending queue.
            pending = store.list_pending()
            t = Table(title="Pending approvals (refresh 10s)")
            t.add_column("request_id")
            t.add_column("task", max_width=50)
            t.add_column("trigger")
            t.add_column("status")
            for r in pending:
                t.add_row(
                    r.request_id[:14],
                    (getattr(r.task, "description", "") or "")[:50],
                    str(r.trigger.type),
                    r.status,
                )
            return t

        with Live(make_table(), refresh_per_second=0.1, console=c) as live:
            while True:
                time.sleep(10)
                live.update(make_table())
    except KeyboardInterrupt:
        # Ctrl+C is the normal way to leave a watch loop; exit quietly.
        return 0
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
def _run_tui() -> int:
    """Launch the TUI, pointed at the configured events directory."""
    from devsper.config import get_config
    from devsper.tui.app import run_tui

    run_tui(events_folder=get_config().events_dir)
    return 0
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
def _run_research(path: str) -> int:
    """Run literature review example on a directory."""
    example = _project_root() / "examples" / "research" / "literature_review.py"
    if example.exists():
        # Default to the current directory when no path was given.
        return _run_example(example, path or ".")
    print(
        "Error: examples/research/literature_review.py not found", file=sys.stderr
    )
    return 1
|
|
417
|
+
|
|
418
|
+
|
|
419
|
+
def _run_analyze(path: str) -> int:
    """Run repository analysis example."""
    example = _project_root() / "examples" / "coding" / "analyze_repository.py"
    if example.exists():
        # Default to the current directory when no path was given.
        return _run_example(example, path or ".")
    print("Error: examples/coding/analyze_repository.py not found", file=sys.stderr)
    return 1
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
def _run_analyze_dispatch(args: object) -> int:
    """Dispatch: run_id -> run analysis; path (., /path) -> repo analysis."""
    target = getattr(args, "run_id_or_path", None)
    no_ai = getattr(args, "no_ai", False)
    json_out = getattr(args, "analyze_json", False)

    # No argument (or a blank string): show a hint listing recent runs.
    if target is None or (isinstance(target, str) and not target.strip()):
        from rich.console import Console
        from devsper.runtime.run_history import RunHistory

        console = Console()
        rows = RunHistory().list_runs(limit=5)
        if rows:
            console.print(
                "Recent runs (use [cyan]devsper analyze <run_id>[/] for run analysis):"
            )
            for row in rows[:5]:
                console.print(f"  [dim]{row.run_id}[/]")
        else:
            console.print(
                "No runs yet. Use [cyan]devsper analyze <run_id>[/] after a run, or [cyan]devsper analyze .[/] for repo analysis."
            )
        return 0

    candidate = str(target).strip()
    # Anything path-like (dots, slashes, or an existing filesystem entry)
    # is treated as a repository path rather than a run id.
    if candidate in (".", "..") or "/" in candidate or os.path.exists(candidate):
        return _run_analyze(candidate)
    return _run_analyze_run(candidate, no_ai=no_ai, json_output=json_out)
|
|
457
|
+
|
|
458
|
+
|
|
459
|
+
def _run_analyze_run(
    run_id: str,
    no_ai: bool = False,
    json_output: bool = False,
) -> int:
    """Analyze a swarm run: load events, build report, optional LLM analysis.

    Args:
        run_id: Identifier of the run whose events to load.
        no_ai: When True, skip the streaming LLM "plain English" analysis.
        json_output: When True, emit the report as JSON and return early.

    Returns:
        0 on success; 1 when the run's events cannot be found or parsed.
    """
    from devsper.config import get_config
    # NOTE(review): CostEstimator and RunReport are imported here but not
    # referenced in this function body — possibly vestigial.
    from devsper.intelligence.analysis import (
        build_report_from_events,
        analyze,
        print_run_report,
        RunReport,
    )
    from devsper.intelligence.analysis.cost_estimator import CostEstimator
    from rich.console import Console
    from rich.panel import Panel

    cfg = get_config()
    events_dir = cfg.events_dir
    console = Console()

    # Both failure modes (missing events file, malformed events) print the
    # same error shape and exit with status 1.
    try:
        report = build_report_from_events(run_id, events_dir)
    except FileNotFoundError as e:
        console.print(f"[red]Error:[/] {e}")
        return 1
    except ValueError as e:
        console.print(f"[red]Error:[/] {e}")
        return 1

    if json_output:
        import json
        # NOTE(review): asdict and TaskSummary are imported but unused below.
        from dataclasses import asdict
        from devsper.intelligence.analysis.run_report import TaskSummary

        def _serialize(obj):
            # Enums expose .value; dataclasses are walked field-by-field;
            # everything else is passed through for json.dumps to handle.
            if hasattr(obj, "value"):
                return obj.value
            if hasattr(obj, "__dataclass_fields__"):
                return {
                    k: _serialize(getattr(obj, k)) for k in obj.__dataclass_fields__
                }
            return obj

        # Flatten the report into a plain dict for JSON output.
        out = {
            "run_id": report.run_id,
            "root_task": report.root_task,
            "strategy": report.strategy,
            "started_at": report.started_at,
            "finished_at": report.finished_at,
            "total_duration_seconds": report.total_duration_seconds,
            "total_tasks": report.total_tasks,
            "completed_tasks": report.completed_tasks,
            "failed_tasks": report.failed_tasks,
            "skipped_tasks": report.skipped_tasks,
            "critical_path": report.critical_path,
            "bottleneck_task_id": report.bottleneck_task_id,
            "tools_called": report.tools_called,
            "tool_success_rate": report.tool_success_rate,
            "estimated_cost_usd": report.estimated_cost_usd,
            "models_used": report.models_used,
            "peak_parallelism": report.peak_parallelism,
            "tasks": [
                {
                    "task_id": t.task_id,
                    "description": t.description,
                    "role": t.role,
                    # Status is likely an enum — serialize via .value.
                    "status": _serialize(t.status),
                    "duration_seconds": t.duration_seconds,
                    "tools_used": t.tools_used,
                    "tool_failures": t.tool_failures,
                    "tokens_used": t.tokens_used,
                    "retry_count": t.retry_count,
                    "error": t.error,
                }
                for t in report.tasks
            ],
        }
        console.print(json.dumps(out, indent=2))
        return 0

    # Human-readable path: render the report, then optionally stream an
    # LLM-generated narrative analysis.
    print_run_report(report, console)
    if not no_ai:
        # Prefer cfg.worker_model; fall back to cfg.models.worker, then a default.
        worker_model = getattr(cfg, "worker_model", None) or getattr(
            cfg.models, "worker", "gpt-4o-mini"
        )
        from devsper.utils.models import resolve_model

        worker_model = resolve_model(worker_model, "analysis")
        # Stream tokens to the console as they arrive, then re-print the
        # full text inside a panel once complete.
        analysis_text = analyze(
            report,
            worker_model,
            stream_callback=lambda c: console.print(c, end=""),
        )
        report.plain_english_analysis = analysis_text
        console.print()
        console.print(
            Panel(analysis_text, title="Plain-English Analysis", border_style="dim")
        )
    return 0
|
|
559
|
+
|
|
560
|
+
|
|
561
|
+
def _run_runs(args: object) -> int:
    """List run history (Rich table) or run-analyze <run_id> --no-ai when run_id given.

    Args:
        args: Parsed CLI namespace; reads run_id, limit, failed, runs_json.

    Returns:
        0 on success (delegates to ``_run_analyze_run`` when a run_id is given).
    """
    run_id = getattr(args, "run_id", None)
    if run_id and str(run_id).strip():
        # Specific run requested: show its (non-AI) analysis instead of a list.
        return _run_analyze_run(str(run_id).strip(), no_ai=True, json_output=False)
    from devsper.runtime.run_history import RunHistory

    limit = getattr(args, "limit", 20)
    failed = getattr(args, "failed", False)
    json_out = getattr(args, "runs_json", False)
    history = RunHistory()
    # --failed narrows the listing to failed runs only.
    filter_status = "failed" if failed else None
    rows = history.list_runs(limit=limit, filter_status=filter_status)
    if json_out:
        import json

        out = [
            {
                "run_id": r.run_id,
                "root_task": r.root_task[:200],
                "strategy": r.strategy,
                "started_at": r.started_at,
                "duration_seconds": r.duration_seconds,
                "total_tasks": r.total_tasks,
                "completed_tasks": r.completed_tasks,
                "failed_tasks": r.failed_tasks,
                "estimated_cost_usd": r.estimated_cost_usd,
            }
            for r in rows
        ]
        print(json.dumps(out, indent=2))
        return 0
    from rich.console import Console
    from rich.table import Table

    console = Console()
    table = Table(title="Run history")
    table.add_column("Run ID", style="dim", max_width=36, overflow="fold")
    table.add_column("Task", max_width=40, overflow="fold")
    table.add_column("Strategy", width=10)
    table.add_column("Status", width=14)
    table.add_column("Duration", justify="right", width=10)
    table.add_column("Tasks", justify="right", width=6)
    table.add_column("Cost", justify="right", width=10)
    table.add_column("Date", style="dim", width=24)
    for r in rows:
        # Truncate long ids/tasks with an ellipsis for display.
        short_id = r.run_id[:32] + "…" if len(r.run_id) > 32 else r.run_id
        task_preview = (r.root_task or "")[:40] + (
            "…" if len(r.root_task or "") > 40 else ""
        )
        # Mixed success/failure -> partial; any failure with no successes -> failed.
        if r.failed_tasks > 0 and r.completed_tasks > 0:
            status = "[yellow]⚠ partial[/]"
        elif r.failed_tasks > 0:
            status = "[red]✗ failed[/]"
        else:
            status = "[green]✓ completed[/]"
        dur = f"{r.duration_seconds:.1f}s"
        tasks = f"{r.completed_tasks}/{r.total_tasks}"
        # Cost may be unknown (None) for runs without usage data.
        cost = (
            f"${r.estimated_cost_usd:.4f}" if r.estimated_cost_usd is not None else "—"
        )
        date = (r.started_at or "")[:24]
        table.add_row(
            short_id, task_preview, r.strategy or "—", status, dur, tasks, cost, date
        )
    if rows:
        console.print(table)
    else:
        console.print(
            'No runs recorded. Run a swarm first (e.g. [cyan]devsper run "task"[/]).'
        )
    return 0
|
|
633
|
+
|
|
634
|
+
|
|
635
|
+
def _workflow_dispatch(args: object) -> int:
    """Dispatch workflow list | validate | run | <name>."""
    first = getattr(args, "first", None)
    second = getattr(args, "second", None)
    extra_inputs = getattr(args, "input", None) or []

    if first == "list":
        return _workflow_list()
    if first == "validate":
        return _workflow_validate(second or "")
    if first == "run":
        return _workflow_run(second or "", extra_inputs)
    if first:
        # Bare name: "devsper workflow <name>" behaves like "workflow run <name>".
        return _workflow_run(first, extra_inputs)
    # No subcommand at all: default to listing.
    return _workflow_list()
|
|
650
|
+
|
|
651
|
+
|
|
652
|
+
def _workflow_list() -> int:
    """List all defined workflows with name, version, step count, description."""
    try:
        from rich.console import Console
        from rich.table import Table
        from devsper.workflow.loader import list_workflows, load_workflow
    except ImportError:
        # Rich missing: re-import the loader (the try block may have failed
        # before reaching it) and fall back to plain-text output.
        from devsper.workflow.loader import list_workflows, load_workflow

        for wf_name in list_workflows():
            wf = load_workflow(wf_name)
            if wf:
                print(
                    f"{wf.name} v{wf.version} steps={len(wf.steps)} {wf.description or ''}"
                )
        return 0

    console = Console()
    names = list_workflows()
    if not names:
        console.print(
            "No workflows defined. Add [workflow] to workflow.devsper.toml or devsper.toml."
        )
        return 0
    table = Table(title="Workflows")
    table.add_column("Name", style="cyan")
    table.add_column("Version", style="dim")
    table.add_column("Steps", justify="right")
    table.add_column("Description", style="dim")
    for wf_name in names:
        wf = load_workflow(wf_name)
        if wf:
            table.add_row(
                wf.name,
                wf.version,
                str(len(wf.steps)),
                (wf.description or "")[:60],
            )
    console.print(table)
    return 0
|
|
692
|
+
|
|
693
|
+
|
|
694
|
+
def _workflow_validate(name: str) -> int:
    """Validate workflow by name. Exit 0 if valid, 1 if errors.

    Loads the named workflow and prints its validation report (errors,
    warnings, info) with Rich formatting when available, falling back to
    plain text. Fix: dropped the unused ``ValidationReport`` import.

    Args:
        name: Workflow name as registered with the loader.

    Returns:
        0 when the report is valid, 1 when the workflow is missing or invalid.
    """
    from devsper.workflow.loader import load_workflow
    from devsper.workflow.validator import validate_workflow

    wf = load_workflow(name)
    if not wf:
        print(f"Workflow '{name}' not found.", file=sys.stderr)
        return 1
    report = validate_workflow(wf)
    try:
        from rich.console import Console
        from rich.markup import escape

        console = Console()
        # escape() keeps user-authored text from being parsed as Rich markup.
        if report.errors:
            for e in report.errors:
                console.print(f"[red]✗[/red] {escape(e)}")
        if report.warnings:
            for w in report.warnings:
                console.print(f"[yellow]⚠[/yellow] {escape(w)}")
        if report.info:
            for i in report.info:
                console.print(f"[dim]ℹ[/dim] {escape(i)}")
        if report.valid and not report.errors:
            console.print("[green]✓[/green] Validation passed.")
        elif report.errors:
            console.print("[red]✗[/red] Validation failed.")
    except ImportError:
        # Plain-text fallback: errors/warnings to stderr, info to stdout.
        for e in report.errors:
            print(f"✗ {e}", file=sys.stderr)
        for w in report.warnings:
            print(f"⚠ {w}", file=sys.stderr)
        for i in report.info:
            print(f"ℹ {i}")
        if report.valid:
            print("✓ Validation passed.")
        else:
            print("✗ Validation failed.", file=sys.stderr)
    return 0 if report.valid else 1
|
|
734
|
+
|
|
735
|
+
|
|
736
|
+
def _workflow_run(name: str, input_pairs: list[str]) -> int:
    """Run workflow by name with optional --input key=value. Print summary table after.

    Args:
        name: Workflow name as registered with the loader.
        input_pairs: Raw ``--input`` strings; "k=v" becomes {"k": "v"},
            a bare token becomes {token: ""}.

    Returns:
        0 on success (even if individual steps failed — see summary table);
        1 when the workflow is missing or the runner rejects its inputs.
    """
    from devsper.config import get_config
    from devsper.memory.memory_router import MemoryRouter
    from devsper.memory.memory_store import get_default_store
    from devsper.memory.memory_index import MemoryIndex
    from devsper.workflow.loader import load_workflow
    from devsper.workflow.runner import WorkflowRunner

    wf = load_workflow(name)
    if not wf:
        print(f"Workflow '{name}' not found.", file=sys.stderr)
        return 1
    # Parse --input pairs; split only on the first '=' so values may contain '='.
    inputs = {}
    for pair in input_pairs:
        if "=" in pair:
            k, v = pair.split("=", 1)
            inputs[k.strip()] = v.strip()
        else:
            inputs[pair.strip()] = ""
    cfg = get_config()
    # NOTE(review): get_default_store() is called twice — presumably it
    # returns a shared/singleton store; confirm.
    memory_router = MemoryRouter(
        store=get_default_store(),
        index=MemoryIndex(get_default_store()),
        top_k=5,
    )
    runner = WorkflowRunner()
    try:
        ctx = runner.run(
            wf,
            inputs=inputs,
            worker_model=cfg.worker_model,
            worker_count=getattr(cfg.swarm, "workers", 2),
            memory_router=memory_router,
            use_tools=True,
        )
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1
    # Per-step summary: Rich table when available, plain text otherwise.
    try:
        from rich.console import Console
        from rich.table import Table

        console = Console()
        table = Table(title="Workflow run summary")
        table.add_column("Step", style="cyan")
        table.add_column("Status", justify="center")
        table.add_column("Duration", justify="right")
        table.add_column("Note", style="dim")
        for step_id, sr in ctx.steps.items():
            if sr.skipped:
                status = "[yellow]skipped[/yellow]"
            elif sr.error:
                status = "[red]failed[/red]"
            else:
                status = "[green]completed[/green]"
            table.add_row(
                step_id,
                status,
                f"{sr.duration_seconds:.2f}s",
                sr.error or ("(skipped)" if sr.skipped else ""),
            )
        console.print(table)
    except ImportError:
        for step_id, sr in ctx.steps.items():
            status = (
                "skipped" if sr.skipped else ("failed" if sr.error else "completed")
            )
            print(
                f"  {step_id} {status} {sr.duration_seconds:.2f}s {sr.error or ''}"
            )
    # Dump each successful step's raw result, truncated to 2000 chars.
    for step_id, sr in ctx.steps.items():
        if not sr.skipped and not sr.error and sr.raw_result:
            print(f"\n--- {step_id} ---")
            print((sr.raw_result or "")[:2000])
            if (sr.raw_result or "") and len(sr.raw_result) > 2000:
                print("...")
    return 0
|
|
814
|
+
|
|
815
|
+
|
|
816
|
+
def _run_query(query_str: str) -> int:
    """Query the knowledge graph: entity search and relationship traversal."""
    from devsper.knowledge.knowledge_graph import KnowledgeGraph
    from devsper.knowledge.query import query as kg_query
    from devsper.memory.memory_store import get_default_store

    graph = KnowledgeGraph(store=get_default_store())
    graph.build_from_memory()
    result = kg_query(graph, query_str or "")

    if not (result.entities or result.edges or result.documents):
        print("No matching entities or documents.")
        return 0
    if result.entities:
        print("Entities:")
        for node_id, label in result.entities[:30]:
            print(f"  {node_id}  {label[:80]}")
    if result.edges:
        print("\nRelationships:")
        for a, b, et in result.edges[:30]:
            print(f"  {a} --[{et}]--> {b}")
    if result.documents:
        print("\nDocuments mentioning query:")
        for doc_id in result.documents[:20]:
            print(f"  {doc_id}")
    return 0
|
|
842
|
+
|
|
843
|
+
|
|
844
|
+
def _run_init(no_interactive: bool = False) -> int:
    """Run init: wizard with welcome screen (interactive) or minimal config (--no-interactive).

    Returns the exit code of whichever init flow handled the request.
    """
    try:
        from devsper.cli.ui.onboarding import run_init_wizard

        return run_init_wizard(no_interactive=no_interactive)
    except ImportError:
        # Onboarding UI unavailable (optional UI deps missing): fall back to
        # the plain init flow. Note the flag is inverted on the fallback API.
        from devsper.cli.init import run_init

        return run_init(interactive=not no_interactive)
|
|
854
|
+
|
|
855
|
+
|
|
856
|
+
def _run_credentials(args: object) -> int:
    """Run credentials subcommand: set, list, delete, migrate.

    Thin wrapper: delegates the parsed args straight to the credentials CLI
    and returns its exit code.
    """
    from devsper.credentials.cli import run_credentials

    return run_credentials(args)
|
|
861
|
+
|
|
862
|
+
|
|
863
|
+
def _run_doctor() -> int:
    """Run doctor subcommand: verify GITHUB_TOKEN, OpenAI, config, tools.

    Thin wrapper: delegates to the init module's doctor and returns its
    exit code.
    """
    from devsper.cli.init import run_doctor

    return run_doctor()
|
|
868
|
+
|
|
869
|
+
|
|
870
|
+
def _run_mcp_list() -> int:
    """List configured MCP servers and their tool counts (from live discovery)."""
    from devsper.config import get_config
    from devsper.tools.mcp import discover_mcp_tools

    cfg = get_config()
    servers = getattr(getattr(cfg, "mcp", None), "servers", None) or []
    try:
        from rich.console import Console
        from rich.table import Table

        console = Console()
        table = Table(title="MCP servers")
        table.add_column("Name", style="cyan")
        table.add_column("Transport", style="dim")
        table.add_column("Tools", justify="right")
        for srv in servers:
            srv_name = getattr(srv, "name", "?")
            # Tool count comes from a live discovery call; show a dash on failure.
            try:
                count = len(discover_mcp_tools(srv))
            except Exception:
                count = "—"
            table.add_row(srv_name, getattr(srv, "transport", "?"), str(count))
        if servers:
            console.print(table)
        else:
            console.print(
                "No MCP servers configured. Add [[mcp.servers]] to devsper.toml or use [cyan]devsper mcp add[/]."
            )
    except ImportError:
        # Rich unavailable: minimal plain listing.
        for srv in servers:
            print(getattr(srv, "name", "?"), getattr(srv, "transport", "?"))
    return 0
|
|
904
|
+
|
|
905
|
+
|
|
906
|
+
def _run_mcp_test(server_name: str) -> int:
    """Connect to server, list tools, print names and descriptions. Exit 1 if connection fails."""
    from devsper.config import get_config

    cfg = get_config()
    configured = getattr(getattr(cfg, "mcp", None), "servers", None) or []
    # Find the first configured server matching the requested name.
    server = None
    for candidate in configured:
        if getattr(candidate, "name", "") == server_name:
            server = candidate
            break
    if not server:
        print(
            f"Error: MCP server '{server_name}' not found in config.", file=sys.stderr
        )
        return 1
    try:
        from devsper.tools.mcp import discover_mcp_tools

        adapters = discover_mcp_tools(server)
        print(f"Connected to '{server_name}'. Tools: {len(adapters)}")
        for a in adapters:
            print(
                f"  - {getattr(a, '_mcp_tool_name', a.name)}: {(a.description or '')[:80]}"
            )
        return 0
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1
|
|
931
|
+
|
|
932
|
+
|
|
933
|
+
def _run_mcp_add() -> int:
    """Interactively add an MCP server entry to the project devsper.toml.

    Prompts for server name, transport, and command/URL, then appends a
    ``[[mcp.servers]]`` table to the first existing project config file.
    Fixes: removed the unused ``pathlib.Path`` import; the "no config"
    error no longer embeds Rich markup in a plain ``print``.

    Returns:
        0 on success; 1 when no config file exists or on any prompt/IO error.
    """
    from devsper.config.config_loader import project_config_paths

    # Locate the first existing project config file.
    config_path = None
    for p in project_config_paths():
        if p.is_file():
            config_path = p
            break
    if not config_path:
        print(
            "Error: No devsper.toml found. Run 'devsper init' first.",
            file=sys.stderr,
        )
        return 1
    try:
        name = input("Server name (e.g. filesystem): ").strip() or "mcp-server"
        transport = (
            input("Transport (stdio|http|sse) [stdio]: ").strip().lower() or "stdio"
        )
        if transport == "stdio":
            cmd_str = input(
                "Command (space-separated, e.g. npx -y @modelcontextprotocol/server-filesystem /tmp): "
            ).strip()
            command = cmd_str.split() if cmd_str else []
            url = None
        else:
            command = None
            url = input("URL (e.g. http://localhost:3000): ").strip() or None
        toml = config_path.read_text()
        # Append a [[mcp.servers]] entry. NOTE(review): values are inserted
        # verbatim — a name/URL containing a double quote would break the TOML.
        entry = f'\n[[mcp.servers]]\nname = "{name}"\ntransport = "{transport}"\n'
        if command:
            # A JSON array of strings is also valid TOML array syntax.
            entry += f"command = {json.dumps(command)}\n"
        if url:
            entry += f'url = "{url}"\n'
        if "\n[mcp]" not in toml and "[[mcp.servers]]" not in toml:
            # First MCP entry: introduce the [mcp] table header as well.
            toml = toml.rstrip() + "\n\n[mcp]\n" + entry.lstrip()
        else:
            toml = toml.rstrip() + "\n" + entry
        config_path.write_text(toml)
        print(f"Added MCP server '{name}' to {config_path}.")
        return 0
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1
|
|
980
|
+
|
|
981
|
+
|
|
982
|
+
def _run_a2a_serve(port: int | None) -> int:
    """Start A2A server, print AgentCard URL."""
    from devsper.config import get_config
    from devsper.agents.a2a.server import run_a2a_server

    cfg = get_config()
    if port is not None:
        chosen_port = port
    else:
        chosen_port = getattr(getattr(cfg, "a2a", None), "serve_port", 8080)
    swarm_name = getattr(getattr(cfg, "swarm", None), "name", None) or "devsper"
    # Announce both the server root and the well-known AgentCard endpoint.
    print(f"A2A server starting at http://localhost:{chosen_port}", file=sys.stderr)
    print(
        f"AgentCard: http://localhost:{chosen_port}/.well-known/agent.json",
        file=sys.stderr,
    )
    run_a2a_server(host="0.0.0.0", port=chosen_port, swarm_name=swarm_name or "")
    return 0
|
|
998
|
+
|
|
999
|
+
|
|
1000
|
+
def _run_a2a_discover(url: str) -> int:
    """Fetch AgentCard, print skills, optionally add to config."""
    try:
        import asyncio
        from devsper.agents.a2a.client import A2AClient

        a2a_client = A2AClient()
        card = asyncio.run(a2a_client.get_agent_card(url))
        print(f"Name: {card.name}")
        print(f"Description: {card.description}")
        print(f"Skills: {len(card.skills)}")
        for skill in card.skills:
            # Truncate long skill descriptions for the one-line listing.
            desc = (skill.description or "")[:60]
            if len(skill.description or "") > 60:
                desc += "..."
            print(f"  - {skill.id}: {skill.name} — {desc}")
        return 0
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1
|
|
1021
|
+
|
|
1022
|
+
|
|
1023
|
+
def _run_a2a_call(url: str, task: str) -> int:
    """Send task to external A2A agent, stream output."""
    try:
        import asyncio
        import uuid
        from devsper.agents.a2a.client import A2AClient
        from devsper.agents.a2a.types import A2ATaskRequest

        a2a_client = A2AClient()
        task_request = A2ATaskRequest(
            id=str(uuid.uuid4()), message={"text": task}, session_id=None
        )

        async def _consume():
            # Print chunks as they arrive, unbuffered, with no separators.
            async for piece in a2a_client.stream_task(url, task_request):
                print(piece, end="", flush=True)

        asyncio.run(_consume())
        print()
        return 0
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1
|
|
1046
|
+
|
|
1047
|
+
|
|
1048
|
+
def _run_node_start(args) -> int:
|
|
1049
|
+
"""Start a node in the foreground (controller, worker, or hybrid)."""
|
|
1050
|
+
try:
|
|
1051
|
+
role = getattr(args, "role", "hybrid")
|
|
1052
|
+
port = getattr(args, "port", None)
|
|
1053
|
+
workers = getattr(args, "workers", None)
|
|
1054
|
+
tags = getattr(args, "tags", "") or ""
|
|
1055
|
+
print(
|
|
1056
|
+
f"Node role: {role}, port: {port or 'config default'}, workers: {workers or 'config default'}",
|
|
1057
|
+
file=sys.stderr,
|
|
1058
|
+
)
|
|
1059
|
+
if tags:
|
|
1060
|
+
print(
|
|
1061
|
+
f"Tags: {[t.strip() for t in tags.split(',') if t.strip()]}",
|
|
1062
|
+
file=sys.stderr,
|
|
1063
|
+
)
|
|
1064
|
+
print(
|
|
1065
|
+
"Distributed node start: set nodes.mode=distributed and nodes.role in devsper.toml, then run your process.",
|
|
1066
|
+
file=sys.stderr,
|
|
1067
|
+
)
|
|
1068
|
+
return 0
|
|
1069
|
+
except Exception as e:
|
|
1070
|
+
print(f"Error: {e}", file=sys.stderr)
|
|
1071
|
+
return 1
|
|
1072
|
+
|
|
1073
|
+
|
|
1074
|
+
def _run_node_status(args) -> int:
|
|
1075
|
+
"""Query controller GET /status."""
|
|
1076
|
+
url = getattr(args, "controller_url", None)
|
|
1077
|
+
if not url:
|
|
1078
|
+
try:
|
|
1079
|
+
from devsper.config import get_config
|
|
1080
|
+
|
|
1081
|
+
url = get_config().nodes.controller_url
|
|
1082
|
+
except Exception:
|
|
1083
|
+
url = "http://localhost:7700"
|
|
1084
|
+
try:
|
|
1085
|
+
import httpx
|
|
1086
|
+
|
|
1087
|
+
r = httpx.get(f"{url.rstrip('/')}/status", timeout=10.0)
|
|
1088
|
+
r.raise_for_status()
|
|
1089
|
+
data = r.json()
|
|
1090
|
+
from rich.console import Console
|
|
1091
|
+
from rich.table import Table
|
|
1092
|
+
|
|
1093
|
+
cons = Console()
|
|
1094
|
+
cons.print(
|
|
1095
|
+
"[bold]Run[/bold]",
|
|
1096
|
+
data.get("run_id", ""),
|
|
1097
|
+
"[bold]Leader[/bold]",
|
|
1098
|
+
data.get("node_id", ""),
|
|
1099
|
+
)
|
|
1100
|
+
s = data.get("scheduler", {})
|
|
1101
|
+
cons.print(
|
|
1102
|
+
"Tasks:",
|
|
1103
|
+
s.get("completed", 0),
|
|
1104
|
+
"completed,",
|
|
1105
|
+
s.get("pending", 0),
|
|
1106
|
+
"pending",
|
|
1107
|
+
)
|
|
1108
|
+
return 0
|
|
1109
|
+
except Exception as e:
|
|
1110
|
+
print(f"Error: {e}", file=sys.stderr)
|
|
1111
|
+
return 1
|
|
1112
|
+
|
|
1113
|
+
|
|
1114
|
+
def _run_node_workers(args) -> int:
|
|
1115
|
+
"""List workers from controller GET /status."""
|
|
1116
|
+
url = getattr(args, "controller_url", None)
|
|
1117
|
+
if not url:
|
|
1118
|
+
try:
|
|
1119
|
+
from devsper.config import get_config
|
|
1120
|
+
|
|
1121
|
+
url = get_config().nodes.controller_url
|
|
1122
|
+
except Exception:
|
|
1123
|
+
url = "http://localhost:7700"
|
|
1124
|
+
try:
|
|
1125
|
+
import httpx
|
|
1126
|
+
|
|
1127
|
+
r = httpx.get(f"{url.rstrip('/')}/status", timeout=10.0)
|
|
1128
|
+
r.raise_for_status()
|
|
1129
|
+
data = r.json()
|
|
1130
|
+
for w in data.get("workers", []):
|
|
1131
|
+
print(w.get("node_id", "")[:8], w.get("host", ""), w.get("rpc_url", ""))
|
|
1132
|
+
return 0
|
|
1133
|
+
except Exception as e:
|
|
1134
|
+
print(f"Error: {e}", file=sys.stderr)
|
|
1135
|
+
return 1
|
|
1136
|
+
|
|
1137
|
+
|
|
1138
|
+
def _run_node_drain(args) -> int:
|
|
1139
|
+
"""POST /control drain target node."""
|
|
1140
|
+
url = getattr(args, "controller_url", None)
|
|
1141
|
+
try:
|
|
1142
|
+
from devsper.config import get_config
|
|
1143
|
+
|
|
1144
|
+
url = url or get_config().nodes.controller_url
|
|
1145
|
+
except Exception:
|
|
1146
|
+
url = "http://localhost:7700"
|
|
1147
|
+
try:
|
|
1148
|
+
import httpx
|
|
1149
|
+
|
|
1150
|
+
r = httpx.post(
|
|
1151
|
+
f"{url.rstrip('/')}/control",
|
|
1152
|
+
json={"command": "drain", "target": getattr(args, "node_id", "")},
|
|
1153
|
+
timeout=10.0,
|
|
1154
|
+
)
|
|
1155
|
+
r.raise_for_status()
|
|
1156
|
+
print("Drain sent.", file=sys.stderr)
|
|
1157
|
+
return 0
|
|
1158
|
+
except Exception as e:
|
|
1159
|
+
print(f"Error: {e}", file=sys.stderr)
|
|
1160
|
+
return 1
|
|
1161
|
+
|
|
1162
|
+
|
|
1163
|
+
def _run_node_logs(args) -> int:
|
|
1164
|
+
"""Stream GET /stream/events."""
|
|
1165
|
+
url = getattr(args, "controller_url", None) or "http://localhost:7700"
|
|
1166
|
+
try:
|
|
1167
|
+
from devsper.config import get_config
|
|
1168
|
+
|
|
1169
|
+
url = get_config().nodes.controller_url
|
|
1170
|
+
except Exception:
|
|
1171
|
+
pass
|
|
1172
|
+
print(
|
|
1173
|
+
"Connect to", url, "stream/events (--follow); not implemented", file=sys.stderr
|
|
1174
|
+
)
|
|
1175
|
+
return 0
|
|
1176
|
+
|
|
1177
|
+
|
|
1178
|
+
def _run_build(app_idea: str, output_dir: str) -> int:
    """Build a working repo from an app description (autonomous application builder)."""
    from devsper.dev.builder import run_build as do_build

    target_dir = output_dir or "./build_output"
    print(f"Building app: {app_idea!r}", file=sys.stderr)
    print(f"Output directory: {target_dir}", file=sys.stderr)
    outcome = do_build(app_idea, target_dir)
    if outcome.get("success"):
        # Human-readable note to stderr; bare path to stdout for scripting.
        print(f"Done. Repository at: {outcome['repo_path']}", file=sys.stderr)
        print(outcome["repo_path"])
        return 0
    print("Build completed with test failures.", file=sys.stderr)
    debug = outcome.get("debug_result")
    if debug and getattr(debug, "last_stdout", None):
        print(debug.last_stdout[:1500], file=sys.stderr)
    return 1
|
|
1195
|
+
|
|
1196
|
+
|
|
1197
|
+
def _run_replay(run_id: str, events_dir: str | None) -> int:
    """Replay a swarm run by run_id; if run_id empty, list recent run IDs.

    Returns 0 on success (or when listing), 1 when the requested run has no
    usable event log.
    """
    from devsper.runtime.replay_engine import replay_run, list_run_ids

    def _resolve_events_dir(explicit: str | None) -> str:
        # CLI flag wins; otherwise config, falling back to the default path.
        # (Previously this logic was duplicated verbatim in both branches.)
        try:
            from devsper.config import get_config

            return explicit or get_config().events_dir
        except Exception:
            return explicit or ".devsper/events"

    if not run_id or not run_id.strip():
        # No run id: list recent runs instead of replaying.
        ids_ = list_run_ids(_resolve_events_dir(events_dir))
        if not ids_:
            print("No run logs found.", file=sys.stderr)
            return 0
        print("Recent run IDs (use: devsper replay <run_id>):")
        for i in ids_[:20]:
            print(f" {i}")
        return 0
    events_dir = _resolve_events_dir(events_dir)
    transcript = replay_run(run_id.strip(), events_dir=events_dir)
    print(transcript)
    # replay_run signals a missing/empty log via the transcript text.
    if "No event log found" in transcript or "Empty event log" in transcript:
        return 1
    return 0
|
|
1229
|
+
|
|
1230
|
+
|
|
1231
|
+
def _run_graph(run_id: str | None) -> int:
    """Export task DAG for a run as Mermaid diagram. run_id optional (latest if omitted)."""
    from devsper.config import get_config
    from devsper.visualization.dag_export import (
        load_dag,
        export_mermaid,
        list_run_ids,
    )

    events_dir = get_config().events_dir
    # Default to the most recent run when none was specified.
    if not (run_id and run_id.strip()):
        available = list_run_ids(events_dir)
        if not available:
            print(
                'No runs found. Run a swarm first (e.g. devsper run "task").',
                file=sys.stderr,
            )
            return 1
        run_id = available[0]
    nodes, edges = load_dag(events_dir, run_id.strip())
    if nodes or edges:
        # Mermaid source goes to stdout so it can be piped into renderers.
        print(export_mermaid(nodes, edges))
        return 0
    print(f"No DAG found for run {run_id!r}.", file=sys.stderr)
    return 1
|
|
1257
|
+
|
|
1258
|
+
|
|
1259
|
+
def _run_analytics() -> int:
    """Show tool usage analytics: count, success rate, latency."""
    from devsper.analytics import get_default_analytics

    stats = get_default_analytics().get_stats()
    if not stats:
        print("No tool usage recorded yet.")
        return 0
    for entry in stats:
        line = (
            f"{entry['tool_name']}: count={entry['count']}"
            f" success_rate={entry['success_rate']:.1f}%"
            f" avg_latency_ms={entry['avg_latency_ms']}"
        )
        print(line)
    # Best-effort: append the reliability report when scoring data exists.
    try:
        from devsper.tools.scoring import get_default_score_store
        from devsper.tools.scoring.report import generate_tools_report

        scores = get_default_score_store().get_all_scores()
        if scores:
            print()
            print(generate_tools_report(scores))
    except Exception:
        pass
    return 0
|
|
1283
|
+
|
|
1284
|
+
|
|
1285
|
+
def _run_tools(args: object) -> int:
    """List tools with reliability scores, or reset score history.

    Subcommands (args.tools_subcommand):
      list (default) -- render a rich table of all registered tools with
                        their composite reliability scores.
      reset          -- wipe score history for one tool (args.tool_name)
                        or all tools (args.reset_all, with confirmation).

    Returns 0 on success, 1 on a usage error.
    """
    from rich.console import Console
    from rich.prompt import Confirm
    from rich.table import Table

    from devsper.tools.registry import list_tools
    from devsper.tools.selector import _tool_category
    from devsper.tools.scoring import get_default_score_store
    from devsper.tools.scoring.scorer import score_label

    subcommand = getattr(args, "tools_subcommand", None) or "list"
    category_filter = getattr(args, "category", None)
    poor_only = getattr(args, "poor", False)
    reset_all = getattr(args, "reset_all", False)
    tool_name_reset = getattr(args, "tool_name", None)

    if subcommand == "reset":
        store = get_default_score_store()
        if reset_all:
            # Destructive: require interactive confirmation first.
            if not Confirm.ask("Wipe all tool scores? This cannot be undone."):
                return 0
            store.reset(None)
            print("All tool scores wiped.")
            return 0
        if tool_name_reset:
            store.reset(tool_name_reset)
            print(f"Score history wiped for: {tool_name_reset}")
            return 0
        print(
            "Usage: devsper tools reset <tool_name> | devsper tools reset --all",
            file=sys.stderr,
        )
        return 1

    # List: all registered tools with scores
    store = get_default_score_store()
    scores_by_name = {s.tool_name: s for s in store.get_all_scores()}
    all_tools = list_tools()
    if category_filter:
        allowed = {category_filter.lower().strip()}
        all_tools = [t for t in all_tools if _tool_category(t) in allowed]
    # Row tuple: (name, category, score, label, success_rate, avg_latency,
    # calls, last_used, is_new).
    rows: list[tuple[str, str, float, str, float, float, int, str, bool]] = []
    for t in all_tools:
        s = scores_by_name.get(t.name)
        if s is None:
            # Tool has never been scored: show neutral defaults.
            score_val = 0.75
            label = "new"
            success_rate = 0.0
            avg_lat = 0.0
            calls = 0
            last_used = "-"
            is_new = True
        else:
            score_val = s.composite_score
            label = score_label(s.composite_score)
            success_rate = s.success_rate
            avg_lat = s.avg_latency_ms
            calls = s.total_calls
            # Keep only the date portion when the timestamp is long enough.
            last_used = (
                s.last_updated[:10] if len(s.last_updated) >= 10 else s.last_updated
            )
            is_new = s.is_new
        # --poor filter: keep only tools scoring below 0.40.
        if poor_only and score_val >= 0.40:
            continue
        cat = _tool_category(t)
        rows.append(
            (
                t.name,
                cat,
                score_val,
                label,
                success_rate,
                avg_lat,
                calls,
                last_used,
                is_new,
            )
        )

    # Highest score first.
    rows.sort(key=lambda r: -r[2])
    table = Table(title="Tool reliability scores")
    table.add_column("Tool Name", style="bold")
    table.add_column("Category")
    table.add_column("Score", justify="right")
    table.add_column("Label")
    table.add_column("Success Rate", justify="right")
    table.add_column("Avg Latency", justify="right")
    table.add_column("Calls", justify="right")
    table.add_column("Last Used")
    for r in rows:
        name, cat, score_val, label, success_rate, avg_lat, calls, last_used, is_new = r
        # Map the score label onto a rich color for the table cell.
        if is_new and label == "new":
            label_style = "dim"
        elif label == "excellent":
            label_style = "green"
        elif label == "good":
            label_style = "default"
        elif label == "degraded":
            label_style = "yellow"
        else:
            label_style = "red"
        table.add_row(
            name,
            cat,
            f"{score_val:.2f}",
            f"[{label_style}]{label}[/]",
            f"{success_rate:.0%}" if not is_new else "-",
            f"{avg_lat:.0f} ms" if not is_new else "-",
            str(calls),
            last_used,
        )
    console = Console()
    if rows:
        console.print(table)
    else:
        console.print("No tools match the filters.")
    return 0
|
|
1403
|
+
|
|
1404
|
+
|
|
1405
|
+
def _run_cache(subcommand: str, threshold: float | None = None) -> int:
    """Cache subcommand: stats | clear | tune.

    stats -- print exact-match entry count and, best-effort, semantic
             cache status from the default cache store.
    clear -- clear both the exact task cache and (best-effort) the
             semantic cache store.
    tune  -- report how many stored embeddings would match at several
             candidate similarity thresholds.

    Returns 0 on success, 1 on tune failure or an unknown subcommand.
    """
    from devsper.cache import TaskCache
    from pathlib import Path

    db_path = Path(".devsper") / "task_cache.db"
    cache = TaskCache()
    if subcommand == "stats":
        st = cache.stats()
        print(f"Cached task results (exact): {st['entries']}")
        # Semantic-cache stats are optional; any failure is swallowed so the
        # exact-cache count above is always reported.
        try:
            from devsper.cache.store import get_default_cache_store

            store = get_default_cache_store(db_path)
            sst = store.stats()
            semantic_count = sst.get("semantic_entries", 0)
            if semantic_count > 0:
                # Read the configured threshold; default to 0.92 when config
                # is missing or has no cache section.
                try:
                    from devsper.config import get_config

                    cfg = get_config()
                    th = getattr(
                        getattr(cfg, "cache", None), "similarity_threshold", 0.92
                    )
                except Exception:
                    th = 0.92
                print(f"Semantic cache: enabled (threshold: {th})")
                print(f"Cache entries: {st['entries'] + semantic_count} tasks")
                print("Hit rate: N/A (run with semantic cache to collect)")
                print("Avg similarity: N/A")
                print("Est. tokens saved: N/A")
            else:
                print("Semantic cache: disabled or empty")
        except Exception:
            pass
        return 0
    if subcommand == "clear":
        cache.clear()
        # Also clear the semantic store; ignore failures (it may not exist).
        try:
            from devsper.cache.store import get_default_cache_store

            get_default_cache_store(db_path).clear()
        except Exception:
            pass
        print("Cache cleared.")
        return 0
    if subcommand == "tune":
        try:
            from devsper.cache.task_cache import SemanticTaskCache
            from devsper.cache.embedding_index import _cosine_sim, bytes_to_embedding

            sem = SemanticTaskCache(
                similarity_threshold=threshold or 0.92,
                max_age_hours=168.0,
            )
            entries = sem.store.list_semantic_entries()
            if len(entries) < 2:
                print("Need at least 2 semantic cache entries to tune.")
                return 0
            # Use last 50
            entries = entries[-50:]
            # Load embeddings
            # NOTE(review): e[0] is assumed to be the serialized embedding
            # bytes column — confirm against list_semantic_entries().
            vecs = [bytes_to_embedding(e[0]) for e in entries]
            ths = [0.85, 0.88, 0.90, 0.92, 0.95]
            print("Threshold | Entries that would match self | Avg other-match count")
            print("----------|-------------------------------|----------------------")
            for th in ths:
                # NOTE(review): self-similarity is presumably ~1.0 for
                # normalized vectors, so this acts as a sanity check rather
                # than a discriminator — verify intent.
                self_ok = sum(
                    1 for i in range(len(vecs)) if _cosine_sim(vecs[i], vecs[i]) >= th
                )
                # Count ordered cross-pairs above the candidate threshold
                # (O(n^2), bounded by the 50-entry cap above).
                other_count = 0
                for i in range(len(vecs)):
                    for j in range(len(vecs)):
                        if i != j and _cosine_sim(vecs[i], vecs[j]) >= th:
                            other_count += 1
                avg_other = other_count / len(vecs) if vecs else 0
                print(
                    f" {th:.2f} | {self_ok}/{len(vecs)} | {avg_other:.1f}"
                )
            return 0
        except Exception as e:
            print(f"Cache tune failed: {e}", file=sys.stderr)
            return 1
    print(
        "Usage: devsper cache stats | devsper cache clear | devsper cache tune [--threshold 0.90]",
        file=sys.stderr,
    )
    return 1
|
|
1493
|
+
|
|
1494
|
+
|
|
1495
|
+
def _run_memory(limit: int) -> int:
    """List memory entries from the default store."""
    from devsper.memory.memory_store import get_default_store

    records = get_default_store().list_memory(limit=limit)
    if not records:
        print("No memory entries.")
        return 0
    for rec in records:
        tag_line = ", ".join(rec.tags[:8]) if rec.tags else "-"
        body = rec.content or ""
        # Show a 200-char preview with an ellipsis for longer entries.
        preview = body[:200] + ("..." if len(body) > 200 else "")
        print(f"[{rec.memory_type.value}] {rec.id}")
        print(f" tags: {tag_line}")
        print(f" {preview}")
        print()
    return 0
|
|
1514
|
+
|
|
1515
|
+
|
|
1516
|
+
def _run_synthesize(
    query: str,
    no_kg: bool = False,
    json_out: bool = False,
    since: str | None = None,
) -> int:
    """Cross-run synthesis: answer query using all memory and optional KG.

    Args:
        query: natural-language question to answer across runs.
        no_kg: skip building/using the knowledge graph.
        json_out: emit a single JSON object instead of streaming to console.
        since: optional ISO-8601 lower bound for memory timestamps
            ("Z" suffix accepted); silently ignored when unparseable.

    Returns 0 always (failures propagate as exceptions).
    """
    import json
    from datetime import datetime, timezone
    from rich.console import Console
    from rich.panel import Panel
    from devsper.config import get_config
    from devsper.memory.memory_store import get_default_store
    from devsper.memory.memory_index import MemoryIndex
    from devsper.knowledge.knowledge_graph import KnowledgeGraph
    from devsper.intelligence.synthesis import CrossRunSynthesizer
    from devsper.utils.models import resolve_model
    from devsper.providers.model_router import TaskType

    cfg = get_config()
    store = get_default_store()
    index = MemoryIndex(store=store)
    worker_model = resolve_model(cfg.models.worker, TaskType.ANALYSIS)
    kg = None if no_kg else KnowledgeGraph(store=store)
    # `and not no_kg` is redundant given the line above (kg is None when
    # no_kg) but kept as-is.
    if kg and not no_kg:
        kg.load()
        kg.build_from_memory(merge=True)
    synthesizer = CrossRunSynthesizer(
        memory_index=index, knowledge_graph=kg, worker_model=worker_model
    )
    since_dt = None
    if since:
        try:
            # Accept trailing "Z" by rewriting it to an explicit UTC offset.
            since_dt = datetime.fromisoformat(since.replace("Z", "+00:00"))
        except ValueError:
            pass
    out_chunks = []
    console = Console()
    if json_out:
        # Non-streaming path: collect the full answer, then re-query the
        # index to report which runs contributed sources.
        full = synthesizer.synthesize(
            query, max_sources=20, stream=False, use_kg=not no_kg, since=since_dt
        )
        answer = full if isinstance(full, str) else "".join(full)
        memories = index.query_across_runs(query, top_k=20, include_archived=False)
        if since_dt:
            memories = [m for m in memories if m.timestamp >= since_dt]
        # De-duplicate run ids while preserving order; drop empties.
        run_ids = list(dict.fromkeys(getattr(m, "run_id", "") or "" for m in memories))
        run_ids = [r for r in run_ids if r]
        obj = {
            "query": query,
            "sources_used": len(memories),
            "run_ids": run_ids,
            "answer": answer,
        }
        print(json.dumps(obj, indent=2))
        return 0
    # Streaming path: print chunks as they arrive under a status spinner.
    with console.status("Synthesizing..."):
        it = synthesizer.synthesize(
            query, max_sources=20, stream=True, use_kg=not no_kg, since=since_dt
        )
        for chunk in it:
            out_chunks.append(chunk)
            console.print(chunk, end="")
    console.print()
    # Same source attribution as the JSON path, rendered as a rich panel.
    memories = index.query_across_runs(query, top_k=20, include_archived=False)
    if since_dt:
        memories = [m for m in memories if m.timestamp >= since_dt]
    run_ids = list(dict.fromkeys(getattr(m, "run_id", "") or "" for m in memories))
    run_ids = [r for r in run_ids if r]
    console.print(
        Panel(
            f"Sources: {len(memories)} records across {len(run_ids)} runs\nRun IDs: {', '.join(run_ids[:15])}{'...' if len(run_ids) > 15 else ''}",
            title="Sources",
        )
    )
    return 0
|
|
1592
|
+
|
|
1593
|
+
|
|
1594
|
+
def _run_memory_consolidate(dry_run: bool = False, min_cluster_size: int = 3) -> int:
    """Consolidate similar memory records: cluster, summarize, archive.

    Args:
        dry_run: report what would change without applying it.
        min_cluster_size: minimum records per cluster to consolidate.

    Returns 0 always (failures propagate as exceptions).
    """
    import asyncio
    from rich.console import Console
    from rich.progress import Progress, SpinnerColumn
    from devsper.config import get_config
    from devsper.memory.memory_store import get_default_store
    from devsper.memory.memory_index import MemoryIndex
    from devsper.memory.consolidation import MemoryConsolidator
    from devsper.utils.models import resolve_model
    from devsper.providers.model_router import TaskType

    store = get_default_store()
    index = MemoryIndex(store=store)
    cfg = get_config()
    worker_model = resolve_model(cfg.models.worker, TaskType.ANALYSIS)
    consolidator = MemoryConsolidator(min_cluster_size=min_cluster_size)
    records = store.list_memory(limit=5000, include_archived=False)
    console = Console()
    console.print(f"Scanning {len(records)} memory records...")
    try:
        # asyncio.run replaces the deprecated get_event_loop() pattern and
        # cleans up its loop automatically.
        report = asyncio.run(
            consolidator.consolidate(store, index, worker_model, dry_run=dry_run)
        )
    except RuntimeError:
        # Fallback for contexts where asyncio.run is unusable; use a private
        # loop and close it so its resources are released (the original
        # fallback leaked the loop).
        loop = asyncio.new_event_loop()
        try:
            report = loop.run_until_complete(
                consolidator.consolidate(store, index, worker_model, dry_run=dry_run)
            )
        finally:
            loop.close()
    avg_per = (
        report.records_archived / report.clusters_consolidated
        if report.clusters_consolidated
        else 0
    )
    console.print(
        f"Found {report.clusters_found} clusters (avg {avg_per:.1f} records/cluster)"
    )
    console.print(
        f"Consolidating {report.clusters_consolidated} clusters with {min_cluster_size}+ records..."
    )
    with Progress(SpinnerColumn(), console=console) as progress:
        progress.add_task("consolidate", total=report.clusters_consolidated)
    console.print("Results:")
    console.print(f" Records archived: {report.records_archived}")
    console.print(f" Summaries created: {report.records_created}")
    console.print(f" Est. tokens saved: ~{report.tokens_saved_estimate} per run")
    if dry_run:
        console.print(
            "Run devsper memory consolidate without --dry-run to apply changes."
        )
    return 0
|
|
1645
|
+
|
|
1646
|
+
|
|
1647
|
+
def _run_checkpoint_dispatch(args: object) -> int:
    """Route `devsper checkpoint ...` to restore or list."""
    cmd = getattr(args, "checkpoint_cmd", None)
    if cmd == "restore":
        # Restore takes only the run id, not the whole namespace.
        return _run_checkpoint_restore(getattr(args, "run_id", ""))
    # Any other (or missing) subcommand falls through to listing.
    return _run_checkpoint_list(args)
|
|
1651
|
+
|
|
1652
|
+
|
|
1653
|
+
def _run_checkpoint_list(args: object) -> int:
    """List all checkpoint files with run_id, task counts, timestamp.

    Scans events_dir for `*.checkpoint.json` files and renders a rich table
    (plain text when rich is unavailable). Returns 0 always.
    """
    import json
    import os

    from devsper.config import get_config

    try:
        cfg = get_config()
        events_dir = (
            getattr(cfg, "events_dir", ".devsper/events") or ".devsper/events"
        )
    except Exception:
        events_dir = ".devsper/events"
    # (Removed the unused SchedulerCheckpointer instance the original built
    # here; hoisted `import json` out of the scan loop.)
    if not os.path.isdir(events_dir):
        print("No checkpoint directory found.")
        return 0
    found = []
    for name in os.listdir(events_dir):
        if name.endswith(".checkpoint.json"):
            run_id = name.replace(".checkpoint.json", "")
            path = os.path.join(events_dir, name)
            try:
                with open(path, "r") as f:
                    data = json.load(f)
                completed = data.get("completed_count", 0)
                failed = data.get("failed_count", 0)
                snapshot_at = data.get("snapshot_at", "")[:19]
                found.append((run_id, completed, failed, snapshot_at))
            except Exception:
                # Unreadable/corrupt checkpoint: still list it with unknowns.
                found.append((run_id, "?", "?", ""))
    if not found:
        print("No checkpoint files found.")
        return 0
    try:
        from rich.console import Console
        from rich.table import Table

        console = Console()
        table = Table(title="Checkpoints")
        table.add_column("Run ID", style="dim")
        table.add_column("Completed", justify="right")
        table.add_column("Failed", justify="right")
        table.add_column("Snapshot at")
        for run_id, completed, failed, snapshot_at in sorted(
            found, key=lambda x: -len(x[0])
        ):
            table.add_row(run_id[:48], str(completed), str(failed), snapshot_at)
        console.print(table)
    except ImportError:
        # rich not installed: plain one-line-per-checkpoint output.
        for run_id, completed, failed, snapshot_at in found:
            print(f"{run_id} completed={completed} failed={failed} {snapshot_at}")
    return 0
|
|
1708
|
+
|
|
1709
|
+
|
|
1710
|
+
def _run_checkpoint_restore(run_id: str) -> int:
    """Restore a run from checkpoint and resume execution."""
    from devsper.config import get_config
    from devsper.swarm.checkpointer import SchedulerCheckpointer
    from devsper.types.exceptions import CheckpointNotFoundError

    run_id = (run_id or "").strip()
    if not run_id:
        print(
            "Error: run_id required. Use: devsper checkpoint restore <run_id>",
            file=sys.stderr,
        )
        return 1
    # Resolve events dir from config, falling back to the default on any error.
    events_dir = ".devsper/events"
    try:
        events_dir = getattr(get_config(), "events_dir", events_dir) or events_dir
    except Exception:
        pass
    ckp = SchedulerCheckpointer(events_dir=events_dir)
    try:
        scheduler = ckp.restore_or_raise(run_id)
    except CheckpointNotFoundError as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1
    print(
        f"Restored scheduler for run_id={run_id!r}: {len(scheduler.get_all_tasks())} tasks, {scheduler.get_results()} results."
    )
    print(
        "Resume execution is not yet implemented (1.10). Use checkpoint list to inspect state."
    )
    return 0
|
|
1743
|
+
|
|
1744
|
+
|
|
1745
|
+
def _run_audit_dispatch(args: object) -> int:
    """Audit: print table, export, or verify.

    Subcommands (args.audit_cmd):
      verify  -- check the audit log for a run; exit 0 iff it verifies.
      default -- export (args.export format) or pretty-print the log.

    Returns 0 on success, 1 on usage error / failed verify / missing log.
    """
    import json

    from devsper.config import get_config
    from devsper.audit.logger import AuditLogger

    cmd = getattr(args, "audit_cmd", None)
    run_id = getattr(args, "run_id", None)
    export_fmt = getattr(args, "export", None)
    if cmd == "verify":
        # (Dropped the original's redundant re-read of args.run_id here.)
        if not run_id:
            print("Error: run_id required for verify", file=sys.stderr)
            return 1
        cfg = get_config()
        ok, msg = AuditLogger.verify(run_id, cfg.data_dir)
        print(msg)
        return 0 if ok else 1
    if not run_id:
        print(
            "Error: run_id required (e.g. devsper audit events_2025-03-10...)",
            file=sys.stderr,
        )
        return 1
    cfg = get_config()
    logger = AuditLogger(cfg.data_dir, run_id=run_id)
    if export_fmt:
        out = logger.export(run_id, format=export_fmt)
        print(out)
        return 0
    out = logger.export(run_id, format="jsonl")
    if not out:
        print(f"No audit log for run_id={run_id}", file=sys.stderr)
        return 1
    try:
        from rich.console import Console
        from rich.table import Table

        console = Console()
        table = Table(title=f"Audit log: {run_id}")
        table.add_column("timestamp")
        table.add_column("event_type")
        table.add_column("task_id")
        table.add_column("resource")
        table.add_column("success")
        # json import hoisted out of this loop (it ran every iteration).
        for line in out.strip().split("\n"):
            if not line:
                continue
            r = json.loads(line)
            table.add_row(
                r.get("timestamp", "")[:19],
                r.get("event_type", ""),
                r.get("task_id", ""),
                r.get("resource", ""),
                str(r.get("success", "")),
            )
        console.print(table)
    except Exception:
        # rich unavailable or a malformed record: fall back to raw JSONL.
        print(out)
    return 0
|
|
1806
|
+
|
|
1807
|
+
|
|
1808
|
+
def _run_explain(args: object) -> int:
|
|
1809
|
+
"""Explain: decision records for run or task."""
|
|
1810
|
+
run_id = getattr(args, "run_id", None)
|
|
1811
|
+
task_id = getattr(args, "task_id", None)
|
|
1812
|
+
if not run_id:
|
|
1813
|
+
print("Error: run_id required", file=sys.stderr)
|
|
1814
|
+
return 1
|
|
1815
|
+
try:
|
|
1816
|
+
from devsper.explainability.decision_tree import DecisionTreeBuilder
|
|
1817
|
+
from devsper.config import get_config
|
|
1818
|
+
|
|
1819
|
+
cfg = get_config()
|
|
1820
|
+
events_dir = cfg.events_dir
|
|
1821
|
+
builder = DecisionTreeBuilder()
|
|
1822
|
+
records = builder.build_from_events(run_id, events_dir)
|
|
1823
|
+
if not records:
|
|
1824
|
+
print(f"No decision records for run_id={run_id}", file=sys.stderr)
|
|
1825
|
+
return 1
|
|
1826
|
+
if task_id:
|
|
1827
|
+
records = [r for r in records if r.task_id == task_id]
|
|
1828
|
+
if not records:
|
|
1829
|
+
print(f"No task {task_id} in run {run_id}", file=sys.stderr)
|
|
1830
|
+
return 1
|
|
1831
|
+
for r in records:
|
|
1832
|
+
print(f"--- {r.task_id} ---")
|
|
1833
|
+
print(f" strategy: {r.strategy_selected}")
|
|
1834
|
+
print(f" model: {r.model_selected} ({r.model_tier})")
|
|
1835
|
+
print(f" tools: {r.tools_selected}")
|
|
1836
|
+
print(f" confidence: {r.confidence:.0%}")
|
|
1837
|
+
print(
|
|
1838
|
+
f" rationale: {r.rationale[:300]}..."
|
|
1839
|
+
if len(r.rationale or "") > 300
|
|
1840
|
+
else f" rationale: {r.rationale}"
|
|
1841
|
+
)
|
|
1842
|
+
return 0
|
|
1843
|
+
except Exception as e:
|
|
1844
|
+
print(f"Error: {e}", file=sys.stderr)
|
|
1845
|
+
return 1
|
|
1846
|
+
|
|
1847
|
+
|
|
1848
|
+
def _run_simulate(args: object) -> int:
|
|
1849
|
+
"""Simulate: dry-run planning, no LLM or tools."""
|
|
1850
|
+
import asyncio
|
|
1851
|
+
|
|
1852
|
+
task = getattr(args, "task", "")
|
|
1853
|
+
cost_only = getattr(args, "cost_only", False) or getattr(args, "cost", False)
|
|
1854
|
+
if not task:
|
|
1855
|
+
print(
|
|
1856
|
+
'Error: task required (e.g. devsper simulate "Summarize X")',
|
|
1857
|
+
file=sys.stderr,
|
|
1858
|
+
)
|
|
1859
|
+
return 1
|
|
1860
|
+
try:
|
|
1861
|
+
from devsper.explainability.simulation import SimulationMode
|
|
1862
|
+
|
|
1863
|
+
sim = SimulationMode()
|
|
1864
|
+
report = asyncio.run(sim.simulate(task))
|
|
1865
|
+
if cost_only:
|
|
1866
|
+
print(f"Estimated cost: {getattr(report, 'estimated_cost', 'N/A')}")
|
|
1867
|
+
return 0
|
|
1868
|
+
print(f"Tasks: {len(report.task_list)}")
|
|
1869
|
+
for t in report.task_list:
|
|
1870
|
+
print(f" - {t}")
|
|
1871
|
+
print(f"Estimated cost: {getattr(report, 'estimated_cost', 'N/A')}")
|
|
1872
|
+
print(f"Estimated duration: {getattr(report, 'estimated_duration', 'N/A')}")
|
|
1873
|
+
return 0
|
|
1874
|
+
except Exception as e:
|
|
1875
|
+
print(f"Error: {e}", file=sys.stderr)
|
|
1876
|
+
return 1
|
|
1877
|
+
|
|
1878
|
+
|
|
1879
|
+
def _run_health(args: object) -> int:
    """Run health checks. Exit 0 if healthy, 1 otherwise. Print ✓/✗ per check."""
    import asyncio
    from devsper.config import get_config
    from devsper.runtime.health import HealthChecker

    # (Removed the original's unused HealthReport import.)
    try:
        cfg = get_config()
    except Exception:
        cfg = None
    if cfg is None:
        # Still run the checks, against a default config model.
        print("No config loaded; using defaults for health checks.")
        from devsper.config.schema import devsperConfigModel

        cfg = devsperConfigModel()
    checker = HealthChecker()
    try:
        report = asyncio.run(checker.check(cfg))
    except RuntimeError:
        # asyncio.run refused (e.g. a loop is already running); use a private
        # loop and close it so its resources are released (the original
        # fallback leaked the loop).
        loop = asyncio.new_event_loop()
        try:
            report = loop.run_until_complete(checker.check(cfg))
        finally:
            loop.close()
    try:
        from rich.console import Console

        console = Console()
        for name, ok in report.checks.items():
            if ok:
                console.print(f" [green]✓[/green] {name}")
            else:
                console.print(f" [red]✗[/red] {name} {report.errors.get(name, '')}")
        if report.healthy:
            console.print("[green]healthy[/green]")
        else:
            console.print("[red]unhealthy[/red]")
    except ImportError:
        # rich not installed: plain-text symbols.
        for name, ok in report.checks.items():
            sym = "✓" if ok else "✗"
            print(
                f" {sym} {name}"
                + (f" {report.errors.get(name, '')}" if not ok else "")
            )
        print("healthy" if report.healthy else "unhealthy")
    return 0 if report.healthy else 1
|
|
1922
|
+
|
|
1923
|
+
|
|
1924
|
+
def _run_completion(parser: argparse.ArgumentParser, args: object) -> int:
    """Print shell completion script (bash, zsh, or fish).

    Uses shtab to generate the script from the argparse parser. Fish is not
    generated directly; a hint is printed to stderr instead. Returns 0 on
    success, 1 when shtab is not installed.
    """
    shell = getattr(args, "shell", "bash")
    try:
        import shtab

        if shell == "fish":
            # shtab covers bash/zsh only; for fish we emit guidance, via the
            # styled console when available, and always via plain stderr.
            try:
                from devsper.cli.ui import err_console

                err_console.print(
                    "[hive.warning]Fish completion: use shtab for bash/zsh; fish script can be generated from parser.[/]"
                )
            except ImportError:
                pass
            sys.stderr.write(
                "Fish completion: add 'complete -c devsper -a \"(devsper --print-completion 2>/dev/null)\"' or use shtab for bash/zsh\n"
            )
            return 0
        output = shtab.complete(parser, shell=shell)
        # Prefer the project console; fall back to bare print when the UI
        # package is unavailable. No trailing newline: script is verbatim.
        try:
            from devsper.cli.ui import console

            console.print(output, end="")
        except ImportError:
            print(output, end="")
        return 0
    except ImportError:
        # shtab missing entirely: tell the user how to get it.
        try:
            from devsper.cli.ui import err_console

            err_console.print("Install shtab: pip install shtab")
        except ImportError:
            print("Install shtab: pip install shtab", file=sys.stderr)
        return 1
|
|
1959
|
+
|
|
1960
|
+
|
|
1961
|
+
def _run_upgrade(args: object) -> int:
    """Run upgrade subcommand: check, changelog, install.

    Thin delegator: the actual implementation lives in
    ``devsper.upgrade.cli`` and is imported lazily so the upgrade
    machinery is only loaded when this command is used.
    """
    from devsper.upgrade.cli import run_upgrade as _upgrade_impl

    return _upgrade_impl(args)
|
|
1966
|
+
|
|
1967
|
+
|
|
1968
|
+
def _run_reg_dispatch(args: object) -> int:
|
|
1969
|
+
"""Registry commands."""
|
|
1970
|
+
cmd = getattr(args, "reg_cmd", None)
|
|
1971
|
+
if not cmd:
|
|
1972
|
+
return 0
|
|
1973
|
+
from devsper.cli.commands.reg import (
|
|
1974
|
+
cmd_login,
|
|
1975
|
+
cmd_logout,
|
|
1976
|
+
cmd_whoami,
|
|
1977
|
+
cmd_publish,
|
|
1978
|
+
cmd_search,
|
|
1979
|
+
cmd_info,
|
|
1980
|
+
cmd_test,
|
|
1981
|
+
cmd_versions,
|
|
1982
|
+
cmd_yank,
|
|
1983
|
+
)
|
|
1984
|
+
|
|
1985
|
+
cmds = {
|
|
1986
|
+
"login": cmd_login,
|
|
1987
|
+
"logout": cmd_logout,
|
|
1988
|
+
"whoami": cmd_whoami,
|
|
1989
|
+
"publish": cmd_publish,
|
|
1990
|
+
"search": cmd_search,
|
|
1991
|
+
"info": cmd_info,
|
|
1992
|
+
"test": cmd_test,
|
|
1993
|
+
"versions": cmd_versions,
|
|
1994
|
+
"yank": cmd_yank,
|
|
1995
|
+
}
|
|
1996
|
+
if cmd in cmds:
|
|
1997
|
+
return cmds[cmd](args)
|
|
1998
|
+
return 0
|
|
1999
|
+
|
|
2000
|
+
|
|
2001
|
+
def _run_plugins_dispatch(args: object) -> int:
    """Plugin commands: alias for registry dispatch.

    Copies ``plugins_cmd`` onto ``reg_cmd`` so the shared registry
    dispatcher can handle the subcommand.
    """
    setattr(args, "reg_cmd", getattr(args, "plugins_cmd", None))
    return _run_reg_dispatch(args)
|
|
2005
|
+
|
|
2006
|
+
|
|
2007
|
+
def main() -> int:
|
|
2008
|
+
if len(sys.argv) == 2 and sys.argv[1].strip() == ".":
|
|
2009
|
+
sys.argv = [sys.argv[0]]
|
|
2010
|
+
|
|
2011
|
+
# Non-blocking startup nag if update available (uses cache, ~100ms)
|
|
2012
|
+
try:
|
|
2013
|
+
from devsper.upgrade.notifier import check_and_notify
|
|
2014
|
+
|
|
2015
|
+
check_and_notify()
|
|
2016
|
+
except Exception:
|
|
2017
|
+
pass
|
|
2018
|
+
|
|
2019
|
+
parser = argparse.ArgumentParser(
|
|
2020
|
+
prog="devsper",
|
|
2021
|
+
description="Orchestrate distributed swarms of AI agents that collaboratively solve complex tasks.",
|
|
2022
|
+
epilog="""
|
|
2023
|
+
Quick start:
|
|
2024
|
+
devsper init # Set up a new project
|
|
2025
|
+
devsper run "your task here" # Run the swarm
|
|
2026
|
+
devsper tui # Launch the terminal UI
|
|
2027
|
+
|
|
2028
|
+
Examples:
|
|
2029
|
+
devsper run "Analyze diffusion models and summarize key papers"
|
|
2030
|
+
devsper build "fastapi todo app" -o ./myapp
|
|
2031
|
+
devsper credentials migrate # Import API keys from .env
|
|
2032
|
+
devsper doctor # Check your setup
|
|
2033
|
+
""",
|
|
2034
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2035
|
+
)
|
|
2036
|
+
try:
|
|
2037
|
+
import shtab
|
|
2038
|
+
|
|
2039
|
+
shtab.add_argument_to(parser, ["--print-completion"])
|
|
2040
|
+
except ImportError:
|
|
2041
|
+
pass
|
|
2042
|
+
global_grp = parser.add_argument_group("Global options")
|
|
2043
|
+
global_grp.add_argument(
|
|
2044
|
+
"--debug", action="store_true", help="Enable DEBUG log level"
|
|
2045
|
+
)
|
|
2046
|
+
global_grp.add_argument(
|
|
2047
|
+
"--trace", action="store_true", help="Enable TRACE log level (very verbose)"
|
|
2048
|
+
)
|
|
2049
|
+
global_grp.add_argument(
|
|
2050
|
+
"-q",
|
|
2051
|
+
"--quiet",
|
|
2052
|
+
action="store_true",
|
|
2053
|
+
help="WARN and above only, suppress progress",
|
|
2054
|
+
)
|
|
2055
|
+
global_grp.add_argument(
|
|
2056
|
+
"--no-color", action="store_true", help="Disable color output"
|
|
2057
|
+
)
|
|
2058
|
+
global_grp.add_argument(
|
|
2059
|
+
"--json",
|
|
2060
|
+
action="store_true",
|
|
2061
|
+
dest="json_output",
|
|
2062
|
+
help="Machine-readable JSON output",
|
|
2063
|
+
)
|
|
2064
|
+
global_grp.add_argument(
|
|
2065
|
+
"--plain", action="store_true", help="Plain text output, no Rich (for piping)"
|
|
2066
|
+
)
|
|
2067
|
+
subparsers = parser.add_subparsers(dest="command", help="Command")
|
|
2068
|
+
|
|
2069
|
+
run_parser = subparsers.add_parser(
|
|
2070
|
+
"run",
|
|
2071
|
+
help="Run the swarm on a task",
|
|
2072
|
+
description="Decompose a task into subtasks and execute them with AI workers.",
|
|
2073
|
+
epilog="""
|
|
2074
|
+
Examples:
|
|
2075
|
+
devsper run "Summarize swarm intelligence in one paragraph"
|
|
2076
|
+
devsper run "Analyze diffusion models" -q
|
|
2077
|
+
""",
|
|
2078
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2079
|
+
)
|
|
2080
|
+
run_parser.add_argument(
|
|
2081
|
+
"task",
|
|
2082
|
+
nargs="?",
|
|
2083
|
+
default="Summarize swarm intelligence in one paragraph.",
|
|
2084
|
+
help="Task prompt",
|
|
2085
|
+
)
|
|
2086
|
+
run_parser.add_argument(
|
|
2087
|
+
"-q",
|
|
2088
|
+
"--quiet",
|
|
2089
|
+
action="store_true",
|
|
2090
|
+
help="No progress output; only print results (for piping)",
|
|
2091
|
+
)
|
|
2092
|
+
run_parser.add_argument(
|
|
2093
|
+
"--summary",
|
|
2094
|
+
action="store_true",
|
|
2095
|
+
help="Only print run summary, not task results",
|
|
2096
|
+
)
|
|
2097
|
+
run_parser.set_defaults(func=lambda a: _run_swarm(a))
|
|
2098
|
+
|
|
2099
|
+
meta_parser = subparsers.add_parser(
|
|
2100
|
+
"meta",
|
|
2101
|
+
help="Run meta-planner: decompose mega-task into sub-swarms and run them",
|
|
2102
|
+
description="Decompose a mega-task into sub-swarms with dependencies and SLAs.",
|
|
2103
|
+
)
|
|
2104
|
+
meta_parser.add_argument(
|
|
2105
|
+
"mega_task",
|
|
2106
|
+
nargs="?",
|
|
2107
|
+
default="",
|
|
2108
|
+
help="Mega-task to run (e.g. 'Research and implement a todo API')",
|
|
2109
|
+
)
|
|
2110
|
+
meta_parser.add_argument(
|
|
2111
|
+
"--max-swarms", type=int, default=None, help="Max sub-swarms to run"
|
|
2112
|
+
)
|
|
2113
|
+
meta_parser.add_argument(
|
|
2114
|
+
"--budget", type=float, default=None, help="Max budget in USD"
|
|
2115
|
+
)
|
|
2116
|
+
meta_sub = meta_parser.add_subparsers(dest="meta_cmd", help="Meta subcommands")
|
|
2117
|
+
meta_plan_p = meta_sub.add_parser(
|
|
2118
|
+
"plan", help="Decompose only; print SubSwarmSpecs as table"
|
|
2119
|
+
)
|
|
2120
|
+
meta_plan_p.add_argument("mega_task", help="Mega-task to decompose")
|
|
2121
|
+
meta_plan_p.set_defaults(
|
|
2122
|
+
meta_cmd="plan", func=lambda a: _run_meta_plan(a.mega_task)
|
|
2123
|
+
)
|
|
2124
|
+
meta_parser.set_defaults(
|
|
2125
|
+
meta_cmd=None,
|
|
2126
|
+
func=lambda a: _run_meta(
|
|
2127
|
+
a.mega_task or "Summarize the state of AI in 2024",
|
|
2128
|
+
getattr(a, "max_swarms", None),
|
|
2129
|
+
getattr(a, "budget", None),
|
|
2130
|
+
),
|
|
2131
|
+
)
|
|
2132
|
+
|
|
2133
|
+
approvals_parser = subparsers.add_parser(
|
|
2134
|
+
"approvals",
|
|
2135
|
+
help="Human-in-the-loop approval requests",
|
|
2136
|
+
description="List, show, approve, or reject pending approval requests.",
|
|
2137
|
+
)
|
|
2138
|
+
approvals_sub = approvals_parser.add_subparsers(
|
|
2139
|
+
dest="approvals_cmd", help="Approval subcommands"
|
|
2140
|
+
)
|
|
2141
|
+
approvals_list_p = approvals_sub.add_parser(
|
|
2142
|
+
"list", help="Table of pending approvals"
|
|
2143
|
+
)
|
|
2144
|
+
approvals_list_p.set_defaults(func=lambda a: _run_approvals_list())
|
|
2145
|
+
approvals_show_p = approvals_sub.add_parser(
|
|
2146
|
+
"show", help="Show full approval request"
|
|
2147
|
+
)
|
|
2148
|
+
approvals_show_p.add_argument("request_id", help="Request ID")
|
|
2149
|
+
approvals_show_p.set_defaults(func=lambda a: _run_approvals_show(a.request_id))
|
|
2150
|
+
approvals_approve_p = approvals_sub.add_parser("approve", help="Approve a request")
|
|
2151
|
+
approvals_approve_p.add_argument("request_id", help="Request ID")
|
|
2152
|
+
approvals_approve_p.add_argument(
|
|
2153
|
+
"--notes", type=str, default="", help="Reviewer notes"
|
|
2154
|
+
)
|
|
2155
|
+
approvals_approve_p.set_defaults(
|
|
2156
|
+
func=lambda a: _run_approvals_approve(a.request_id, getattr(a, "notes", ""))
|
|
2157
|
+
)
|
|
2158
|
+
approvals_reject_p = approvals_sub.add_parser("reject", help="Reject a request")
|
|
2159
|
+
approvals_reject_p.add_argument("request_id", help="Request ID")
|
|
2160
|
+
approvals_reject_p.add_argument(
|
|
2161
|
+
"--notes", type=str, default="", help="Reviewer notes"
|
|
2162
|
+
)
|
|
2163
|
+
approvals_reject_p.set_defaults(
|
|
2164
|
+
func=lambda a: _run_approvals_reject(a.request_id, getattr(a, "notes", ""))
|
|
2165
|
+
)
|
|
2166
|
+
approvals_watch_p = approvals_sub.add_parser(
|
|
2167
|
+
"watch", help="Live-updating table of pending approvals (10s refresh)"
|
|
2168
|
+
)
|
|
2169
|
+
approvals_watch_p.set_defaults(func=lambda a: _run_approvals_watch())
|
|
2170
|
+
approvals_parser.set_defaults(
|
|
2171
|
+
approvals_cmd="list", func=lambda a: _run_approvals_list()
|
|
2172
|
+
)
|
|
2173
|
+
|
|
2174
|
+
tui_parser = subparsers.add_parser(
|
|
2175
|
+
"tui",
|
|
2176
|
+
help="Launch terminal UI",
|
|
2177
|
+
description="Interactive dashboard for runs, memory, and analytics.",
|
|
2178
|
+
epilog="""
|
|
2179
|
+
Examples:
|
|
2180
|
+
devsper tui
|
|
2181
|
+
""",
|
|
2182
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2183
|
+
)
|
|
2184
|
+
tui_parser.set_defaults(func=lambda a: _run_tui())
|
|
2185
|
+
|
|
2186
|
+
research_parser = subparsers.add_parser(
|
|
2187
|
+
"research",
|
|
2188
|
+
help="Run literature review on a directory",
|
|
2189
|
+
description="Run the literature review example on a directory of papers.",
|
|
2190
|
+
epilog="""
|
|
2191
|
+
Examples:
|
|
2192
|
+
devsper research .
|
|
2193
|
+
devsper research ./papers
|
|
2194
|
+
""",
|
|
2195
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2196
|
+
)
|
|
2197
|
+
research_parser.add_argument(
|
|
2198
|
+
"path", nargs="?", default=".", help="Directory with papers (PDF/DOCX)"
|
|
2199
|
+
)
|
|
2200
|
+
research_parser.set_defaults(func=lambda a: _run_research(a.path))
|
|
2201
|
+
|
|
2202
|
+
analyze_parser = subparsers.add_parser(
|
|
2203
|
+
"analyze",
|
|
2204
|
+
help="Analyze a swarm run or repository",
|
|
2205
|
+
description="With a run_id: build run report and optional LLM analysis. With a path: repository analysis.",
|
|
2206
|
+
epilog="""
|
|
2207
|
+
Examples:
|
|
2208
|
+
devsper analyze events_2025-03-09... # run analysis
|
|
2209
|
+
devsper analyze events_xxx --no-ai --json
|
|
2210
|
+
devsper analyze . # repo analysis
|
|
2211
|
+
devsper analyze /path/to/repo
|
|
2212
|
+
""",
|
|
2213
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2214
|
+
)
|
|
2215
|
+
analyze_parser.add_argument(
|
|
2216
|
+
"run_id_or_path",
|
|
2217
|
+
nargs="?",
|
|
2218
|
+
default=None,
|
|
2219
|
+
help="Run ID (e.g. events_...) for run analysis, or path (e.g. .) for repo analysis",
|
|
2220
|
+
)
|
|
2221
|
+
analyze_parser.add_argument(
|
|
2222
|
+
"--no-ai",
|
|
2223
|
+
action="store_true",
|
|
2224
|
+
help="Skip LLM analysis (run analysis only)",
|
|
2225
|
+
)
|
|
2226
|
+
analyze_parser.add_argument(
|
|
2227
|
+
"--json",
|
|
2228
|
+
action="store_true",
|
|
2229
|
+
dest="analyze_json",
|
|
2230
|
+
help="Output RunReport as JSON (run analysis only)",
|
|
2231
|
+
)
|
|
2232
|
+
analyze_parser.set_defaults(func=_run_analyze_dispatch)
|
|
2233
|
+
|
|
2234
|
+
run_analyze_parser = subparsers.add_parser(
|
|
2235
|
+
"run-analyze",
|
|
2236
|
+
help="Analyze a swarm run by run_id",
|
|
2237
|
+
description="Build run report from event log, optional LLM analysis.",
|
|
2238
|
+
epilog="""
|
|
2239
|
+
Examples:
|
|
2240
|
+
devsper run-analyze events_2025-03-09...
|
|
2241
|
+
devsper run-analyze events_2025-03-09... --no-ai
|
|
2242
|
+
devsper run-analyze events_2025-03-09... --json
|
|
2243
|
+
""",
|
|
2244
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2245
|
+
)
|
|
2246
|
+
run_analyze_parser.add_argument("run_id", help="Run ID (e.g. from devsper runs)")
|
|
2247
|
+
run_analyze_parser.add_argument(
|
|
2248
|
+
"--no-ai",
|
|
2249
|
+
action="store_true",
|
|
2250
|
+
help="Skip LLM analysis (stats only, no API call)",
|
|
2251
|
+
)
|
|
2252
|
+
run_analyze_parser.add_argument(
|
|
2253
|
+
"--json",
|
|
2254
|
+
action="store_true",
|
|
2255
|
+
dest="json_output",
|
|
2256
|
+
help="Output raw RunReport as JSON",
|
|
2257
|
+
)
|
|
2258
|
+
run_analyze_parser.set_defaults(
|
|
2259
|
+
func=lambda a: _run_analyze_run(a.run_id, a.no_ai, a.json_output)
|
|
2260
|
+
)
|
|
2261
|
+
|
|
2262
|
+
runs_parser = subparsers.add_parser(
|
|
2263
|
+
"runs",
|
|
2264
|
+
help="List run history or show run summary",
|
|
2265
|
+
description="List recent runs (or filter by --failed). With run_id: same as run-analyze <run_id> --no-ai.",
|
|
2266
|
+
epilog="""
|
|
2267
|
+
Examples:
|
|
2268
|
+
devsper runs
|
|
2269
|
+
devsper runs --limit 10 --failed
|
|
2270
|
+
devsper runs --json
|
|
2271
|
+
devsper runs events_2025-03-09...
|
|
2272
|
+
""",
|
|
2273
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2274
|
+
)
|
|
2275
|
+
runs_parser.add_argument(
|
|
2276
|
+
"run_id",
|
|
2277
|
+
nargs="?",
|
|
2278
|
+
default=None,
|
|
2279
|
+
help="If given: show report for this run (no AI, same as run-analyze <run_id> --no-ai)",
|
|
2280
|
+
)
|
|
2281
|
+
runs_parser.add_argument(
|
|
2282
|
+
"--limit",
|
|
2283
|
+
"-n",
|
|
2284
|
+
type=int,
|
|
2285
|
+
default=20,
|
|
2286
|
+
help="Max runs to list (default 20)",
|
|
2287
|
+
)
|
|
2288
|
+
runs_parser.add_argument(
|
|
2289
|
+
"--failed",
|
|
2290
|
+
action="store_true",
|
|
2291
|
+
help="Only list runs with failed_tasks > 0",
|
|
2292
|
+
)
|
|
2293
|
+
runs_parser.add_argument(
|
|
2294
|
+
"--json",
|
|
2295
|
+
action="store_true",
|
|
2296
|
+
dest="runs_json",
|
|
2297
|
+
help="Output runs list as JSON",
|
|
2298
|
+
)
|
|
2299
|
+
runs_parser.set_defaults(func=_run_runs)
|
|
2300
|
+
|
|
2301
|
+
memory_parser = subparsers.add_parser(
|
|
2302
|
+
"memory",
|
|
2303
|
+
help="List memory or consolidate",
|
|
2304
|
+
description="List stored memory entries or consolidate similar records.",
|
|
2305
|
+
epilog="""
|
|
2306
|
+
Examples:
|
|
2307
|
+
devsper memory
|
|
2308
|
+
devsper memory -n 50
|
|
2309
|
+
devsper memory consolidate [--dry-run] [--min-cluster-size 3]
|
|
2310
|
+
""",
|
|
2311
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2312
|
+
)
|
|
2313
|
+
memory_parser.add_argument(
|
|
2314
|
+
"--limit", "-n", type=int, default=20, help="Max entries to show (for list)"
|
|
2315
|
+
)
|
|
2316
|
+
memory_sub = memory_parser.add_subparsers(
|
|
2317
|
+
dest="memory_cmd", help="memory subcommand"
|
|
2318
|
+
)
|
|
2319
|
+
memory_list_p = memory_sub.add_parser("list", help="List memory entries (default)")
|
|
2320
|
+
memory_list_p.add_argument(
|
|
2321
|
+
"--limit", "-n", type=int, default=20, help="Max entries"
|
|
2322
|
+
)
|
|
2323
|
+
memory_list_p.set_defaults(func=lambda a: _run_memory(getattr(a, "limit", 20)))
|
|
2324
|
+
memory_parser.set_defaults(
|
|
2325
|
+
memory_cmd="list", func=lambda a: _run_memory(getattr(a, "limit", 20))
|
|
2326
|
+
)
|
|
2327
|
+
memory_consolidate_p = memory_sub.add_parser(
|
|
2328
|
+
"consolidate", help="Cluster and summarize similar memories"
|
|
2329
|
+
)
|
|
2330
|
+
memory_consolidate_p.add_argument(
|
|
2331
|
+
"--dry-run", action="store_true", help="Preview without writing"
|
|
2332
|
+
)
|
|
2333
|
+
memory_consolidate_p.add_argument(
|
|
2334
|
+
"--min-cluster-size",
|
|
2335
|
+
type=int,
|
|
2336
|
+
default=3,
|
|
2337
|
+
help="Min records per cluster (default 3)",
|
|
2338
|
+
)
|
|
2339
|
+
memory_consolidate_p.set_defaults(
|
|
2340
|
+
func=lambda a: _run_memory_consolidate(
|
|
2341
|
+
getattr(a, "dry_run", False), getattr(a, "min_cluster_size", 3)
|
|
2342
|
+
)
|
|
2343
|
+
)
|
|
2344
|
+
|
|
2345
|
+
synthesize_parser = subparsers.add_parser(
|
|
2346
|
+
"synthesize",
|
|
2347
|
+
help="Cross-run synthesis",
|
|
2348
|
+
description="Answer a question using all memory (and optional knowledge graph) across runs.",
|
|
2349
|
+
epilog="""
|
|
2350
|
+
Examples:
|
|
2351
|
+
devsper synthesize "What have I learned about rate limiting in APIs?"
|
|
2352
|
+
devsper synthesize "Summarize findings about transformer architectures" --no-kg
|
|
2353
|
+
devsper synthesize "What datasets have I worked with?" --json
|
|
2354
|
+
devsper synthesize "Recent findings" --since 2025-01-01
|
|
2355
|
+
""",
|
|
2356
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2357
|
+
)
|
|
2358
|
+
synthesize_parser.add_argument("query", help="Question to synthesize from memory")
|
|
2359
|
+
synthesize_parser.add_argument(
|
|
2360
|
+
"--no-kg", action="store_true", help="Skip knowledge graph, use memory only"
|
|
2361
|
+
)
|
|
2362
|
+
synthesize_parser.add_argument(
|
|
2363
|
+
"--json",
|
|
2364
|
+
action="store_true",
|
|
2365
|
+
help="Output JSON: query, sources_used, run_ids, answer",
|
|
2366
|
+
)
|
|
2367
|
+
synthesize_parser.add_argument(
|
|
2368
|
+
"--since", metavar="DATE", help="Filter memory to records after date (ISO)"
|
|
2369
|
+
)
|
|
2370
|
+
synthesize_parser.set_defaults(
|
|
2371
|
+
func=lambda a: _run_synthesize(
|
|
2372
|
+
a.query, a.no_kg, a.json, getattr(a, "since", None)
|
|
2373
|
+
)
|
|
2374
|
+
)
|
|
2375
|
+
|
|
2376
|
+
query_parser = subparsers.add_parser(
|
|
2377
|
+
"query",
|
|
2378
|
+
help="Query knowledge graph",
|
|
2379
|
+
description="Search entities and relationships in the knowledge graph built from memory.",
|
|
2380
|
+
epilog="""
|
|
2381
|
+
Examples:
|
|
2382
|
+
devsper query "diffusion models"
|
|
2383
|
+
devsper query "machine learning"
|
|
2384
|
+
""",
|
|
2385
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2386
|
+
)
|
|
2387
|
+
query_parser.add_argument(
|
|
2388
|
+
"query_text",
|
|
2389
|
+
nargs="?",
|
|
2390
|
+
default="",
|
|
2391
|
+
help="Query string (e.g. diffusion models)",
|
|
2392
|
+
)
|
|
2393
|
+
query_parser.set_defaults(func=lambda a: _run_query(a.query_text))
|
|
2394
|
+
|
|
2395
|
+
workflow_parser = subparsers.add_parser(
|
|
2396
|
+
"workflow",
|
|
2397
|
+
help="List, validate, or run workflows",
|
|
2398
|
+
description="List, validate, or run workflows from workflow.devsper.toml.",
|
|
2399
|
+
epilog="""
|
|
2400
|
+
Examples:
|
|
2401
|
+
devsper workflow list
|
|
2402
|
+
devsper workflow validate my_workflow
|
|
2403
|
+
devsper workflow run my_workflow --input text="hello"
|
|
2404
|
+
devsper workflow my_workflow
|
|
2405
|
+
""",
|
|
2406
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2407
|
+
)
|
|
2408
|
+
workflow_parser.add_argument(
|
|
2409
|
+
"first",
|
|
2410
|
+
nargs="?",
|
|
2411
|
+
help="Subcommand: list | validate | run; or workflow name to run",
|
|
2412
|
+
)
|
|
2413
|
+
workflow_parser.add_argument(
|
|
2414
|
+
"second",
|
|
2415
|
+
nargs="?",
|
|
2416
|
+
help="Workflow name (for validate/run)",
|
|
2417
|
+
)
|
|
2418
|
+
workflow_parser.add_argument(
|
|
2419
|
+
"--input",
|
|
2420
|
+
action="append",
|
|
2421
|
+
metavar="KEY=VALUE",
|
|
2422
|
+
help="Runtime input (repeat for multiple). Used with run.",
|
|
2423
|
+
)
|
|
2424
|
+
workflow_parser.set_defaults(func=_workflow_dispatch)
|
|
2425
|
+
|
|
2426
|
+
init_parser = subparsers.add_parser(
|
|
2427
|
+
"init",
|
|
2428
|
+
help="Set up a new project",
|
|
2429
|
+
description="Create devsper.toml, configure providers, and optionally store API keys securely.",
|
|
2430
|
+
epilog="""
|
|
2431
|
+
Examples:
|
|
2432
|
+
devsper init
|
|
2433
|
+
devsper init -y
|
|
2434
|
+
""",
|
|
2435
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2436
|
+
)
|
|
2437
|
+
init_parser.add_argument(
|
|
2438
|
+
"--no-interactive",
|
|
2439
|
+
"-y",
|
|
2440
|
+
action="store_true",
|
|
2441
|
+
help="Use defaults without prompting (e.g. for CI)",
|
|
2442
|
+
)
|
|
2443
|
+
init_parser.set_defaults(func=lambda a: _run_init(a.no_interactive))
|
|
2444
|
+
|
|
2445
|
+
doctor_parser = subparsers.add_parser(
|
|
2446
|
+
"doctor",
|
|
2447
|
+
help="Verify environment",
|
|
2448
|
+
description="Check API keys, config files, tool registry, and security (e.g. plaintext keys in TOML).",
|
|
2449
|
+
epilog="""
|
|
2450
|
+
Examples:
|
|
2451
|
+
devsper doctor
|
|
2452
|
+
""",
|
|
2453
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2454
|
+
)
|
|
2455
|
+
doctor_parser.set_defaults(func=lambda a: _run_doctor())
|
|
2456
|
+
|
|
2457
|
+
mcp_parser = subparsers.add_parser(
|
|
2458
|
+
"mcp",
|
|
2459
|
+
help="MCP server commands (list, test, add)",
|
|
2460
|
+
description="List configured MCP servers, test connection, or add a server interactively.",
|
|
2461
|
+
)
|
|
2462
|
+
mcp_sub = mcp_parser.add_subparsers(dest="mcp_cmd", help="Subcommand")
|
|
2463
|
+
mcp_list_p = mcp_sub.add_parser("list", help="List MCP servers and tool counts")
|
|
2464
|
+
mcp_list_p.set_defaults(func=lambda a: _run_mcp_list())
|
|
2465
|
+
mcp_test_p = mcp_sub.add_parser("test", help="Test connection to an MCP server")
|
|
2466
|
+
mcp_test_p.add_argument("server_name", help="Server name from config")
|
|
2467
|
+
mcp_test_p.set_defaults(func=lambda a: _run_mcp_test(a.server_name))
|
|
2468
|
+
mcp_add_p = mcp_sub.add_parser(
|
|
2469
|
+
"add", help="Interactively add an MCP server to devsper.toml"
|
|
2470
|
+
)
|
|
2471
|
+
mcp_add_p.set_defaults(func=lambda a: _run_mcp_add())
|
|
2472
|
+
mcp_parser.set_defaults(mcp_cmd="list", func=lambda a: _run_mcp_list())
|
|
2473
|
+
|
|
2474
|
+
a2a_parser = subparsers.add_parser(
|
|
2475
|
+
"a2a",
|
|
2476
|
+
help="A2A agent commands (serve, discover, call)",
|
|
2477
|
+
description="Run A2A server, discover external agents, or call an agent with a task.",
|
|
2478
|
+
)
|
|
2479
|
+
a2a_sub = a2a_parser.add_subparsers(dest="a2a_cmd", help="Subcommand")
|
|
2480
|
+
a2a_serve_p = a2a_sub.add_parser("serve", help="Start A2A server")
|
|
2481
|
+
a2a_serve_p.add_argument(
|
|
2482
|
+
"--port", type=int, default=None, help="Port (default: config or 8080)"
|
|
2483
|
+
)
|
|
2484
|
+
a2a_serve_p.set_defaults(func=lambda a: _run_a2a_serve(getattr(a, "port", None)))
|
|
2485
|
+
a2a_discover_p = a2a_sub.add_parser(
|
|
2486
|
+
"discover", help="Fetch AgentCard from URL, print skills"
|
|
2487
|
+
)
|
|
2488
|
+
a2a_discover_p.add_argument("url", help="Agent URL (e.g. http://localhost:8080)")
|
|
2489
|
+
a2a_discover_p.set_defaults(func=lambda a: _run_a2a_discover(a.url))
|
|
2490
|
+
a2a_call_p = a2a_sub.add_parser(
|
|
2491
|
+
"call", help="Send task to external A2A agent, stream output"
|
|
2492
|
+
)
|
|
2493
|
+
a2a_call_p.add_argument("url", help="Agent URL")
|
|
2494
|
+
a2a_call_p.add_argument("task", help="Task text to send")
|
|
2495
|
+
a2a_call_p.set_defaults(func=lambda a: _run_a2a_call(a.url, a.task))
|
|
2496
|
+
a2a_parser.set_defaults(a2a_cmd=None, func=lambda a: a2a_parser.print_help() or 0)
|
|
2497
|
+
|
|
2498
|
+
node_parser = subparsers.add_parser(
|
|
2499
|
+
"node",
|
|
2500
|
+
help="Distributed node commands (v1.10)",
|
|
2501
|
+
description="Start a node, query status, drain workers, stream events.",
|
|
2502
|
+
)
|
|
2503
|
+
node_sub = node_parser.add_subparsers(dest="node_cmd", help="Subcommand")
|
|
2504
|
+
node_start_p = node_sub.add_parser("start", help="Start a node in the foreground")
|
|
2505
|
+
node_start_p.add_argument(
|
|
2506
|
+
"--role",
|
|
2507
|
+
choices=["controller", "worker", "hybrid"],
|
|
2508
|
+
default="hybrid",
|
|
2509
|
+
help="Node role",
|
|
2510
|
+
)
|
|
2511
|
+
node_start_p.add_argument("--port", type=int, default=None, help="RPC port")
|
|
2512
|
+
node_start_p.add_argument(
|
|
2513
|
+
"--workers", type=int, default=None, help="Max workers (worker node)"
|
|
2514
|
+
)
|
|
2515
|
+
node_start_p.add_argument(
|
|
2516
|
+
"--tags", type=str, default="", help="Comma-separated tags e.g. gpu,high-mem"
|
|
2517
|
+
)
|
|
2518
|
+
node_start_p.set_defaults(func=lambda a: _run_node_start(a))
|
|
2519
|
+
node_status_p = node_sub.add_parser("status", help="Query controller status")
|
|
2520
|
+
node_status_p.add_argument(
|
|
2521
|
+
"--controller-url", type=str, default=None, help="Controller RPC URL"
|
|
2522
|
+
)
|
|
2523
|
+
node_status_p.set_defaults(func=lambda a: _run_node_status(a))
|
|
2524
|
+
node_workers_p = node_sub.add_parser("workers", help="List workers from controller")
|
|
2525
|
+
node_workers_p.add_argument("--controller-url", type=str, default=None)
|
|
2526
|
+
node_workers_p.set_defaults(func=lambda a: _run_node_workers(a))
|
|
2527
|
+
node_drain_p = node_sub.add_parser("drain", help="Drain a worker (stop new tasks)")
|
|
2528
|
+
node_drain_p.add_argument("node_id", help="Worker node ID")
|
|
2529
|
+
node_drain_p.add_argument("--controller-url", type=str, default=None)
|
|
2530
|
+
node_drain_p.set_defaults(func=lambda a: _run_node_drain(a))
|
|
2531
|
+
node_logs_p = node_sub.add_parser("logs", help="Stream events from controller")
|
|
2532
|
+
node_logs_p.add_argument(
|
|
2533
|
+
"--follow", action="store_true", help="Keep connection open"
|
|
2534
|
+
)
|
|
2535
|
+
node_logs_p.add_argument("--controller-url", type=str, default=None)
|
|
2536
|
+
node_logs_p.set_defaults(func=lambda a: _run_node_logs(a))
|
|
2537
|
+
|
|
2538
|
+
graph_parser = subparsers.add_parser(
|
|
2539
|
+
"graph",
|
|
2540
|
+
help="Export task DAG as Mermaid",
|
|
2541
|
+
description="Export the task dependency graph for a run as a Mermaid diagram.",
|
|
2542
|
+
epilog="""
|
|
2543
|
+
Examples:
|
|
2544
|
+
devsper graph
|
|
2545
|
+
devsper graph abc123-run-id
|
|
2546
|
+
""",
|
|
2547
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2548
|
+
)
|
|
2549
|
+
graph_parser.add_argument(
|
|
2550
|
+
"run_id",
|
|
2551
|
+
nargs="?",
|
|
2552
|
+
default=None,
|
|
2553
|
+
help="Run ID (default: latest)",
|
|
2554
|
+
)
|
|
2555
|
+
graph_parser.set_defaults(func=lambda a: _run_graph(a.run_id))
|
|
2556
|
+
|
|
2557
|
+
analytics_parser = subparsers.add_parser(
|
|
2558
|
+
"analytics",
|
|
2559
|
+
help="Show tool usage analytics",
|
|
2560
|
+
description="Display tool usage stats: count, success rate, and latency.",
|
|
2561
|
+
epilog="""
|
|
2562
|
+
Examples:
|
|
2563
|
+
devsper analytics
|
|
2564
|
+
""",
|
|
2565
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2566
|
+
)
|
|
2567
|
+
analytics_parser.set_defaults(func=lambda a: _run_analytics())
|
|
2568
|
+
|
|
2569
|
+
tools_parser = subparsers.add_parser(
|
|
2570
|
+
"tools",
|
|
2571
|
+
help="List tool reliability scores or reset history",
|
|
2572
|
+
description="List registered tools with reliability scores (excellent/good/degraded/poor), or reset score history.",
|
|
2573
|
+
epilog="""
|
|
2574
|
+
Examples:
|
|
2575
|
+
devsper tools
|
|
2576
|
+
devsper tools --category research
|
|
2577
|
+
devsper tools --poor
|
|
2578
|
+
devsper tools reset my_tool
|
|
2579
|
+
devsper tools reset --all
|
|
2580
|
+
""",
|
|
2581
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2582
|
+
)
|
|
2583
|
+
tools_parser.add_argument(
|
|
2584
|
+
"tools_subcommand",
|
|
2585
|
+
nargs="?",
|
|
2586
|
+
default="list",
|
|
2587
|
+
choices=["list", "reset"],
|
|
2588
|
+
help="list (default) | reset",
|
|
2589
|
+
)
|
|
2590
|
+
tools_parser.add_argument(
|
|
2591
|
+
"tool_name",
|
|
2592
|
+
nargs="?",
|
|
2593
|
+
help="Tool name (for reset)",
|
|
2594
|
+
)
|
|
2595
|
+
tools_parser.add_argument(
|
|
2596
|
+
"--category",
|
|
2597
|
+
metavar="NAME",
|
|
2598
|
+
help="Filter by category",
|
|
2599
|
+
)
|
|
2600
|
+
tools_parser.add_argument(
|
|
2601
|
+
"--poor",
|
|
2602
|
+
action="store_true",
|
|
2603
|
+
help="Show only tools with score < 0.40",
|
|
2604
|
+
)
|
|
2605
|
+
tools_parser.add_argument(
|
|
2606
|
+
"--all",
|
|
2607
|
+
dest="reset_all",
|
|
2608
|
+
action="store_true",
|
|
2609
|
+
help="Wipe all scores (with confirmation; use with reset)",
|
|
2610
|
+
)
|
|
2611
|
+
tools_parser.set_defaults(func=_run_tools)
|
|
2612
|
+
|
|
2613
|
+
cache_parser = subparsers.add_parser(
|
|
2614
|
+
"cache",
|
|
2615
|
+
help="Task result cache",
|
|
2616
|
+
description="View or clear the task result cache.",
|
|
2617
|
+
epilog="""
|
|
2618
|
+
Examples:
|
|
2619
|
+
devsper cache stats
|
|
2620
|
+
devsper cache clear
|
|
2621
|
+
devsper cache tune [--threshold 0.90]
|
|
2622
|
+
""",
|
|
2623
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2624
|
+
)
|
|
2625
|
+
cache_parser.add_argument(
|
|
2626
|
+
"subcommand",
|
|
2627
|
+
nargs="?",
|
|
2628
|
+
default="stats",
|
|
2629
|
+
choices=["stats", "clear", "tune"],
|
|
2630
|
+
help="stats | clear | tune",
|
|
2631
|
+
)
|
|
2632
|
+
cache_parser.add_argument(
|
|
2633
|
+
"--threshold",
|
|
2634
|
+
type=float,
|
|
2635
|
+
default=None,
|
|
2636
|
+
help="Similarity threshold for tune (e.g. 0.90)",
|
|
2637
|
+
)
|
|
2638
|
+
cache_parser.set_defaults(
|
|
2639
|
+
func=lambda a: _run_cache(a.subcommand, getattr(a, "threshold", None))
|
|
2640
|
+
)
|
|
2641
|
+
|
|
2642
|
+
build_parser = subparsers.add_parser(
|
|
2643
|
+
"build",
|
|
2644
|
+
help="Build an app from a description",
|
|
2645
|
+
description="Autonomous application builder: generate a working repo from an app description.",
|
|
2646
|
+
epilog="""
|
|
2647
|
+
Examples:
|
|
2648
|
+
devsper build "fastapi todo app"
|
|
2649
|
+
devsper build "CLI tool for CSV analysis" -o ./csv-tool
|
|
2650
|
+
""",
|
|
2651
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2652
|
+
)
|
|
2653
|
+
build_parser.add_argument(
|
|
2654
|
+
"app_idea",
|
|
2655
|
+
nargs="?",
|
|
2656
|
+
default="fastapi todo app",
|
|
2657
|
+
help="App description (e.g. 'fastapi todo app')",
|
|
2658
|
+
)
|
|
2659
|
+
build_parser.add_argument(
|
|
2660
|
+
"-o",
|
|
2661
|
+
"--output",
|
|
2662
|
+
default="./build_output",
|
|
2663
|
+
help="Output directory for the generated repo (default: ./build_output)",
|
|
2664
|
+
)
|
|
2665
|
+
build_parser.set_defaults(func=lambda a: _run_build(a.app_idea, a.output))
|
|
2666
|
+
|
|
2667
|
+
replay_parser = subparsers.add_parser(
|
|
2668
|
+
"replay",
|
|
2669
|
+
help="Replay a swarm run",
|
|
2670
|
+
description="Reconstruct swarm execution from the event log (deterministic replay).",
|
|
2671
|
+
epilog="""
|
|
2672
|
+
Examples:
|
|
2673
|
+
devsper replay
|
|
2674
|
+
devsper replay abc123-run-id
|
|
2675
|
+
""",
|
|
2676
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
2677
|
+
)
|
|
2678
|
+
replay_parser.add_argument(
|
|
2679
|
+
"run_id",
|
|
2680
|
+
nargs="?",
|
|
2681
|
+
default="",
|
|
2682
|
+
help="Run ID (from events log filename); list recent if omitted",
|
|
2683
|
+
)
|
|
2684
|
+
replay_parser.add_argument(
|
|
2685
|
+
"--events-dir",
|
|
2686
|
+
default=None,
|
|
2687
|
+
help="Events directory (default: config)",
|
|
2688
|
+
)
|
|
2689
|
+
replay_parser.set_defaults(func=lambda a: _run_replay(a.run_id, a.events_dir))
|
|
2690
|
+
|
|
2691
|
+
# --- `devsper credentials`: secure credential management (OS keychain) ---
credentials_parser = subparsers.add_parser(
    "credentials",
    help="Manage API keys and credentials",
    description="Store, list, and migrate credentials securely (OS keychain only).",
    epilog="""
Examples:
devsper credentials set openai api_key
devsper credentials set azure endpoint \"https://.../openai/v1\"
devsper credentials list
devsper credentials migrate
devsper credentials export azure # print env KEY=value for sourcing
devsper credentials delete openai api_key
""",
    formatter_class=argparse.RawDescriptionHelpFormatter,
)
# The subcommand is modelled as optional positionals rather than
# add_subparsers(); validation/dispatch happens inside _run_credentials.
credentials_parser.add_argument(
    "credentials_subcommand",
    nargs="?",
    choices=["set", "list", "delete", "migrate", "export"],
    help="set | list | delete | migrate | export",
)
credentials_parser.add_argument(
    "provider",
    nargs="?",
    help="Provider (e.g. openai, anthropic)",
)
credentials_parser.add_argument(
    "key",
    nargs="?",
    help="Key name (e.g. api_key)",
)
credentials_parser.add_argument(
    "value",
    nargs="?",
    help="Value (for set only). Omit to be prompted, or pipe: echo 'val' | devsper credentials set azure endpoint",
)
credentials_parser.set_defaults(func=lambda a: _run_credentials(a))
|
|
2728
|
+
|
|
2729
|
+
# --- `devsper reg`: registry command family (login/publish/search/...) ---
reg_parser = subparsers.add_parser(
    "reg",
    help="Registry commands: login, publish, search, etc.",
    description="Login to registry, publish plugins, search, etc.",
    epilog="""
Examples:
devsper reg login
devsper reg publish
devsper reg search <query>
""",
    formatter_class=argparse.RawDescriptionHelpFormatter,
)
reg_sub = reg_parser.add_subparsers(dest="reg_cmd", help="Subcommand")

reg_login_p = reg_sub.add_parser("login", help="Login to devsper Registry")
reg_login_p.set_defaults(reg_cmd="login")

reg_logout_p = reg_sub.add_parser("logout", help="Logout of devsper Registry")
reg_logout_p.set_defaults(reg_cmd="logout")

reg_whoami_p = reg_sub.add_parser("whoami", help="Show current logged in user")
reg_whoami_p.set_defaults(reg_cmd="whoami")

reg_publish_p = reg_sub.add_parser("publish", help="Publish a plugin")
reg_publish_p.add_argument("--dir", default=".", help="Plugin directory")
reg_publish_p.add_argument(
    "--skip-build", action="store_true", help="Skip building dist"
)
reg_publish_p.add_argument("--dry-run", action="store_true", help="Dry run")
reg_publish_p.set_defaults(reg_cmd="publish")

reg_search_p = reg_sub.add_parser("search", help="Search for plugins")
reg_search_p.add_argument("query", help="Search query")
reg_search_p.add_argument("--verified", action="store_true", help="Verified only")
reg_search_p.add_argument("--limit", type=int, default=10, help="Limit")
reg_search_p.set_defaults(reg_cmd="search")

reg_info_p = reg_sub.add_parser("info", help="Get plugin info")
reg_info_p.add_argument("package", help="Package name")
reg_info_p.set_defaults(reg_cmd="info")

reg_test_p = reg_sub.add_parser("test", help="Test plugin for publishing")
reg_test_p.add_argument("--dir", default=".", help="Plugin directory")
reg_test_p.set_defaults(reg_cmd="test")

reg_versions_p = reg_sub.add_parser("versions", help="List versions")
reg_versions_p.add_argument("package", help="Package name")
reg_versions_p.set_defaults(reg_cmd="versions")

reg_yank_p = reg_sub.add_parser("yank", help="Yank a version")
reg_yank_p.add_argument("package", help="Package name")
reg_yank_p.add_argument("version", help="Version")
reg_yank_p.add_argument("--reason", required=True, help="Reason")
reg_yank_p.set_defaults(reg_cmd="yank")

# A single dispatcher routes on args.reg_cmd for every subcommand above.
reg_parser.set_defaults(func=_run_reg_dispatch)
|
|
2785
|
+
|
|
2786
|
+
# --- `devsper plugins`: alias for the `reg` command family ---
plugins_parser = subparsers.add_parser(
    "plugins",
    help="Alias for reg commands",
    description="Alias for reg commands",
)
plugins_sub = plugins_parser.add_subparsers(dest="plugins_cmd", help="Subcommand")
plugins_sub.add_parser("login", help="Alias for reg login").set_defaults(
    plugins_cmd="login"
)
p_pub = plugins_sub.add_parser("publish", help="Alias for reg publish")
# NOTE: unlike `reg publish` (--dir option), the alias takes the directory as
# an optional positional.
p_pub.add_argument("dir", nargs="?", default=".")
p_pub.add_argument("--skip-build", action="store_true")
p_pub.add_argument("--dry-run", action="store_true")
p_pub.set_defaults(plugins_cmd="publish")
plugins_parser.set_defaults(func=_run_plugins_dispatch)
|
|
2801
|
+
|
|
2802
|
+
# --- `devsper completion`: emit a shell completion script ---
# Fix: the description previously said "bash or zsh" while the `shell`
# argument also accepts "fish"; the text now matches the accepted choices.
completion_parser = subparsers.add_parser(
    "completion",
    help="Generate shell completion script",
    description="Print shell completion script for bash, zsh, or fish. Add to your shell config to enable tab completion.",
    epilog="""
Examples:
# Bash - add to ~/.bashrc or ~/.bash_profile
eval "$(devsper completion bash)"

# Zsh - add to ~/.zshrc
eval "$(devsper completion zsh)"

# Or install to a file (bash)
devsper completion bash > ~/.local/share/bash-completion/completions/devsper
""",
    formatter_class=argparse.RawDescriptionHelpFormatter,
)
completion_parser.add_argument(
    "shell",
    choices=["bash", "zsh", "fish"],
    help="Shell type (bash, zsh, or fish)",
)
# The handler receives the root parser as well — presumably so it can
# enumerate commands/options for the script; confirm against _run_completion.
completion_parser.set_defaults(func=lambda a: _run_completion(parser, a))
|
|
2825
|
+
|
|
2826
|
+
# --- `devsper upgrade`: self-update from PyPI ---
upgrade_parser = subparsers.add_parser(
    "upgrade",
    help="Check for updates and upgrade",
    description="Check for updates and upgrade devsper from PyPI.",
    epilog="""
Examples:
devsper upgrade
devsper upgrade --check
devsper upgrade -y
""",
    formatter_class=argparse.RawDescriptionHelpFormatter,
)
upgrade_parser.add_argument(
    "--check",
    action="store_true",
    help="Only check and display if update is available",
)
upgrade_parser.add_argument(
    "-y",
    "--yes",
    action="store_true",
    help="Skip confirmation prompt",
)
upgrade_parser.add_argument(
    "--version",
    metavar="VERSION",
    default=None,  # None -> latest available version
    help="Install a specific version (e.g. 1.2.3)",
)
upgrade_parser.add_argument(
    "--dry-run",
    action="store_true",
    help="Show what would happen without installing",
)
upgrade_parser.set_defaults(func=_run_upgrade)
|
|
2861
|
+
|
|
2862
|
+
# --- `devsper checkpoint`: list checkpoints or restore a run ---
checkpoint_parser = subparsers.add_parser(
    "checkpoint",
    help="List checkpoints or restore a run",
    description="List checkpoint files or restore a run from checkpoint and resume.",
)
checkpoint_sub = checkpoint_parser.add_subparsers(
    dest="checkpoint_cmd", help="Subcommand"
)
checkpoint_list_p = checkpoint_sub.add_parser(
    "list", help="List all checkpoint files"
)
checkpoint_list_p.set_defaults(func=_run_checkpoint_dispatch)
checkpoint_restore_p = checkpoint_sub.add_parser(
    "restore", help="Restore run from checkpoint"
)
checkpoint_restore_p.add_argument("run_id", help="Run ID to restore")
checkpoint_restore_p.set_defaults(func=_run_checkpoint_dispatch)
# Bare `devsper checkpoint` (no subcommand) behaves like `checkpoint list`.
checkpoint_parser.set_defaults(checkpoint_cmd="list", func=_run_checkpoint_dispatch)
|
|
2880
|
+
|
|
2881
|
+
# --- `devsper audit`: view/export/verify audit logs ---
audit_parser = subparsers.add_parser(
    "audit",
    help="View or export audit log for a run",
    description="Print audit log as table, export to CSV/JSONL, or verify chain integrity.",
)
audit_parser.add_argument(
    "run_id", nargs="?", default=None, help="Run ID (e.g. events_...)"
)
audit_parser.add_argument(
    "--export", choices=["jsonl", "csv", "siem"], default=None, help="Export format"
)
# `verify` is the only true subcommand; the table/export paths use the
# positional run_id above.  NOTE(review): `run_id` appears both on the parent
# and on `verify` — argparse resolves this per-invocation; confirm the
# dispatcher handles both shapes.
audit_sub = audit_parser.add_subparsers(dest="audit_cmd", help="Subcommand")
audit_verify_p = audit_sub.add_parser(
    "verify", help="Verify audit log chain integrity"
)
audit_verify_p.add_argument("run_id", help="Run ID to verify")
audit_verify_p.set_defaults(audit_cmd="verify")
audit_parser.set_defaults(func=_run_audit_dispatch)
|
|
2899
|
+
|
|
2900
|
+
# --- `devsper explain`: decision records for a run or single task ---
explain_parser = subparsers.add_parser(
    "explain",
    help="Show decision records for a run or task",
    description="Print decision tree and rationale for agent actions.",
)
explain_parser.add_argument("run_id", help="Run ID")
explain_parser.add_argument(
    "task_id", nargs="?", default=None, help="Optional task ID for single task"
)
explain_parser.set_defaults(func=_run_explain)
|
|
2910
|
+
|
|
2911
|
+
# --- `devsper simulate`: planner/scheduler dry run (no LLM, no tools) ---
simulate_parser = subparsers.add_parser(
    "simulate",
    help="Dry-run planning without LLM or tool execution",
    description="Run planner and scheduler only; output task list and cost estimate.",
)
simulate_parser.add_argument("task", help="Root task description")
simulate_parser.add_argument(
    "--cost", action="store_true", help="Print cost estimate only"
)
simulate_parser.set_defaults(func=_run_simulate)
|
|
2921
|
+
|
|
2922
|
+
# --- `devsper health`: readiness probe; exit code reflects health ---
health_parser = subparsers.add_parser(
    "health",
    help="Health and readiness check",
    description="Run health checks (bus, memory, tools, KG, checkpoint dir). Exit 0 if healthy, 1 otherwise.",
)
health_parser.set_defaults(func=_run_health)
|
|
2928
|
+
|
|
2929
|
+
_load_project_dotenv()
|
|
2930
|
+
args = parser.parse_args()
|
|
2931
|
+
# Apply global CLI options
|
|
2932
|
+
no_color = getattr(args, "no_color", False) or os.environ.get("NO_COLOR")
|
|
2933
|
+
plain = getattr(args, "plain", False) or (not sys.stdout.isatty())
|
|
2934
|
+
try:
|
|
2935
|
+
from devsper.cli.ui import reconfigure_console, set_log_level
|
|
2936
|
+
|
|
2937
|
+
reconfigure_console(
|
|
2938
|
+
no_color=bool(no_color), force_terminal=False if plain else None
|
|
2939
|
+
)
|
|
2940
|
+
if getattr(args, "trace", False):
|
|
2941
|
+
set_log_level("trace")
|
|
2942
|
+
elif getattr(args, "debug", False):
|
|
2943
|
+
set_log_level("debug")
|
|
2944
|
+
elif getattr(args, "quiet", False):
|
|
2945
|
+
set_log_level("warn")
|
|
2946
|
+
else:
|
|
2947
|
+
set_log_level("info")
|
|
2948
|
+
except ImportError:
|
|
2949
|
+
pass
|
|
2950
|
+
if not args.command:
|
|
2951
|
+
return _run_tui()
|
|
2952
|
+
return args.func(args)
|
|
2953
|
+
|
|
2954
|
+
|
|
2955
|
+
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
|