devsper 2.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devsper/__init__.py +14 -0
- devsper/agents/a2a/__init__.py +27 -0
- devsper/agents/a2a/client.py +126 -0
- devsper/agents/a2a/discovery.py +24 -0
- devsper/agents/a2a/server.py +128 -0
- devsper/agents/a2a/tool_adapter.py +68 -0
- devsper/agents/a2a/types.py +49 -0
- devsper/agents/agent.py +602 -0
- devsper/agents/critic.py +80 -0
- devsper/agents/message_bus.py +124 -0
- devsper/agents/roles.py +181 -0
- devsper/agents/run_agent.py +78 -0
- devsper/analytics/__init__.py +5 -0
- devsper/analytics/tool_analytics.py +78 -0
- devsper/audit/__init__.py +5 -0
- devsper/audit/logger.py +214 -0
- devsper/bus/__init__.py +29 -0
- devsper/bus/backends/__init__.py +5 -0
- devsper/bus/backends/base.py +38 -0
- devsper/bus/backends/memory.py +55 -0
- devsper/bus/backends/redis.py +146 -0
- devsper/bus/message.py +56 -0
- devsper/bus/schema_version.py +3 -0
- devsper/bus/topics.py +19 -0
- devsper/cache/__init__.py +6 -0
- devsper/cache/embedding_index.py +98 -0
- devsper/cache/hashing.py +24 -0
- devsper/cache/store.py +153 -0
- devsper/cache/task_cache.py +191 -0
- devsper/cli/__init__.py +6 -0
- devsper/cli/commands/reg.py +733 -0
- devsper/cli/github_oauth.py +157 -0
- devsper/cli/init.py +637 -0
- devsper/cli/main.py +2956 -0
- devsper/cli/run_progress.py +103 -0
- devsper/cli/ui/__init__.py +65 -0
- devsper/cli/ui/components.py +94 -0
- devsper/cli/ui/errors.py +104 -0
- devsper/cli/ui/logging.py +120 -0
- devsper/cli/ui/onboarding.py +102 -0
- devsper/cli/ui/progress.py +43 -0
- devsper/cli/ui/run_view.py +308 -0
- devsper/cli/ui/theme.py +40 -0
- devsper/cluster/__init__.py +29 -0
- devsper/cluster/election.py +84 -0
- devsper/cluster/local.py +97 -0
- devsper/cluster/node_info.py +77 -0
- devsper/cluster/registry.py +71 -0
- devsper/cluster/router.py +117 -0
- devsper/cluster/state_backend.py +105 -0
- devsper/compliance/__init__.py +5 -0
- devsper/compliance/pii.py +147 -0
- devsper/config/__init__.py +52 -0
- devsper/config/config_loader.py +121 -0
- devsper/config/defaults.py +77 -0
- devsper/config/resolver.py +342 -0
- devsper/config/schema.py +237 -0
- devsper/credentials/__init__.py +19 -0
- devsper/credentials/cli.py +197 -0
- devsper/credentials/migration.py +124 -0
- devsper/credentials/store.py +142 -0
- devsper/dashboard/__init__.py +9 -0
- devsper/dashboard/dashboard.py +87 -0
- devsper/dev/__init__.py +25 -0
- devsper/dev/builder.py +195 -0
- devsper/dev/debugger.py +95 -0
- devsper/dev/repo_index.py +138 -0
- devsper/dev/sandbox.py +203 -0
- devsper/dev/scaffold.py +122 -0
- devsper/embeddings/__init__.py +5 -0
- devsper/embeddings/service.py +36 -0
- devsper/explainability/__init__.py +14 -0
- devsper/explainability/decision_tree.py +104 -0
- devsper/explainability/rationale.py +38 -0
- devsper/explainability/simulation.py +56 -0
- devsper/hitl/__init__.py +13 -0
- devsper/hitl/approval.py +160 -0
- devsper/hitl/escalation.py +95 -0
- devsper/intelligence/__init__.py +9 -0
- devsper/intelligence/adaptation.py +88 -0
- devsper/intelligence/analysis/__init__.py +19 -0
- devsper/intelligence/analysis/analyzer.py +71 -0
- devsper/intelligence/analysis/cost_estimator.py +66 -0
- devsper/intelligence/analysis/formatter.py +103 -0
- devsper/intelligence/analysis/run_report.py +402 -0
- devsper/intelligence/learning_engine.py +92 -0
- devsper/intelligence/strategies/__init__.py +23 -0
- devsper/intelligence/strategies/base.py +14 -0
- devsper/intelligence/strategies/code_analysis_strategy.py +33 -0
- devsper/intelligence/strategies/data_science_strategy.py +33 -0
- devsper/intelligence/strategies/document_pipeline_strategy.py +33 -0
- devsper/intelligence/strategies/experiment_strategy.py +33 -0
- devsper/intelligence/strategies/research_strategy.py +34 -0
- devsper/intelligence/strategy_selector.py +84 -0
- devsper/intelligence/synthesis.py +132 -0
- devsper/intelligence/task_optimizer.py +92 -0
- devsper/knowledge/__init__.py +5 -0
- devsper/knowledge/extractor.py +204 -0
- devsper/knowledge/knowledge_graph.py +184 -0
- devsper/knowledge/query.py +285 -0
- devsper/memory/__init__.py +35 -0
- devsper/memory/consolidation.py +138 -0
- devsper/memory/embeddings.py +60 -0
- devsper/memory/memory_index.py +97 -0
- devsper/memory/memory_router.py +62 -0
- devsper/memory/memory_store.py +221 -0
- devsper/memory/memory_types.py +54 -0
- devsper/memory/namespaces.py +45 -0
- devsper/memory/scoring.py +77 -0
- devsper/memory/summarizer.py +52 -0
- devsper/nodes/__init__.py +5 -0
- devsper/nodes/controller.py +449 -0
- devsper/nodes/rpc.py +127 -0
- devsper/nodes/single.py +161 -0
- devsper/nodes/worker.py +506 -0
- devsper/orchestration/__init__.py +19 -0
- devsper/orchestration/meta_planner.py +239 -0
- devsper/orchestration/priority_queue.py +61 -0
- devsper/plugins/__init__.py +19 -0
- devsper/plugins/marketplace/__init__.py +0 -0
- devsper/plugins/plugin_loader.py +70 -0
- devsper/plugins/plugin_registry.py +34 -0
- devsper/plugins/registry.py +83 -0
- devsper/protocols/__init__.py +6 -0
- devsper/providers/__init__.py +17 -0
- devsper/providers/anthropic.py +84 -0
- devsper/providers/base.py +75 -0
- devsper/providers/complexity_router.py +94 -0
- devsper/providers/gemini.py +36 -0
- devsper/providers/github.py +180 -0
- devsper/providers/model_router.py +40 -0
- devsper/providers/openai.py +105 -0
- devsper/providers/router/__init__.py +21 -0
- devsper/providers/router/backends/__init__.py +19 -0
- devsper/providers/router/backends/anthropic_backend.py +111 -0
- devsper/providers/router/backends/custom_backend.py +138 -0
- devsper/providers/router/backends/gemini_backend.py +89 -0
- devsper/providers/router/backends/github_backend.py +165 -0
- devsper/providers/router/backends/ollama_backend.py +104 -0
- devsper/providers/router/backends/openai_backend.py +142 -0
- devsper/providers/router/backends/vllm_backend.py +35 -0
- devsper/providers/router/base.py +60 -0
- devsper/providers/router/factory.py +92 -0
- devsper/providers/router/legacy.py +101 -0
- devsper/providers/router/router.py +135 -0
- devsper/reasoning/__init__.py +12 -0
- devsper/reasoning/graph.py +59 -0
- devsper/reasoning/nodes.py +20 -0
- devsper/reasoning/store.py +67 -0
- devsper/runtime/__init__.py +12 -0
- devsper/runtime/health.py +88 -0
- devsper/runtime/replay.py +53 -0
- devsper/runtime/replay_engine.py +142 -0
- devsper/runtime/run_history.py +204 -0
- devsper/runtime/telemetry.py +116 -0
- devsper/runtime/visualize.py +58 -0
- devsper/sandbox/__init__.py +13 -0
- devsper/sandbox/sandbox.py +161 -0
- devsper/swarm/checkpointer.py +65 -0
- devsper/swarm/executor.py +558 -0
- devsper/swarm/map_reduce.py +44 -0
- devsper/swarm/planner.py +197 -0
- devsper/swarm/prefetcher.py +91 -0
- devsper/swarm/scheduler.py +153 -0
- devsper/swarm/speculation.py +47 -0
- devsper/swarm/swarm.py +562 -0
- devsper/tools/__init__.py +33 -0
- devsper/tools/base.py +29 -0
- devsper/tools/code_intelligence/__init__.py +13 -0
- devsper/tools/code_intelligence/api_surface_extractor.py +73 -0
- devsper/tools/code_intelligence/architecture_analyzer.py +65 -0
- devsper/tools/code_intelligence/codebase_indexer.py +71 -0
- devsper/tools/code_intelligence/dependency_graph_builder.py +67 -0
- devsper/tools/code_intelligence/design_pattern_detector.py +62 -0
- devsper/tools/code_intelligence/large_function_detector.py +68 -0
- devsper/tools/code_intelligence/module_responsibility_mapper.py +56 -0
- devsper/tools/code_intelligence/parallel_codebase_analysis.py +44 -0
- devsper/tools/code_intelligence/refactor_candidate_detector.py +81 -0
- devsper/tools/code_intelligence/repository_semantic_index.py +61 -0
- devsper/tools/code_intelligence/test_coverage_estimator.py +62 -0
- devsper/tools/coding/__init__.py +12 -0
- devsper/tools/coding/analyze_code_complexity.py +48 -0
- devsper/tools/coding/dependency_analyzer.py +42 -0
- devsper/tools/coding/extract_functions.py +38 -0
- devsper/tools/coding/format_python.py +50 -0
- devsper/tools/coding/generate_docstrings.py +40 -0
- devsper/tools/coding/generate_unit_tests.py +42 -0
- devsper/tools/coding/lint_python.py +51 -0
- devsper/tools/coding/refactor_function.py +41 -0
- devsper/tools/coding/repo_structure_map.py +54 -0
- devsper/tools/coding/run_python.py +53 -0
- devsper/tools/data/__init__.py +12 -0
- devsper/tools/data/column_type_detection.py +64 -0
- devsper/tools/data/csv_summary.py +52 -0
- devsper/tools/data/dataframe_filter.py +51 -0
- devsper/tools/data/dataframe_groupby.py +47 -0
- devsper/tools/data/dataframe_stats.py +38 -0
- devsper/tools/data/dataset_sampling.py +55 -0
- devsper/tools/data/dataset_schema.py +45 -0
- devsper/tools/data/json_pretty_print.py +37 -0
- devsper/tools/data/json_query.py +46 -0
- devsper/tools/data/missing_value_report.py +47 -0
- devsper/tools/data_science/__init__.py +13 -0
- devsper/tools/data_science/correlation_heatmap.py +72 -0
- devsper/tools/data_science/dataset_bias_detector.py +49 -0
- devsper/tools/data_science/dataset_distribution_report.py +64 -0
- devsper/tools/data_science/dataset_drift_detector.py +64 -0
- devsper/tools/data_science/dataset_outlier_detector.py +65 -0
- devsper/tools/data_science/dataset_profile.py +76 -0
- devsper/tools/data_science/distributed_dataset_processor.py +54 -0
- devsper/tools/data_science/feature_engineering_suggestions.py +69 -0
- devsper/tools/data_science/feature_importance_estimator.py +82 -0
- devsper/tools/data_science/model_input_validator.py +59 -0
- devsper/tools/data_science/time_series_analyzer.py +57 -0
- devsper/tools/documents/__init__.py +11 -0
- devsper/tools/documents/_docproc.py +56 -0
- devsper/tools/documents/document_to_markdown.py +29 -0
- devsper/tools/documents/extract_document_images.py +39 -0
- devsper/tools/documents/extract_document_text.py +29 -0
- devsper/tools/documents/extract_equations.py +36 -0
- devsper/tools/documents/extract_tables.py +47 -0
- devsper/tools/documents/summarize_document.py +42 -0
- devsper/tools/documents/write_latex_document.py +133 -0
- devsper/tools/documents/write_markdown_document.py +89 -0
- devsper/tools/documents/write_word_document.py +149 -0
- devsper/tools/experiments/__init__.py +13 -0
- devsper/tools/experiments/bootstrap_estimator.py +54 -0
- devsper/tools/experiments/experiment_report_generator.py +50 -0
- devsper/tools/experiments/experiment_tracker.py +36 -0
- devsper/tools/experiments/grid_search_runner.py +50 -0
- devsper/tools/experiments/model_benchmark_runner.py +45 -0
- devsper/tools/experiments/monte_carlo_experiment.py +38 -0
- devsper/tools/experiments/parameter_sweep_runner.py +51 -0
- devsper/tools/experiments/result_comparator.py +58 -0
- devsper/tools/experiments/simulation_runner.py +43 -0
- devsper/tools/experiments/statistical_significance_test.py +56 -0
- devsper/tools/experiments/swarm_map_reduce.py +42 -0
- devsper/tools/filesystem/__init__.py +12 -0
- devsper/tools/filesystem/append_file.py +42 -0
- devsper/tools/filesystem/file_hash.py +40 -0
- devsper/tools/filesystem/file_line_count.py +36 -0
- devsper/tools/filesystem/file_metadata.py +38 -0
- devsper/tools/filesystem/file_preview.py +55 -0
- devsper/tools/filesystem/find_large_files.py +50 -0
- devsper/tools/filesystem/list_directory.py +39 -0
- devsper/tools/filesystem/read_file.py +35 -0
- devsper/tools/filesystem/search_files.py +60 -0
- devsper/tools/filesystem/write_file.py +41 -0
- devsper/tools/flagship/__init__.py +15 -0
- devsper/tools/flagship/distributed_document_analysis.py +77 -0
- devsper/tools/flagship/docproc_corpus_pipeline.py +91 -0
- devsper/tools/flagship/repository_semantic_map.py +99 -0
- devsper/tools/flagship/research_graph_builder.py +111 -0
- devsper/tools/flagship/swarm_experiment_runner.py +86 -0
- devsper/tools/knowledge/__init__.py +10 -0
- devsper/tools/knowledge/citation_graph_builder.py +69 -0
- devsper/tools/knowledge/concept_frequency_analyzer.py +74 -0
- devsper/tools/knowledge/corpus_builder.py +66 -0
- devsper/tools/knowledge/cross_document_entity_linker.py +71 -0
- devsper/tools/knowledge/document_corpus_summary.py +68 -0
- devsper/tools/knowledge/document_topic_extractor.py +58 -0
- devsper/tools/knowledge/knowledge_graph_extractor.py +58 -0
- devsper/tools/knowledge/timeline_extractor.py +59 -0
- devsper/tools/math/__init__.py +12 -0
- devsper/tools/math/calculate_expression.py +52 -0
- devsper/tools/math/correlation.py +44 -0
- devsper/tools/math/distribution_summary.py +39 -0
- devsper/tools/math/histogram.py +53 -0
- devsper/tools/math/linear_regression.py +47 -0
- devsper/tools/math/matrix_multiply.py +38 -0
- devsper/tools/math/mean_std.py +35 -0
- devsper/tools/math/monte_carlo_simulation.py +43 -0
- devsper/tools/math/polynomial_fit.py +40 -0
- devsper/tools/math/random_sample.py +36 -0
- devsper/tools/mcp/__init__.py +23 -0
- devsper/tools/mcp/adapter.py +53 -0
- devsper/tools/mcp/client.py +235 -0
- devsper/tools/mcp/discovery.py +53 -0
- devsper/tools/memory/__init__.py +16 -0
- devsper/tools/memory/delete_memory.py +25 -0
- devsper/tools/memory/list_memory.py +34 -0
- devsper/tools/memory/search_memory.py +36 -0
- devsper/tools/memory/store_memory.py +47 -0
- devsper/tools/memory/summarize_memory.py +41 -0
- devsper/tools/memory/tag_memory.py +47 -0
- devsper/tools/pipelines.py +92 -0
- devsper/tools/registry.py +39 -0
- devsper/tools/research/__init__.py +12 -0
- devsper/tools/research/arxiv_download.py +55 -0
- devsper/tools/research/arxiv_search.py +58 -0
- devsper/tools/research/citation_extractor.py +35 -0
- devsper/tools/research/duckduckgo_search.py +42 -0
- devsper/tools/research/paper_metadata_extractor.py +45 -0
- devsper/tools/research/paper_summarizer.py +41 -0
- devsper/tools/research/research_question_generator.py +39 -0
- devsper/tools/research/topic_cluster.py +46 -0
- devsper/tools/research/web_search.py +47 -0
- devsper/tools/research/wikipedia_lookup.py +50 -0
- devsper/tools/research_advanced/__init__.py +14 -0
- devsper/tools/research_advanced/citation_context_extractor.py +60 -0
- devsper/tools/research_advanced/literature_review_generator.py +79 -0
- devsper/tools/research_advanced/methodology_extractor.py +58 -0
- devsper/tools/research_advanced/paper_contribution_extractor.py +50 -0
- devsper/tools/research_advanced/paper_dataset_identifier.py +49 -0
- devsper/tools/research_advanced/paper_method_comparator.py +62 -0
- devsper/tools/research_advanced/paper_similarity_search.py +69 -0
- devsper/tools/research_advanced/paper_trend_analyzer.py +69 -0
- devsper/tools/research_advanced/parallel_document_analyzer.py +56 -0
- devsper/tools/research_advanced/research_gap_finder.py +71 -0
- devsper/tools/research_advanced/research_topic_mapper.py +69 -0
- devsper/tools/research_advanced/swarm_literature_review.py +58 -0
- devsper/tools/scoring/__init__.py +52 -0
- devsper/tools/scoring/report.py +44 -0
- devsper/tools/scoring/scorer.py +39 -0
- devsper/tools/scoring/selector.py +61 -0
- devsper/tools/scoring/store.py +267 -0
- devsper/tools/selector.py +130 -0
- devsper/tools/system/__init__.py +12 -0
- devsper/tools/system/cpu_usage.py +22 -0
- devsper/tools/system/disk_usage.py +35 -0
- devsper/tools/system/environment_variables.py +29 -0
- devsper/tools/system/memory_usage.py +23 -0
- devsper/tools/system/pip_install.py +44 -0
- devsper/tools/system/pip_search.py +29 -0
- devsper/tools/system/process_list.py +34 -0
- devsper/tools/system/python_package_list.py +40 -0
- devsper/tools/system/run_shell_command.py +51 -0
- devsper/tools/system/system_info.py +26 -0
- devsper/tools/tool_runner.py +122 -0
- devsper/tui/__init__.py +5 -0
- devsper/tui/activity_feed_view.py +73 -0
- devsper/tui/adaptive_tasks_view.py +75 -0
- devsper/tui/agent_role_view.py +35 -0
- devsper/tui/app.py +395 -0
- devsper/tui/dashboard_screen.py +290 -0
- devsper/tui/dev_view.py +99 -0
- devsper/tui/inject_screen.py +73 -0
- devsper/tui/knowledge_graph_view.py +46 -0
- devsper/tui/layout.py +43 -0
- devsper/tui/logs_view.py +83 -0
- devsper/tui/memory_view.py +58 -0
- devsper/tui/performance_view.py +33 -0
- devsper/tui/reasoning_graph_view.py +39 -0
- devsper/tui/results_view.py +139 -0
- devsper/tui/swarm_view.py +37 -0
- devsper/tui/task_detail_screen.py +55 -0
- devsper/tui/task_view.py +103 -0
- devsper/types/event.py +97 -0
- devsper/types/exceptions.py +21 -0
- devsper/types/swarm.py +41 -0
- devsper/types/task.py +80 -0
- devsper/upgrade/__init__.py +21 -0
- devsper/upgrade/changelog.py +124 -0
- devsper/upgrade/cli.py +145 -0
- devsper/upgrade/installer.py +103 -0
- devsper/upgrade/notifier.py +52 -0
- devsper/upgrade/version_check.py +121 -0
- devsper/utils/event_logger.py +88 -0
- devsper/utils/http.py +43 -0
- devsper/utils/models.py +54 -0
- devsper/visualization/__init__.py +5 -0
- devsper/visualization/dag_export.py +67 -0
- devsper/workflow/__init__.py +18 -0
- devsper/workflow/conditions.py +157 -0
- devsper/workflow/context.py +108 -0
- devsper/workflow/loader.py +156 -0
- devsper/workflow/resolver.py +109 -0
- devsper/workflow/runner.py +562 -0
- devsper/workflow/schema.py +63 -0
- devsper/workflow/validator.py +128 -0
- devsper-2.1.6.dist-info/METADATA +346 -0
- devsper-2.1.6.dist-info/RECORD +375 -0
- devsper-2.1.6.dist-info/WHEEL +4 -0
- devsper-2.1.6.dist-info/entry_points.txt +3 -0
- devsper-2.1.6.dist-info/licenses/LICENSE +639 -0
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
"""OpenAI + Azure OpenAI LLM backend (LangChain)."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
|
|
6
|
+
from devsper.providers.router.base import LLMBackend, LLMRequest, LLMResponse
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _is_azure_foundry_v1_endpoint(endpoint: str) -> bool:
|
|
10
|
+
"""True if endpoint is Azure Foundry v1 (use ChatOpenAI + base_url, not AzureChatOpenAI)."""
|
|
11
|
+
if not endpoint:
|
|
12
|
+
return False
|
|
13
|
+
e = endpoint.rstrip("/").lower()
|
|
14
|
+
return "/openai/v1" in e or "cognitiveservices.azure.com" in e
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _messages_to_lc(messages: list[dict]) -> list:
    """Convert role/content message dicts into LangChain message objects.

    Roles map as: "user" -> HumanMessage, "assistant" -> AIMessage,
    "system" -> SystemMessage. A missing role defaults to "user", and any
    unrecognized role also falls back to HumanMessage.
    """
    converted = []
    for entry in messages:
        role = (entry.get("role") or "user").lower()
        content = entry.get("content") or ""
        if role == "assistant":
            converted.append(AIMessage(content=content))
        elif role == "system":
            converted.append(SystemMessage(content=content))
        else:
            # "user" and any unknown role both become a human message.
            converted.append(HumanMessage(content=content))
    return converted
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class OpenAIBackend(LLMBackend):
    """OpenAI API and Azure OpenAI. Uses OPENAI_* / AZURE_OPENAI_* env or constructor args.

    Azure mode is enabled explicitly (azure=True) or implicitly when both
    AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_API_KEY are set. For Azure Foundry
    v1-style endpoints the backend uses ChatOpenAI with a base_url; for classic
    Azure OpenAI it uses AzureChatOpenAI with a deployment name.
    """

    def __init__(
        self,
        api_key: str | None = None,
        *,
        azure: bool = False,
        azure_endpoint: str | None = None,
        azure_deployment: str | None = None,
        api_version: str | None = None,
    ) -> None:
        # Explicit key wins; otherwise fall back to OPENAI_API_KEY.
        self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
        # Azure mode: explicit flag, or both endpoint + key present in env.
        self.azure = azure or bool(
            os.environ.get("AZURE_OPENAI_ENDPOINT") and os.environ.get("AZURE_OPENAI_API_KEY")
        )
        # Normalize the endpoint: strip whitespace and any trailing slash.
        self.azure_endpoint = (azure_endpoint or os.environ.get("AZURE_OPENAI_ENDPOINT", "") or "").strip().rstrip("/")
        self.azure_deployment = azure_deployment or os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME", "")
        self.api_version = api_version or os.environ.get("AZURE_OPENAI_API_VERSION", "2024-05-01-preview")
        # In Azure mode, prefer the Azure key when no OpenAI key was found.
        if self.azure and not self.api_key:
            self.api_key = os.environ.get("AZURE_OPENAI_API_KEY")
        # Decide once whether this is a Foundry v1 endpoint (see module helper).
        self._azure_foundry = _is_azure_foundry_v1_endpoint(self.azure_endpoint)
        # One LangChain client per model/deployment name, built lazily.
        self._llm_cache: dict[str, object] = {}

    @property
    def name(self) -> str:
        """Backend identifier used in LLMResponse.backend and router lookups."""
        return "openai"

    def _get_llm(self, model: str):
        """Return a cached LangChain chat client for *model*, creating it on first use.

        The cache key is the requested model name, falling back to the Azure
        deployment (Azure mode) or "gpt-4o". langchain_openai is imported
        lazily so the module loads even when the package is absent.
        """
        key = model or (self.azure_deployment if self.azure else "gpt-4o")
        if key in self._llm_cache:
            return self._llm_cache[key]
        if self.azure:
            # In Azure mode the "model" is actually the deployment name.
            dep = model or self.azure_deployment or "gpt-4o"
            if self._azure_foundry:
                from langchain_openai import ChatOpenAI
                # ChatOpenAI expects base_url with a trailing slash here.
                base = self.azure_endpoint if self.azure_endpoint.endswith("/") else self.azure_endpoint + "/"
                llm = ChatOpenAI(
                    base_url=base,
                    api_key=self.api_key,
                    model=dep,
                    temperature=0,
                )
            else:
                from langchain_openai import AzureChatOpenAI
                llm = AzureChatOpenAI(
                    azure_endpoint=self.azure_endpoint,
                    azure_deployment=dep,
                    openai_api_key=self.api_key,
                    api_version=self.api_version,
                    temperature=0,
                )
        else:
            from langchain_openai import ChatOpenAI
            llm = ChatOpenAI(
                model=model or "gpt-4o",
                api_key=self.api_key,
                temperature=0,
            )
        self._llm_cache[key] = llm
        return llm

    def supports_model(self, model_name: str) -> bool:
        """Azure serves any deployment name; otherwise accept gpt*/o1*/o3*/o4* models."""
        m = (model_name or "").strip().lower()
        if self.azure:
            return True
        return m.startswith("gpt") or m.startswith("o1") or m.startswith("o3") or m.startswith("o4")

    async def complete(self, request: LLMRequest) -> LLMResponse:
        """Non-streaming completion via the cached LangChain client.

        NOTE(review): request.max_tokens, request.temperature, and
        request.tools are not forwarded to the client here — the client is
        built with temperature=0 and provider defaults; finish_reason is
        hard-coded to "stop". Confirm whether that is intended.
        """
        model = request.model or (self.azure_deployment if self.azure else "gpt-4o")
        llm = self._get_llm(model)
        lc_messages = _messages_to_lc(request.messages)
        msg = await llm.ainvoke(lc_messages)
        # Some providers return structured content; coerce to a plain string.
        content = msg.content if isinstance(msg.content, str) else str(msg.content or "")
        usage = {}
        if hasattr(msg, "response_metadata") and msg.response_metadata:
            # Usage may live under a "usage" key or directly in the metadata;
            # key names vary (prompt_tokens vs input_tokens, etc.).
            meta = msg.response_metadata.get("usage", {}) or msg.response_metadata
            usage = {
                "prompt_tokens": meta.get("prompt_tokens", meta.get("input_tokens", 0)),
                "completion_tokens": meta.get("completion_tokens", meta.get("output_tokens", 0)),
                "total_tokens": meta.get("total_tokens", 0),
            }
        return LLMResponse(
            content=content,
            model=model,
            usage=usage,
            finish_reason="stop",
            backend=self.name,
        )

    async def stream(self, request: LLMRequest):
        """Streaming completion: yield non-empty content chunks as they arrive."""
        model = request.model or (self.azure_deployment if self.azure else "gpt-4o")
        llm = self._get_llm(model)
        lc_messages = _messages_to_lc(request.messages)
        async for chunk in llm.astream(lc_messages):
            if hasattr(chunk, "content") and chunk.content:
                yield chunk.content

    async def health(self) -> bool:
        """Cheap liveness probe: one tiny completion; any exception means unhealthy."""
        try:
            await self.complete(
                LLMRequest(
                    model="gpt-4o-mini" if not self.azure else (self.azure_deployment or "gpt-4o"),
                    messages=[{"role": "user", "content": "Hi"}],
                    max_tokens=2,
                )
            )
            return True
        except Exception:
            return False
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""vLLM backend (OpenAI-compatible endpoint)."""
|
|
2
|
+
|
|
3
|
+
from devsper.providers.router.base import LLMBackend, LLMRequest, LLMResponse
|
|
4
|
+
from devsper.providers.router.backends.custom_backend import CustomBackend
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class VLLMBackend(LLMBackend):
    """vLLM server: OpenAI-compatible API with configurable base_url.

    A thin facade over CustomBackend — all transport work is delegated;
    only the backend name is rewritten on responses.
    """

    def __init__(self, base_url: str = "http://localhost:8000", api_key: str = "") -> None:
        # Empty api_key is passed as None so the delegate omits auth headers.
        self._custom = CustomBackend(
            base_url=base_url,
            api_key=api_key or None,
            model_prefix_strip=None,
        )

    @property
    def name(self) -> str:
        return "vllm"

    def supports_model(self, model_name: str) -> bool:
        # A vLLM server hosts arbitrary models; accept everything.
        return True

    async def complete(self, request: LLMRequest) -> LLMResponse:
        inner = await self._custom.complete(request)
        # Re-wrap so the response is attributed to this backend, not "custom".
        return LLMResponse(
            content=inner.content,
            model=inner.model,
            usage=inner.usage,
            finish_reason=inner.finish_reason,
            backend=self.name,
        )

    async def stream(self, request: LLMRequest):
        # Pass streamed chunks straight through from the delegate.
        async for piece in self._custom.stream(request):
            yield piece

    async def health(self) -> bool:
        return await self._custom.health()
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Abstract LLM router types (v2.0).
|
|
3
|
+
|
|
4
|
+
LLMBackend: abstract backend interface.
|
|
5
|
+
LLMRequest / LLMResponse: unified request/response for all backends.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from abc import ABC, abstractmethod
|
|
9
|
+
from collections.abc import AsyncIterator
|
|
10
|
+
from dataclasses import dataclass
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclass
class LLMRequest:
    """Unified request for any LLM backend."""
    # Bare model name (no provider prefix); backends may map it further.
    model: str
    # Chat history as [{"role": ..., "content": ...}] dicts.
    messages: list[dict]
    # Generation limits; individual backends may or may not honor these.
    max_tokens: int = 4096
    temperature: float = 0.0
    # Optional tool/function definitions in provider-native dict form.
    tools: list[dict] | None = None
    # Hint that the caller intends to stream; backends expose stream() separately.
    stream: bool = False
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class LLMResponse:
    """Unified response from any LLM backend."""
    # Final text of the completion.
    content: str
    # Model that actually served the request.
    model: str
    usage: dict  # prompt_tokens, completion_tokens, total_tokens
    # Provider-reported stop reason (e.g. "stop"); some backends hard-code it.
    finish_reason: str
    # Name of the backend that produced this response (LLMBackend.name).
    backend: str
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class LLMBackend(ABC):
    """Abstract backend for LLM completion. All providers implement this.

    Implementations must be safe to construct without network access;
    connectivity problems surface via health() or at call time.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Backend identifier (e.g. 'openai', 'ollama'); used for routing and attribution."""
        ...

    @abstractmethod
    async def complete(self, request: LLMRequest) -> LLMResponse:
        """Non-streaming completion: one request in, one LLMResponse out."""
        ...

    @abstractmethod
    async def stream(self, request: LLMRequest) -> AsyncIterator[str]:
        """Streaming completion; yield content chunks as plain strings."""
        ...

    @abstractmethod
    async def health(self) -> bool:
        """Return True if backend is reachable and usable; never raise."""
        ...

    def supports_model(self, model_name: str) -> bool:
        """Return True if this backend can serve the given model (bare name, no prefix).

        Default is False; concrete backends override with their own matching rules.
        """
        return False
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
"""Build LLMRouter from config and register backends."""
|
|
2
|
+
|
|
3
|
+
from devsper.config import get_config
|
|
4
|
+
from devsper.providers.router.router import LLMRouter
|
|
5
|
+
from devsper.providers.router.backends.openai_backend import OpenAIBackend
|
|
6
|
+
from devsper.providers.router.backends.anthropic_backend import AnthropicBackend
|
|
7
|
+
from devsper.providers.router.backends.gemini_backend import GeminiBackend
|
|
8
|
+
from devsper.providers.router.backends.github_backend import GitHubBackend
|
|
9
|
+
from devsper.providers.router.backends.ollama_backend import OllamaBackend
|
|
10
|
+
from devsper.providers.router.backends.vllm_backend import VLLMBackend
|
|
11
|
+
from devsper.providers.router.backends.custom_backend import CustomBackend
|
|
12
|
+
|
|
13
|
+
_router_instance: LLMRouter | None = None
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _emit_fallback(payload: dict) -> None:
    """Best-effort: append a PROVIDER_FALLBACK event to the event log.

    Imports are deferred and every failure (missing config, unwritable log,
    import error) is swallowed so telemetry can never disrupt routing.
    """
    try:
        from datetime import datetime, timezone

        from devsper.config import get_config
        from devsper.types.event import Event, events
        from devsper.utils.event_logger import EventLog

        event = Event(
            timestamp=datetime.now(timezone.utc),
            type=events.PROVIDER_FALLBACK,
            payload=payload,
        )
        EventLog(events_folder_path=get_config().events_dir).append_event(event)
    except Exception:
        pass
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def get_llm_router() -> LLMRouter | None:
    """Build and return the v2 LLMRouter from config. Cached. Returns None if not configured.

    Backends are registered opportunistically: cloud providers when their
    API-key env vars are present, local/custom servers when enabled in the
    provider config. Each registration is individually best-effort — a
    failing backend is skipped, not fatal. A router with zero backends is
    treated as "not configured" and is NOT cached, so later calls retry.
    """
    global _router_instance
    if _router_instance is not None:
        return _router_instance
    try:
        cfg = get_config()
        pc = cfg.providers
    except Exception:
        # No usable config at all — router cannot be built.
        return None
    fallback_order = getattr(pc, "fallback_order", None) or []
    router = LLMRouter(
        fallback_order=fallback_order,
        max_fallbacks=2,
        on_fallback=_emit_fallback,
    )
    # Register standard backends when env/config allows
    import os
    # Cloud backends: gated purely on environment variables.
    if os.environ.get("OPENAI_API_KEY") or (os.environ.get("AZURE_OPENAI_ENDPOINT") and os.environ.get("AZURE_OPENAI_API_KEY")):
        try:
            router.register(OpenAIBackend())
        except Exception:
            pass
    if os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("AZURE_ANTHROPIC_API_KEY") or os.environ.get("AZURE_ANTHROPIC_ENDPOINT"):
        try:
            router.register(AnthropicBackend())
        except Exception:
            pass
    if os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY"):
        try:
            router.register(GeminiBackend())
        except Exception:
            pass
    if os.environ.get("GITHUB_TOKEN"):
        try:
            router.register(GitHubBackend())
        except Exception:
            pass
    # Local/custom backends: gated on provider config flags, with defaults
    # for the usual local server ports.
    if getattr(pc, "ollama", None) and getattr(pc.ollama, "enabled", False):
        try:
            router.register(OllamaBackend(base_url=pc.ollama.base_url or "http://localhost:11434"))
        except Exception:
            pass
    if getattr(pc, "vllm", None) and getattr(pc.vllm, "enabled", False):
        try:
            router.register(VLLMBackend(base_url=pc.vllm.base_url or "http://localhost:8000", api_key=pc.vllm.api_key or ""))
        except Exception:
            pass
    if getattr(pc, "custom", None) and getattr(pc.custom, "enabled", False) and getattr(pc.custom, "base_url", ""):
        try:
            router.register(
                CustomBackend(
                    base_url=pc.custom.base_url,
                    api_key=pc.custom.api_key or None,
                    model_prefix_strip=pc.custom.model_prefix_strip or None,
                )
            )
        except Exception:
            pass
    # NOTE(review): reaches into LLMRouter's private attribute; a public
    # "has backends" accessor on LLMRouter would be cleaner — confirm.
    if not router._backends:
        return None
    _router_instance = router
    return _router_instance
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Legacy provider router: map model name → provider instance.
|
|
3
|
+
|
|
4
|
+
Agent and planner call generate(model, prompt) in utils.models;
|
|
5
|
+
utils.models uses the router to get the right provider (or v2 LLMRouter when available).
|
|
6
|
+
|
|
7
|
+
When AZURE_OPENAI_ENDPOINT is set, GPT models use Azure OpenAI (model name = deployment name).
|
|
8
|
+
When AZURE_ANTHROPIC_ENDPOINT (or AZURE_ANTHROPIC_API_KEY) is set, Claude models use Azure Foundry.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
|
|
13
|
+
from dotenv import load_dotenv
|
|
14
|
+
|
|
15
|
+
from devsper.providers.base import BaseProvider, MockProvider
|
|
16
|
+
from devsper.providers.openai import OpenAIProvider
|
|
17
|
+
from devsper.providers.anthropic import AnthropicProvider
|
|
18
|
+
from devsper.providers.gemini import GeminiProvider
|
|
19
|
+
from devsper.providers.github import GitHubProvider
|
|
20
|
+
|
|
21
|
+
load_dotenv()
|
|
22
|
+
|
|
23
|
+
# Provider prefix in model spec (provider:model)
# NOTE: bare model names are never inferred as "github" (see _model_to_vendor);
# GitHub must be selected explicitly with a "github:" prefix.
# NOTE(review): this table is not referenced by ProviderRouter below;
# presumably consumed elsewhere — verify before removing.
PROVIDERS = {
    "openai": OpenAIProvider,
    "anthropic": AnthropicProvider,
    "azure": OpenAIProvider,  # Azure OpenAI uses OpenAIProvider with azure=True
    "gemini": GeminiProvider,
    "github": GitHubProvider,
}
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _parse_model_spec(model: str) -> tuple[str, str]:
|
|
34
|
+
"""Return (vendor, model_name). If 'provider:model' format, vendor is provider; else infer from name."""
|
|
35
|
+
m = (model or "").strip()
|
|
36
|
+
if ":" in m:
|
|
37
|
+
vendor, name = m.split(":", 1)
|
|
38
|
+
return vendor.strip().lower(), name.strip()
|
|
39
|
+
return _model_to_vendor(m), m
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _model_to_vendor(model: str) -> str:
|
|
43
|
+
"""Return 'openai' | 'anthropic' | 'gemini' | 'github' | 'mock' from model name (no prefix)."""
|
|
44
|
+
m = (model or "").strip().lower()
|
|
45
|
+
if m in ("mock", "default", ""):
|
|
46
|
+
return "mock"
|
|
47
|
+
if m.startswith("gpt") or m.startswith("o1") or m.startswith("o3") or m.startswith("o4"):
|
|
48
|
+
return "openai"
|
|
49
|
+
if m.startswith("claude"):
|
|
50
|
+
return "anthropic"
|
|
51
|
+
if m.startswith("gemini"):
|
|
52
|
+
return "gemini"
|
|
53
|
+
return "mock"
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def _use_azure_openai() -> bool:
|
|
57
|
+
"""True when Azure OpenAI env vars are set (endpoint + key)."""
|
|
58
|
+
return bool(os.environ.get("AZURE_OPENAI_ENDPOINT") and os.environ.get("AZURE_OPENAI_API_KEY"))
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class ProviderRouter:
    """Resolve a model name (or 'provider:model' spec) to a provider instance.

    One provider instance per vendor is created lazily and reused; any
    unrecognized vendor falls back to a shared MockProvider.
    """

    def __init__(self) -> None:
        # Lazily populated vendor -> provider instance cache.
        self._instances: dict[str, BaseProvider] = {}
        self._mock = MockProvider()

    def get_provider(self, model_name: str) -> BaseProvider:
        """Return the provider that should handle this model name (supports provider:model)."""
        vendor, _ = _parse_model_spec(model_name)
        # Azure OpenAI shares the OpenAI slot (constructed with azure=True).
        if vendor == "azure":
            vendor = "openai"
        factories = {
            "openai": lambda: OpenAIProvider(azure=_use_azure_openai()),
            "anthropic": AnthropicProvider,
            "gemini": GeminiProvider,
            "github": GitHubProvider,
        }
        factory = factories.get(vendor)
        if factory is None:
            return self._mock
        if vendor not in self._instances:
            self._instances[vendor] = factory()
        return self._instances[vendor]
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
_router: ProviderRouter | None = None


def get_router() -> ProviderRouter:
    """Return the process-wide ProviderRouter, creating it on first use."""
    global _router
    router = _router
    if router is None:
        router = ProviderRouter()
        _router = router
    return router
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LLMRouter: routes requests by model string (provider:model or bare name), with optional fallback.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from collections.abc import AsyncIterator, Callable
|
|
6
|
+
|
|
7
|
+
from devsper.providers.router.base import LLMBackend, LLMRequest, LLMResponse
|
|
8
|
+
|
|
9
|
+
# Event type name used when the router falls over to a secondary backend.
# The router itself only invokes the on_fallback callback; callers can use
# this constant to publish the event to an event bus.
PROVIDER_FALLBACK_EVENT = "provider_fallback"
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _parse_model_spec(model: str) -> tuple[str | None, str]:
|
|
14
|
+
"""Return (prefix, model_name). prefix is None if no 'provider:' prefix."""
|
|
15
|
+
m = (model or "").strip()
|
|
16
|
+
if ":" in m:
|
|
17
|
+
prefix, name = m.split(":", 1)
|
|
18
|
+
return prefix.strip().lower(), name.strip()
|
|
19
|
+
return None, m
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class LLMRouter:
    """
    Holds registered LLMBackend instances. Routes by model string:
    - "ollama:llama3" -> OllamaBackend, model="llama3"
    - "vllm:mistral-7b" -> VLLMBackend, model="mistral-7b"
    - "openai:gpt-4o" -> OpenAIBackend
    - bare "gpt-4o" -> first backend where supports_model("gpt-4o") is True
    """

    def __init__(
        self,
        fallback_order: list[str] | None = None,
        max_fallbacks: int = 2,
        on_fallback: Callable[[dict], None] | None = None,
    ) -> None:
        """
        Args:
            fallback_order: backend names to try, in order, after the primary fails.
            max_fallbacks: cap on how many fallback backends are attempted.
            on_fallback: optional callback invoked with details before each
                fallback attempt (e.g. to emit PROVIDER_FALLBACK_EVENT).
        """
        self._backends: list[LLMBackend] = []
        self._by_name: dict[str, LLMBackend] = {}
        self.fallback_order = fallback_order or []
        self.max_fallbacks = max(0, max_fallbacks)
        self.on_fallback = on_fallback

    def register(self, backend: LLMBackend) -> None:
        """Register a backend. A duplicate name is silently ignored."""
        if backend.name in self._by_name:
            return
        self._backends.append(backend)
        self._by_name[backend.name] = backend

    def _backend_for_prefix(self, prefix: str) -> LLMBackend | None:
        """Backend registered under exactly this name, or None."""
        return self._by_name.get(prefix)

    async def _backend_for_bare_model(self, model_name: str) -> LLMBackend | None:
        """First registered backend that supports the bare model name."""
        for b in self._backends:
            if b.supports_model(model_name):
                return b
        return None

    def _order_for_fallback(self, primary_name: str) -> list[LLMBackend]:
        """Backends to try after primary, in fallback_order order (no duplicates)."""
        seen = {primary_name}
        out: list[LLMBackend] = []
        for name in self.fallback_order:
            if name not in seen and name in self._by_name:
                seen.add(name)
                out.append(self._by_name[name])
        return out

    async def _resolve(
        self, request: LLMRequest, *, stream: bool
    ) -> tuple[LLMBackend | None, LLMRequest]:
        """Pick a backend for request.model and return (backend, request-to-send).

        A registered 'provider:' prefix wins, and the prefix is stripped from
        the model name in the outgoing request; otherwise the (stripped) bare
        model name is matched via supports_model(). Shared by route() and
        route_stream() so both use identical lookup semantics (previously
        route() looked up the full prefixed string when the prefix was
        unregistered, while route_stream() used the stripped name).
        """
        prefix, model_name = _parse_model_spec(request.model)
        if prefix is not None:
            backend = self._backend_for_prefix(prefix)
            if backend is not None:
                # Rebuild the request without the provider prefix.
                return backend, LLMRequest(
                    model=model_name,
                    messages=request.messages,
                    max_tokens=request.max_tokens,
                    temperature=request.temperature,
                    tools=request.tools,
                    stream=stream,
                )
        # Unknown or absent prefix: match the bare model name.
        backend = await self._backend_for_bare_model(model_name or request.model)
        return backend, request

    async def route(self, request: LLMRequest) -> LLMResponse:
        """Route request to the appropriate backend. On failure, try fallback chain.

        Raises:
            ValueError: when no backend matches the model.
            Exception: the last backend error when every attempt fails.
        """
        primary, req = await self._resolve(request, stream=False)
        if primary is None:
            raise ValueError(f"No backend for model: {request.model}")

        fallbacks = self._order_for_fallback(primary.name)[: self.max_fallbacks]
        last_error: Exception | None = None
        for backend in [primary] + fallbacks:
            try:
                if backend is not primary and self.on_fallback:
                    # Let the caller observe/emit the fallback before retrying.
                    self.on_fallback({
                        "original_backend": primary.name,
                        "fallback_backend": backend.name,
                        "reason": str(last_error) if last_error else "unknown",
                    })
                return await backend.complete(req)
            except Exception as e:
                last_error = e
                continue
        if last_error:
            raise last_error
        raise ValueError(f"No backend could complete request for model: {request.model}")

    async def route_stream(self, request: LLMRequest) -> AsyncIterator[str]:
        """Route streaming request. Fallback not applied to stream (use complete for retries)."""
        backend, req = await self._resolve(request, stream=True)
        if backend is None:
            raise ValueError(f"No backend for model: {request.model}")
        async for chunk in backend.stream(req):
            yield chunk
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Multi-agent reasoning graph: store and query intermediate reasoning artifacts.
|
|
3
|
+
|
|
4
|
+
Agents write ReasoningNode entries after completing steps; subsequent agents
|
|
5
|
+
can query the graph for context via ReasoningStore.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from devsper.reasoning.nodes import ReasoningNode
|
|
9
|
+
from devsper.reasoning.graph import ReasoningGraph
|
|
10
|
+
from devsper.reasoning.store import ReasoningStore
|
|
11
|
+
|
|
12
|
+
__all__ = ["ReasoningNode", "ReasoningGraph", "ReasoningStore"]
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Reasoning graph: DAG of reasoning nodes with add_node, query_nodes, get_dependencies.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import networkx as nx
|
|
6
|
+
|
|
7
|
+
from devsper.reasoning.nodes import ReasoningNode
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ReasoningGraph:
    """DAG of reasoning nodes. Supports add_node, query_nodes, get_dependencies."""

    def __init__(self) -> None:
        # Edges live in the networkx graph; node payloads in the id -> node map.
        self._graph: nx.DiGraph = nx.DiGraph()
        self._nodes: dict[str, ReasoningNode] = {}

    def add_node(self, node: ReasoningNode) -> None:
        """Add a reasoning node plus an edge from each already-known dependency."""
        self._nodes[node.id] = node
        self._graph.add_node(node.id)
        for dep in node.dependencies:
            # Edges are only created for dependencies we have seen.
            if dep in self._nodes:
                self._graph.add_edge(dep, node.id)

    def query_nodes(
        self,
        *,
        agent_id: str | None = None,
        task_id: str | None = None,
        limit: int = 100,
    ) -> list[ReasoningNode]:
        """Return nodes matching the optional filters, newest first, capped at limit."""
        matches = [
            node
            for node in self._nodes.values()
            if (agent_id is None or node.agent_id == agent_id)
            and (task_id is None or node.task_id == task_id)
        ]
        matches.sort(key=lambda node: node.timestamp, reverse=True)
        return matches[:limit]

    def get_dependencies(self, node_id: str) -> list[ReasoningNode]:
        """Return all reasoning nodes that the given node depends on."""
        if node_id not in self._nodes:
            return []
        return [
            self._nodes[parent]
            for parent in self._graph.predecessors(node_id)
            if parent in self._nodes
        ]

    def get_node(self, node_id: str) -> ReasoningNode | None:
        """Return the reasoning node with the given id, or None."""
        return self._nodes.get(node_id)

    def get_successors(self, node_id: str) -> list[ReasoningNode]:
        """Return nodes that depend on the given node."""
        if node_id not in self._nodes:
            return []
        return [
            self._nodes[child]
            for child in self._graph.successors(node_id)
            if child in self._nodes
        ]
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Reasoning node: single artifact produced by an agent during a step.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from datetime import datetime, timezone
|
|
6
|
+
from pydantic import BaseModel, Field
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class ReasoningNode(BaseModel):
    """A single reasoning artifact produced by an agent."""

    # Unique identifier; also the hash key and the value other nodes
    # reference in their `dependencies` lists.
    id: str
    # Agent that produced this artifact (empty when unknown).
    agent_id: str = ""
    # Task this artifact belongs to (empty when unknown).
    task_id: str = ""
    # The reasoning text itself.
    content: str = ""
    # Creation time, timezone-aware UTC (used for newest-first ordering).
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    # Ids of nodes this one depends on.
    dependencies: list[str] = Field(default_factory=list)

    def __hash__(self) -> int:
        # Hash by id only, so nodes can be placed in sets / used as dict keys.
        return hash(self.id)
|