devsper 2.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devsper/__init__.py +14 -0
- devsper/agents/a2a/__init__.py +27 -0
- devsper/agents/a2a/client.py +126 -0
- devsper/agents/a2a/discovery.py +24 -0
- devsper/agents/a2a/server.py +128 -0
- devsper/agents/a2a/tool_adapter.py +68 -0
- devsper/agents/a2a/types.py +49 -0
- devsper/agents/agent.py +602 -0
- devsper/agents/critic.py +80 -0
- devsper/agents/message_bus.py +124 -0
- devsper/agents/roles.py +181 -0
- devsper/agents/run_agent.py +78 -0
- devsper/analytics/__init__.py +5 -0
- devsper/analytics/tool_analytics.py +78 -0
- devsper/audit/__init__.py +5 -0
- devsper/audit/logger.py +214 -0
- devsper/bus/__init__.py +29 -0
- devsper/bus/backends/__init__.py +5 -0
- devsper/bus/backends/base.py +38 -0
- devsper/bus/backends/memory.py +55 -0
- devsper/bus/backends/redis.py +146 -0
- devsper/bus/message.py +56 -0
- devsper/bus/schema_version.py +3 -0
- devsper/bus/topics.py +19 -0
- devsper/cache/__init__.py +6 -0
- devsper/cache/embedding_index.py +98 -0
- devsper/cache/hashing.py +24 -0
- devsper/cache/store.py +153 -0
- devsper/cache/task_cache.py +191 -0
- devsper/cli/__init__.py +6 -0
- devsper/cli/commands/reg.py +733 -0
- devsper/cli/github_oauth.py +157 -0
- devsper/cli/init.py +637 -0
- devsper/cli/main.py +2956 -0
- devsper/cli/run_progress.py +103 -0
- devsper/cli/ui/__init__.py +65 -0
- devsper/cli/ui/components.py +94 -0
- devsper/cli/ui/errors.py +104 -0
- devsper/cli/ui/logging.py +120 -0
- devsper/cli/ui/onboarding.py +102 -0
- devsper/cli/ui/progress.py +43 -0
- devsper/cli/ui/run_view.py +308 -0
- devsper/cli/ui/theme.py +40 -0
- devsper/cluster/__init__.py +29 -0
- devsper/cluster/election.py +84 -0
- devsper/cluster/local.py +97 -0
- devsper/cluster/node_info.py +77 -0
- devsper/cluster/registry.py +71 -0
- devsper/cluster/router.py +117 -0
- devsper/cluster/state_backend.py +105 -0
- devsper/compliance/__init__.py +5 -0
- devsper/compliance/pii.py +147 -0
- devsper/config/__init__.py +52 -0
- devsper/config/config_loader.py +121 -0
- devsper/config/defaults.py +77 -0
- devsper/config/resolver.py +342 -0
- devsper/config/schema.py +237 -0
- devsper/credentials/__init__.py +19 -0
- devsper/credentials/cli.py +197 -0
- devsper/credentials/migration.py +124 -0
- devsper/credentials/store.py +142 -0
- devsper/dashboard/__init__.py +9 -0
- devsper/dashboard/dashboard.py +87 -0
- devsper/dev/__init__.py +25 -0
- devsper/dev/builder.py +195 -0
- devsper/dev/debugger.py +95 -0
- devsper/dev/repo_index.py +138 -0
- devsper/dev/sandbox.py +203 -0
- devsper/dev/scaffold.py +122 -0
- devsper/embeddings/__init__.py +5 -0
- devsper/embeddings/service.py +36 -0
- devsper/explainability/__init__.py +14 -0
- devsper/explainability/decision_tree.py +104 -0
- devsper/explainability/rationale.py +38 -0
- devsper/explainability/simulation.py +56 -0
- devsper/hitl/__init__.py +13 -0
- devsper/hitl/approval.py +160 -0
- devsper/hitl/escalation.py +95 -0
- devsper/intelligence/__init__.py +9 -0
- devsper/intelligence/adaptation.py +88 -0
- devsper/intelligence/analysis/__init__.py +19 -0
- devsper/intelligence/analysis/analyzer.py +71 -0
- devsper/intelligence/analysis/cost_estimator.py +66 -0
- devsper/intelligence/analysis/formatter.py +103 -0
- devsper/intelligence/analysis/run_report.py +402 -0
- devsper/intelligence/learning_engine.py +92 -0
- devsper/intelligence/strategies/__init__.py +23 -0
- devsper/intelligence/strategies/base.py +14 -0
- devsper/intelligence/strategies/code_analysis_strategy.py +33 -0
- devsper/intelligence/strategies/data_science_strategy.py +33 -0
- devsper/intelligence/strategies/document_pipeline_strategy.py +33 -0
- devsper/intelligence/strategies/experiment_strategy.py +33 -0
- devsper/intelligence/strategies/research_strategy.py +34 -0
- devsper/intelligence/strategy_selector.py +84 -0
- devsper/intelligence/synthesis.py +132 -0
- devsper/intelligence/task_optimizer.py +92 -0
- devsper/knowledge/__init__.py +5 -0
- devsper/knowledge/extractor.py +204 -0
- devsper/knowledge/knowledge_graph.py +184 -0
- devsper/knowledge/query.py +285 -0
- devsper/memory/__init__.py +35 -0
- devsper/memory/consolidation.py +138 -0
- devsper/memory/embeddings.py +60 -0
- devsper/memory/memory_index.py +97 -0
- devsper/memory/memory_router.py +62 -0
- devsper/memory/memory_store.py +221 -0
- devsper/memory/memory_types.py +54 -0
- devsper/memory/namespaces.py +45 -0
- devsper/memory/scoring.py +77 -0
- devsper/memory/summarizer.py +52 -0
- devsper/nodes/__init__.py +5 -0
- devsper/nodes/controller.py +449 -0
- devsper/nodes/rpc.py +127 -0
- devsper/nodes/single.py +161 -0
- devsper/nodes/worker.py +506 -0
- devsper/orchestration/__init__.py +19 -0
- devsper/orchestration/meta_planner.py +239 -0
- devsper/orchestration/priority_queue.py +61 -0
- devsper/plugins/__init__.py +19 -0
- devsper/plugins/marketplace/__init__.py +0 -0
- devsper/plugins/plugin_loader.py +70 -0
- devsper/plugins/plugin_registry.py +34 -0
- devsper/plugins/registry.py +83 -0
- devsper/protocols/__init__.py +6 -0
- devsper/providers/__init__.py +17 -0
- devsper/providers/anthropic.py +84 -0
- devsper/providers/base.py +75 -0
- devsper/providers/complexity_router.py +94 -0
- devsper/providers/gemini.py +36 -0
- devsper/providers/github.py +180 -0
- devsper/providers/model_router.py +40 -0
- devsper/providers/openai.py +105 -0
- devsper/providers/router/__init__.py +21 -0
- devsper/providers/router/backends/__init__.py +19 -0
- devsper/providers/router/backends/anthropic_backend.py +111 -0
- devsper/providers/router/backends/custom_backend.py +138 -0
- devsper/providers/router/backends/gemini_backend.py +89 -0
- devsper/providers/router/backends/github_backend.py +165 -0
- devsper/providers/router/backends/ollama_backend.py +104 -0
- devsper/providers/router/backends/openai_backend.py +142 -0
- devsper/providers/router/backends/vllm_backend.py +35 -0
- devsper/providers/router/base.py +60 -0
- devsper/providers/router/factory.py +92 -0
- devsper/providers/router/legacy.py +101 -0
- devsper/providers/router/router.py +135 -0
- devsper/reasoning/__init__.py +12 -0
- devsper/reasoning/graph.py +59 -0
- devsper/reasoning/nodes.py +20 -0
- devsper/reasoning/store.py +67 -0
- devsper/runtime/__init__.py +12 -0
- devsper/runtime/health.py +88 -0
- devsper/runtime/replay.py +53 -0
- devsper/runtime/replay_engine.py +142 -0
- devsper/runtime/run_history.py +204 -0
- devsper/runtime/telemetry.py +116 -0
- devsper/runtime/visualize.py +58 -0
- devsper/sandbox/__init__.py +13 -0
- devsper/sandbox/sandbox.py +161 -0
- devsper/swarm/checkpointer.py +65 -0
- devsper/swarm/executor.py +558 -0
- devsper/swarm/map_reduce.py +44 -0
- devsper/swarm/planner.py +197 -0
- devsper/swarm/prefetcher.py +91 -0
- devsper/swarm/scheduler.py +153 -0
- devsper/swarm/speculation.py +47 -0
- devsper/swarm/swarm.py +562 -0
- devsper/tools/__init__.py +33 -0
- devsper/tools/base.py +29 -0
- devsper/tools/code_intelligence/__init__.py +13 -0
- devsper/tools/code_intelligence/api_surface_extractor.py +73 -0
- devsper/tools/code_intelligence/architecture_analyzer.py +65 -0
- devsper/tools/code_intelligence/codebase_indexer.py +71 -0
- devsper/tools/code_intelligence/dependency_graph_builder.py +67 -0
- devsper/tools/code_intelligence/design_pattern_detector.py +62 -0
- devsper/tools/code_intelligence/large_function_detector.py +68 -0
- devsper/tools/code_intelligence/module_responsibility_mapper.py +56 -0
- devsper/tools/code_intelligence/parallel_codebase_analysis.py +44 -0
- devsper/tools/code_intelligence/refactor_candidate_detector.py +81 -0
- devsper/tools/code_intelligence/repository_semantic_index.py +61 -0
- devsper/tools/code_intelligence/test_coverage_estimator.py +62 -0
- devsper/tools/coding/__init__.py +12 -0
- devsper/tools/coding/analyze_code_complexity.py +48 -0
- devsper/tools/coding/dependency_analyzer.py +42 -0
- devsper/tools/coding/extract_functions.py +38 -0
- devsper/tools/coding/format_python.py +50 -0
- devsper/tools/coding/generate_docstrings.py +40 -0
- devsper/tools/coding/generate_unit_tests.py +42 -0
- devsper/tools/coding/lint_python.py +51 -0
- devsper/tools/coding/refactor_function.py +41 -0
- devsper/tools/coding/repo_structure_map.py +54 -0
- devsper/tools/coding/run_python.py +53 -0
- devsper/tools/data/__init__.py +12 -0
- devsper/tools/data/column_type_detection.py +64 -0
- devsper/tools/data/csv_summary.py +52 -0
- devsper/tools/data/dataframe_filter.py +51 -0
- devsper/tools/data/dataframe_groupby.py +47 -0
- devsper/tools/data/dataframe_stats.py +38 -0
- devsper/tools/data/dataset_sampling.py +55 -0
- devsper/tools/data/dataset_schema.py +45 -0
- devsper/tools/data/json_pretty_print.py +37 -0
- devsper/tools/data/json_query.py +46 -0
- devsper/tools/data/missing_value_report.py +47 -0
- devsper/tools/data_science/__init__.py +13 -0
- devsper/tools/data_science/correlation_heatmap.py +72 -0
- devsper/tools/data_science/dataset_bias_detector.py +49 -0
- devsper/tools/data_science/dataset_distribution_report.py +64 -0
- devsper/tools/data_science/dataset_drift_detector.py +64 -0
- devsper/tools/data_science/dataset_outlier_detector.py +65 -0
- devsper/tools/data_science/dataset_profile.py +76 -0
- devsper/tools/data_science/distributed_dataset_processor.py +54 -0
- devsper/tools/data_science/feature_engineering_suggestions.py +69 -0
- devsper/tools/data_science/feature_importance_estimator.py +82 -0
- devsper/tools/data_science/model_input_validator.py +59 -0
- devsper/tools/data_science/time_series_analyzer.py +57 -0
- devsper/tools/documents/__init__.py +11 -0
- devsper/tools/documents/_docproc.py +56 -0
- devsper/tools/documents/document_to_markdown.py +29 -0
- devsper/tools/documents/extract_document_images.py +39 -0
- devsper/tools/documents/extract_document_text.py +29 -0
- devsper/tools/documents/extract_equations.py +36 -0
- devsper/tools/documents/extract_tables.py +47 -0
- devsper/tools/documents/summarize_document.py +42 -0
- devsper/tools/documents/write_latex_document.py +133 -0
- devsper/tools/documents/write_markdown_document.py +89 -0
- devsper/tools/documents/write_word_document.py +149 -0
- devsper/tools/experiments/__init__.py +13 -0
- devsper/tools/experiments/bootstrap_estimator.py +54 -0
- devsper/tools/experiments/experiment_report_generator.py +50 -0
- devsper/tools/experiments/experiment_tracker.py +36 -0
- devsper/tools/experiments/grid_search_runner.py +50 -0
- devsper/tools/experiments/model_benchmark_runner.py +45 -0
- devsper/tools/experiments/monte_carlo_experiment.py +38 -0
- devsper/tools/experiments/parameter_sweep_runner.py +51 -0
- devsper/tools/experiments/result_comparator.py +58 -0
- devsper/tools/experiments/simulation_runner.py +43 -0
- devsper/tools/experiments/statistical_significance_test.py +56 -0
- devsper/tools/experiments/swarm_map_reduce.py +42 -0
- devsper/tools/filesystem/__init__.py +12 -0
- devsper/tools/filesystem/append_file.py +42 -0
- devsper/tools/filesystem/file_hash.py +40 -0
- devsper/tools/filesystem/file_line_count.py +36 -0
- devsper/tools/filesystem/file_metadata.py +38 -0
- devsper/tools/filesystem/file_preview.py +55 -0
- devsper/tools/filesystem/find_large_files.py +50 -0
- devsper/tools/filesystem/list_directory.py +39 -0
- devsper/tools/filesystem/read_file.py +35 -0
- devsper/tools/filesystem/search_files.py +60 -0
- devsper/tools/filesystem/write_file.py +41 -0
- devsper/tools/flagship/__init__.py +15 -0
- devsper/tools/flagship/distributed_document_analysis.py +77 -0
- devsper/tools/flagship/docproc_corpus_pipeline.py +91 -0
- devsper/tools/flagship/repository_semantic_map.py +99 -0
- devsper/tools/flagship/research_graph_builder.py +111 -0
- devsper/tools/flagship/swarm_experiment_runner.py +86 -0
- devsper/tools/knowledge/__init__.py +10 -0
- devsper/tools/knowledge/citation_graph_builder.py +69 -0
- devsper/tools/knowledge/concept_frequency_analyzer.py +74 -0
- devsper/tools/knowledge/corpus_builder.py +66 -0
- devsper/tools/knowledge/cross_document_entity_linker.py +71 -0
- devsper/tools/knowledge/document_corpus_summary.py +68 -0
- devsper/tools/knowledge/document_topic_extractor.py +58 -0
- devsper/tools/knowledge/knowledge_graph_extractor.py +58 -0
- devsper/tools/knowledge/timeline_extractor.py +59 -0
- devsper/tools/math/__init__.py +12 -0
- devsper/tools/math/calculate_expression.py +52 -0
- devsper/tools/math/correlation.py +44 -0
- devsper/tools/math/distribution_summary.py +39 -0
- devsper/tools/math/histogram.py +53 -0
- devsper/tools/math/linear_regression.py +47 -0
- devsper/tools/math/matrix_multiply.py +38 -0
- devsper/tools/math/mean_std.py +35 -0
- devsper/tools/math/monte_carlo_simulation.py +43 -0
- devsper/tools/math/polynomial_fit.py +40 -0
- devsper/tools/math/random_sample.py +36 -0
- devsper/tools/mcp/__init__.py +23 -0
- devsper/tools/mcp/adapter.py +53 -0
- devsper/tools/mcp/client.py +235 -0
- devsper/tools/mcp/discovery.py +53 -0
- devsper/tools/memory/__init__.py +16 -0
- devsper/tools/memory/delete_memory.py +25 -0
- devsper/tools/memory/list_memory.py +34 -0
- devsper/tools/memory/search_memory.py +36 -0
- devsper/tools/memory/store_memory.py +47 -0
- devsper/tools/memory/summarize_memory.py +41 -0
- devsper/tools/memory/tag_memory.py +47 -0
- devsper/tools/pipelines.py +92 -0
- devsper/tools/registry.py +39 -0
- devsper/tools/research/__init__.py +12 -0
- devsper/tools/research/arxiv_download.py +55 -0
- devsper/tools/research/arxiv_search.py +58 -0
- devsper/tools/research/citation_extractor.py +35 -0
- devsper/tools/research/duckduckgo_search.py +42 -0
- devsper/tools/research/paper_metadata_extractor.py +45 -0
- devsper/tools/research/paper_summarizer.py +41 -0
- devsper/tools/research/research_question_generator.py +39 -0
- devsper/tools/research/topic_cluster.py +46 -0
- devsper/tools/research/web_search.py +47 -0
- devsper/tools/research/wikipedia_lookup.py +50 -0
- devsper/tools/research_advanced/__init__.py +14 -0
- devsper/tools/research_advanced/citation_context_extractor.py +60 -0
- devsper/tools/research_advanced/literature_review_generator.py +79 -0
- devsper/tools/research_advanced/methodology_extractor.py +58 -0
- devsper/tools/research_advanced/paper_contribution_extractor.py +50 -0
- devsper/tools/research_advanced/paper_dataset_identifier.py +49 -0
- devsper/tools/research_advanced/paper_method_comparator.py +62 -0
- devsper/tools/research_advanced/paper_similarity_search.py +69 -0
- devsper/tools/research_advanced/paper_trend_analyzer.py +69 -0
- devsper/tools/research_advanced/parallel_document_analyzer.py +56 -0
- devsper/tools/research_advanced/research_gap_finder.py +71 -0
- devsper/tools/research_advanced/research_topic_mapper.py +69 -0
- devsper/tools/research_advanced/swarm_literature_review.py +58 -0
- devsper/tools/scoring/__init__.py +52 -0
- devsper/tools/scoring/report.py +44 -0
- devsper/tools/scoring/scorer.py +39 -0
- devsper/tools/scoring/selector.py +61 -0
- devsper/tools/scoring/store.py +267 -0
- devsper/tools/selector.py +130 -0
- devsper/tools/system/__init__.py +12 -0
- devsper/tools/system/cpu_usage.py +22 -0
- devsper/tools/system/disk_usage.py +35 -0
- devsper/tools/system/environment_variables.py +29 -0
- devsper/tools/system/memory_usage.py +23 -0
- devsper/tools/system/pip_install.py +44 -0
- devsper/tools/system/pip_search.py +29 -0
- devsper/tools/system/process_list.py +34 -0
- devsper/tools/system/python_package_list.py +40 -0
- devsper/tools/system/run_shell_command.py +51 -0
- devsper/tools/system/system_info.py +26 -0
- devsper/tools/tool_runner.py +122 -0
- devsper/tui/__init__.py +5 -0
- devsper/tui/activity_feed_view.py +73 -0
- devsper/tui/adaptive_tasks_view.py +75 -0
- devsper/tui/agent_role_view.py +35 -0
- devsper/tui/app.py +395 -0
- devsper/tui/dashboard_screen.py +290 -0
- devsper/tui/dev_view.py +99 -0
- devsper/tui/inject_screen.py +73 -0
- devsper/tui/knowledge_graph_view.py +46 -0
- devsper/tui/layout.py +43 -0
- devsper/tui/logs_view.py +83 -0
- devsper/tui/memory_view.py +58 -0
- devsper/tui/performance_view.py +33 -0
- devsper/tui/reasoning_graph_view.py +39 -0
- devsper/tui/results_view.py +139 -0
- devsper/tui/swarm_view.py +37 -0
- devsper/tui/task_detail_screen.py +55 -0
- devsper/tui/task_view.py +103 -0
- devsper/types/event.py +97 -0
- devsper/types/exceptions.py +21 -0
- devsper/types/swarm.py +41 -0
- devsper/types/task.py +80 -0
- devsper/upgrade/__init__.py +21 -0
- devsper/upgrade/changelog.py +124 -0
- devsper/upgrade/cli.py +145 -0
- devsper/upgrade/installer.py +103 -0
- devsper/upgrade/notifier.py +52 -0
- devsper/upgrade/version_check.py +121 -0
- devsper/utils/event_logger.py +88 -0
- devsper/utils/http.py +43 -0
- devsper/utils/models.py +54 -0
- devsper/visualization/__init__.py +5 -0
- devsper/visualization/dag_export.py +67 -0
- devsper/workflow/__init__.py +18 -0
- devsper/workflow/conditions.py +157 -0
- devsper/workflow/context.py +108 -0
- devsper/workflow/loader.py +156 -0
- devsper/workflow/resolver.py +109 -0
- devsper/workflow/runner.py +562 -0
- devsper/workflow/schema.py +63 -0
- devsper/workflow/validator.py +128 -0
- devsper-2.1.6.dist-info/METADATA +346 -0
- devsper-2.1.6.dist-info/RECORD +375 -0
- devsper-2.1.6.dist-info/WHEEL +4 -0
- devsper-2.1.6.dist-info/entry_points.txt +3 -0
- devsper-2.1.6.dist-info/licenses/LICENSE +639 -0
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Provider base interface.
|
|
3
|
+
|
|
4
|
+
All providers implement generate(model, prompt, stream=False).
|
|
5
|
+
When stream=True, yield chunks; when stream=False, return full text.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from abc import ABC, abstractmethod
|
|
9
|
+
from typing import Iterator
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class BaseProvider(ABC):
|
|
13
|
+
"""Common interface for OpenAI, Anthropic, Gemini, GitHub, and mock."""
|
|
14
|
+
|
|
15
|
+
@abstractmethod
|
|
16
|
+
def generate(self, model: str, prompt: str, stream: bool = False) -> str | Iterator[str]:
|
|
17
|
+
"""Return model output (str or iterator of chunks when stream=True)."""
|
|
18
|
+
...
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _ensure_str(result) -> str:
|
|
22
|
+
"""If result is an iterator (stream=True), consume and return concatenated string."""
|
|
23
|
+
if hasattr(result, "__iter__") and not isinstance(result, str):
|
|
24
|
+
return "".join(result)
|
|
25
|
+
return result if isinstance(result, str) else str(result)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _mock_planner_response(prompt: str) -> str:
|
|
29
|
+
"""Return a valid numbered list so the planner can parse subtasks when no API key is set."""
|
|
30
|
+
if "Break the following task into" in prompt or "smaller steps" in prompt:
|
|
31
|
+
task_part = "Summarize the topic" # fallback
|
|
32
|
+
if "Task:" in prompt:
|
|
33
|
+
start = prompt.find("Task:") + 5
|
|
34
|
+
end = prompt.find("\n", start) if "\n" in prompt[start:] else len(prompt)
|
|
35
|
+
task_part = prompt[start:end].strip()[:80]
|
|
36
|
+
return (
|
|
37
|
+
f"1. Define and scope the topic: {task_part}\n"
|
|
38
|
+
"2. Gather key points and examples\n"
|
|
39
|
+
"3. Synthesize into a clear summary\n"
|
|
40
|
+
"4. Add one or two concrete details\n"
|
|
41
|
+
"5. Polish into a single paragraph"
|
|
42
|
+
)
|
|
43
|
+
return f"Completed: {prompt[:200]}{'...' if len(prompt) > 200 else ''}"
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _mock_agent_response(prompt: str) -> str:
|
|
47
|
+
"""Return a short on-topic stub for agent tasks so demo output is coherent."""
|
|
48
|
+
if "Task:" in prompt:
|
|
49
|
+
task_start = prompt.find("Task:") + 5
|
|
50
|
+
rest = prompt[task_start:].strip()
|
|
51
|
+
first_line = rest.split("\n")[0].strip()[:100]
|
|
52
|
+
return (
|
|
53
|
+
f"[Mock] Summary for: {first_line}\n\n"
|
|
54
|
+
"Swarm intelligence is collective behavior that emerges from many simple agents "
|
|
55
|
+
"following local rules (e.g. ants, bees, flocks). No central controller is needed; "
|
|
56
|
+
"coordination arises from stigmergy, feedback, and self-organization."
|
|
57
|
+
)
|
|
58
|
+
return f"Completed: {prompt[:200]}{'...' if len(prompt) > 200 else ''}"
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class MockProvider(BaseProvider):
    """Deterministic in-memory provider used by tests and as the no-API-key default stub."""

    def generate(self, model: str, prompt: str, stream: bool = False) -> str | Iterator[str]:
        """Dispatch on prompt markers to a canned planner / agent / generic reply."""
        if "Break the following task" in prompt or "5 smaller steps" in prompt:
            text = _mock_planner_response(prompt)
        elif "You are an AI worker" in prompt or "Task:" in prompt:
            text = _mock_agent_response(prompt)
        else:
            text = f"Completed: {prompt[:200]}{'...' if len(prompt) > 200 else ''}"
        if not stream:
            return text

        def _single_chunk() -> Iterator[str]:
            # Emulate streaming with one chunk so callers can iterate uniformly.
            yield text

        return _single_chunk()
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
"""
|
|
2
|
+
v1.6: Route tasks to simple/medium/complex tiers and select model (fast / worker / quality).
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Any, Literal
|
|
6
|
+
|
|
7
|
+
from devsper.types.task import Task
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# Tier table consulted by the complexity router: per-tier tool-count threshold,
# rough token-budget estimate, and the roles that start in that tier by default.
TIERS: dict[str, dict[str, Any]] = {
    # Lightweight transform-style work.
    "simple": {
        "max_tools": 2,
        "max_tokens_est": 500,
        "roles": ["summarize", "extract", "format"],
    },
    # Typical single-agent research/analysis/coding tasks.
    "medium": {
        "max_tools": 5,
        "max_tokens_est": 2000,
        "roles": ["research", "analysis", "code"],
    },
    # Effectively unbounded tier for heavyweight roles.
    "complex": {
        "max_tools": 99,
        "max_tokens_est": 99999,
        "roles": ["architect", "critic", "experiment"],
    },
}
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class TaskComplexityRouter:
    """Classify task complexity and select model tier (simple → fast, medium → worker, complex → quality)."""

    # Class-level alias so callers can read the thresholds through the router.
    TIERS = TIERS

    def classify(self, task: Task, tools_selected: list[Any]) -> Literal["simple", "medium", "complex"]:
        """
        Score task complexity:
        - Role → base tier (simple/medium/complex from TIERS)
        - Tool count → upgrade tier if tools_selected > threshold
        - Description length → upgrade if > 200 words
        - Dependencies count → upgrade if task.dependencies > 3
        - Speculative flag → downgrade one tier
        Return final tier.
        """
        # Base tier comes from the task's role; unknown/missing roles default to "medium".
        base = "medium"
        role = (getattr(task, "role", None) or "").lower()
        for tier, cfg in TIERS.items():
            if role in [r.lower() for r in cfg["roles"]]:
                base = tier
                break

        tier = base
        # Tool count: exceeding a tier's max_tools bumps the task up a tier
        # (a "simple" task with >5 tools passes through both checks to "complex").
        n_tools = len(tools_selected) if tools_selected else 0
        if n_tools > TIERS["simple"]["max_tools"] and tier == "simple":
            tier = "medium"
        if n_tools > TIERS["medium"]["max_tools"] and tier == "medium":
            tier = "complex"

        # Description length in words.
        # NOTE(review): these two ifs chain — a "simple" task with >200 words is
        # upgraded twice in a row (simple → medium → complex), never landing on
        # "medium". Confirm the double upgrade is intended rather than a
        # single-tier bump.
        words = len((task.description or "").split())
        if words > 200 and tier == "simple":
            tier = "medium"
        if words > 200 and tier == "medium":
            tier = "complex"

        # Dependency count (>3) upgrades the same way; the same chaining note applies.
        deps = len(task.dependencies) if task.dependencies else 0
        if deps > 3 and tier == "simple":
            tier = "medium"
        if deps > 3 and tier == "medium":
            tier = "complex"

        # Speculative tasks are throwaway pre-computation: drop one tier to save cost.
        if getattr(task, "speculative", False):
            if tier == "complex":
                tier = "medium"
            elif tier == "medium":
                tier = "simple"

        return tier

    def select_model(self, tier: str, config: Any) -> str:
        """
        simple → config.fast_model (or config.fast) — default: worker if not set
        medium → config.worker_model (or config.worker)
        complex → config.quality_model (or config.quality) — default: planner
        If model not configured: fall back to config.worker for all tiers.
        """
        # Accept both short attribute names and the *_model variants for
        # worker/planner; "mock" is the last-resort default.
        # NOTE(review): the docstring mentions config.fast_model / config.quality_model,
        # but only .fast and .quality are actually read below — confirm which
        # attribute names the config object really exposes.
        worker = getattr(config, "worker", None) or getattr(config, "worker_model", "mock")
        planner = getattr(config, "planner", None) or getattr(config, "planner_model", "mock")
        fast = getattr(config, "fast", None)
        quality = getattr(config, "quality", None) or planner

        if tier == "simple":
            return fast if fast else worker
        if tier == "complex":
            return quality
        return worker
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
"""Google Gemini provider adapter using LangChain."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from typing import Iterator
|
|
5
|
+
|
|
6
|
+
from langchain_core.messages import HumanMessage
|
|
7
|
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
|
8
|
+
|
|
9
|
+
from devsper.providers.base import BaseProvider
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class GeminiProvider(BaseProvider):
    """Google Gemini API adapter.

    Resolves the key from the api_key argument, then GOOGLE_API_KEY, then
    GEMINI_API_KEY, and raises ValueError when none is available.
    """

    def __init__(self, api_key: str | None = None) -> None:
        resolved = api_key or os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY")
        if not resolved:
            raise ValueError("Gemini requires api_key or GOOGLE_API_KEY or GEMINI_API_KEY")
        self.api_key = resolved

    def generate(self, model: str, prompt: str, stream: bool = False) -> str | Iterator[str]:
        """Invoke the Gemini chat model and return its text output (single-chunk iterator when stream=True)."""
        llm = ChatGoogleGenerativeAI(
            model=model,
            google_api_key=self.api_key,
            temperature=0,
        )
        reply = llm.invoke([HumanMessage(content=prompt)])
        raw = reply.content
        text = raw if isinstance(raw, str) else str(raw)
        if not stream:
            return text

        def _one_chunk():
            # The response is already fully materialized; yield it as one chunk.
            yield text

        return _one_chunk()
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
"""GitHub Models provider (GitHub Models API at models.github.ai)."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
import os
|
|
6
|
+
import time
|
|
7
|
+
from typing import Iterator
|
|
8
|
+
|
|
9
|
+
import httpx
|
|
10
|
+
|
|
11
|
+
from devsper.providers.base import BaseProvider
|
|
12
|
+
|
|
13
|
+
# Module-level logger for rate-limit/retry diagnostics.
log = logging.getLogger(__name__)

# Retry 429 (rate limit): max attempts, base delay in seconds, max delay cap
# (exponential backoff: base * 2**attempt, clamped to the cap).
GITHUB_429_MAX_RETRIES = 3
GITHUB_429_BASE_DELAY = 1.0
GITHUB_429_MAX_DELAY = 32.0

# GitHub Models API (replaces deprecated models.inference.ai.azure.com)
# Docs: https://docs.github.com/en/rest/models/inference
GITHUB_MODELS_BASE = "https://models.github.ai"
GITHUB_MODELS_CHAT_URL = f"{GITHUB_MODELS_BASE}/inference/chat/completions"
# Pinned via the X-GitHub-Api-Version request header.
GITHUB_API_VERSION = "2022-11-28"
|
|
25
|
+
|
|
26
|
+
# Default model when user selects "copilot" or "github:copilot" (publisher/model format)
|
|
27
|
+
DEFAULT_GITHUB_MODEL = "openai/gpt-4.1"
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _normalize_model_id(model: str) -> str:
|
|
31
|
+
"""Return API model ID: publisher/name. 'copilot' or 'github:copilot' -> default; else strip github: prefix."""
|
|
32
|
+
s = model.split(":", 1)[-1].strip() if ":" in model else model.strip()
|
|
33
|
+
if not s or s.lower() == "copilot":
|
|
34
|
+
return DEFAULT_GITHUB_MODEL
|
|
35
|
+
return s
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class GitHubProvider(BaseProvider):
    """
    GitHub Models API adapter (models.github.ai).
    Uses GITHUB_TOKEN with models:read scope (fine-grained PAT or classic).

    Both the sync and streaming paths retry HTTP 429 with capped exponential
    backoff (GITHUB_429_* constants) and append the server's Retry-After hint
    to the error message when retries are exhausted.
    """

    def __init__(self, token: str | None = None) -> None:
        # Explicit token wins; otherwise fall back to the GITHUB_TOKEN env var.
        self.token = token or os.environ.get("GITHUB_TOKEN")
        if not self.token:
            raise ValueError("GitHub provider requires GITHUB_TOKEN")

    def _headers(self) -> dict[str, str]:
        """Build request headers: bearer auth plus the pinned API version."""
        return {
            "Accept": "application/vnd.github.v3+json",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.token}",
            "X-GitHub-Api-Version": GITHUB_API_VERSION,
        }

    def generate(
        self, model: str, prompt: str, stream: bool = False
    ) -> str | Iterator[str]:
        """Call GitHub Models inference/chat/completions and return the assistant message content."""
        api_model = _normalize_model_id(model)
        if stream:
            # Returns a generator; the request is only issued once it is iterated.
            return self._generate_stream(api_model, prompt)
        return self._generate_sync(api_model, prompt)

    def _generate_sync(self, model: str, prompt: str) -> str:
        """Blocking chat-completions request; returns "" when the response has no choices."""
        payload = {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.2,
        }
        data = {}
        # Deferred import — presumably to avoid an import cycle; TODO confirm.
        from devsper.utils.http import ssl_verify, format_retry_after
        with httpx.Client(timeout=60.0, verify=ssl_verify()) as client:
            for attempt in range(GITHUB_429_MAX_RETRIES):
                try:
                    resp = client.post(
                        GITHUB_MODELS_CHAT_URL,
                        headers=self._headers(),
                        json=payload,
                    )
                    resp.raise_for_status()
                    data = resp.json()
                    break
                except httpx.HTTPStatusError as e:
                    retry_hint = format_retry_after(e.response)
                    # Only 429 is retried; any other HTTP error (or the final
                    # 429 attempt) is re-raised, with the Retry-After hint
                    # folded into the message when the server supplied one.
                    if e.response.status_code == 429 and attempt < GITHUB_429_MAX_RETRIES - 1:
                        delay = min(
                            GITHUB_429_BASE_DELAY * (2**attempt),
                            GITHUB_429_MAX_DELAY,
                        )
                        log.warning(
                            "GitHub Models 429 rate limit, retry %s/%s in %.1fs%s",
                            attempt + 1,
                            GITHUB_429_MAX_RETRIES,
                            delay,
                            retry_hint,
                        )
                        time.sleep(delay)
                    else:
                        if retry_hint:
                            raise httpx.HTTPStatusError(
                                str(e) + retry_hint, request=e.request, response=e.response
                            ) from e
                        raise
        # Extract the assistant message text from the OpenAI-compatible shape.
        choices = data.get("choices") or []
        if not choices:
            return ""
        msg = choices[0].get("message") or {}
        content = msg.get("content")
        if isinstance(content, str):
            return content
        if isinstance(content, list):
            # OpenAI-style content parts: [{"type": "text", "text": "..."}]
            parts = []
            for part in content:
                if isinstance(part, dict) and part.get("type") == "text":
                    t = part.get("text")
                    if isinstance(t, str):
                        parts.append(t)
            return "\n".join(parts) if parts else ""
        return str(content or "") if content is not None else ""

    def _generate_stream(self, model: str, prompt: str) -> Iterator[str]:
        """Stream chat-completions as SSE and yield delta-content chunks as they arrive."""
        payload = {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.2,
            "stream": True,
        }
        # Deferred import — presumably to avoid an import cycle; TODO confirm.
        from devsper.utils.http import ssl_verify, format_retry_after
        with httpx.Client(timeout=60.0, verify=ssl_verify()) as client:
            for attempt in range(GITHUB_429_MAX_RETRIES):
                try:
                    with client.stream(
                        "POST",
                        GITHUB_MODELS_CHAT_URL,
                        headers=self._headers(),
                        json=payload,
                    ) as resp:
                        # raise_for_status fires at header time, before any chunk
                        # is yielded, so a 429 retry cannot duplicate output.
                        resp.raise_for_status()
                        for line in resp.iter_lines():
                            if not line or not line.strip():
                                continue
                            if line.startswith("data: "):
                                chunk = line[6:].strip()
                                if chunk == "[DONE]":
                                    return
                                try:
                                    data = json.loads(chunk)
                                    choices = data.get("choices") or []
                                    if choices:
                                        delta = choices[0].get("delta") or {}
                                        part = delta.get("content")
                                        if part:
                                            yield part
                                except Exception:
                                    # Malformed SSE chunks are skipped silently
                                    # (best-effort streaming).
                                    pass
                        return
                except httpx.HTTPStatusError as e:
                    retry_hint = format_retry_after(e.response)
                    # Same 429-only backoff policy as the sync path.
                    if e.response.status_code == 429 and attempt < GITHUB_429_MAX_RETRIES - 1:
                        delay = min(
                            GITHUB_429_BASE_DELAY * (2**attempt),
                            GITHUB_429_MAX_DELAY,
                        )
                        log.warning(
                            "GitHub Models 429 rate limit (stream), retry %s/%s in %.1fs%s",
                            attempt + 1,
                            GITHUB_429_MAX_RETRIES,
                            delay,
                            retry_hint,
                        )
                        time.sleep(delay)
                    else:
                        if retry_hint:
                            raise httpx.HTTPStatusError(
                                str(e) + retry_hint, request=e.request, response=e.response
                            ) from e
                        raise
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Automatic model routing by cost, latency, and quality tier.
|
|
3
|
+
|
|
4
|
+
Use select_model(task_type) to get the best model for a given task type.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Literal
|
|
8
|
+
|
|
9
|
+
TaskType = Literal["planning", "analysis", "summarization", "code", "fast"]
|
|
10
|
+
|
|
11
|
+
# cost: relative cost per request, latency: relative latency tier, quality: 1-5
|
|
12
|
+
MODEL_REGISTRY: dict[str, dict[str, float]] = {
|
|
13
|
+
"gpt-4o": {"cost": 0.01, "latency": 2, "quality": 5},
|
|
14
|
+
"gpt-4o-mini": {"cost": 0.002, "latency": 1, "quality": 3},
|
|
15
|
+
"claude-3.5-sonnet": {"cost": 0.008, "latency": 2, "quality": 5},
|
|
16
|
+
"phi-3": {"cost": 0.001, "latency": 1, "quality": 2},
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def select_model(task_type: TaskType) -> str:
|
|
21
|
+
"""
|
|
22
|
+
Select the best model for the given task type.
|
|
23
|
+
|
|
24
|
+
- planning → high quality (claude-3.5-sonnet)
|
|
25
|
+
- analysis → balanced (gpt-4o)
|
|
26
|
+
- summarization → balanced (gpt-4o)
|
|
27
|
+
- code → balanced (gpt-4o)
|
|
28
|
+
- fast → cheapest / low latency (gpt-4o-mini)
|
|
29
|
+
"""
|
|
30
|
+
if task_type == "planning":
|
|
31
|
+
return "claude-3.5-sonnet"
|
|
32
|
+
if task_type == "analysis":
|
|
33
|
+
return "gpt-4o"
|
|
34
|
+
if task_type == "summarization":
|
|
35
|
+
return "gpt-4o"
|
|
36
|
+
if task_type == "code":
|
|
37
|
+
return "gpt-4o"
|
|
38
|
+
if task_type == "fast":
|
|
39
|
+
return "gpt-4o-mini"
|
|
40
|
+
return "gpt-4o-mini"
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
"""OpenAI provider adapter. Supports standard OpenAI API and Azure OpenAI."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
from langchain_core.messages import HumanMessage
|
|
6
|
+
from langchain_openai import AzureChatOpenAI, ChatOpenAI
|
|
7
|
+
|
|
8
|
+
from devsper.providers.base import BaseProvider
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _is_azure_foundry_v1_endpoint(endpoint: str) -> bool:
|
|
12
|
+
"""True if endpoint is Azure Foundry v1 style (use ChatOpenAI + base_url, not AzureChatOpenAI)."""
|
|
13
|
+
if not endpoint:
|
|
14
|
+
return False
|
|
15
|
+
e = endpoint.rstrip("/").lower()
|
|
16
|
+
return "/openai/v1" in e or "cognitiveservices.azure.com" in e
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class OpenAIProvider(BaseProvider):
    """
    OpenAI API adapter. Supports standard OpenAI and Azure OpenAI.

    Standard: OPENAI_API_KEY (or pass api_key). Model passed in generate(model, prompt).
    Azure: set azure=True and azure_endpoint (and optionally azure_deployment for single-deployment).
    When Azure is used, generate(model, prompt) uses model as the deployment name so multiple
    deployments (e.g. gpt-4o, gpt-5-mini) on the same endpoint are supported.

    Azure Foundry (cognitiveservices.azure.com or .../openai/v1) uses the v1 API: one base URL
    and deployment name in the request body. Legacy Azure (openai.azure.com) uses deployment in the path.
    """

    def __init__(
        self,
        api_key: str | None = None,
        *,
        azure: bool = False,
        azure_endpoint: str | None = None,
        azure_deployment: str | None = None,
        api_version: str | None = None,
    ) -> None:
        env = os.environ
        self.api_key = api_key or env.get("OPENAI_API_KEY")
        self.azure = azure
        raw_endpoint = azure_endpoint or env.get("AZURE_OPENAI_ENDPOINT") or ""
        self.azure_endpoint = raw_endpoint.strip().rstrip("/")
        self.azure_deployment = azure_deployment or env.get(
            "AZURE_OPENAI_DEPLOYMENT_NAME"
        )
        self.api_version = api_version or env.get(
            "AZURE_OPENAI_API_VERSION", "2024-05-01-preview"
        )
        # One cached LangChain client per deployment/model name.
        self._llm: AzureChatOpenAI | ChatOpenAI | None = None
        self._llm_cache: dict[str, AzureChatOpenAI | ChatOpenAI] = {}
        if not azure:
            if not self.api_key:
                raise ValueError("OpenAI requires api_key or OPENAI_API_KEY")
            return
        # Azure mode: validate endpoint + key up front.
        if not self.azure_endpoint:
            raise ValueError("Azure requires azure_endpoint or AZURE_OPENAI_ENDPOINT")
        key = self.api_key or env.get("AZURE_OPENAI_API_KEY")
        if not key:
            raise ValueError("Azure requires api_key or AZURE_OPENAI_API_KEY")
        self._azure_key = key
        self._azure_foundry = _is_azure_foundry_v1_endpoint(self.azure_endpoint)

    def generate(self, model: str, prompt: str, stream: bool = False):
        """Call OpenAI or Azure OpenAI and return the model output text (or stream chunks if stream=True)."""
        if self.azure:
            llm = self._azure_llm(model)
        else:
            llm = ChatOpenAI(
                model=model,
                api_key=self.api_key,
                temperature=0,
            )
        message = llm.invoke([HumanMessage(content=prompt)])
        content = message.content
        text = content if isinstance(content, str) else str(content)
        if not stream:
            return text

        def _one_shot():
            # No token-level streaming here: the full reply is one chunk.
            yield text

        return _one_shot()

    def _azure_llm(self, model: str) -> AzureChatOpenAI | ChatOpenAI:
        """Resolve the Azure deployment name and return a cached client for it."""
        raw = (model or self.azure_deployment or "").strip()
        # Accept "vendor:deployment" specs; keep only the deployment part.
        deployment = raw.split(":", 1)[-1].strip() if ":" in raw else raw
        deployment = deployment or self.azure_deployment
        if not deployment:
            raise ValueError("Azure requires model name or AZURE_OPENAI_DEPLOYMENT_NAME")
        cached = self._llm_cache.get(deployment)
        if cached is not None:
            return cached
        if self._azure_foundry:
            # Azure Foundry v1: single base URL, model in body (no deployment in path)
            base = self.azure_endpoint if self.azure_endpoint.endswith("/") else self.azure_endpoint + "/"
            client = ChatOpenAI(
                base_url=base,
                api_key=self._azure_key,
                model=deployment,
                temperature=0,
            )
        else:
            client = AzureChatOpenAI(
                azure_endpoint=self.azure_endpoint,
                azure_deployment=deployment,
                openai_api_key=self._azure_key,
                api_version=self.api_version,
            )
        self._llm_cache[deployment] = client
        return client
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"""Router package: legacy ProviderRouter + v2 LLMRouter."""
|
|
2
|
+
|
|
3
|
+
from devsper.providers.router.base import LLMBackend, LLMRequest, LLMResponse
|
|
4
|
+
from devsper.providers.router.router import LLMRouter
|
|
5
|
+
from devsper.providers.router.legacy import (
|
|
6
|
+
ProviderRouter,
|
|
7
|
+
get_router,
|
|
8
|
+
_parse_model_spec,
|
|
9
|
+
_model_to_vendor,
|
|
10
|
+
)
|
|
11
|
+
|
|
12
|
+
__all__ = [
|
|
13
|
+
"LLMBackend",
|
|
14
|
+
"LLMRequest",
|
|
15
|
+
"LLMResponse",
|
|
16
|
+
"LLMRouter",
|
|
17
|
+
"ProviderRouter",
|
|
18
|
+
"get_router",
|
|
19
|
+
"_parse_model_spec",
|
|
20
|
+
"_model_to_vendor",
|
|
21
|
+
]
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""LLM backends for the router."""
|
|
2
|
+
|
|
3
|
+
from devsper.providers.router.backends.openai_backend import OpenAIBackend
|
|
4
|
+
from devsper.providers.router.backends.anthropic_backend import AnthropicBackend
|
|
5
|
+
from devsper.providers.router.backends.gemini_backend import GeminiBackend
|
|
6
|
+
from devsper.providers.router.backends.github_backend import GitHubBackend
|
|
7
|
+
from devsper.providers.router.backends.ollama_backend import OllamaBackend
|
|
8
|
+
from devsper.providers.router.backends.vllm_backend import VLLMBackend
|
|
9
|
+
from devsper.providers.router.backends.custom_backend import CustomBackend
|
|
10
|
+
|
|
11
|
+
__all__ = [
|
|
12
|
+
"OpenAIBackend",
|
|
13
|
+
"AnthropicBackend",
|
|
14
|
+
"GeminiBackend",
|
|
15
|
+
"GitHubBackend",
|
|
16
|
+
"OllamaBackend",
|
|
17
|
+
"VLLMBackend",
|
|
18
|
+
"CustomBackend",
|
|
19
|
+
]
|