massgen-0.0.3-py3-none-any.whl → massgen-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- massgen/__init__.py +142 -8
- massgen/adapters/__init__.py +29 -0
- massgen/adapters/ag2_adapter.py +483 -0
- massgen/adapters/base.py +183 -0
- massgen/adapters/tests/__init__.py +0 -0
- massgen/adapters/tests/test_ag2_adapter.py +439 -0
- massgen/adapters/tests/test_agent_adapter.py +128 -0
- massgen/adapters/utils/__init__.py +2 -0
- massgen/adapters/utils/ag2_utils.py +236 -0
- massgen/adapters/utils/tests/__init__.py +0 -0
- massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
- massgen/agent_config.py +329 -55
- massgen/api_params_handler/__init__.py +10 -0
- massgen/api_params_handler/_api_params_handler_base.py +99 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
- massgen/api_params_handler/_claude_api_params_handler.py +113 -0
- massgen/api_params_handler/_response_api_params_handler.py +130 -0
- massgen/backend/__init__.py +39 -4
- massgen/backend/azure_openai.py +385 -0
- massgen/backend/base.py +341 -69
- massgen/backend/base_with_mcp.py +1102 -0
- massgen/backend/capabilities.py +386 -0
- massgen/backend/chat_completions.py +577 -130
- massgen/backend/claude.py +1033 -537
- massgen/backend/claude_code.py +1203 -0
- massgen/backend/cli_base.py +209 -0
- massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
- massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
- massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
- massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
- massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
- massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
- massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
- massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
- massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
- massgen/backend/docs/inference_backend.md +257 -0
- massgen/backend/docs/permissions_and_context_files.md +1085 -0
- massgen/backend/external.py +126 -0
- massgen/backend/gemini.py +1850 -241
- massgen/backend/grok.py +40 -156
- massgen/backend/inference.py +156 -0
- massgen/backend/lmstudio.py +171 -0
- massgen/backend/response.py +1095 -322
- massgen/chat_agent.py +131 -113
- massgen/cli.py +1560 -275
- massgen/config_builder.py +2396 -0
- massgen/configs/BACKEND_CONFIGURATION.md +458 -0
- massgen/configs/README.md +559 -216
- massgen/configs/ag2/ag2_case_study.yaml +27 -0
- massgen/configs/ag2/ag2_coder.yaml +34 -0
- massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
- massgen/configs/ag2/ag2_gemini.yaml +27 -0
- massgen/configs/ag2/ag2_groupchat.yaml +108 -0
- massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
- massgen/configs/ag2/ag2_single_agent.yaml +21 -0
- massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
- massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
- massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
- massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
- massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
- massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
- massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
- massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
- massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
- massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
- massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
- massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
- massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
- massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
- massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
- massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
- massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
- massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
- massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
- massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
- massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
- massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
- massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
- massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
- massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
- massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
- massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
- massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
- massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
- massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
- massgen/configs/debug/skip_coordination_test.yaml +27 -0
- massgen/configs/debug/test_sdk_migration.yaml +17 -0
- massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
- massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
- massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
- massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
- massgen/configs/providers/claude/claude.yaml +14 -0
- massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
- massgen/configs/providers/local/lmstudio.yaml +11 -0
- massgen/configs/providers/openai/gpt5.yaml +46 -0
- massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
- massgen/configs/providers/others/grok_single_agent.yaml +19 -0
- massgen/configs/providers/others/zai_coding_team.yaml +108 -0
- massgen/configs/providers/others/zai_glm45.yaml +12 -0
- massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
- massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
- massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
- massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
- massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
- massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
- massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
- massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
- massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
- massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
- massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
- massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
- massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
- massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
- massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
- massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
- massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
- massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
- massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
- massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
- massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
- massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
- massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
- massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
- massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
- massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
- massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
- massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
- massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
- massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
- massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
- massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
- massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
- massgen/coordination_tracker.py +708 -0
- massgen/docker/README.md +462 -0
- massgen/filesystem_manager/__init__.py +21 -0
- massgen/filesystem_manager/_base.py +9 -0
- massgen/filesystem_manager/_code_execution_server.py +545 -0
- massgen/filesystem_manager/_docker_manager.py +477 -0
- massgen/filesystem_manager/_file_operation_tracker.py +248 -0
- massgen/filesystem_manager/_filesystem_manager.py +813 -0
- massgen/filesystem_manager/_path_permission_manager.py +1261 -0
- massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
- massgen/formatter/__init__.py +10 -0
- massgen/formatter/_chat_completions_formatter.py +284 -0
- massgen/formatter/_claude_formatter.py +235 -0
- massgen/formatter/_formatter_base.py +156 -0
- massgen/formatter/_response_formatter.py +263 -0
- massgen/frontend/__init__.py +1 -2
- massgen/frontend/coordination_ui.py +471 -286
- massgen/frontend/displays/base_display.py +56 -11
- massgen/frontend/displays/create_coordination_table.py +1956 -0
- massgen/frontend/displays/rich_terminal_display.py +1259 -619
- massgen/frontend/displays/simple_display.py +9 -4
- massgen/frontend/displays/terminal_display.py +27 -68
- massgen/logger_config.py +681 -0
- massgen/mcp_tools/README.md +232 -0
- massgen/mcp_tools/__init__.py +105 -0
- massgen/mcp_tools/backend_utils.py +1035 -0
- massgen/mcp_tools/circuit_breaker.py +195 -0
- massgen/mcp_tools/client.py +894 -0
- massgen/mcp_tools/config_validator.py +138 -0
- massgen/mcp_tools/docs/circuit_breaker.md +646 -0
- massgen/mcp_tools/docs/client.md +950 -0
- massgen/mcp_tools/docs/config_validator.md +478 -0
- massgen/mcp_tools/docs/exceptions.md +1165 -0
- massgen/mcp_tools/docs/security.md +854 -0
- massgen/mcp_tools/exceptions.py +338 -0
- massgen/mcp_tools/hooks.py +212 -0
- massgen/mcp_tools/security.py +780 -0
- massgen/message_templates.py +342 -64
- massgen/orchestrator.py +1515 -241
- massgen/stream_chunk/__init__.py +35 -0
- massgen/stream_chunk/base.py +92 -0
- massgen/stream_chunk/multimodal.py +237 -0
- massgen/stream_chunk/text.py +162 -0
- massgen/tests/mcp_test_server.py +150 -0
- massgen/tests/multi_turn_conversation_design.md +0 -8
- massgen/tests/test_azure_openai_backend.py +156 -0
- massgen/tests/test_backend_capabilities.py +262 -0
- massgen/tests/test_backend_event_loop_all.py +179 -0
- massgen/tests/test_chat_completions_refactor.py +142 -0
- massgen/tests/test_claude_backend.py +15 -28
- massgen/tests/test_claude_code.py +268 -0
- massgen/tests/test_claude_code_context_sharing.py +233 -0
- massgen/tests/test_claude_code_orchestrator.py +175 -0
- massgen/tests/test_cli_backends.py +180 -0
- massgen/tests/test_code_execution.py +679 -0
- massgen/tests/test_external_agent_backend.py +134 -0
- massgen/tests/test_final_presentation_fallback.py +237 -0
- massgen/tests/test_gemini_planning_mode.py +351 -0
- massgen/tests/test_grok_backend.py +7 -10
- massgen/tests/test_http_mcp_server.py +42 -0
- massgen/tests/test_integration_simple.py +198 -0
- massgen/tests/test_mcp_blocking.py +125 -0
- massgen/tests/test_message_context_building.py +29 -47
- massgen/tests/test_orchestrator_final_presentation.py +48 -0
- massgen/tests/test_path_permission_manager.py +2087 -0
- massgen/tests/test_rich_terminal_display.py +14 -13
- massgen/tests/test_timeout.py +133 -0
- massgen/tests/test_v3_3agents.py +11 -12
- massgen/tests/test_v3_simple.py +8 -13
- massgen/tests/test_v3_three_agents.py +11 -18
- massgen/tests/test_v3_two_agents.py +8 -13
- massgen/token_manager/__init__.py +7 -0
- massgen/token_manager/token_manager.py +400 -0
- massgen/utils.py +52 -16
- massgen/v1/agent.py +45 -91
- massgen/v1/agents.py +18 -53
- massgen/v1/backends/gemini.py +50 -153
- massgen/v1/backends/grok.py +21 -54
- massgen/v1/backends/oai.py +39 -111
- massgen/v1/cli.py +36 -93
- massgen/v1/config.py +8 -12
- massgen/v1/logging.py +43 -127
- massgen/v1/main.py +18 -32
- massgen/v1/orchestrator.py +68 -209
- massgen/v1/streaming_display.py +62 -163
- massgen/v1/tools.py +8 -12
- massgen/v1/types.py +9 -23
- massgen/v1/utils.py +5 -23
- massgen-0.1.0.dist-info/METADATA +1245 -0
- massgen-0.1.0.dist-info/RECORD +273 -0
- massgen-0.1.0.dist-info/entry_points.txt +2 -0
- massgen/frontend/logging/__init__.py +0 -9
- massgen/frontend/logging/realtime_logger.py +0 -197
- massgen-0.0.3.dist-info/METADATA +0 -568
- massgen-0.0.3.dist-info/RECORD +0 -76
- massgen-0.0.3.dist-info/entry_points.txt +0 -2
- /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
massgen/agent_config.py
CHANGED
@@ -1,15 +1,48 @@
+# -*- coding: utf-8 -*-
 """
 Agent configuration for MassGen framework following input_cases_reference.md
 Simplified configuration focused on the proven binary decision approach.
+
+TODO: This file is outdated - check claude_code config and
+deprecated patterns. Update to reflect current backend architecture.
 """
 
+import warnings
 from dataclasses import dataclass, field
-from typing import
+from typing import TYPE_CHECKING, Any, Dict, Optional
 
 if TYPE_CHECKING:
     from .message_templates import MessageTemplates
 
 
+@dataclass
+class TimeoutConfig:
+    """Configuration for timeout settings in MassGen.
+
+    Args:
+        orchestrator_timeout_seconds: Maximum time for orchestrator coordination (default: 1800s = 30min)
+    """
+
+    orchestrator_timeout_seconds: int = 1800  # 30 minutes
+
+
+@dataclass
+class CoordinationConfig:
+    """Configuration for coordination behavior in MassGen.
+
+    Args:
+        enable_planning_mode: If True, agents plan without executing actions during coordination.
+            Only the winning agent executes actions during final presentation.
+            If False, agents execute actions during coordination (default behavior).
+        planning_mode_instruction: Custom instruction to add when planning mode is enabled.
+    """
+
+    enable_planning_mode: bool = False
+    planning_mode_instruction: str = (
+        "During coordination, describe what you would do without actually executing actions. Only provide concrete implementation details without calling external APIs or tools."
+    )
+
+
 @dataclass
 class AgentConfig:
     """Configuration for MassGen agents using the proven binary decision framework.
@@ -22,6 +55,9 @@ class AgentConfig:
         message_templates: Custom message templates (None=default)
         agent_id: Optional agent identifier for this configuration
         custom_system_instruction: Additional system instruction prepended to evaluation message
+        timeout_config: Timeout and resource limit configuration
+        coordination_config: Coordination behavior configuration (e.g., planning mode)
+        skip_coordination_rounds: Debug/test mode - skip voting rounds and go straight to final presentation (default: False)
     """
 
     # Core backend configuration (includes tool enablement)
@@ -32,7 +68,78 @@ class AgentConfig:
 
     # Agent customization
     agent_id: Optional[str] = None
-
+    _custom_system_instruction: Optional[str] = field(default=None, init=False)
+
+    # Timeout and resource limits
+    timeout_config: TimeoutConfig = field(default_factory=TimeoutConfig)
+
+    # Coordination behavior configuration
+    coordination_config: CoordinationConfig = field(default_factory=CoordinationConfig)
+
+    # Debug/test mode - skip coordination rounds and go straight to final presentation
+    skip_coordination_rounds: bool = False
+
+    @property
+    def custom_system_instruction(self) -> Optional[str]:
+        """
+        DEPRECATED: Use backend-specific system prompt parameters instead.
+
+        For Claude Code: use append_system_prompt or system_prompt in backend_params
+        For other backends: use their respective system prompt parameters
+        """
+        if self._custom_system_instruction is not None:
+            warnings.warn(
+                "custom_system_instruction is deprecated. Use backend-specific " "system prompt parameters instead (e.g., append_system_prompt for Claude Code)",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+        return self._custom_system_instruction
+
+    @custom_system_instruction.setter
+    def custom_system_instruction(self, value: Optional[str]) -> None:
+        if value is not None:
+            warnings.warn(
+                "custom_system_instruction is deprecated. Use backend-specific " "system prompt parameters instead (e.g., append_system_prompt for Claude Code)",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+        self._custom_system_instruction = value
+
+    @classmethod
+    def create_chatcompletion_config(
+        cls,
+        model: str = "gpt-oss-120b",
+        enable_web_search: bool = False,
+        enable_code_interpreter: bool = False,
+        **kwargs,
+    ) -> "AgentConfig":
+        """Create ChatCompletion configuration following proven patterns.
+
+        Args:
+            model: Opensource Model Name
+            enable_web_search: Enable web search via Responses API
+            enable_code_interpreter: Enable code execution for computational tasks
+            **kwargs: Additional backend parameters
+
+        Examples:
+            # Basic configuration
+            config = AgentConfig.create_chatcompletion_config("gpt-oss-120b")
+
+            # Research task with web search
+            config = AgentConfig.create_chatcompletion_config("gpt-oss-120b", enable_web_search=True)
+
+            # Computational task with code execution
+            config = AgentConfig.create_chatcompletion_config("gpt-oss-120b", enable_code_interpreter=True)
+        """
+        backend_params = {"model": model, **kwargs}
+
+        # Add tool enablement to backend_params
+        if enable_web_search:
+            backend_params["enable_web_search"] = True
+        if enable_code_interpreter:
+            backend_params["enable_code_interpreter"] = True
+
+        return cls(backend_params=backend_params)
 
     @classmethod
     def create_openai_config(
@@ -97,9 +204,7 @@ class AgentConfig:
         return cls(backend_params=backend_params)
 
     @classmethod
-    def create_grok_config(
-        cls, model: str = "grok-2-1212", enable_web_search: bool = False, **kwargs
-    ) -> "AgentConfig":
+    def create_grok_config(cls, model: str = "grok-2-1212", enable_web_search: bool = False, **kwargs) -> "AgentConfig":
         """Create xAI Grok configuration.
 
         Args:
@@ -115,6 +220,43 @@ class AgentConfig:
 
         return cls(backend_params=backend_params)
 
+    @classmethod
+    def create_lmstudio_config(
+        cls,
+        model: str = "gpt-4o-mini",
+        enable_web_search: bool = False,
+        **kwargs,
+    ) -> "AgentConfig":
+        """Create LM Studio configuration (OpenAI-compatible local server).
+
+        Args:
+            model: Local model name exposed by LM Studio
+            enable_web_search: No builtin web search; kept for interface parity
+            **kwargs: Additional backend parameters (e.g., base_url, api_key)
+        """
+        backend_params = {"model": model, **kwargs}
+        if enable_web_search:
+            backend_params["enable_web_search"] = True
+        return cls(backend_params=backend_params)
+
+    @classmethod
+    def create_vllm_config(cls, model: str | None = None, **kwargs) -> "AgentConfig":
+        """Create vLLM configuration (OpenAI-compatible local server)."""
+        backend_params = {"model": model, **kwargs}
+        if model is None:
+            raise ValueError("Model is required for vLLM configuration")
+
+        return cls(backend_params=backend_params)
+
+    @classmethod
+    def create_sglang_config(cls, model: str | None = None, **kwargs) -> "AgentConfig":
+        """Create SGLang configuration (OpenAI-compatible local server)."""
+        backend_params = {"model": model, **kwargs}
+        if model is None:
+            raise ValueError("Model is required for SGLang configuration")
+
+        return cls(backend_params=backend_params)
+
     @classmethod
     def create_gemini_config(
         cls,
@@ -141,6 +283,138 @@ class AgentConfig:
 
         return cls(backend_params=backend_params)
 
+    @classmethod
+    def create_zai_config(
+        cls,
+        model: str = "glm-4.5",
+        base_url: str = "https://api.z.ai/api/paas/v4/",
+        **kwargs,
+    ) -> "AgentConfig":
+        """Create ZAI configuration (OpenAI Chat Completions compatible).
+
+        Args:
+            model: ZAI model name (e.g., "glm-4.5")
+            base_url: ZAI OpenAI-compatible API base URL
+            **kwargs: Additional backend parameters (e.g., temperature, top_p)
+        """
+        backend_params = {"model": model, "base_url": base_url, **kwargs}
+
+        return cls(backend_params=backend_params)
+
+    @classmethod
+    def create_azure_openai_config(
+        cls,
+        deployment_name: str = "gpt-4",
+        endpoint: Optional[str] = None,
+        api_key: Optional[str] = None,
+        api_version: str = "2024-02-15-preview",
+        **kwargs,
+    ) -> "AgentConfig":
+        """Create Azure OpenAI configuration.
+
+        Args:
+            deployment_name: Azure OpenAI deployment name (e.g., "gpt-4", "gpt-35-turbo")
+            endpoint: Azure OpenAI endpoint URL (optional, uses AZURE_OPENAI_ENDPOINT env var)
+            api_key: Azure OpenAI API key (optional, uses AZURE_OPENAI_API_KEY env var)
+            api_version: Azure OpenAI API version (default: 2024-02-15-preview)
+            **kwargs: Additional backend parameters (e.g., temperature, max_tokens)
+
+        Examples:
+            Basic configuration using environment variables::
+
+                config = AgentConfig.create_azure_openai_config("gpt-4")
+
+            Custom endpoint and API key::
+
+                config = AgentConfig.create_azure_openai_config(
+                    deployment_name="gpt-4-turbo",
+                    endpoint="https://your-resource.openai.azure.com/",
+                    api_key="your-api-key"
+                )
+        """
+        backend_params = {
+            "type": "azure_openai",
+            "model": deployment_name,  # For Azure OpenAI, model is the deployment name
+            "api_version": api_version,
+            **kwargs,
+        }
+
+        # Add Azure-specific parameters if provided
+        if endpoint:
+            backend_params["base_url"] = endpoint
+        if api_key:
+            backend_params["api_key"] = api_key
+
+        return cls(backend_params=backend_params)
+
+    @classmethod
+    def create_claude_code_config(
+        cls,
+        model: str = "claude-sonnet-4-20250514",
+        system_prompt: Optional[str] = None,
+        allowed_tools: Optional[list] = None,  # Legacy support
+        disallowed_tools: Optional[list] = None,  # Preferred approach
+        max_thinking_tokens: int = 8000,
+        cwd: Optional[str] = None,
+        **kwargs,
+    ) -> "AgentConfig":
+        """Create Claude Code Stream configuration using claude-code-sdk.
+
+        This backend provides native integration with ALL Claude Code built-in tools
+        by default, with security enforced through disallowed_tools. This gives maximum
+        power while maintaining safety.
+
+        Args:
+            model: Claude model name (default: claude-sonnet-4-20250514)
+            system_prompt: Custom system prompt for the agent
+            allowed_tools: [LEGACY] List of allowed tools (use disallowed_tools instead)
+            disallowed_tools: List of dangerous operations to block
+                (default: ["Bash(rm*)", "Bash(sudo*)", "Bash(su*)", "Bash(chmod*)", "Bash(chown*)"])
+            max_thinking_tokens: Maximum tokens for internal thinking (default: 8000)
+            cwd: Current working directory for file operations
+            **kwargs: Additional backend parameters
+
+        Examples:
+            Maximum power configuration (recommended)::
+
+                config = AgentConfig.create_claude_code_config()
+
+            Custom security restrictions::
+
+                config = AgentConfig.create_claude_code_config(
+                    disallowed_tools=["Bash(rm*)", "Bash(sudo*)", "WebSearch"]
+                )
+
+            Development task with custom directory::
+
+                config = AgentConfig.create_claude_code_config(
+                    cwd="/path/to/project",
+                    system_prompt="You are an expert developer assistant."
+                )
+
+            Legacy allowed_tools approach (not recommended)::
+
+                config = AgentConfig.create_claude_code_config(
+                    allowed_tools=["Read", "Write", "Edit", "Bash"]
+                )
+        """
+        backend_params = {"model": model, **kwargs}
+
+        # Claude Code Stream specific parameters
+        if system_prompt:
+            backend_params["system_prompt"] = system_prompt
+        if allowed_tools:
+            # Legacy support - warn that disallowed_tools is preferred
+            backend_params["allowed_tools"] = allowed_tools
+        if disallowed_tools:
+            backend_params["disallowed_tools"] = disallowed_tools
+        if max_thinking_tokens != 8000:  # Only set if different from default
+            backend_params["max_thinking_tokens"] = max_thinking_tokens
+        if cwd:
+            backend_params["cwd"] = cwd
+
+        return cls(backend_params=backend_params)
+
     # =============================================================================
     # AGENT CUSTOMIZATION
     # =============================================================================
@@ -166,9 +440,7 @@ class AgentConfig:
     # =============================================================================
 
     @classmethod
-    def for_research_task(
-        cls, model: str = "gpt-4o", backend: str = "openai"
-    ) -> "AgentConfig":
+    def for_research_task(cls, model: str = "gpt-4o", backend: str = "openai") -> "AgentConfig":
         """Create configuration optimized for research tasks.
 
         Based on econometrics test success patterns:
@@ -183,15 +455,14 @@ class AgentConfig:
             return cls.create_claude_config(model, enable_web_search=True)
         elif backend == "gemini":
            return cls.create_gemini_config(model, enable_web_search=True)
+        elif backend == "claude_code":
+            # Maximum power research config - all tools available
+            return cls.create_claude_code_config(model)
         else:
-            raise ValueError(
-                f"Research configuration not available for backend: {backend}"
-            )
+            raise ValueError(f"Research configuration not available for backend: {backend}")
 
     @classmethod
-    def for_computational_task(
-        cls, model: str = "gpt-4o", backend: str = "openai"
-    ) -> "AgentConfig":
+    def for_computational_task(cls, model: str = "gpt-4o", backend: str = "openai") -> "AgentConfig":
         """Create configuration optimized for computational tasks.
 
         Based on Tower of Hanoi test success patterns:
@@ -204,15 +475,14 @@ class AgentConfig:
             return cls.create_claude_config(model, enable_code_execution=True)
         elif backend == "gemini":
             return cls.create_gemini_config(model, enable_code_execution=True)
+        elif backend == "claude_code":
+            # Maximum power computational config - all tools available
+            return cls.create_claude_code_config(model)
         else:
-            raise ValueError(
-                f"Computational configuration not available for backend: {backend}"
-            )
+            raise ValueError(f"Computational configuration not available for backend: {backend}")
 
     @classmethod
-    def for_analytical_task(
-        cls, model: str = "gpt-4o-mini", backend: str = "openai"
-    ) -> "AgentConfig":
+    def for_analytical_task(cls, model: str = "gpt-4o-mini", backend: str = "openai") -> "AgentConfig":
         """Create configuration optimized for analytical tasks.
 
         Based on general reasoning test patterns:
@@ -227,10 +497,11 @@ class AgentConfig:
             return cls.create_grok_config(model)
         elif backend == "gemini":
             return cls.create_gemini_config(model)
+        elif backend == "claude_code":
+            # Maximum power analytical config - all tools available
+            return cls.create_claude_code_config(model)
         else:
-            raise ValueError(
-                f"Analytical configuration not available for backend: {backend}"
-            )
+            raise ValueError(f"Analytical configuration not available for backend: {backend}")
 
     @classmethod
     def for_expert_domain(
@@ -257,9 +528,7 @@ class AgentConfig:
         elif backend == "gemini":
             config = cls.create_gemini_config(model, enable_web_search=True)
         else:
-            raise ValueError(
-                f"Domain expert configuration not available for backend: {backend}"
-            )
+            raise ValueError(f"Domain expert configuration not available for backend: {backend}")
 
         config.custom_system_instruction = instruction
         return config
@@ -287,16 +556,12 @@ class AgentConfig:
         valid_agent_ids = list(agent_summaries.keys()) if agent_summaries else None
 
         # Build base conversation
-        conversation = templates.build_initial_conversation(
-            task=task, agent_summaries=agent_summaries, valid_agent_ids=valid_agent_ids
-        )
+        conversation = templates.build_initial_conversation(task=task, agent_summaries=agent_summaries, valid_agent_ids=valid_agent_ids)
 
         # Add custom system instruction if provided
         if self.custom_system_instruction:
             base_system = conversation["system_message"]
-            conversation["system_message"] = (
-                f"{self.custom_system_instruction}\n\n{base_system}"
-            )
+            conversation["system_message"] = f"{self.custom_system_instruction}\n\n{base_system}"
 
         # Add backend configuration
         conversation.update(
@@ -304,7 +569,7 @@ class AgentConfig:
                 "backend_params": self.get_backend_params(),
                 "session_id": session_id,
                 "agent_id": self.agent_id,
-            }
+            },
         )
 
         return conversation
@@ -354,7 +619,7 @@ class AgentConfig:
             {
                 "role": additional_message_role,
                 "content": str(additional_message),
-            }
+            },
         )
 
         # Add enforcement if requested (Case 3)
@@ -383,13 +648,9 @@ class AgentConfig:
         Returns:
             Conversation with enforcement message added
         """
-        return self.continue_conversation(
-            existing_messages=existing_messages, enforce_tools=True
-        )
+        return self.continue_conversation(existing_messages=existing_messages, enforce_tools=True)
 
-    def add_tool_result(
-        self, existing_messages: list, tool_call_id: str, result: str
-    ) -> Dict[str, Any]:
+    def add_tool_result(self, existing_messages: list, tool_call_id: str, result: str) -> Dict[str, Any]:
         """Add tool result to conversation.
 
         Args:
@@ -402,13 +663,9 @@ class AgentConfig:
         """
         tool_message = {"role": "tool", "tool_call_id": tool_call_id, "content": result}
 
-        return self.continue_conversation(
-            existing_messages=existing_messages, additional_message=tool_message
-        )
+        return self.continue_conversation(existing_messages=existing_messages, additional_message=tool_message)
 
-    def handle_case4_error_recovery(
-        self, existing_messages: list, clarification: str = None
-    ) -> Dict[str, Any]:
+    def handle_case4_error_recovery(self, existing_messages: list, clarification: Optional[str] = None) -> Dict[str, Any]:
        """Handle Case 4: Error recovery after tool failure.
 
         Args:
@@ -439,6 +696,15 @@ class AgentConfig:
             "backend_params": self.backend_params,
             "agent_id": self.agent_id,
             "custom_system_instruction": self.custom_system_instruction,
+            "timeout_config": {
+                "orchestrator_timeout_seconds": self.timeout_config.orchestrator_timeout_seconds,
+            },
+        }
+
+        # Handle coordination_config serialization
+        result["coordination_config"] = {
+            "enable_planning_mode": self.coordination_config.enable_planning_mode,
+            "planning_mode_instruction": self.coordination_config.planning_mode_instruction,
         }
 
         # Handle message_templates serialization
@@ -465,6 +731,18 @@ class AgentConfig:
         agent_id = data.get("agent_id")
         custom_system_instruction = data.get("custom_system_instruction")
 
+        # Handle timeout_config
+        timeout_config = TimeoutConfig()
+        timeout_data = data.get("timeout_config", {})
+        if timeout_data:
+            timeout_config = TimeoutConfig(**timeout_data)
+
+        # Handle coordination_config
+        coordination_config = CoordinationConfig()
+        coordination_data = data.get("coordination_config", {})
+        if coordination_data:
+            coordination_config = CoordinationConfig(**coordination_data)
+
         # Handle message_templates
         message_templates = None
         template_data = data.get("message_templates")
@@ -478,6 +756,8 @@ class AgentConfig:
             message_templates=message_templates,
             agent_id=agent_id,
             custom_system_instruction=custom_system_instruction,
+            timeout_config=timeout_config,
+            coordination_config=coordination_config,
         )
 
 
@@ -486,22 +766,16 @@
 # =============================================================================
 
 
-def create_research_config(
-    model: str = "gpt-4o", backend: str = "openai"
-) -> AgentConfig:
+def create_research_config(model: str = "gpt-4o", backend: str = "openai") -> AgentConfig:
    """Create configuration for research tasks (web search enabled)."""
    return AgentConfig.for_research_task(model, backend)
 
 
-def create_computational_config(
-    model: str = "gpt-4o", backend: str = "openai"
-) -> AgentConfig:
+def create_computational_config(model: str = "gpt-4o", backend: str = "openai") -> AgentConfig:
    """Create configuration for computational tasks (code execution enabled)."""
    return AgentConfig.for_computational_task(model, backend)
 
 
-def create_analytical_config(
-    model: str = "gpt-4o-mini", backend: str = "openai"
-) -> AgentConfig:
+def create_analytical_config(model: str = "gpt-4o-mini", backend: str = "openai") -> AgentConfig:
    """Create configuration for analytical tasks (no special tools)."""
    return AgentConfig.for_analytical_task(model, backend)
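
A minimal usage sketch of the configuration surface added above. The class, field, and factory-method names come from the diff; the model name and timeout value are illustrative assumptions, not release defaults:

    from massgen.agent_config import AgentConfig, CoordinationConfig, TimeoutConfig

    # Local OpenAI-compatible backend via the new LM Studio factory method.
    config = AgentConfig.create_lmstudio_config(model="my-local-model")

    # Tighten the orchestrator budget and make coordination rounds plan-only.
    config.timeout_config = TimeoutConfig(orchestrator_timeout_seconds=900)
    config.coordination_config = CoordinationConfig(enable_planning_mode=True)
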
massgen/api_params_handler/__init__.py
ADDED
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+"""
+Message formatting utilities.
+Provides utility classes for message formatting and conversion.
+"""
+from ._chat_completions_api_params_handler import ChatCompletionsAPIParamsHandler
+from ._claude_api_params_handler import ClaudeAPIParamsHandler
+from ._response_api_params_handler import ResponseAPIParamsHandler
+
+__all__ = ["ChatCompletionsAPIParamsHandler", "ResponseAPIParamsHandler", "ClaudeAPIParamsHandler"]
massgen/api_params_handler/_api_params_handler_base.py
ADDED
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+"""
+Base class for API parameters handlers.
+Provides common functionality for building API parameters across different backends.
+"""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Set
+
+
+class APIParamsHandlerBase(ABC):
+    """Abstract base class for API parameter handlers."""
+
+    def __init__(self, backend_instance: Any):
+        """Initialize the API params handler.
+
+        Args:
+            backend_instance: The backend instance containing necessary formatters and config
+        """
+        self.backend = backend_instance
+        self.formatter = backend_instance.formatter
+
+    @abstractmethod
+    async def build_api_params(
+        self,
+        messages: List[Dict[str, Any]],
+        tools: List[Dict[str, Any]],
+        all_params: Dict[str, Any],
+    ) -> Dict[str, Any]:
+        """Build API parameters for the specific backend.
+
+        Args:
+            messages: List of messages in framework format
+            tools: List of tools in framework format
+            all_params: All parameters including config and runtime params
+
+        Returns:
+            Dictionary of API parameters ready for the backend
+        """
+
+    @abstractmethod
+    def get_excluded_params(self) -> Set[str]:
+        """Get backend-specific parameters to exclude from API calls."""
+
+    @abstractmethod
+    def get_provider_tools(self, all_params: Dict[str, Any]) -> List[Dict[str, Any]]:
+        """Get provider-specific tools based on parameters."""
+
+    def get_base_excluded_params(self) -> Set[str]:
+        """Get common parameters to exclude across all backends."""
+        return {
+            "upload_files",
+            # Filesystem manager parameters (handled by base class)
+            "cwd",
+            "agent_temporary_workspace",
+            "context_paths",
+            "context_write_access_enabled",
+            "enable_image_generation",
+            "enable_mcp_command_line",
+            "command_line_allowed_commands",
+            "command_line_blocked_commands",
+            "command_line_execution_mode",
+            "command_line_docker_image",
+            "command_line_docker_memory_limit",
+            "command_line_docker_cpu_limit",
+            "command_line_docker_network_mode",
+            # Backend identification (handled by orchestrator)
+            "enable_audio_generation",  # Audio generation parameter
+            "type",
+            "agent_id",
+            "session_id",
+            # MCP configuration (handled by base class for MCP backends)
+            "mcp_servers",
+        }
+
+    def build_base_api_params(
+        self,
+        messages: List[Dict[str, Any]],
+        all_params: Dict[str, Any],
+    ) -> Dict[str, Any]:
+        """Build base API parameters common to most backends."""
+        api_params = {"stream": True}
+
+        # Add filtered parameters
+        excluded = self.get_excluded_params()
+        for key, value in all_params.items():
+            if key not in excluded and value is not None:
+                api_params[key] = value
+
+        return api_params
+
+    def get_mcp_tools(self) -> List[Dict[str, Any]]:
+        """Get MCP tools from backend if available."""
+        if hasattr(self.backend, "_mcp_functions") and self.backend._mcp_functions:
+            if hasattr(self.backend, "get_mcp_tools_formatted"):
+                return self.backend.get_mcp_tools_formatted()
+        return []