massgen 0.0.3-py3-none-any.whl → 0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of massgen might be problematic.
- massgen/__init__.py +142 -8
- massgen/adapters/__init__.py +29 -0
- massgen/adapters/ag2_adapter.py +483 -0
- massgen/adapters/base.py +183 -0
- massgen/adapters/tests/__init__.py +0 -0
- massgen/adapters/tests/test_ag2_adapter.py +439 -0
- massgen/adapters/tests/test_agent_adapter.py +128 -0
- massgen/adapters/utils/__init__.py +2 -0
- massgen/adapters/utils/ag2_utils.py +236 -0
- massgen/adapters/utils/tests/__init__.py +0 -0
- massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
- massgen/agent_config.py +329 -55
- massgen/api_params_handler/__init__.py +10 -0
- massgen/api_params_handler/_api_params_handler_base.py +99 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
- massgen/api_params_handler/_claude_api_params_handler.py +113 -0
- massgen/api_params_handler/_response_api_params_handler.py +130 -0
- massgen/backend/__init__.py +39 -4
- massgen/backend/azure_openai.py +385 -0
- massgen/backend/base.py +341 -69
- massgen/backend/base_with_mcp.py +1102 -0
- massgen/backend/capabilities.py +386 -0
- massgen/backend/chat_completions.py +577 -130
- massgen/backend/claude.py +1033 -537
- massgen/backend/claude_code.py +1203 -0
- massgen/backend/cli_base.py +209 -0
- massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
- massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
- massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
- massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
- massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
- massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
- massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
- massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
- massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
- massgen/backend/docs/inference_backend.md +257 -0
- massgen/backend/docs/permissions_and_context_files.md +1085 -0
- massgen/backend/external.py +126 -0
- massgen/backend/gemini.py +1850 -241
- massgen/backend/grok.py +40 -156
- massgen/backend/inference.py +156 -0
- massgen/backend/lmstudio.py +171 -0
- massgen/backend/response.py +1095 -322
- massgen/chat_agent.py +131 -113
- massgen/cli.py +1560 -275
- massgen/config_builder.py +2396 -0
- massgen/configs/BACKEND_CONFIGURATION.md +458 -0
- massgen/configs/README.md +559 -216
- massgen/configs/ag2/ag2_case_study.yaml +27 -0
- massgen/configs/ag2/ag2_coder.yaml +34 -0
- massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
- massgen/configs/ag2/ag2_gemini.yaml +27 -0
- massgen/configs/ag2/ag2_groupchat.yaml +108 -0
- massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
- massgen/configs/ag2/ag2_single_agent.yaml +21 -0
- massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
- massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
- massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
- massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
- massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
- massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
- massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
- massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
- massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
- massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
- massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
- massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
- massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
- massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
- massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
- massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
- massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
- massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
- massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
- massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
- massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
- massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
- massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
- massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
- massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
- massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
- massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
- massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
- massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
- massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
- massgen/configs/debug/skip_coordination_test.yaml +27 -0
- massgen/configs/debug/test_sdk_migration.yaml +17 -0
- massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
- massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
- massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
- massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
- massgen/configs/providers/claude/claude.yaml +14 -0
- massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
- massgen/configs/providers/local/lmstudio.yaml +11 -0
- massgen/configs/providers/openai/gpt5.yaml +46 -0
- massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
- massgen/configs/providers/others/grok_single_agent.yaml +19 -0
- massgen/configs/providers/others/zai_coding_team.yaml +108 -0
- massgen/configs/providers/others/zai_glm45.yaml +12 -0
- massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
- massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
- massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
- massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
- massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
- massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
- massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
- massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
- massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
- massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
- massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
- massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
- massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
- massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
- massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
- massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
- massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
- massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
- massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
- massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
- massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
- massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
- massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
- massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
- massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
- massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
- massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
- massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
- massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
- massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
- massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
- massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
- massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
- massgen/coordination_tracker.py +708 -0
- massgen/docker/README.md +462 -0
- massgen/filesystem_manager/__init__.py +21 -0
- massgen/filesystem_manager/_base.py +9 -0
- massgen/filesystem_manager/_code_execution_server.py +545 -0
- massgen/filesystem_manager/_docker_manager.py +477 -0
- massgen/filesystem_manager/_file_operation_tracker.py +248 -0
- massgen/filesystem_manager/_filesystem_manager.py +813 -0
- massgen/filesystem_manager/_path_permission_manager.py +1261 -0
- massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
- massgen/formatter/__init__.py +10 -0
- massgen/formatter/_chat_completions_formatter.py +284 -0
- massgen/formatter/_claude_formatter.py +235 -0
- massgen/formatter/_formatter_base.py +156 -0
- massgen/formatter/_response_formatter.py +263 -0
- massgen/frontend/__init__.py +1 -2
- massgen/frontend/coordination_ui.py +471 -286
- massgen/frontend/displays/base_display.py +56 -11
- massgen/frontend/displays/create_coordination_table.py +1956 -0
- massgen/frontend/displays/rich_terminal_display.py +1259 -619
- massgen/frontend/displays/simple_display.py +9 -4
- massgen/frontend/displays/terminal_display.py +27 -68
- massgen/logger_config.py +681 -0
- massgen/mcp_tools/README.md +232 -0
- massgen/mcp_tools/__init__.py +105 -0
- massgen/mcp_tools/backend_utils.py +1035 -0
- massgen/mcp_tools/circuit_breaker.py +195 -0
- massgen/mcp_tools/client.py +894 -0
- massgen/mcp_tools/config_validator.py +138 -0
- massgen/mcp_tools/docs/circuit_breaker.md +646 -0
- massgen/mcp_tools/docs/client.md +950 -0
- massgen/mcp_tools/docs/config_validator.md +478 -0
- massgen/mcp_tools/docs/exceptions.md +1165 -0
- massgen/mcp_tools/docs/security.md +854 -0
- massgen/mcp_tools/exceptions.py +338 -0
- massgen/mcp_tools/hooks.py +212 -0
- massgen/mcp_tools/security.py +780 -0
- massgen/message_templates.py +342 -64
- massgen/orchestrator.py +1515 -241
- massgen/stream_chunk/__init__.py +35 -0
- massgen/stream_chunk/base.py +92 -0
- massgen/stream_chunk/multimodal.py +237 -0
- massgen/stream_chunk/text.py +162 -0
- massgen/tests/mcp_test_server.py +150 -0
- massgen/tests/multi_turn_conversation_design.md +0 -8
- massgen/tests/test_azure_openai_backend.py +156 -0
- massgen/tests/test_backend_capabilities.py +262 -0
- massgen/tests/test_backend_event_loop_all.py +179 -0
- massgen/tests/test_chat_completions_refactor.py +142 -0
- massgen/tests/test_claude_backend.py +15 -28
- massgen/tests/test_claude_code.py +268 -0
- massgen/tests/test_claude_code_context_sharing.py +233 -0
- massgen/tests/test_claude_code_orchestrator.py +175 -0
- massgen/tests/test_cli_backends.py +180 -0
- massgen/tests/test_code_execution.py +679 -0
- massgen/tests/test_external_agent_backend.py +134 -0
- massgen/tests/test_final_presentation_fallback.py +237 -0
- massgen/tests/test_gemini_planning_mode.py +351 -0
- massgen/tests/test_grok_backend.py +7 -10
- massgen/tests/test_http_mcp_server.py +42 -0
- massgen/tests/test_integration_simple.py +198 -0
- massgen/tests/test_mcp_blocking.py +125 -0
- massgen/tests/test_message_context_building.py +29 -47
- massgen/tests/test_orchestrator_final_presentation.py +48 -0
- massgen/tests/test_path_permission_manager.py +2087 -0
- massgen/tests/test_rich_terminal_display.py +14 -13
- massgen/tests/test_timeout.py +133 -0
- massgen/tests/test_v3_3agents.py +11 -12
- massgen/tests/test_v3_simple.py +8 -13
- massgen/tests/test_v3_three_agents.py +11 -18
- massgen/tests/test_v3_two_agents.py +8 -13
- massgen/token_manager/__init__.py +7 -0
- massgen/token_manager/token_manager.py +400 -0
- massgen/utils.py +52 -16
- massgen/v1/agent.py +45 -91
- massgen/v1/agents.py +18 -53
- massgen/v1/backends/gemini.py +50 -153
- massgen/v1/backends/grok.py +21 -54
- massgen/v1/backends/oai.py +39 -111
- massgen/v1/cli.py +36 -93
- massgen/v1/config.py +8 -12
- massgen/v1/logging.py +43 -127
- massgen/v1/main.py +18 -32
- massgen/v1/orchestrator.py +68 -209
- massgen/v1/streaming_display.py +62 -163
- massgen/v1/tools.py +8 -12
- massgen/v1/types.py +9 -23
- massgen/v1/utils.py +5 -23
- massgen-0.1.0.dist-info/METADATA +1245 -0
- massgen-0.1.0.dist-info/RECORD +273 -0
- massgen-0.1.0.dist-info/entry_points.txt +2 -0
- massgen/frontend/logging/__init__.py +0 -9
- massgen/frontend/logging/realtime_logger.py +0 -197
- massgen-0.0.3.dist-info/METADATA +0 -568
- massgen-0.0.3.dist-info/RECORD +0 -76
- massgen-0.0.3.dist-info/entry_points.txt +0 -2
- /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
massgen/backend/base.py
CHANGED
```diff
@@ -1,40 +1,52 @@
-
-
+# -*- coding: utf-8 -*-
 """
 Base backend interface for LLM providers.
 """
+# -*- coding: utf-8 -*-
+from __future__ import annotations
 
 from abc import ABC, abstractmethod
-from typing import Dict, List, Any, AsyncGenerator, Optional
 from dataclasses import dataclass
+from enum import Enum
+from typing import Any, AsyncGenerator, Dict, List, Optional, Union
+
+from ..filesystem_manager import FilesystemManager, PathPermissionManagerHook
+from ..mcp_tools.hooks import FunctionHookManager, HookType
+from ..token_manager import TokenCostCalculator, TokenUsage
+from ..utils import CoordinationStage
+
+
+class FilesystemSupport(Enum):
+    """Types of filesystem support for backends."""
+
+    NONE = "none"  # No filesystem support
+    NATIVE = "native"  # Built-in filesystem tools (like Claude Code)
+    MCP = "mcp"  # Filesystem support through MCP servers
 
 
 @dataclass
 class StreamChunk:
     """Standardized chunk format for streaming responses."""
 
-    type: str  # "content", "tool_calls", "
+    type: str  # "content", "tool_calls", "complete_message", "complete_response", "done",
+    # "error", "agent_status", "reasoning", "reasoning_done", "reasoning_summary",
+    # "reasoning_summary_done", "backend_status"
     content: Optional[str] = None
-    tool_calls: Optional[List[Dict[str, Any]]] = (
-        None  # User-defined function tools (need execution)
-    )
-    builtin_tool_results: Optional[List[Dict[str, Any]]] = (
-        None  # Provider builtin tools (already executed)
-    )
+    tool_calls: Optional[List[Dict[str, Any]]] = None  # User-defined function tools (need execution)
     complete_message: Optional[Dict[str, Any]] = None  # Complete assistant message
     response: Optional[Dict[str, Any]] = None  # Raw Responses API response
     error: Optional[str] = None
     source: Optional[str] = None  # Source identifier (e.g., agent_id, "orchestrator")
     status: Optional[str] = None  # For agent status updates
 
-
-
-
-
-
-
-
-
+    # Reasoning-related fields
+    reasoning_delta: Optional[str] = None  # Delta text from reasoning stream
+    reasoning_text: Optional[str] = None  # Complete reasoning text
+    reasoning_summary_delta: Optional[str] = None  # Delta text from reasoning summary stream
+    reasoning_summary_text: Optional[str] = None  # Complete reasoning summary text
+    item_id: Optional[str] = None  # Reasoning item ID
+    content_index: Optional[int] = None  # Reasoning content index
+    summary_index: Optional[int] = None  # Reasoning summary index
 
 
 class LLMBackend(ABC):
@@ -43,12 +55,124 @@ class LLMBackend(ABC):
     def __init__(self, api_key: Optional[str] = None, **kwargs):
         self.api_key = api_key
         self.config = kwargs
+
+        # Initialize utility classes
         self.token_usage = TokenUsage()
 
+        # Planning mode flag - when True, MCP tools should be blocked during coordination
+        self._planning_mode_enabled: bool = False
+
+        self.token_calculator = TokenCostCalculator()
+
+        # Filesystem manager integration
+        self.filesystem_manager = None
+        cwd = kwargs.get("cwd")
+        if cwd:
+            filesystem_support = self.get_filesystem_support()
+            if filesystem_support in (FilesystemSupport.MCP, FilesystemSupport.NATIVE):
+                # Validate execution mode
+                execution_mode = kwargs.get("command_line_execution_mode", "local")
+                if execution_mode not in ["local", "docker"]:
+                    raise ValueError(
+                        f"Invalid command_line_execution_mode: '{execution_mode}'. Must be 'local' or 'docker'.",
+                    )
+
+                # Validate network mode
+                network_mode = kwargs.get("command_line_docker_network_mode", "none")
+                if network_mode not in ["none", "bridge", "host"]:
+                    raise ValueError(
+                        f"Invalid command_line_docker_network_mode: '{network_mode}'. Must be 'none', 'bridge', or 'host'.",
+                    )
+
+                # Extract all FilesystemManager parameters from kwargs
+                filesystem_params = {
+                    "cwd": cwd,
+                    "agent_temporary_workspace_parent": kwargs.get("agent_temporary_workspace"),
+                    "context_paths": kwargs.get("context_paths", []),
+                    "context_write_access_enabled": kwargs.get("context_write_access_enabled", False),
+                    "enable_image_generation": kwargs.get("enable_image_generation", False),
+                    "enable_mcp_command_line": kwargs.get("enable_mcp_command_line", False),
+                    "command_line_allowed_commands": kwargs.get("command_line_allowed_commands"),
+                    "command_line_blocked_commands": kwargs.get("command_line_blocked_commands"),
+                    "command_line_execution_mode": execution_mode,
+                    "command_line_docker_image": kwargs.get("command_line_docker_image", "massgen/mcp-runtime:latest"),
+                    "command_line_docker_memory_limit": kwargs.get("command_line_docker_memory_limit"),
+                    "command_line_docker_cpu_limit": kwargs.get("command_line_docker_cpu_limit"),
+                    "command_line_docker_network_mode": network_mode,
+                    "enable_audio_generation": kwargs.get("enable_audio_generation", False),
+                }
+
+                # Create FilesystemManager
+                self.filesystem_manager = FilesystemManager(**filesystem_params)
+
+                # Inject MCP filesystem server for MCP backends only
+                if filesystem_support == FilesystemSupport.MCP:
+                    self.config = self.filesystem_manager.inject_filesystem_mcp(kwargs)
+                # NATIVE backends handle filesystem tools themselves, but need command_line MCP for docker mode
+                elif filesystem_support == FilesystemSupport.NATIVE and execution_mode == "docker" and kwargs.get("enable_mcp_command_line", False):
+                    self.config = self.filesystem_manager.inject_command_line_mcp(kwargs)
+
+            elif filesystem_support == FilesystemSupport.NONE:
+                raise ValueError(f"Backend {self.get_provider_name()} does not support filesystem operations. Remove 'cwd' from configuration.")
+
+            # Auto-setup permission hooks for function-based backends (default)
+            if self.filesystem_manager:
+                self._setup_permission_hooks()
+        else:
+            self.filesystem_manager = None
+
+        self.formatter = None
+        self.api_params_handler = None
+        self.coordination_stage = None
+
+    def _setup_permission_hooks(self):
+        """Setup permission hooks for function-based backends (default behavior)."""
+        # Create per-agent hook manager
+        self.function_hook_manager = FunctionHookManager()
+
+        # Create permission hook using the filesystem manager's permission manager
+        permission_hook = PathPermissionManagerHook(self.filesystem_manager.path_permission_manager)
+
+        # Register hook on this agent's hook manager only
+        self.function_hook_manager.register_global_hook(HookType.PRE_CALL, permission_hook)
+
+    @classmethod
+    def get_base_excluded_config_params(cls) -> set:
+        """
+        Get set of config parameters that are universally handled by base class.
+
+        These are parameters handled by the base class or orchestrator, not passed
+        directly to backend implementations. Backends should extend this set with
+        their own specific exclusions.
+
+        Returns:
+            Set of universal parameter names to exclude from backend options
+        """
+        return {
+            # Filesystem manager parameters (handled by base class)
+            "cwd",
+            "agent_temporary_workspace",
+            "context_paths",
+            "context_write_access_enabled",
+            "enable_image_generation",
+            "enable_mcp_command_line",
+            "command_line_allowed_commands",
+            "command_line_blocked_commands",
+            "command_line_execution_mode",
+            "command_line_docker_image",
+            "command_line_docker_memory_limit",
+            "command_line_docker_cpu_limit",
+            "command_line_docker_network_mode",
+            # Backend identification (handled by orchestrator)
+            "type",
+            "agent_id",
+            "session_id",
+            # MCP configuration (handled by base class for MCP backends)
+            "mcp_servers",
+        }
+
     @abstractmethod
-    async def stream_with_tools(
-        self, messages: List[Dict[str, Any]], tools: List[Dict[str, Any]], **kwargs
-    ) -> AsyncGenerator[StreamChunk, None]:
+    async def stream_with_tools(self, messages: List[Dict[str, Any]], tools: List[Dict[str, Any]], **kwargs) -> AsyncGenerator[StreamChunk, None]:
         """
         Stream a response with tool calling support.
 
@@ -60,43 +184,54 @@ class LLMBackend(ABC):
         Yields:
             StreamChunk: Standardized response chunks
         """
-        pass
 
     @abstractmethod
     def get_provider_name(self) -> str:
         """Get the name of this provider."""
-        pass
 
-
-
-
-        pass
+    def estimate_tokens(self, text: Union[str, List[Dict[str, Any]]], method: str = "auto") -> int:
+        """
+        Estimate token count for text or messages.
 
-
-
-
-
-
-
-
-
-
-    ):
-        """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        Args:
+            text: Text string or list of message dictionaries
+            method: Estimation method ("tiktoken", "simple", "auto")
+
+        Returns:
+            Estimated token count
+        """
+        return self.token_calculator.estimate_tokens(text, method)
+
+    def calculate_cost(self, input_tokens: int, output_tokens: int, model: str) -> float:
+        """
+        Calculate cost for token usage.
+
+        Args:
+            input_tokens: Number of input tokens
+            output_tokens: Number of output tokens
+            model: Model name
+
+        Returns:
+            Estimated cost in USD
+        """
+        provider = self.get_provider_name()
+        return self.token_calculator.calculate_cost(input_tokens, output_tokens, provider, model)
+
+    def update_token_usage(self, messages: List[Dict[str, Any]], response_content: str, model: str) -> TokenUsage:
+        """
+        Update token usage tracking.
+
+        Args:
+            messages: Input messages
+            response_content: Response content
+            model: Model name
+
+        Returns:
+            Updated TokenUsage object
+        """
+        provider = self.get_provider_name()
+        self.token_usage = self.token_calculator.update_token_usage(self.token_usage, messages, response_content, provider, model)
+        return self.token_usage
 
     def get_token_usage(self) -> TokenUsage:
         """Get current token usage."""
@@ -106,52 +241,122 @@ class LLMBackend(ABC):
         """Reset token usage tracking."""
         self.token_usage = TokenUsage()
 
+    def format_cost(self, cost: float = None) -> str:
+        """Format cost for display."""
+        if cost is None:
+            cost = self.token_usage.estimated_cost
+        return self.token_calculator.format_cost(cost)
+
+    def format_usage_summary(self, usage: TokenUsage = None) -> str:
+        """Format token usage summary for display."""
+        if usage is None:
+            usage = self.token_usage
+        return self.token_calculator.format_usage_summary(usage)
+
+    def get_filesystem_support(self) -> FilesystemSupport:
+        """
+        Get the type of filesystem support this backend provides.
+
+        Returns:
+            FilesystemSupport: The type of filesystem support
+            - NONE: No filesystem capabilities
+            - NATIVE: Built-in filesystem tools (like Claude Code)
+            - MCP: Can use filesystem through MCP servers
+        """
+        # By default, backends have no filesystem support
+        # Subclasses should override this method
+        return FilesystemSupport.NONE
+
     def get_supported_builtin_tools(self) -> List[str]:
         """Get list of builtin tools supported by this provider."""
         return []
 
     def extract_tool_name(self, tool_call: Dict[str, Any]) -> str:
         """
-        Extract tool name from a tool call
+        Extract tool name from a tool call (handles multiple formats).
+
+        Supports:
+        - Chat Completions format: {"function": {"name": "...", ...}}
+        - Response API format: {"name": "..."}
+        - Claude native format: {"name": "..."}
 
         Args:
-            tool_call: Tool call data structure from
+            tool_call: Tool call data structure from any backend
 
         Returns:
            Tool name string
        """
-        #
-
+        # Chat Completions format
+        if "function" in tool_call:
+            return tool_call.get("function", {}).get("name", "unknown")
+        # Response API / Claude native format
+        elif "name" in tool_call:
+            return tool_call.get("name", "unknown")
+        # Fallback
+        return "unknown"
 
     def extract_tool_arguments(self, tool_call: Dict[str, Any]) -> Dict[str, Any]:
         """
-        Extract tool arguments from a tool call
+        Extract tool arguments from a tool call (handles multiple formats).
+
+        Supports:
+        - Chat Completions format: {"function": {"arguments": ...}}
+        - Response API format: {"arguments": ...}
+        - Claude native format: {"input": ...}
 
         Args:
-            tool_call: Tool call data structure from
+            tool_call: Tool call data structure from any backend
 
         Returns:
-            Tool arguments dictionary
+            Tool arguments dictionary (parsed from JSON string if needed)
         """
-
-
+        import json
+
+        # Chat Completions format
+        if "function" in tool_call:
+            args = tool_call.get("function", {}).get("arguments", {})
+        # Claude native format
+        elif "input" in tool_call:
+            args = tool_call.get("input", {})
+        # Response API format
+        elif "arguments" in tool_call:
+            args = tool_call.get("arguments", {})
+        else:
+            args = {}
+
+        # Parse JSON string if needed
+        if isinstance(args, str):
+            try:
+                return json.loads(args) if args.strip() else {}
+            except (json.JSONDecodeError, ValueError):
+                return {}
+        return args if isinstance(args, dict) else {}
 
     def extract_tool_call_id(self, tool_call: Dict[str, Any]) -> str:
         """
-        Extract tool call ID from a tool call
+        Extract tool call ID from a tool call (handles multiple formats).
+
+        Supports:
+        - Chat Completions format: {"id": "..."}
+        - Response API format: {"call_id": "..."}
+        - Claude native format: {"id": "..."}
 
         Args:
-            tool_call: Tool call data structure from
+            tool_call: Tool call data structure from any backend
 
         Returns:
             Tool call ID string
         """
-        #
-
-
-
-
-
+        # Check for Response API format
+        if "call_id" in tool_call:
+            return tool_call.get("call_id", "")
+        # Check for Chat Completions format or Claude native format (both use "id")
+        elif "id" in tool_call:
+            return tool_call.get("id", "")
+        else:
+            return ""
+
+    def create_tool_result_message(self, tool_call: Dict[str, Any], result_content: str) -> Dict[str, Any]:
         """
         Create a tool result message in this backend's expected format.
 
@@ -178,3 +383,70 @@
         """
         # Default implementation assumes Chat Completions format
         return tool_result_message.get("content", "")
+
+    def is_stateful(self) -> bool:
+        """
+        Check if this backend maintains conversation state across requests.
+
+        Returns:
+            True if backend is stateful (maintains context), False if stateless
+
+        Stateless backends require full conversation history with each request.
+        Stateful backends maintain context internally and only need new messages.
+        """
+        return False
+
+    def clear_history(self) -> None:
+        """
+        Clear conversation history while maintaining session.
+
+        For stateless backends, this is a no-op.
+        For stateful backends, this clears conversation history but keeps session.
+        """
+
+    def reset_state(self) -> None:
+        """
+        Reset backend state for stateful backends.
+
+        For stateless backends, this is a no-op.
+        For stateful backends, this clears conversation history and session state.
+        """
+        pass  # Default implementation for stateless backends
+
+    def set_planning_mode(self, enabled: bool) -> None:
+        """
+        Enable or disable planning mode for this backend.
+
+        When planning mode is enabled, MCP tools should be blocked to prevent
+        execution during coordination phase.
+
+        Args:
+            enabled: True to enable planning mode (block MCP tools), False to disable
+        """
+        self._planning_mode_enabled = enabled
+
+    def is_planning_mode_enabled(self) -> bool:
+        """
+        Check if planning mode is currently enabled.
+
+        Returns:
+            True if planning mode is enabled (MCP tools should be blocked)
+        """
+        return self._planning_mode_enabled
+
+    async def _cleanup_client(self, client: Any) -> None:
+        """Clean up OpenAI client resources."""
+        try:
+            if client is not None and hasattr(client, "aclose"):
+                await client.aclose()
+        except Exception:
+            pass
+
+    def set_stage(self, stage: CoordinationStage) -> None:
+        """
+        Set the current coordination stage for the backend.
+
+        Args:
+            stage: CoordinationStage enum value
+        """
+        self.coordination_stage = stage
```