massgen 0.0.3__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of massgen might be problematic.
- massgen/__init__.py +142 -8
- massgen/adapters/__init__.py +29 -0
- massgen/adapters/ag2_adapter.py +483 -0
- massgen/adapters/base.py +183 -0
- massgen/adapters/tests/__init__.py +0 -0
- massgen/adapters/tests/test_ag2_adapter.py +439 -0
- massgen/adapters/tests/test_agent_adapter.py +128 -0
- massgen/adapters/utils/__init__.py +2 -0
- massgen/adapters/utils/ag2_utils.py +236 -0
- massgen/adapters/utils/tests/__init__.py +0 -0
- massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
- massgen/agent_config.py +329 -55
- massgen/api_params_handler/__init__.py +10 -0
- massgen/api_params_handler/_api_params_handler_base.py +99 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
- massgen/api_params_handler/_claude_api_params_handler.py +113 -0
- massgen/api_params_handler/_response_api_params_handler.py +130 -0
- massgen/backend/__init__.py +39 -4
- massgen/backend/azure_openai.py +385 -0
- massgen/backend/base.py +341 -69
- massgen/backend/base_with_mcp.py +1102 -0
- massgen/backend/capabilities.py +386 -0
- massgen/backend/chat_completions.py +577 -130
- massgen/backend/claude.py +1033 -537
- massgen/backend/claude_code.py +1203 -0
- massgen/backend/cli_base.py +209 -0
- massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
- massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
- massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
- massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
- massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
- massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
- massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
- massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
- massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
- massgen/backend/docs/inference_backend.md +257 -0
- massgen/backend/docs/permissions_and_context_files.md +1085 -0
- massgen/backend/external.py +126 -0
- massgen/backend/gemini.py +1850 -241
- massgen/backend/grok.py +40 -156
- massgen/backend/inference.py +156 -0
- massgen/backend/lmstudio.py +171 -0
- massgen/backend/response.py +1095 -322
- massgen/chat_agent.py +131 -113
- massgen/cli.py +1560 -275
- massgen/config_builder.py +2396 -0
- massgen/configs/BACKEND_CONFIGURATION.md +458 -0
- massgen/configs/README.md +559 -216
- massgen/configs/ag2/ag2_case_study.yaml +27 -0
- massgen/configs/ag2/ag2_coder.yaml +34 -0
- massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
- massgen/configs/ag2/ag2_gemini.yaml +27 -0
- massgen/configs/ag2/ag2_groupchat.yaml +108 -0
- massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
- massgen/configs/ag2/ag2_single_agent.yaml +21 -0
- massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
- massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
- massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
- massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
- massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
- massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
- massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
- massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
- massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
- massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
- massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
- massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
- massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
- massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
- massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
- massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
- massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
- massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
- massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
- massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
- massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
- massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
- massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
- massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
- massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
- massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
- massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
- massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
- massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
- massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
- massgen/configs/debug/skip_coordination_test.yaml +27 -0
- massgen/configs/debug/test_sdk_migration.yaml +17 -0
- massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
- massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
- massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
- massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
- massgen/configs/providers/claude/claude.yaml +14 -0
- massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
- massgen/configs/providers/local/lmstudio.yaml +11 -0
- massgen/configs/providers/openai/gpt5.yaml +46 -0
- massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
- massgen/configs/providers/others/grok_single_agent.yaml +19 -0
- massgen/configs/providers/others/zai_coding_team.yaml +108 -0
- massgen/configs/providers/others/zai_glm45.yaml +12 -0
- massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
- massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
- massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
- massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
- massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
- massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
- massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
- massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
- massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
- massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
- massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
- massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
- massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
- massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
- massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
- massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
- massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
- massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
- massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
- massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
- massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
- massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
- massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
- massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
- massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
- massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
- massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
- massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
- massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
- massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
- massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
- massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
- massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
- massgen/coordination_tracker.py +708 -0
- massgen/docker/README.md +462 -0
- massgen/filesystem_manager/__init__.py +21 -0
- massgen/filesystem_manager/_base.py +9 -0
- massgen/filesystem_manager/_code_execution_server.py +545 -0
- massgen/filesystem_manager/_docker_manager.py +477 -0
- massgen/filesystem_manager/_file_operation_tracker.py +248 -0
- massgen/filesystem_manager/_filesystem_manager.py +813 -0
- massgen/filesystem_manager/_path_permission_manager.py +1261 -0
- massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
- massgen/formatter/__init__.py +10 -0
- massgen/formatter/_chat_completions_formatter.py +284 -0
- massgen/formatter/_claude_formatter.py +235 -0
- massgen/formatter/_formatter_base.py +156 -0
- massgen/formatter/_response_formatter.py +263 -0
- massgen/frontend/__init__.py +1 -2
- massgen/frontend/coordination_ui.py +471 -286
- massgen/frontend/displays/base_display.py +56 -11
- massgen/frontend/displays/create_coordination_table.py +1956 -0
- massgen/frontend/displays/rich_terminal_display.py +1259 -619
- massgen/frontend/displays/simple_display.py +9 -4
- massgen/frontend/displays/terminal_display.py +27 -68
- massgen/logger_config.py +681 -0
- massgen/mcp_tools/README.md +232 -0
- massgen/mcp_tools/__init__.py +105 -0
- massgen/mcp_tools/backend_utils.py +1035 -0
- massgen/mcp_tools/circuit_breaker.py +195 -0
- massgen/mcp_tools/client.py +894 -0
- massgen/mcp_tools/config_validator.py +138 -0
- massgen/mcp_tools/docs/circuit_breaker.md +646 -0
- massgen/mcp_tools/docs/client.md +950 -0
- massgen/mcp_tools/docs/config_validator.md +478 -0
- massgen/mcp_tools/docs/exceptions.md +1165 -0
- massgen/mcp_tools/docs/security.md +854 -0
- massgen/mcp_tools/exceptions.py +338 -0
- massgen/mcp_tools/hooks.py +212 -0
- massgen/mcp_tools/security.py +780 -0
- massgen/message_templates.py +342 -64
- massgen/orchestrator.py +1515 -241
- massgen/stream_chunk/__init__.py +35 -0
- massgen/stream_chunk/base.py +92 -0
- massgen/stream_chunk/multimodal.py +237 -0
- massgen/stream_chunk/text.py +162 -0
- massgen/tests/mcp_test_server.py +150 -0
- massgen/tests/multi_turn_conversation_design.md +0 -8
- massgen/tests/test_azure_openai_backend.py +156 -0
- massgen/tests/test_backend_capabilities.py +262 -0
- massgen/tests/test_backend_event_loop_all.py +179 -0
- massgen/tests/test_chat_completions_refactor.py +142 -0
- massgen/tests/test_claude_backend.py +15 -28
- massgen/tests/test_claude_code.py +268 -0
- massgen/tests/test_claude_code_context_sharing.py +233 -0
- massgen/tests/test_claude_code_orchestrator.py +175 -0
- massgen/tests/test_cli_backends.py +180 -0
- massgen/tests/test_code_execution.py +679 -0
- massgen/tests/test_external_agent_backend.py +134 -0
- massgen/tests/test_final_presentation_fallback.py +237 -0
- massgen/tests/test_gemini_planning_mode.py +351 -0
- massgen/tests/test_grok_backend.py +7 -10
- massgen/tests/test_http_mcp_server.py +42 -0
- massgen/tests/test_integration_simple.py +198 -0
- massgen/tests/test_mcp_blocking.py +125 -0
- massgen/tests/test_message_context_building.py +29 -47
- massgen/tests/test_orchestrator_final_presentation.py +48 -0
- massgen/tests/test_path_permission_manager.py +2087 -0
- massgen/tests/test_rich_terminal_display.py +14 -13
- massgen/tests/test_timeout.py +133 -0
- massgen/tests/test_v3_3agents.py +11 -12
- massgen/tests/test_v3_simple.py +8 -13
- massgen/tests/test_v3_three_agents.py +11 -18
- massgen/tests/test_v3_two_agents.py +8 -13
- massgen/token_manager/__init__.py +7 -0
- massgen/token_manager/token_manager.py +400 -0
- massgen/utils.py +52 -16
- massgen/v1/agent.py +45 -91
- massgen/v1/agents.py +18 -53
- massgen/v1/backends/gemini.py +50 -153
- massgen/v1/backends/grok.py +21 -54
- massgen/v1/backends/oai.py +39 -111
- massgen/v1/cli.py +36 -93
- massgen/v1/config.py +8 -12
- massgen/v1/logging.py +43 -127
- massgen/v1/main.py +18 -32
- massgen/v1/orchestrator.py +68 -209
- massgen/v1/streaming_display.py +62 -163
- massgen/v1/tools.py +8 -12
- massgen/v1/types.py +9 -23
- massgen/v1/utils.py +5 -23
- massgen-0.1.0.dist-info/METADATA +1245 -0
- massgen-0.1.0.dist-info/RECORD +273 -0
- massgen-0.1.0.dist-info/entry_points.txt +2 -0
- massgen/frontend/logging/__init__.py +0 -9
- massgen/frontend/logging/realtime_logger.py +0 -197
- massgen-0.0.3.dist-info/METADATA +0 -568
- massgen-0.0.3.dist-info/RECORD +0 -76
- massgen-0.0.3.dist-info/entry_points.txt +0 -2
- /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
massgen/chat_agent.py
CHANGED
@@ -1,16 +1,21 @@
+# -*- coding: utf-8 -*-
 """
 Common chat interface for MassGen agents.
 
 Defines the standard interface that both individual agents and the orchestrator implement,
 allowing seamless interaction regardless of whether you're talking to a single agent
 or a coordinated multi-agent system.
+
+# TODO: Consider how to best handle stateful vs stateless backends in this interface.
 """
 
 import uuid
 from abc import ABC, abstractmethod
-from typing import
+from typing import Any, AsyncGenerator, Dict, List, Optional
 
 from .backend.base import LLMBackend, StreamChunk
+from .stream_chunk import ChunkType
+from .utils import CoordinationStage
 
 
 class ChatAgent(ABC):
@@ -32,6 +37,7 @@ class ChatAgent(ABC):
         tools: List[Dict[str, Any]] = None,
         reset_chat: bool = False,
         clear_history: bool = False,
+        current_stage: CoordinationStage = None,
     ) -> AsyncGenerator[StreamChunk, None]:
         """
         Enhanced chat interface supporting tool calls and responses.
@@ -45,11 +51,11 @@
             tools: Optional tools to provide to the agent
             reset_chat: If True, reset the agent's conversation history to the provided messages
             clear_history: If True, clear history but keep system message before processing messages
+            current_stage: Optional current coordination stage for orchestrator use
 
         Yields:
             StreamChunk: Streaming response chunks
         """
-        pass
 
     async def chat_simple(self, user_message: str) -> AsyncGenerator[StreamChunk, None]:
         """
@@ -68,12 +74,22 @@
     @abstractmethod
     def get_status(self) -> Dict[str, Any]:
         """Get current agent status and state."""
-        pass
 
     @abstractmethod
-    def reset(self) -> None:
+    async def reset(self) -> None:
         """Reset agent state for new conversation."""
-
+
+    @abstractmethod
+    def get_configurable_system_message(self) -> Optional[str]:
+        """
+        Get the user-configurable part of the system message.
+
+        Returns the domain expertise, role definition, or custom instructions
+        that were configured for this agent, without backend-specific details.
+
+        Returns:
+            The configurable system message if available, None otherwise
+        """
 
     # Common conversation management
     def get_conversation_history(self) -> List[Dict[str, Any]]:
@@ -133,13 +149,27 @@ class SingleAgent(ChatAgent):
 
         # Add system message to history if provided
         if self.system_message:
-            self.conversation_history.append(
-                {"role": "system", "content": self.system_message}
-            )
+            self.conversation_history.append({"role": "system", "content": self.system_message})
 
-
-
-
+    @staticmethod
+    def _get_chunk_type_value(chunk) -> str:
+        """
+        Extract chunk type as string, handling both legacy and typed chunks.
+
+        Args:
+            chunk: StreamChunk, TextStreamChunk, or MultimodalStreamChunk
+
+        Returns:
+            String representation of chunk type (e.g., "content", "tool_calls")
+        """
+        chunk_type = chunk.type
+
+        if isinstance(chunk_type, ChunkType):
+            return chunk_type.value
+
+        return str(chunk_type)
+
+    async def _process_stream(self, backend_stream, tools: List[Dict[str, Any]] = None) -> AsyncGenerator[StreamChunk, None]:
         """Common streaming logic for processing backend responses."""
         assistant_response = ""
         tool_calls = []
@@ -147,50 +177,41 @@
 
         try:
             async for chunk in backend_stream:
-
+                chunk_type = self._get_chunk_type_value(chunk)
+                if chunk_type == "content":
                     assistant_response += chunk.content
                     yield chunk
-                elif
+                elif chunk_type == "tool_calls":
                     chunk_tool_calls = getattr(chunk, "tool_calls", []) or []
                     tool_calls.extend(chunk_tool_calls)
                     yield chunk
-                elif
+                elif chunk_type == "complete_message":
                     # Backend provided the complete message structure
                     complete_message = chunk.complete_message
                     # Don't yield this - it's for internal use
-                elif
+                elif chunk_type == "complete_response":
                     # Backend provided the raw Responses API response
                     if chunk.response:
                         complete_message = chunk.response
 
                         # Extract and yield tool calls for orchestrator processing
-                        if (
-                            isinstance(chunk.response, dict)
-                            and "output" in chunk.response
-                        ):
+                        if isinstance(chunk.response, dict) and "output" in chunk.response:
                             response_tool_calls = []
                             for output_item in chunk.response["output"]:
                                 if output_item.get("type") == "function_call":
                                     response_tool_calls.append(output_item)
-                                    tool_calls.append(
-                                        output_item
-                                    )  # Also store for fallback
+                                    tool_calls.append(output_item)  # Also store for fallback
 
                             # Yield tool calls so orchestrator can process them
                             if response_tool_calls:
-                                yield StreamChunk(
-                                    type="tool_calls", tool_calls=response_tool_calls
-                                )
+                                yield StreamChunk(type="tool_calls", tool_calls=response_tool_calls)
                     # Complete response is for internal use - don't yield it
-                elif
+                elif chunk_type == "done":
                     # Add complete response to history
                     if complete_message:
                         # For Responses API: complete_message is the response object with 'output' array
                         # Each item in output should be added to conversation history individually
-                        if (
-                            isinstance(complete_message, dict)
-                            and "output" in complete_message
-                        ):
+                        if isinstance(complete_message, dict) and "output" in complete_message:
                             self.conversation_history.extend(complete_message["output"])
                         else:
                             # Fallback if it's already in message format
@@ -220,23 +241,41 @@ class SingleAgent(ChatAgent):
         tools: List[Dict[str, Any]] = None,
         reset_chat: bool = False,
         clear_history: bool = False,
+        current_stage: CoordinationStage = None,
     ) -> AsyncGenerator[StreamChunk, None]:
+        # print("Agent: ", self.agent_id)
+        # for message in messages:
+        #     print(f"Message: {message}\n")
+        # print("Messages End. \n")
         """Process messages through single backend with tool support."""
         if clear_history:
             # Clear history but keep system message if it exists
-            system_messages = [
-                msg for msg in self.conversation_history if msg.get("role") == "system"
-            ]
+            system_messages = [msg for msg in self.conversation_history if msg.get("role") == "system"]
             self.conversation_history = system_messages.copy()
+            # Clear backend history while maintaining session
+            if self.backend.is_stateful():
+                await self.backend.clear_history()
 
         if reset_chat:
             # Reset conversation history to the provided messages
             self.conversation_history = messages.copy()
+            # Reset backend state completely
+            if self.backend.is_stateful():
+                await self.backend.reset_state()
             backend_messages = self.conversation_history.copy()
         else:
             # Regular conversation - append new messages to agent's history
            self.conversation_history.extend(messages)
-
+            # Handle stateful vs stateless backends differently
+            if self.backend.is_stateful():
+                # Stateful: only send new messages, backend maintains context
+                backend_messages = messages.copy()
+            else:
+                # Stateless: send full conversation history
+                backend_messages = self.conversation_history.copy()
+
+        if current_stage:
+            self.backend.set_stage(current_stage)
 
         # Create backend stream and process it
         backend_stream = self.backend.stream_with_tools(
@@ -244,11 +283,16 @@ class SingleAgent(ChatAgent):
             tools=tools,  # Use provided tools (for MassGen workflow)
             agent_id=self.agent_id,
             session_id=self.session_id,
+            **self._get_backend_params(),
         )
 
         async for chunk in self._process_stream(backend_stream, tools):
             yield chunk
 
+    def _get_backend_params(self) -> Dict[str, Any]:
+        """Get additional backend parameters. Override in subclasses."""
+        return {}
+
     def get_status(self) -> Dict[str, Any]:
         """Get current agent status."""
         return {
@@ -259,15 +303,21 @@ class SingleAgent(ChatAgent):
             "conversation_length": len(self.conversation_history),
         }
 
-    def reset(self) -> None:
+    async def reset(self) -> None:
        """Reset conversation for new chat."""
        self.conversation_history.clear()
 
+        # Reset stateful backend if needed
+        if self.backend.is_stateful():
+            await self.backend.reset_state()
+
        # Re-add system message if it exists
        if self.system_message:
-            self.conversation_history.append(
-
-
+            self.conversation_history.append({"role": "system", "content": self.system_message})
+
+    def get_configurable_system_message(self) -> Optional[str]:
+        """Get the user-configurable part of the system message."""
+        return self.system_message
 
     def set_model(self, model: str) -> None:
         """Set the model for this agent."""
@@ -278,16 +328,11 @@ class SingleAgent(ChatAgent):
         self.system_message = system_message
 
         # Remove old system message if exists
-        if (
-            self.conversation_history
-            and self.conversation_history[0].get("role") == "system"
-        ):
+        if self.conversation_history and self.conversation_history[0].get("role") == "system":
             self.conversation_history.pop(0)
 
         # Add new system message at the beginning
-        self.conversation_history.insert(
-            0, {"role": "system", "content": system_message}
-        )
+        self.conversation_history.insert(0, {"role": "system", "content": system_message})
 
 
 class ConfigurableAgent(SingleAgent):
@@ -297,6 +342,13 @@ class ConfigurableAgent(SingleAgent):
     This bridges the gap between SingleAgent and the MassGen system by supporting
     all the advanced configuration options (web search, code execution, etc.)
     while maintaining the simple chat interface.
+
+    TODO: Consider merging with SingleAgent. The main difference is:
+    - SingleAgent: backend parameters passed directly to constructor/methods
+    - ConfigurableAgent: backend parameters come from AgentConfig object
+
+    Could be unified by making SingleAgent accept an optional config parameter
+    and using _get_backend_params() pattern for all parameter sources.
     """
 
     def __init__(
@@ -323,42 +375,9 @@ class ConfigurableAgent(SingleAgent):
 
         # ConfigurableAgent relies on backend_params for model configuration
 
-
-
-
-        tools: List[Dict[str, Any]] = None,
-        reset_chat: bool = False,
-        clear_history: bool = False,
-    ) -> AsyncGenerator[StreamChunk, None]:
-        """Process messages with full AgentConfig capabilities."""
-        if clear_history:
-            # Clear history but keep system message if it exists
-            system_messages = [
-                msg for msg in self.conversation_history if msg.get("role") == "system"
-            ]
-            self.conversation_history = system_messages.copy()
-
-        if reset_chat:
-            # Reset conversation history to the provided messages
-            self.conversation_history = messages.copy()
-            backend_messages = self.conversation_history.copy()
-        else:
-            # Regular conversation - append new messages to agent's history
-            self.conversation_history.extend(messages)
-            backend_messages = self.conversation_history.copy()
-
-        # Create backend stream with config parameters and process it
-        backend_params = self.config.get_backend_params()
-        backend_stream = self.backend.stream_with_tools(
-            messages=backend_messages,
-            tools=tools,  # Use provided tools (for MassGen workflow)
-            agent_id=self.agent_id,
-            session_id=self.session_id,
-            **backend_params,
-        )
-
-        async for chunk in self._process_stream(backend_stream, tools):
-            yield chunk
+    def _get_backend_params(self) -> Dict[str, Any]:
+        """Get backend parameters from config."""
+        return self.config.get_backend_params()
 
     def get_status(self) -> Dict[str, Any]:
         """Get current agent status with config details."""
@@ -368,26 +387,43 @@ class ConfigurableAgent(SingleAgent):
                 "agent_type": "configurable",
                 "config": self.config.to_dict(),
                 "capabilities": {
-                    "web_search": self.config.backend_params.get(
-
-                    ),
-                    "code_execution": self.config.backend_params.get(
-                        "enable_code_interpreter", False
-                    ),
+                    "web_search": self.config.backend_params.get("enable_web_search", False),
+                    "code_execution": self.config.backend_params.get("enable_code_interpreter", False),
                 },
-            }
+            },
         )
         return status
 
+    def get_configurable_system_message(self) -> Optional[str]:
+        """Get the user-configurable part of the system message for ConfigurableAgent."""
+        # Try multiple sources in order of preference
+
+        # First check if backend has system prompt configuration
+        if self.config and self.config.backend_params:
+            backend_params = self.config.backend_params
+
+            # For Claude Code: prefer system_prompt (complete override)
+            if "system_prompt" in backend_params:
+                return backend_params["system_prompt"]
+
+            # Then append_system_prompt (additive)
+            if "append_system_prompt" in backend_params:
+                return backend_params["append_system_prompt"]
+
+        # Fall back to custom_system_instruction (deprecated but still supported)
+        if self.config and self.config.custom_system_instruction:
+            return self.config.custom_system_instruction
+
+        # Finally fall back to parent class implementation
+        return super().get_configurable_system_message()
+
 
 # =============================================================================
 # CONVENIENCE FUNCTIONS
 # =============================================================================
 
 
-def create_simple_agent(
-    backend: LLMBackend, system_message: str = None, agent_id: str = None
-) -> SingleAgent:
+def create_simple_agent(backend: LLMBackend, system_message: str = None, agent_id: str = None) -> SingleAgent:
     """Create a simple single agent."""
     # Use MassGen evaluation system message if no custom system message provided
     if system_message is None:
@@ -395,24 +431,10 @@ def create_simple_agent(
 
         templates = MessageTemplates()
         system_message = templates.evaluation_system_message()
-
-        import pdb
-
-        pdb.set_trace()
-        print(system_message)
-
-        import pdb
-
-        pdb.set_trace()
-        print(system_message)
-    return SingleAgent(
-        backend=backend, agent_id=agent_id, system_message=system_message
-    )
+    return SingleAgent(backend=backend, agent_id=agent_id, system_message=system_message)
 
 
-def create_expert_agent(
-    domain: str, backend: LLMBackend, model: str = "gpt-4o-mini"
-) -> ConfigurableAgent:
+def create_expert_agent(domain: str, backend: LLMBackend, model: str = "gpt-4o-mini") -> ConfigurableAgent:
     """Create an expert agent for a specific domain."""
     from .agent_config import AgentConfig
 
@@ -420,9 +442,7 @@ def create_expert_agent(
     return ConfigurableAgent(config=config, backend=backend)
 
 
-def create_research_agent(
-    backend: LLMBackend, model: str = "gpt-4o-mini"
-) -> ConfigurableAgent:
+def create_research_agent(backend: LLMBackend, model: str = "gpt-4o-mini") -> ConfigurableAgent:
     """Create a research agent with web search capabilities."""
     from .agent_config import AgentConfig
 
@@ -430,9 +450,7 @@ def create_research_agent(
     return ConfigurableAgent(config=config, backend=backend)
 
 
-def create_computational_agent(
-    backend: LLMBackend, model: str = "gpt-4o-mini"
-) -> ConfigurableAgent:
+def create_computational_agent(backend: LLMBackend, model: str = "gpt-4o-mini") -> ConfigurableAgent:
    """Create a computational agent with code execution."""
    from .agent_config import AgentConfig

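Note on the stateful-backend handling added to SingleAgent.chat() above: a backend that reports is_stateful() keeps its own context, so it receives only the new messages, while a stateless backend receives the full accumulated history; clear_history and reset_chat additionally call clear_history()/reset_state() on stateful backends. The sketch below isolates just the message-selection decision. The two toy backend classes and the helper function are hypothetical illustrations, not MassGen code; only the is_stateful() name comes from the diff.

```python
from typing import Any, Dict, List


class StatelessBackend:
    def is_stateful(self) -> bool:
        return False


class StatefulBackend:
    """Toy stand-in for a backend that keeps its own session context (e.g. a CLI-style backend)."""

    def is_stateful(self) -> bool:
        return True


def select_backend_messages(
    backend: Any,
    history: List[Dict[str, Any]],
    new_messages: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    # Mirrors the branch added to SingleAgent.chat(): stateful backends
    # only see the delta, stateless backends see everything so far.
    if backend.is_stateful():
        return list(new_messages)
    return list(history)


history = [{"role": "system", "content": "be brief"}, {"role": "user", "content": "hi"}]
new = [{"role": "user", "content": "and now?"}]
assert select_backend_messages(StatefulBackend(), history + new, new) == new
assert select_backend_messages(StatelessBackend(), history + new, new) == history + new
```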
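The _get_backend_params() hook introduced in this diff is what lets ConfigurableAgent drop its own copy of chat(): the base SingleAgent.chat() splats whatever the hook returns into backend.stream_with_tools(...), and the subclass only overrides the hook. A minimal sketch of that template-method pattern under stand-in class names (BaseAgent, ConfigDrivenAgent, build_request are illustrative, not the real MassGen types):

```python
from typing import Any, Dict, List


class BaseAgent:
    """Stand-in for SingleAgent: the base class owns the call flow."""

    def _get_backend_params(self) -> Dict[str, Any]:
        # Default: no extra parameters (mirrors SingleAgent's return {}).
        return {}

    def build_request(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
        # Analogous to chat() forwarding **self._get_backend_params()
        # into backend.stream_with_tools(...).
        return {"messages": messages, **self._get_backend_params()}


class ConfigDrivenAgent(BaseAgent):
    """Stand-in for ConfigurableAgent: parameters come from a config object."""

    def __init__(self, backend_params: Dict[str, Any]):
        self.backend_params = backend_params

    def _get_backend_params(self) -> Dict[str, Any]:
        return dict(self.backend_params)


if __name__ == "__main__":
    agent = ConfigDrivenAgent({"enable_web_search": True, "temperature": 0.2})
    print(agent.build_request([{"role": "user", "content": "hi"}]))
    # {'messages': [...], 'enable_web_search': True, 'temperature': 0.2}
```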