massgen-0.0.3-py3-none-any.whl → massgen-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of massgen has been flagged by the registry's scanner and may be problematic.
- massgen/__init__.py +142 -8
- massgen/adapters/__init__.py +29 -0
- massgen/adapters/ag2_adapter.py +483 -0
- massgen/adapters/base.py +183 -0
- massgen/adapters/tests/__init__.py +0 -0
- massgen/adapters/tests/test_ag2_adapter.py +439 -0
- massgen/adapters/tests/test_agent_adapter.py +128 -0
- massgen/adapters/utils/__init__.py +2 -0
- massgen/adapters/utils/ag2_utils.py +236 -0
- massgen/adapters/utils/tests/__init__.py +0 -0
- massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
- massgen/agent_config.py +329 -55
- massgen/api_params_handler/__init__.py +10 -0
- massgen/api_params_handler/_api_params_handler_base.py +99 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
- massgen/api_params_handler/_claude_api_params_handler.py +113 -0
- massgen/api_params_handler/_response_api_params_handler.py +130 -0
- massgen/backend/__init__.py +39 -4
- massgen/backend/azure_openai.py +385 -0
- massgen/backend/base.py +341 -69
- massgen/backend/base_with_mcp.py +1102 -0
- massgen/backend/capabilities.py +386 -0
- massgen/backend/chat_completions.py +577 -130
- massgen/backend/claude.py +1033 -537
- massgen/backend/claude_code.py +1203 -0
- massgen/backend/cli_base.py +209 -0
- massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
- massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
- massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
- massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
- massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
- massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
- massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
- massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
- massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
- massgen/backend/docs/inference_backend.md +257 -0
- massgen/backend/docs/permissions_and_context_files.md +1085 -0
- massgen/backend/external.py +126 -0
- massgen/backend/gemini.py +1850 -241
- massgen/backend/grok.py +40 -156
- massgen/backend/inference.py +156 -0
- massgen/backend/lmstudio.py +171 -0
- massgen/backend/response.py +1095 -322
- massgen/chat_agent.py +131 -113
- massgen/cli.py +1560 -275
- massgen/config_builder.py +2396 -0
- massgen/configs/BACKEND_CONFIGURATION.md +458 -0
- massgen/configs/README.md +559 -216
- massgen/configs/ag2/ag2_case_study.yaml +27 -0
- massgen/configs/ag2/ag2_coder.yaml +34 -0
- massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
- massgen/configs/ag2/ag2_gemini.yaml +27 -0
- massgen/configs/ag2/ag2_groupchat.yaml +108 -0
- massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
- massgen/configs/ag2/ag2_single_agent.yaml +21 -0
- massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
- massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
- massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
- massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
- massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
- massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
- massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
- massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
- massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
- massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
- massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
- massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
- massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
- massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
- massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
- massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
- massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
- massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
- massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
- massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
- massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
- massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
- massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
- massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
- massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
- massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
- massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
- massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
- massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
- massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
- massgen/configs/debug/skip_coordination_test.yaml +27 -0
- massgen/configs/debug/test_sdk_migration.yaml +17 -0
- massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
- massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
- massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
- massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
- massgen/configs/providers/claude/claude.yaml +14 -0
- massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
- massgen/configs/providers/local/lmstudio.yaml +11 -0
- massgen/configs/providers/openai/gpt5.yaml +46 -0
- massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
- massgen/configs/providers/others/grok_single_agent.yaml +19 -0
- massgen/configs/providers/others/zai_coding_team.yaml +108 -0
- massgen/configs/providers/others/zai_glm45.yaml +12 -0
- massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
- massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
- massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
- massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
- massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
- massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
- massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
- massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
- massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
- massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
- massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
- massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
- massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
- massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
- massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
- massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
- massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
- massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
- massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
- massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
- massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
- massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
- massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
- massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
- massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
- massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
- massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
- massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
- massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
- massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
- massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
- massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
- massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
- massgen/coordination_tracker.py +708 -0
- massgen/docker/README.md +462 -0
- massgen/filesystem_manager/__init__.py +21 -0
- massgen/filesystem_manager/_base.py +9 -0
- massgen/filesystem_manager/_code_execution_server.py +545 -0
- massgen/filesystem_manager/_docker_manager.py +477 -0
- massgen/filesystem_manager/_file_operation_tracker.py +248 -0
- massgen/filesystem_manager/_filesystem_manager.py +813 -0
- massgen/filesystem_manager/_path_permission_manager.py +1261 -0
- massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
- massgen/formatter/__init__.py +10 -0
- massgen/formatter/_chat_completions_formatter.py +284 -0
- massgen/formatter/_claude_formatter.py +235 -0
- massgen/formatter/_formatter_base.py +156 -0
- massgen/formatter/_response_formatter.py +263 -0
- massgen/frontend/__init__.py +1 -2
- massgen/frontend/coordination_ui.py +471 -286
- massgen/frontend/displays/base_display.py +56 -11
- massgen/frontend/displays/create_coordination_table.py +1956 -0
- massgen/frontend/displays/rich_terminal_display.py +1259 -619
- massgen/frontend/displays/simple_display.py +9 -4
- massgen/frontend/displays/terminal_display.py +27 -68
- massgen/logger_config.py +681 -0
- massgen/mcp_tools/README.md +232 -0
- massgen/mcp_tools/__init__.py +105 -0
- massgen/mcp_tools/backend_utils.py +1035 -0
- massgen/mcp_tools/circuit_breaker.py +195 -0
- massgen/mcp_tools/client.py +894 -0
- massgen/mcp_tools/config_validator.py +138 -0
- massgen/mcp_tools/docs/circuit_breaker.md +646 -0
- massgen/mcp_tools/docs/client.md +950 -0
- massgen/mcp_tools/docs/config_validator.md +478 -0
- massgen/mcp_tools/docs/exceptions.md +1165 -0
- massgen/mcp_tools/docs/security.md +854 -0
- massgen/mcp_tools/exceptions.py +338 -0
- massgen/mcp_tools/hooks.py +212 -0
- massgen/mcp_tools/security.py +780 -0
- massgen/message_templates.py +342 -64
- massgen/orchestrator.py +1515 -241
- massgen/stream_chunk/__init__.py +35 -0
- massgen/stream_chunk/base.py +92 -0
- massgen/stream_chunk/multimodal.py +237 -0
- massgen/stream_chunk/text.py +162 -0
- massgen/tests/mcp_test_server.py +150 -0
- massgen/tests/multi_turn_conversation_design.md +0 -8
- massgen/tests/test_azure_openai_backend.py +156 -0
- massgen/tests/test_backend_capabilities.py +262 -0
- massgen/tests/test_backend_event_loop_all.py +179 -0
- massgen/tests/test_chat_completions_refactor.py +142 -0
- massgen/tests/test_claude_backend.py +15 -28
- massgen/tests/test_claude_code.py +268 -0
- massgen/tests/test_claude_code_context_sharing.py +233 -0
- massgen/tests/test_claude_code_orchestrator.py +175 -0
- massgen/tests/test_cli_backends.py +180 -0
- massgen/tests/test_code_execution.py +679 -0
- massgen/tests/test_external_agent_backend.py +134 -0
- massgen/tests/test_final_presentation_fallback.py +237 -0
- massgen/tests/test_gemini_planning_mode.py +351 -0
- massgen/tests/test_grok_backend.py +7 -10
- massgen/tests/test_http_mcp_server.py +42 -0
- massgen/tests/test_integration_simple.py +198 -0
- massgen/tests/test_mcp_blocking.py +125 -0
- massgen/tests/test_message_context_building.py +29 -47
- massgen/tests/test_orchestrator_final_presentation.py +48 -0
- massgen/tests/test_path_permission_manager.py +2087 -0
- massgen/tests/test_rich_terminal_display.py +14 -13
- massgen/tests/test_timeout.py +133 -0
- massgen/tests/test_v3_3agents.py +11 -12
- massgen/tests/test_v3_simple.py +8 -13
- massgen/tests/test_v3_three_agents.py +11 -18
- massgen/tests/test_v3_two_agents.py +8 -13
- massgen/token_manager/__init__.py +7 -0
- massgen/token_manager/token_manager.py +400 -0
- massgen/utils.py +52 -16
- massgen/v1/agent.py +45 -91
- massgen/v1/agents.py +18 -53
- massgen/v1/backends/gemini.py +50 -153
- massgen/v1/backends/grok.py +21 -54
- massgen/v1/backends/oai.py +39 -111
- massgen/v1/cli.py +36 -93
- massgen/v1/config.py +8 -12
- massgen/v1/logging.py +43 -127
- massgen/v1/main.py +18 -32
- massgen/v1/orchestrator.py +68 -209
- massgen/v1/streaming_display.py +62 -163
- massgen/v1/tools.py +8 -12
- massgen/v1/types.py +9 -23
- massgen/v1/utils.py +5 -23
- massgen-0.1.0.dist-info/METADATA +1245 -0
- massgen-0.1.0.dist-info/RECORD +273 -0
- massgen-0.1.0.dist-info/entry_points.txt +2 -0
- massgen/frontend/logging/__init__.py +0 -9
- massgen/frontend/logging/realtime_logger.py +0 -197
- massgen-0.0.3.dist-info/METADATA +0 -568
- massgen-0.0.3.dist-info/RECORD +0 -76
- massgen-0.0.3.dist-info/entry_points.txt +0 -2
- /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
Selected hunks from the diff:

massgen/stream_chunk/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+"""
+Stream Chunk Module
+
+This module provides classes for handling streaming responses from LLM backends.
+It supports both text-based content (regular text, tool calls, reasoning) and
+multimodal content (images, audio, video, documents).
+
+Classes:
+    BaseStreamChunk: Abstract base class for all stream chunks
+    TextStreamChunk: Stream chunk for text-based content
+
+Enums:
+    ChunkType: Types of stream chunks
+
+Data Classes:
+    MediaMetadata: Metadata for media content
+"""
+
+from .base import BaseStreamChunk, ChunkType
+from .multimodal import MediaEncoding, MediaMetadata, MediaType, MultimodalStreamChunk
+from .text import TextStreamChunk
+
+__all__ = [
+    # Base classes
+    "BaseStreamChunk",
+    "ChunkType",
+    # Text chunks
+    "TextStreamChunk",
+    # Multimodal classes
+    "MediaType",
+    "MediaEncoding",
+    "MediaMetadata",
+    "MultimodalStreamChunk",
+]
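For orientation, a minimal usage sketch of the exports above, assuming the massgen 0.1.0 package is installed; the chunk values are invented for illustration.

```python
from massgen.stream_chunk import ChunkType, TextStreamChunk

# Build a plain content chunk; BaseStreamChunk.__post_init__ would also accept
# the string "content" and coerce it to ChunkType.CONTENT.
chunk = TextStreamChunk(type=ChunkType.CONTENT, content="Hello", source="agent-1")

assert chunk.validate()  # CONTENT chunks only need a non-None content field
print(chunk.to_dict())   # {'type': 'content', 'source': 'agent-1', 'content': 'Hello'}
```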
massgen/stream_chunk/base.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+"""
+Base classes for stream chunks.
+Provides abstract base class and enums for streaming responses.
+"""
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from enum import Enum
+from typing import Any, Dict, Optional
+
+
+class ChunkType(Enum):
+    """Enumeration of chunk types for streaming responses."""
+
+    # Text-based chunks
+    CONTENT = "content"
+    TOOL_CALLS = "tool_calls"
+    COMPLETE_MESSAGE = "complete_message"
+    COMPLETE_RESPONSE = "complete_response"
+    DONE = "done"
+    ERROR = "error"
+    AGENT_STATUS = "agent_status"
+    BACKEND_STATUS = "backend_status"
+
+    # Reasoning chunks (OpenAI Response API)
+    REASONING = "reasoning"
+    REASONING_DONE = "reasoning_done"
+    REASONING_SUMMARY = "reasoning_summary"
+    REASONING_SUMMARY_DONE = "reasoning_summary_done"
+
+    # MCP-related chunks
+    MCP_STATUS = "mcp_status"
+
+    # Multimodal chunks
+    MEDIA = "media"
+    MEDIA_PROGRESS = "media_progress"
+    ATTACHMENT = "attachment"
+    ATTACHMENT_COMPLETE = "attachment_complete"
+
+
+@dataclass
+class BaseStreamChunk(ABC):
+    """
+    Abstract base class for stream chunks.
+
+    All stream chunks must inherit from this class and implement
+    the required abstract methods for validation and serialization.
+
+    Attributes:
+        type: ChunkType enum value indicating the chunk type
+        source: Optional source identifier (e.g., agent_id, backend name)
+        timestamp: Optional timestamp when the chunk was created
+        sequence_number: Optional sequence number for ordering chunks
+    """
+
+    type: ChunkType
+    source: Optional[str] = None
+    timestamp: Optional[float] = None
+    sequence_number: Optional[int] = None
+
+    @abstractmethod
+    def to_dict(self) -> Dict[str, Any]:
+        """
+        Convert chunk to dictionary representation.
+
+        Returns:
+            Dictionary representation of the chunk, suitable for JSON serialization.
+        """
+
+    @abstractmethod
+    def validate(self) -> bool:
+        """
+        Validate chunk data integrity.
+
+        Returns:
+            True if the chunk data is valid, False otherwise.
+        """
+
+    def __post_init__(self):
+        """Post-initialization validation."""
+        # Ensure type is a ChunkType enum
+        if not isinstance(self.type, ChunkType):
+            # Try to convert string to ChunkType
+            if isinstance(self.type, str):
+                try:
+                    self.type = ChunkType(self.type)
+                except ValueError:
+                    raise ValueError(f"Invalid chunk type: {self.type}")
+            else:
+                raise TypeError(f"Chunk type must be ChunkType enum or string, got {type(self.type)}")
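A small sketch of the `__post_init__` coercion behaviour defined above, exercised through the concrete `TextStreamChunk` subclass; the values are invented for illustration.

```python
from massgen.stream_chunk import ChunkType, TextStreamChunk

# A string type is coerced to the enum by BaseStreamChunk.__post_init__ ...
chunk = TextStreamChunk(type="reasoning", reasoning_delta="step 1...")
assert chunk.type is ChunkType.REASONING

# ... while an unknown string raises ValueError.
try:
    TextStreamChunk(type="not-a-chunk-type")
except ValueError as exc:
    print(exc)  # Invalid chunk type: not-a-chunk-type
```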
massgen/stream_chunk/multimodal.py
@@ -0,0 +1,237 @@
+# -*- coding: utf-8 -*-
+"""
+Multimodal stream chunk implementation.
+Handles media content including images, audio, video, and documents.
+"""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+from .base import BaseStreamChunk, ChunkType
+
+
+class MediaType(Enum):
+    """Supported media types for multimodal content."""
+
+    IMAGE = "image"
+    AUDIO = "audio"
+    VIDEO = "video"
+    FILE = "file"
+    DOCUMENT = "document"
+
+
+class MediaEncoding(Enum):
+    """Media encoding types."""
+
+    BASE64 = "base64"
+    URL = "url"
+    FILE_PATH = "file_path"
+    FILE_ID = "file_id"
+    BINARY = "binary"
+
+
+@dataclass
+class MediaMetadata:
+    """
+    Metadata for media content.
+
+    Attributes:
+        mime_type: MIME type of the media (e.g., "image/jpeg", "audio/mp3")
+        size_bytes: Size of the media in bytes
+        width: Width in pixels (for images/video)
+        height: Height in pixels (for images/video)
+        duration_seconds: Duration in seconds (for audio/video)
+        filename: Original filename
+        checksum: Checksum for integrity verification (e.g., SHA-256)
+    """
+
+    mime_type: str
+    size_bytes: Optional[int] = None
+    width: Optional[int] = None
+    height: Optional[int] = None
+    duration_seconds: Optional[float] = None
+    filename: Optional[str] = None
+    checksum: Optional[str] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary, excluding None values."""
+        return {k: v for k, v in self.__dict__.items() if v is not None}
+
+
+@dataclass
+class MultimodalStreamChunk(BaseStreamChunk):
+    """
+    Stream chunk for multimodal content.
+
+    This class handles streaming of media content including:
+    - Images (JPEG, PNG, GIF, WebP)
+    - Audio files (MP3, WAV, etc.)
+    - Video files (MP4, WebM, etc.)
+    - Documents (PDF, etc.)
+    - Generic files
+
+    Supports both complete media and streaming/chunked media delivery.
+
+    Attributes:
+        type: ChunkType enum value (typically MEDIA or MEDIA_PROGRESS)
+        text_content: Optional text caption or description
+        media_type: Type of media (IMAGE, AUDIO, VIDEO, etc.)
+        media_encoding: How the media is encoded (BASE64, URL, etc.)
+        media_data: The actual media data (URL string, base64 string, bytes, or file_id)
+        media_metadata: Metadata about the media
+        attachments: List of multiple attachments (for batch processing)
+        progress_percentage: Progress percentage for large media (0-100)
+        bytes_transferred: Number of bytes transferred so far
+        total_bytes: Total bytes to transfer
+        is_partial: True if this is part of a larger media stream
+        chunk_index: Index of this chunk in the stream
+        total_chunks: Total number of expected chunks
+        source: Source identifier
+        timestamp: When the chunk was created
+        sequence_number: Sequence number for ordering
+    """
+
+    # Text content (optional caption/description)
+    text_content: Optional[str] = None
+
+    # Media fields
+    media_type: Optional[MediaType] = None
+    media_encoding: Optional[MediaEncoding] = None
+    media_data: Optional[Any] = None  # URL, base64 string, bytes, or file_id
+    media_metadata: Optional[MediaMetadata] = None
+
+    # Multiple attachments support
+    attachments: Optional[List[Dict[str, Any]]] = None
+
+    # Progress tracking for large media
+    progress_percentage: Optional[float] = None
+    bytes_transferred: Optional[int] = None
+    total_bytes: Optional[int] = None
+
+    # Streaming support
+    is_partial: bool = False
+    chunk_index: Optional[int] = None
+    total_chunks: Optional[int] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        """
+        Convert to dictionary with proper serialization.
+
+        Handles enum conversion and special types like bytes and MediaMetadata.
+
+        Returns:
+            Dictionary representation suitable for JSON serialization.
+        """
+        result = {}
+        for key, value in self.__dict__.items():
+            if value is not None:
+                if key == "type" and isinstance(value, ChunkType):
+                    result[key] = value.value
+                elif isinstance(value, (MediaType, MediaEncoding)):
+                    result[key] = value.value
+                elif isinstance(value, MediaMetadata):
+                    result[key] = value.to_dict()
+                elif isinstance(value, bytes):
+                    # Convert bytes to base64 for JSON serialization
+                    import base64
+
+                    result[key] = base64.b64encode(value).decode("utf-8")
+                else:
+                    result[key] = value
+        return result
+
+    def validate(self) -> bool:
+        """
+        Validate multimodal chunk integrity.
+
+        Checks that required fields are present based on chunk type.
+
+        Returns:
+            True if chunk is valid, False otherwise.
+        """
+        if self.type == ChunkType.MEDIA:
+            # Media chunks must have media_type, encoding, and data
+            return self.media_type is not None and self.media_encoding is not None and self.media_data is not None
+
+        elif self.type == ChunkType.MEDIA_PROGRESS:
+            # Progress chunks must have progress_percentage
+            return self.progress_percentage is not None
+
+        elif self.type == ChunkType.ATTACHMENT:
+            # Attachment chunks should have media data or attachments list
+            return self.media_data is not None or self.attachments is not None
+
+        elif self.type == ChunkType.ATTACHMENT_COMPLETE:
+            # Attachment complete chunks are always valid
+            return True
+
+        # Unknown chunk type or no specific validation
+        return True
+
+    def is_complete(self) -> bool:
+        """
+        Check if media streaming is complete.
+
+        For non-partial chunks, always returns True.
+        For partial chunks, checks if this is the last chunk.
+
+        Returns:
+            True if media is complete, False if more chunks expected.
+        """
+        if not self.is_partial:
+            return True
+
+        if self.chunk_index is not None and self.total_chunks is not None:
+            return self.chunk_index >= self.total_chunks - 1
+
+        return False
+
+    def get_progress(self) -> Optional[float]:
+        """
+        Get progress percentage.
+
+        Calculates progress from either:
+        - Explicit progress_percentage field
+        - bytes_transferred / total_bytes
+        - chunk_index / total_chunks
+
+        Returns:
+            Progress percentage (0-100) or None if not available.
+        """
+        if self.progress_percentage is not None:
+            return self.progress_percentage
+
+        if self.bytes_transferred is not None and self.total_bytes is not None and self.total_bytes > 0:
+            return (self.bytes_transferred / self.total_bytes) * 100
+
+        if self.chunk_index is not None and self.total_chunks is not None and self.total_chunks > 0:
+            return ((self.chunk_index + 1) / self.total_chunks) * 100
+
+        return None
+
+    def __repr__(self) -> str:
+        """String representation for debugging."""
+        parts = [f"MultimodalStreamChunk(type={self.type.value}"]
+
+        if self.media_type:
+            parts.append(f"media_type={self.media_type.value}")
+
+        if self.media_encoding:
+            parts.append(f"encoding={self.media_encoding.value}")
+
+        if self.text_content:
+            parts.append(f"text='{self.text_content[:30]}...'")
+
+        if self.is_partial:
+            parts.append(f"partial={self.chunk_index}/{self.total_chunks}")
+
+        progress = self.get_progress()
+        if progress is not None:
+            parts.append(f"progress={progress:.1f}%")
+
+        if self.source:
+            parts.append(f"source='{self.source}'")
+
+        return ", ".join(parts) + ")"
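A minimal sketch of the validation and progress-tracking paths defined above; all field values are invented for illustration.

```python
from massgen.stream_chunk import (
    ChunkType,
    MediaEncoding,
    MediaMetadata,
    MediaType,
    MultimodalStreamChunk,
)

# A partial image chunk delivered as base64, the second of four pieces.
chunk = MultimodalStreamChunk(
    type=ChunkType.MEDIA,
    media_type=MediaType.IMAGE,
    media_encoding=MediaEncoding.BASE64,
    media_data="iVBORw0KGgo...",  # truncated base64 payload (illustrative)
    media_metadata=MediaMetadata(mime_type="image/png", width=512, height=512),
    is_partial=True,
    chunk_index=1,
    total_chunks=4,
)

assert chunk.validate()                   # MEDIA requires media_type, encoding and data
assert not chunk.is_complete()            # chunk_index 1 of 4 -> more chunks expected
print(chunk.get_progress())               # 50.0, derived from (chunk_index + 1) / total_chunks
print(chunk.to_dict()["media_metadata"])  # {'mime_type': 'image/png', 'width': 512, 'height': 512}
```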
massgen/stream_chunk/text.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+"""
+Text stream chunk implementation.
+Handles text-based content including regular text, tool calls, and reasoning.
+"""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional
+
+from .base import BaseStreamChunk, ChunkType
+
+
+@dataclass
+class TextStreamChunk(BaseStreamChunk):
+    """
+    Stream chunk for text-based content.
+
+    This class handles all text-based streaming content including:
+    - Regular text content
+    - Tool calls and function execution
+    - Reasoning text and summaries
+    - Status messages and errors
+    - Complete messages and responses
+
+    Attributes:
+        type: ChunkType enum value
+        content: Text content (for CONTENT, ERROR, STATUS chunks)
+        tool_calls: List of tool call dictionaries (for TOOL_CALLS chunks)
+        complete_message: Complete assistant message (for COMPLETE_MESSAGE chunks)
+        response: Raw API response (for COMPLETE_RESPONSE chunks)
+        error: Error message (for ERROR chunks)
+        status: Status message (for STATUS chunks)
+        reasoning_delta: Incremental reasoning text (for REASONING chunks)
+        reasoning_text: Complete reasoning text (for REASONING_DONE chunks)
+        reasoning_summary_delta: Incremental reasoning summary (for REASONING_SUMMARY chunks)
+        reasoning_summary_text: Complete reasoning summary (for REASONING_SUMMARY_DONE chunks)
+        item_id: Reasoning item identifier
+        content_index: Reasoning content index
+        summary_index: Reasoning summary index
+        source: Source identifier (e.g., agent_id, backend name)
+        timestamp: When the chunk was created
+        sequence_number: Sequence number for ordering
+    """
+
+    # Text content
+    content: Optional[str] = None
+
+    # Tool-related fields
+    tool_calls: Optional[List[Dict[str, Any]]] = None
+    complete_message: Optional[Dict[str, Any]] = None
+    response: Optional[Dict[str, Any]] = None
+
+    # Status fields
+    error: Optional[str] = None
+    status: Optional[str] = None
+
+    # Reasoning fields (OpenAI Response API)
+    reasoning_delta: Optional[str] = None
+    reasoning_text: Optional[str] = None
+    reasoning_summary_delta: Optional[str] = None
+    reasoning_summary_text: Optional[str] = None
+    item_id: Optional[str] = None
+    content_index: Optional[int] = None
+    summary_index: Optional[int] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        """
+        Convert to dictionary, excluding None values.
+
+        Returns:
+            Dictionary with all non-None fields, with ChunkType converted to string.
+        """
+        result = {}
+        for key, value in self.__dict__.items():
+            if value is not None:
+                if key == "type" and isinstance(value, ChunkType):
+                    result[key] = value.value
+                else:
+                    result[key] = value
+        return result
+
+    def validate(self) -> bool:
+        """
+        Validate text chunk integrity.
+
+        Checks that required fields are present based on chunk type.
+
+        Returns:
+            True if chunk is valid, False otherwise.
+        """
+        if self.type == ChunkType.CONTENT:
+            # Content chunks should have content (can be empty string)
+            return self.content is not None
+
+        elif self.type == ChunkType.TOOL_CALLS:
+            # Tool call chunks must have non-empty tool_calls list
+            return self.tool_calls is not None and len(self.tool_calls) > 0
+
+        elif self.type == ChunkType.COMPLETE_MESSAGE:
+            # Complete message chunks must have complete_message dict
+            return self.complete_message is not None
+
+        elif self.type == ChunkType.COMPLETE_RESPONSE:
+            # Complete response chunks must have response dict
+            return self.response is not None
+
+        elif self.type == ChunkType.ERROR:
+            # Error chunks must have error message
+            return self.error is not None or self.content is not None
+
+        elif self.type == ChunkType.REASONING:
+            # Reasoning chunks should have reasoning_delta
+            return self.reasoning_delta is not None
+
+        elif self.type == ChunkType.REASONING_DONE:
+            # Reasoning done chunks should have reasoning_text
+            return self.reasoning_text is not None
+
+        elif self.type == ChunkType.REASONING_SUMMARY:
+            # Reasoning summary chunks should have reasoning_summary_delta
+            return self.reasoning_summary_delta is not None
+
+        elif self.type == ChunkType.REASONING_SUMMARY_DONE:
+            # Reasoning summary done chunks should have reasoning_summary_text
+            return self.reasoning_summary_text is not None
+
+        elif self.type in [ChunkType.AGENT_STATUS, ChunkType.BACKEND_STATUS, ChunkType.MCP_STATUS]:
+            # Status chunks should have status or content
+            return self.status is not None or self.content is not None
+
+        elif self.type == ChunkType.DONE:
+            # Done chunks are always valid
+            return True
+
+        # Unknown chunk type or no specific validation
+        return True
+
+    def __repr__(self) -> str:
+        """String representation for debugging."""
+        parts = [f"TextStreamChunk(type={self.type.value}"]
+
+        if self.content:
+            content_preview = self.content[:50] + "..." if len(self.content) > 50 else self.content
+            parts.append(f"content='{content_preview}'")
+
+        if self.tool_calls:
+            parts.append(f"tool_calls={len(self.tool_calls)} calls")
+
+        if self.error:
+            parts.append(f"error='{self.error}'")
+
+        if self.status:
+            parts.append(f"status='{self.status}'")
+
+        if self.reasoning_delta:
+            parts.append(f"reasoning_delta='{self.reasoning_delta[:30]}...'")
+
+        if self.source:
+            parts.append(f"source='{self.source}'")
+
+        return ", ".join(parts) + ")"
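A sketch of how a consumer loop might dispatch on these chunk types; the chunks below are invented for illustration.

```python
from massgen.stream_chunk import ChunkType, TextStreamChunk

stream = [
    TextStreamChunk(type=ChunkType.CONTENT, content="Partial answer... "),
    TextStreamChunk(type=ChunkType.TOOL_CALLS, tool_calls=[{"name": "search", "arguments": {"q": "massgen"}}]),
    TextStreamChunk(type=ChunkType.DONE),
]

for chunk in stream:
    if not chunk.validate():
        continue  # skip malformed chunks rather than failing the whole stream
    if chunk.type == ChunkType.CONTENT:
        print(chunk.content, end="")
    elif chunk.type == ChunkType.TOOL_CALLS:
        print(f"\n[{len(chunk.tool_calls)} tool call(s) requested]")
    elif chunk.type == ChunkType.DONE:
        print("[stream complete]")
```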
massgen/tests/mcp_test_server.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Simple MCP test server for testing MCP integration.
+
+This server provides basic tools for testing MCP functionality.
+It implements the MCP protocol over stdio transport.
+"""
+
+import asyncio
+import json
+import sys
+from typing import Any, Dict
+
+
+class SimpleMCPServer:
+    """Simple MCP server implementation for testing."""
+
+    def __init__(self):
+        self.tools = {
+            "mcp_echo": {
+                "name": "mcp_echo",
+                "description": "Echo back the input text",
+                "inputSchema": {
+                    "type": "object",
+                    "properties": {"text": {"type": "string", "description": "Text to echo back"}},
+                    "required": ["text"],
+                },
+            },
+            "add_numbers": {
+                "name": "add_numbers",
+                "description": "Add two numbers together",
+                "inputSchema": {
+                    "type": "object",
+                    "properties": {
+                        "a": {"type": "number", "description": "First number"},
+                        "b": {"type": "number", "description": "Second number"},
+                    },
+                    "required": ["a", "b"],
+                },
+            },
+            "get_current_time": {
+                "name": "get_current_time",
+                "description": "Get the current timestamp",
+                "inputSchema": {"type": "object", "properties": {}},
+            },
+        }
+
+    async def handle_initialize(self, params: Dict[str, Any]) -> Dict[str, Any]:
+        """Handle initialize request."""
+        return {
+            "protocolVersion": "2024-11-05",
+            "capabilities": {
+                "tools": {"listChanged": True},
+                "resources": {"subscribe": False, "listChanged": False},
+                "prompts": {"listChanged": False},
+            },
+            "serverInfo": {"name": "simple-test-server", "version": "1.0.0"},
+        }
+
+    async def handle_tools_list(self, params: Dict[str, Any]) -> Dict[str, Any]:
+        """Handle tools/list request."""
+        return {"tools": list(self.tools.values())}
+
+    async def handle_tools_call(self, params: Dict[str, Any]) -> Dict[str, Any]:
+        """Handle tools/call request."""
+        tool_name = params.get("name")
+        arguments = params.get("arguments", {})
+
+        if tool_name == "mcp_echo":
+            text = arguments.get("text", "")
+            return {"content": [{"type": "text", "text": f"Echo: {text}"}]}
+
+        elif tool_name == "add_numbers":
+            a = arguments.get("a", 0)
+            b = arguments.get("b", 0)
+            result = a + b
+            return {"content": [{"type": "text", "text": f"Result: {a} + {b} = {result}"}]}
+
+        elif tool_name == "get_current_time":
+            import datetime
+
+            now = datetime.datetime.now().isoformat()
+            return {"content": [{"type": "text", "text": f"Current time: {now}"}]}
+
+        else:
+            raise ValueError(f"Unknown tool: {tool_name}")
+
+    async def handle_request(self, request: Dict[str, Any]) -> Dict[str, Any]:
+        """Handle incoming JSON-RPC request."""
+        method = request.get("method")
+        params = request.get("params", {})
+        request_id = request.get("id")
+
+        try:
+            if method == "initialize":
+                result = await self.handle_initialize(params)
+            elif method == "tools/list":
+                result = await self.handle_tools_list(params)
+            elif method == "tools/call":
+                result = await self.handle_tools_call(params)
+            else:
+                raise ValueError(f"Unknown method: {method}")
+
+            return {"jsonrpc": "2.0", "id": request_id, "result": result}
+
+        except Exception as e:
+            return {
+                "jsonrpc": "2.0",
+                "id": request_id,
+                "error": {"code": -32000, "message": str(e)},
+            }
+
+    async def run(self):
+        """Run the MCP server."""
+        while True:
+            try:
+                # Read line from stdin
+                line = sys.stdin.readline()
+                if not line:
+                    break
+
+                # Parse JSON-RPC request
+                request = json.loads(line.strip())
+
+                # Handle request
+                response = await self.handle_request(request)
+
+                # Send response to stdout
+                json.dump(response, sys.stdout)
+                sys.stdout.write("\n")
+                sys.stdout.flush()
+
+            except KeyboardInterrupt:
+                break
+            except Exception as e:
+                # Send error response
+                error_response = {
+                    "jsonrpc": "2.0",
+                    "id": None,
+                    "error": {"code": -32603, "message": f"Internal error: {str(e)}"},
+                }
+                json.dump(error_response, sys.stdout)
+                sys.stdout.write("\n")
+                sys.stdout.flush()
+
+
+if __name__ == "__main__":
+    server = SimpleMCPServer()
+    asyncio.run(server.run())
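A hedged sketch of driving this test server from a client over stdio. It assumes the module can be launched as `massgen.tests.mcp_test_server` from the installed package (the exact invocation is an assumption); the request values are illustrative.

```python
import json
import subprocess
import sys

# Launch the stdio MCP test server as a child process (module path is an assumption).
proc = subprocess.Popen(
    [sys.executable, "-m", "massgen.tests.mcp_test_server"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True,
)

def rpc(method, params=None, request_id=1):
    """Send one JSON-RPC request line and read one response line."""
    request = {"jsonrpc": "2.0", "id": request_id, "method": method, "params": params or {}}
    proc.stdin.write(json.dumps(request) + "\n")
    proc.stdin.flush()
    return json.loads(proc.stdout.readline())

print(rpc("initialize"))                                               # serverInfo: simple-test-server
print(rpc("tools/list", request_id=2)["result"]["tools"][0]["name"])   # mcp_echo
print(rpc("tools/call", {"name": "add_numbers", "arguments": {"a": 2, "b": 3}}, 3))
# -> result content text: "Result: 2 + 3 = 5"

proc.stdin.close()
proc.wait()
```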
massgen/tests/multi_turn_conversation_design.md
@@ -4,14 +4,6 @@
 
 This document outlines the design approach for implementing multi-turn conversations in the MassGen orchestrator, based on the proven approach used in MassGen v0.0.1.
 
-## Current State
-
-The current orchestrator has **partial multi-turn support**:
-- ✅ Accepts conversation history through `chat(messages)` interface
-- ✅ Maintains conversation history at orchestrator level
-- ❌ **Limited**: Only processes the last user message for coordination
-- ❌ **CLI Issue**: Multi-agent mode with history bypasses coordination UI display
-
 ## V0.0.1 Approach Analysis
 
 ### Key Innovation: Dynamic Context Reconstruction