massgen 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Potentially problematic release: this version of massgen might be problematic.
- massgen/__init__.py +1 -1
- massgen/backend/base_with_custom_tool_and_mcp.py +453 -23
- massgen/backend/capabilities.py +39 -0
- massgen/backend/chat_completions.py +111 -197
- massgen/backend/claude.py +210 -181
- massgen/backend/gemini.py +1015 -1559
- massgen/backend/grok.py +3 -2
- massgen/backend/response.py +160 -220
- massgen/chat_agent.py +340 -20
- massgen/cli.py +399 -25
- massgen/config_builder.py +20 -54
- massgen/config_validator.py +931 -0
- massgen/configs/README.md +95 -10
- massgen/configs/memory/gpt5mini_gemini_baseline_research_to_implementation.yaml +94 -0
- massgen/configs/memory/gpt5mini_gemini_context_window_management.yaml +187 -0
- massgen/configs/memory/gpt5mini_gemini_research_to_implementation.yaml +127 -0
- massgen/configs/memory/gpt5mini_high_reasoning_gemini.yaml +107 -0
- massgen/configs/memory/single_agent_compression_test.yaml +64 -0
- massgen/configs/tools/custom_tools/claude_code_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_example_no_path.yaml +1 -1
- massgen/configs/tools/custom_tools/claude_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/computer_use_browser_example.yaml +1 -1
- massgen/configs/tools/custom_tools/computer_use_docker_example.yaml +1 -1
- massgen/configs/tools/custom_tools/gemini_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/interop/ag2_and_langgraph_lesson_planner.yaml +65 -0
- massgen/configs/tools/custom_tools/interop/ag2_and_openai_assistant_lesson_planner.yaml +65 -0
- massgen/configs/tools/custom_tools/interop/ag2_lesson_planner_example.yaml +48 -0
- massgen/configs/tools/custom_tools/interop/agentscope_lesson_planner_example.yaml +48 -0
- massgen/configs/tools/custom_tools/interop/langgraph_lesson_planner_example.yaml +49 -0
- massgen/configs/tools/custom_tools/interop/openai_assistant_lesson_planner_example.yaml +50 -0
- massgen/configs/tools/custom_tools/interop/smolagent_lesson_planner_example.yaml +49 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/two_models_with_tools_example.yaml +44 -0
- massgen/formatter/_gemini_formatter.py +61 -15
- massgen/memory/README.md +277 -0
- massgen/memory/__init__.py +26 -0
- massgen/memory/_base.py +193 -0
- massgen/memory/_compression.py +237 -0
- massgen/memory/_context_monitor.py +211 -0
- massgen/memory/_conversation.py +255 -0
- massgen/memory/_fact_extraction_prompts.py +333 -0
- massgen/memory/_mem0_adapters.py +257 -0
- massgen/memory/_persistent.py +687 -0
- massgen/memory/docker-compose.qdrant.yml +36 -0
- massgen/memory/docs/DESIGN.md +388 -0
- massgen/memory/docs/QUICKSTART.md +409 -0
- massgen/memory/docs/SUMMARY.md +319 -0
- massgen/memory/docs/agent_use_memory.md +408 -0
- massgen/memory/docs/orchestrator_use_memory.md +586 -0
- massgen/memory/examples.py +237 -0
- massgen/orchestrator.py +207 -7
- massgen/tests/memory/test_agent_compression.py +174 -0
- massgen/tests/memory/test_context_window_management.py +286 -0
- massgen/tests/memory/test_force_compression.py +154 -0
- massgen/tests/memory/test_simple_compression.py +147 -0
- massgen/tests/test_ag2_lesson_planner.py +223 -0
- massgen/tests/test_agent_memory.py +534 -0
- massgen/tests/test_config_validator.py +1156 -0
- massgen/tests/test_conversation_memory.py +382 -0
- massgen/tests/test_langgraph_lesson_planner.py +223 -0
- massgen/tests/test_orchestrator_memory.py +620 -0
- massgen/tests/test_persistent_memory.py +435 -0
- massgen/token_manager/token_manager.py +6 -0
- massgen/tool/__init__.py +2 -9
- massgen/tool/_decorators.py +52 -0
- massgen/tool/_extraframework_agents/ag2_lesson_planner_tool.py +251 -0
- massgen/tool/_extraframework_agents/agentscope_lesson_planner_tool.py +303 -0
- massgen/tool/_extraframework_agents/langgraph_lesson_planner_tool.py +275 -0
- massgen/tool/_extraframework_agents/openai_assistant_lesson_planner_tool.py +247 -0
- massgen/tool/_extraframework_agents/smolagent_lesson_planner_tool.py +180 -0
- massgen/tool/_manager.py +102 -16
- massgen/tool/_registered_tool.py +3 -0
- massgen/tool/_result.py +3 -0
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/METADATA +138 -77
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/RECORD +82 -37
- massgen/backend/gemini_mcp_manager.py +0 -545
- massgen/backend/gemini_trackers.py +0 -344
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/WHEEL +0 -0
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/entry_points.txt +0 -0
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/top_level.txt +0 -0
@@ -1,545 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Lightweight MCP manager for Gemini that wraps backend_utils with Gemini-specific streaming status, session conversion, and planning-mode checks.
-"""
-
-import asyncio
-from typing import Any, AsyncGenerator, Callable, Dict, List, Optional, Tuple
-
-from ..logger_config import log_backend_activity, logger
-from ..mcp_tools import (
-    MCPCircuitBreakerManager,
-    MCPClient,
-    MCPConfigurationError,
-    MCPConfigValidator,
-    MCPConnectionError,
-    MCPError,
-    MCPErrorHandler,
-    MCPExecutionManager,
-    MCPResourceManager,
-    MCPRetryHandler,
-    MCPServerError,
-    MCPSetupManager,
-    MCPTimeoutError,
-    MCPValidationError,
-)
-from ..mcp_tools.hooks import convert_sessions_to_permission_sessions
-from .base import StreamChunk
-
-
-class GeminiMCPManager:
-    def __init__(self, backend_instance) -> None:
-        self.backend = backend_instance
-        # References to backend state/config
-        self.mcp_servers = getattr(self.backend, "mcp_servers", [])
-        self.allowed_tools = getattr(self.backend, "allowed_tools", None)
-        self.exclude_tools = getattr(self.backend, "exclude_tools", None)
-        self.agent_id = getattr(self.backend, "agent_id", None)
-
-        # Counters (will be updated on backend instance)
-        self._mcp_tool_calls_count = getattr(self.backend, "_mcp_tool_calls_count", 0)
-        self._mcp_tool_failures = getattr(self.backend, "_mcp_tool_failures", 0)
-        self._mcp_tool_successes = getattr(self.backend, "_mcp_tool_successes", 0)
-
-        # MCP client and init state mirror backend
-        self._mcp_client: Optional[MCPClient] = getattr(self.backend, "_mcp_client", None)
-        self._mcp_initialized: bool = getattr(self.backend, "_mcp_initialized", False)
-
-        # Circuit breaker and filesystem manager references
-        self._mcp_tools_circuit_breaker = getattr(self.backend, "_mcp_tools_circuit_breaker", None)
-        self.filesystem_manager = getattr(self.backend, "filesystem_manager", None)
-
-    async def setup_mcp_with_status_stream(self, agent_id: Optional[str] = None) -> AsyncGenerator[StreamChunk, None]:
-        """Initialize MCP client with status streaming."""
-        if not self.mcp_servers or self._mcp_initialized:
-            if False:
-                yield  # make this an async generator
-            return
-
-        status_queue: asyncio.Queue[StreamChunk] = asyncio.Queue()
-        _agent_id = agent_id or self.agent_id
-
-        async def status_callback(status: str, details: Dict[str, Any]) -> None:
-            """Callback to queue status updates as StreamChunks."""
-            chunk = StreamChunk(
-                type="mcp_status",
-                status=status,
-                content=details.get("message", ""),
-                source="mcp_tools",
-            )
-            await status_queue.put(chunk)
-
-        setup_task = asyncio.create_task(self.setup_mcp_internal(_agent_id, status_callback))
-
-        while not setup_task.done():
-            try:
-                chunk = await asyncio.wait_for(status_queue.get(), timeout=0.1)
-                yield chunk
-            except asyncio.TimeoutError:
-                continue
-
-        try:
-            await setup_task
-        except Exception as e:
-            yield StreamChunk(
-                type="mcp_status",
-                status="error",
-                content=f"MCP setup failed: {e}",
-                source="mcp_tools",
-            )
-
-    async def setup_mcp_tools(self, agent_id: Optional[str] = None) -> None:
-        """Initialize MCP client (sessions only) - wrapper that consumes status stream."""
-        async for _ in self.setup_mcp_with_status_stream(agent_id):
-            pass
-
-    async def setup_mcp_internal(
-        self,
-        agent_id: Optional[str] = None,
-        status_callback: Optional[Callable[[str, Dict[str, Any]], None]] = None,
-    ) -> None:
-        """Internal MCP setup logic leveraging backend_utils managers."""
-        if not self.mcp_servers or self._mcp_initialized:
-            return
-
-        _agent_id = agent_id or self.agent_id
-
-        try:
-            # Validate MCP configuration
-            backend_config = {
-                "mcp_servers": self.mcp_servers,
-                "allowed_tools": self.allowed_tools,
-                "exclude_tools": self.exclude_tools,
-            }
-
-            try:
-                validated_config = MCPConfigValidator.validate_backend_mcp_config(backend_config)
-                self.mcp_servers = validated_config.get("mcp_servers", self.mcp_servers)
-                if status_callback:
-                    await status_callback("info", {"message": f"MCP configuration validated: {len(self.mcp_servers)} servers"})
-                log_backend_activity("gemini", "MCP configuration validated", {"server_count": len(self.mcp_servers)}, agent_id=_agent_id)
-            except MCPConfigurationError as e:
-                if status_callback:
-                    await status_callback("error", {"message": f"Invalid MCP configuration: {e}"})
-                raise RuntimeError(f"Invalid MCP configuration: {e}") from e
-            except MCPValidationError as e:
-                if status_callback:
-                    await status_callback("error", {"message": f"MCP validation error: {e}"})
-                raise RuntimeError(f"MCP validation error: {e}") from e
-            except Exception as e:
-                # Validation unavailable or unexpected error; continue with normalization path
-                log_backend_activity("gemini", "MCP validation unavailable or error", {"error": str(e)}, agent_id=_agent_id)
-
-            # Normalize servers
-            normalized_servers = MCPSetupManager.normalize_mcp_servers(self.mcp_servers, backend_name="gemini", agent_id=_agent_id)
-            if status_callback:
-                await status_callback("info", {"message": f"Setting up MCP sessions for {len(normalized_servers)} servers"})
-
-            # Apply circuit breaker filtering
-            if self._mcp_tools_circuit_breaker:
-                filtered_servers = MCPCircuitBreakerManager.apply_circuit_breaker_filtering(
-                    normalized_servers,
-                    self._mcp_tools_circuit_breaker,
-                    backend_name="gemini",
-                    agent_id=_agent_id,
-                )
-            else:
-                filtered_servers = normalized_servers
-
-            if not filtered_servers:
-                log_backend_activity("gemini", "All MCP servers blocked by circuit breaker", {}, agent_id=_agent_id)
-                if status_callback:
-                    await status_callback("warning", {"message": "All MCP servers blocked by circuit breaker"})
-                return
-
-            # Extract tool filtering parameters
-            allowed_tools = backend_config.get("allowed_tools")
-            exclude_tools = backend_config.get("exclude_tools")
-
-            # Setup MCP client via resource manager (handles retries and filtering)
-            client = await MCPResourceManager.setup_mcp_client(
-                servers=filtered_servers,
-                allowed_tools=allowed_tools,
-                exclude_tools=exclude_tools,
-                circuit_breaker=self._mcp_tools_circuit_breaker,
-                timeout_seconds=30,
-                backend_name="gemini",
-                agent_id=_agent_id,
-            )
-
-            if not client:
-                # Treat as connection failure
-                self._mcp_client = None
-                self.backend._mcp_client = None
-                if status_callback:
-                    await status_callback("error", {"message": "MCP connection failed: no servers connected"})
-                log_backend_activity("gemini", "MCP connection failed: no servers connected", {}, agent_id=_agent_id)
-                return
-
-            # Assign on success
-            self._mcp_client = client
-            self.backend._mcp_client = client
-            self._mcp_initialized = True
-            self.backend._mcp_initialized = True
-
-            log_backend_activity("gemini", "MCP sessions initialized successfully", {}, agent_id=_agent_id)
-            if status_callback:
-                # Attempt to list connected servers
-                try:
-                    names = client.get_server_names()
-                except Exception:
-                    names = []
-                await status_callback(
-                    "success",
-                    {"message": f"MCP sessions initialized successfully with {len(names)} servers"},
-                )
-
-        except Exception as e:
-            # Enhanced error mapping using backend_utils
-            log_type, user_message, _ = MCPErrorHandler.get_error_details(e)
-            log_backend_activity("gemini", f"MCP {log_type} during setup", {"error": str(e)}, agent_id=_agent_id)
-            self._mcp_client = None
-            self.backend._mcp_client = None
-            self._mcp_initialized = False
-            self.backend._mcp_initialized = False
-            if status_callback:
-                await status_callback("error", {"message": f"MCP session setup failed: {e}"})
-
-    async def handle_mcp_retry_error(self, error: Exception, retry_count: int, max_retries: int) -> Tuple[bool, AsyncGenerator[StreamChunk, None]]:
-        """Delegate retry error handling to backend_utils with StreamChunk emission."""
-        return await MCPRetryHandler.handle_retry_error(
-            error=error,
-            retry_count=retry_count,
-            max_retries=max_retries,
-            stream_chunk_class=StreamChunk,
-            backend_name="gemini",
-            agent_id=self.agent_id,
-        )
-
-    async def handle_mcp_error_and_fallback(self, error: Exception) -> AsyncGenerator[StreamChunk, None]:
-        """Handle MCP errors with specific messaging and fallback behavior."""
-        # increment backend failure counter
-        try:
-            self.backend._mcp_tool_failures += 1
-        except Exception:
-            pass
-
-        async for chunk in MCPRetryHandler.handle_error_and_fallback(
-            error=error,
-            tool_call_count=getattr(self.backend, "_mcp_tool_calls_count", 0),
-            stream_chunk_class=StreamChunk,
-            backend_name="gemini",
-            agent_id=self.agent_id,
-        ):
-            yield chunk
-
-    async def execute_mcp_function_with_retry(self, function_name: str, args: Dict[str, Any], functions: Dict, agent_id: Optional[str] = None) -> Any:
-        """Execute MCP function with retry and circuit breaker recording."""
-        _agent_id = agent_id or self.agent_id
-
-        async def stats_callback(action: str) -> int:
-            if action == "increment_calls":
-                self.backend._mcp_tool_calls_count += 1
-                return self.backend._mcp_tool_calls_count
-            elif action == "increment_failures":
-                self.backend._mcp_tool_failures += 1
-                return self.backend._mcp_tool_failures
-            return 0
-
-        async def circuit_breaker_callback(event: str, error_msg: str) -> None:
-            try:
-                if event == "failure":
-                    # Record failure for all configured servers
-                    servers = MCPSetupManager.normalize_mcp_servers(self.mcp_servers, backend_name="gemini", agent_id=_agent_id)
-                    if self._mcp_tools_circuit_breaker:
-                        await MCPCircuitBreakerManager.record_event(
-                            servers,
-                            self._mcp_tools_circuit_breaker,
-                            "failure",
-                            error_message=error_msg,
-                            backend_name="gemini",
-                            agent_id=_agent_id,
-                        )
-                else:
-                    # Record success only for connected servers
-                    connected_names: List[str] = []
-                    try:
-                        if self._mcp_client:
-                            connected_names = self._mcp_client.get_server_names()
-                    except Exception:
-                        connected_names = []
-
-                    if connected_names and self._mcp_tools_circuit_breaker:
-                        servers_to_record = [{"name": name} for name in connected_names]
-                        await MCPCircuitBreakerManager.record_event(
-                            servers_to_record,
-                            self._mcp_tools_circuit_breaker,
-                            "success",
-                            backend_name="gemini",
-                            agent_id=_agent_id,
-                        )
-            except Exception:
-                # Never fail the call due to circuit breaker recording issues
-                pass
-
-        return await MCPExecutionManager.execute_function_with_retry(
-            function_name=function_name,
-            args=args,
-            functions=functions,
-            max_retries=3,
-            stats_callback=stats_callback,
-            circuit_breaker_callback=circuit_breaker_callback,
-            logger_instance=logger,
-        )
-
-    async def setup_mcp_sessions_with_retry(self, agent_id: Optional[str] = None, max_retries: int = 5) -> Tuple[bool, AsyncGenerator[StreamChunk, None]]:
-        """Attempt to setup sessions with retries; returns (connected, status_chunks)."""
-        _agent_id = agent_id or self.agent_id
-        chunks: List[StreamChunk] = []
-        success: bool = False
-
-        for retry_count in range(1, max_retries + 1):
-            # Emit retry status
-            chunks.append(
-                StreamChunk(
-                    type="mcp_status",
-                    status="mcp_retry",
-                    content=f"Retrying MCP connection (attempt {retry_count}/{max_retries})",
-                    source="mcp_tools",
-                ),
-            )
-
-            try:
-                # Apply circuit breaker filtering before attempts
-                normalized = MCPSetupManager.normalize_mcp_servers(self.mcp_servers, backend_name="gemini", agent_id=_agent_id)
-                if self._mcp_tools_circuit_breaker:
-                    filtered = MCPCircuitBreakerManager.apply_circuit_breaker_filtering(
-                        normalized,
-                        self._mcp_tools_circuit_breaker,
-                        backend_name="gemini",
-                        agent_id=_agent_id,
-                    )
-                else:
-                    filtered = normalized
-
-                if not filtered:
-                    chunks.append(
-                        StreamChunk(
-                            type="mcp_status",
-                            status="mcp_blocked",
-                            content="All MCP servers blocked by circuit breaker",
-                            source="mcp_tools",
-                        ),
-                    )
-                    success = False
-                    break
-
-                # Validate config for allowed/excluded tools
-                try:
-                    MCPConfigValidator.validate_backend_mcp_config({"mcp_servers": filtered})
-                except Exception:
-                    pass
-
-                allowed_items = self.allowed_tools
-                exclude_items = self.exclude_tools
-                allowed_count = len(allowed_items) if isinstance(allowed_items, (list, tuple, set)) else (1 if allowed_items else 0)
-                exclude_count = len(exclude_items) if isinstance(exclude_items, (list, tuple, set)) else (1 if exclude_items else 0)
-                logger.debug(
-                    "[GeminiMCPManager] Retry filter settings | allowed=%d excluded=%d",
-                    allowed_count,
-                    exclude_count,
-                )
-
-                client = await MCPResourceManager.setup_mcp_client(
-                    servers=filtered,
-                    allowed_tools=self.allowed_tools,
-                    exclude_tools=self.exclude_tools,
-                    circuit_breaker=self._mcp_tools_circuit_breaker,
-                    timeout_seconds=30,
-                    backend_name="gemini",
-                    agent_id=_agent_id,
-                )
-
-                if client:
-                    # Assign to backend/manager and mark success
-                    self._mcp_client = client
-                    self.backend._mcp_client = client
-                    self._mcp_initialized = True
-                    self.backend._mcp_initialized = True
-
-                    # Record success event
-                    if self._mcp_tools_circuit_breaker:
-                        await MCPCircuitBreakerManager.record_event(
-                            filtered,
-                            self._mcp_tools_circuit_breaker,
-                            "success",
-                            backend_name="gemini",
-                            agent_id=_agent_id,
-                        )
-
-                    chunks.append(
-                        StreamChunk(
-                            type="mcp_status",
-                            status="mcp_connected",
-                            content=f"MCP connection successful on attempt {retry_count}",
-                            source="mcp_tools",
-                        ),
-                    )
-                    success = True
-                    break
-
-                # Client not connected; handle as transient error with retry logic
-                should_continue, error_chunks = await self.handle_mcp_retry_error(RuntimeError("No servers connected"), retry_count, max_retries)
-                # Drain any error chunks into list
-                async for ch in error_chunks:
-                    chunks.append(ch)
-                if not should_continue:
-                    success = False
-                    break
-
-            except (MCPConnectionError, MCPTimeoutError, MCPServerError, MCPError, Exception) as e:
-                should_continue, error_chunks = await self.handle_mcp_retry_error(e, retry_count, max_retries)
-                async for ch in error_chunks:
-                    chunks.append(ch)
-                if not should_continue:
-                    success = False
-                    break
-
-            # Progressive backoff between retries
-            await asyncio.sleep(0.5 * retry_count)
-
-        async def _generator():
-            for ch in chunks:
-                yield ch
-
-        return success, _generator()
-
-    def get_active_mcp_sessions(self, convert_to_permission_sessions: bool = True) -> List[Any]:
-        """Return active MCP ClientSession objects, optionally wrapped with permission sessions."""
-        sessions: List[Any] = []
-        try:
-            if self._mcp_client:
-                sessions = self._mcp_client.get_active_sessions()
-        except Exception:
-            sessions = []
-
-        if convert_to_permission_sessions and sessions and self.filesystem_manager:
-            try:
-                return convert_sessions_to_permission_sessions(sessions, self.filesystem_manager.path_permission_manager)
-            except Exception as e:
-                logger.error(f"[GeminiMCPManager] Failed to convert sessions to permission sessions: {e}")
-                return sessions
-        return sessions
-
-    def should_block_mcp_tools_in_planning_mode(self, is_planning_mode: bool, available_tools: List[str]) -> bool:
-        """Return True to block MCP tools if planning mode is enabled; logs details."""
-        if is_planning_mode:
-            log_backend_activity(
-                "gemini",
-                "MCP tools blocked in planning mode",
-                {"blocked_tools": len(available_tools or []), "tools_preview": (available_tools or [])[:5]},
-                agent_id=self.agent_id,
-            )
-            return True
-        return False
-
-    async def cleanup_genai_resources(self, stream, client) -> None:
-        """Cleanup google-genai resources to avoid unclosed aiohttp sessions."""
-        # Close stream
-        try:
-            if stream is not None:
-                close_fn = getattr(stream, "aclose", None) or getattr(stream, "close", None)
-                if close_fn is not None:
-                    maybe = close_fn()
-                    if hasattr(maybe, "__await__"):
-                        await maybe
-        except Exception as e:
-            log_backend_activity(
-                "gemini",
-                "Stream cleanup failed",
-                {"error": str(e)},
-                agent_id=self.agent_id,
-            )
-        # Close internal aiohttp session held by google-genai BaseApiClient
-        try:
-            if client is not None:
-                base_client = getattr(client, "_api_client", None)
-                if base_client is not None:
-                    session = getattr(base_client, "_aiohttp_session", None)
-                    if session is not None and hasattr(session, "close"):
-                        if not session.closed:
-                            await session.close()
-                            log_backend_activity(
-                                "gemini",
-                                "Closed google-genai aiohttp session",
-                                {},
-                                agent_id=self.agent_id,
-                            )
-                        base_client._aiohttp_session = None
-                        # Yield control to allow connector cleanup
-                        await asyncio.sleep(0)
-        except Exception as e:
-            log_backend_activity(
-                "gemini",
-                "Failed to close google-genai aiohttp session",
-                {"error": str(e)},
-                agent_id=self.agent_id,
-            )
-        # Close internal async transport if exposed
-        try:
-            if client is not None and hasattr(client, "aio") and client.aio is not None:
-                aio_obj = client.aio
-                for method_name in ("close", "stop"):
-                    method = getattr(aio_obj, method_name, None)
-                    if method:
-                        maybe = method()
-                        if hasattr(maybe, "__await__"):
-                            await maybe
-                        break
-        except Exception as e:
-            log_backend_activity(
-                "gemini",
-                "Client AIO cleanup failed",
-                {"error": str(e)},
-                agent_id=self.agent_id,
-            )
-
-        # Close client
-        try:
-            if client is not None:
-                for method_name in ("aclose", "close"):
-                    method = getattr(client, method_name, None)
-                    if method:
-                        maybe = method()
-                        if hasattr(maybe, "__await__"):
-                            await maybe
-                        break
-        except Exception as e:
-            log_backend_activity(
-                "gemini",
-                "Client cleanup failed",
-                {"error": str(e)},
-                agent_id=self.agent_id,
-            )
-
-    def is_mcp_connected(self) -> bool:
-        try:
-            return bool(self._mcp_initialized and self._mcp_client and self._mcp_client.is_connected())
-        except Exception:
-            return False
-
-    def get_mcp_server_names(self) -> List[str]:
-        try:
-            if self._mcp_client:
-                return self._mcp_client.get_server_names()
-        except Exception:
-            pass
-        return []
-
-    def get_mcp_tools(self) -> Dict[str, Any]:
-        try:
-            if self._mcp_client and hasattr(self._mcp_client, "tools"):
-                return self._mcp_client.tools
-        except Exception:
-            pass
-        return {}