kweaver-dolphin 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (199)
  1. DolphinLanguageSDK/__init__.py +58 -0
  2. dolphin/__init__.py +62 -0
  3. dolphin/cli/__init__.py +20 -0
  4. dolphin/cli/args/__init__.py +9 -0
  5. dolphin/cli/args/parser.py +567 -0
  6. dolphin/cli/builtin_agents/__init__.py +22 -0
  7. dolphin/cli/commands/__init__.py +4 -0
  8. dolphin/cli/interrupt/__init__.py +8 -0
  9. dolphin/cli/interrupt/handler.py +205 -0
  10. dolphin/cli/interrupt/keyboard.py +82 -0
  11. dolphin/cli/main.py +49 -0
  12. dolphin/cli/multimodal/__init__.py +34 -0
  13. dolphin/cli/multimodal/clipboard.py +327 -0
  14. dolphin/cli/multimodal/handler.py +249 -0
  15. dolphin/cli/multimodal/image_processor.py +214 -0
  16. dolphin/cli/multimodal/input_parser.py +149 -0
  17. dolphin/cli/runner/__init__.py +8 -0
  18. dolphin/cli/runner/runner.py +989 -0
  19. dolphin/cli/ui/__init__.py +10 -0
  20. dolphin/cli/ui/console.py +2795 -0
  21. dolphin/cli/ui/input.py +340 -0
  22. dolphin/cli/ui/layout.py +425 -0
  23. dolphin/cli/ui/stream_renderer.py +302 -0
  24. dolphin/cli/utils/__init__.py +8 -0
  25. dolphin/cli/utils/helpers.py +135 -0
  26. dolphin/cli/utils/version.py +49 -0
  27. dolphin/core/__init__.py +107 -0
  28. dolphin/core/agent/__init__.py +10 -0
  29. dolphin/core/agent/agent_state.py +69 -0
  30. dolphin/core/agent/base_agent.py +970 -0
  31. dolphin/core/code_block/__init__.py +0 -0
  32. dolphin/core/code_block/agent_init_block.py +0 -0
  33. dolphin/core/code_block/assign_block.py +98 -0
  34. dolphin/core/code_block/basic_code_block.py +1865 -0
  35. dolphin/core/code_block/explore_block.py +1327 -0
  36. dolphin/core/code_block/explore_block_v2.py +712 -0
  37. dolphin/core/code_block/explore_strategy.py +672 -0
  38. dolphin/core/code_block/judge_block.py +220 -0
  39. dolphin/core/code_block/prompt_block.py +32 -0
  40. dolphin/core/code_block/skill_call_deduplicator.py +291 -0
  41. dolphin/core/code_block/tool_block.py +129 -0
  42. dolphin/core/common/__init__.py +17 -0
  43. dolphin/core/common/constants.py +176 -0
  44. dolphin/core/common/enums.py +1173 -0
  45. dolphin/core/common/exceptions.py +133 -0
  46. dolphin/core/common/multimodal.py +539 -0
  47. dolphin/core/common/object_type.py +165 -0
  48. dolphin/core/common/output_format.py +432 -0
  49. dolphin/core/common/types.py +36 -0
  50. dolphin/core/config/__init__.py +16 -0
  51. dolphin/core/config/global_config.py +1289 -0
  52. dolphin/core/config/ontology_config.py +133 -0
  53. dolphin/core/context/__init__.py +12 -0
  54. dolphin/core/context/context.py +1580 -0
  55. dolphin/core/context/context_manager.py +161 -0
  56. dolphin/core/context/var_output.py +82 -0
  57. dolphin/core/context/variable_pool.py +356 -0
  58. dolphin/core/context_engineer/__init__.py +41 -0
  59. dolphin/core/context_engineer/config/__init__.py +5 -0
  60. dolphin/core/context_engineer/config/settings.py +402 -0
  61. dolphin/core/context_engineer/core/__init__.py +7 -0
  62. dolphin/core/context_engineer/core/budget_manager.py +327 -0
  63. dolphin/core/context_engineer/core/context_assembler.py +583 -0
  64. dolphin/core/context_engineer/core/context_manager.py +637 -0
  65. dolphin/core/context_engineer/core/tokenizer_service.py +260 -0
  66. dolphin/core/context_engineer/example/incremental_example.py +267 -0
  67. dolphin/core/context_engineer/example/traditional_example.py +334 -0
  68. dolphin/core/context_engineer/services/__init__.py +5 -0
  69. dolphin/core/context_engineer/services/compressor.py +399 -0
  70. dolphin/core/context_engineer/utils/__init__.py +6 -0
  71. dolphin/core/context_engineer/utils/context_utils.py +441 -0
  72. dolphin/core/context_engineer/utils/message_formatter.py +270 -0
  73. dolphin/core/context_engineer/utils/token_utils.py +139 -0
  74. dolphin/core/coroutine/__init__.py +15 -0
  75. dolphin/core/coroutine/context_snapshot.py +154 -0
  76. dolphin/core/coroutine/context_snapshot_profile.py +922 -0
  77. dolphin/core/coroutine/context_snapshot_store.py +268 -0
  78. dolphin/core/coroutine/execution_frame.py +145 -0
  79. dolphin/core/coroutine/execution_state_registry.py +161 -0
  80. dolphin/core/coroutine/resume_handle.py +101 -0
  81. dolphin/core/coroutine/step_result.py +101 -0
  82. dolphin/core/executor/__init__.py +18 -0
  83. dolphin/core/executor/debug_controller.py +630 -0
  84. dolphin/core/executor/dolphin_executor.py +1063 -0
  85. dolphin/core/executor/executor.py +624 -0
  86. dolphin/core/flags/__init__.py +27 -0
  87. dolphin/core/flags/definitions.py +49 -0
  88. dolphin/core/flags/manager.py +113 -0
  89. dolphin/core/hook/__init__.py +95 -0
  90. dolphin/core/hook/expression_evaluator.py +499 -0
  91. dolphin/core/hook/hook_dispatcher.py +380 -0
  92. dolphin/core/hook/hook_types.py +248 -0
  93. dolphin/core/hook/isolated_variable_pool.py +284 -0
  94. dolphin/core/interfaces.py +53 -0
  95. dolphin/core/llm/__init__.py +0 -0
  96. dolphin/core/llm/llm.py +495 -0
  97. dolphin/core/llm/llm_call.py +100 -0
  98. dolphin/core/llm/llm_client.py +1285 -0
  99. dolphin/core/llm/message_sanitizer.py +120 -0
  100. dolphin/core/logging/__init__.py +20 -0
  101. dolphin/core/logging/logger.py +526 -0
  102. dolphin/core/message/__init__.py +8 -0
  103. dolphin/core/message/compressor.py +749 -0
  104. dolphin/core/parser/__init__.py +8 -0
  105. dolphin/core/parser/parser.py +405 -0
  106. dolphin/core/runtime/__init__.py +10 -0
  107. dolphin/core/runtime/runtime_graph.py +926 -0
  108. dolphin/core/runtime/runtime_instance.py +446 -0
  109. dolphin/core/skill/__init__.py +14 -0
  110. dolphin/core/skill/context_retention.py +157 -0
  111. dolphin/core/skill/skill_function.py +686 -0
  112. dolphin/core/skill/skill_matcher.py +282 -0
  113. dolphin/core/skill/skillkit.py +700 -0
  114. dolphin/core/skill/skillset.py +72 -0
  115. dolphin/core/trajectory/__init__.py +10 -0
  116. dolphin/core/trajectory/recorder.py +189 -0
  117. dolphin/core/trajectory/trajectory.py +522 -0
  118. dolphin/core/utils/__init__.py +9 -0
  119. dolphin/core/utils/cache_kv.py +212 -0
  120. dolphin/core/utils/tools.py +340 -0
  121. dolphin/lib/__init__.py +93 -0
  122. dolphin/lib/debug/__init__.py +8 -0
  123. dolphin/lib/debug/visualizer.py +409 -0
  124. dolphin/lib/memory/__init__.py +28 -0
  125. dolphin/lib/memory/async_processor.py +220 -0
  126. dolphin/lib/memory/llm_calls.py +195 -0
  127. dolphin/lib/memory/manager.py +78 -0
  128. dolphin/lib/memory/sandbox.py +46 -0
  129. dolphin/lib/memory/storage.py +245 -0
  130. dolphin/lib/memory/utils.py +51 -0
  131. dolphin/lib/ontology/__init__.py +12 -0
  132. dolphin/lib/ontology/basic/__init__.py +0 -0
  133. dolphin/lib/ontology/basic/base.py +102 -0
  134. dolphin/lib/ontology/basic/concept.py +130 -0
  135. dolphin/lib/ontology/basic/object.py +11 -0
  136. dolphin/lib/ontology/basic/relation.py +63 -0
  137. dolphin/lib/ontology/datasource/__init__.py +27 -0
  138. dolphin/lib/ontology/datasource/datasource.py +66 -0
  139. dolphin/lib/ontology/datasource/oracle_datasource.py +338 -0
  140. dolphin/lib/ontology/datasource/sql.py +845 -0
  141. dolphin/lib/ontology/mapping.py +177 -0
  142. dolphin/lib/ontology/ontology.py +733 -0
  143. dolphin/lib/ontology/ontology_context.py +16 -0
  144. dolphin/lib/ontology/ontology_manager.py +107 -0
  145. dolphin/lib/skill_results/__init__.py +31 -0
  146. dolphin/lib/skill_results/cache_backend.py +559 -0
  147. dolphin/lib/skill_results/result_processor.py +181 -0
  148. dolphin/lib/skill_results/result_reference.py +179 -0
  149. dolphin/lib/skill_results/skillkit_hook.py +324 -0
  150. dolphin/lib/skill_results/strategies.py +328 -0
  151. dolphin/lib/skill_results/strategy_registry.py +150 -0
  152. dolphin/lib/skillkits/__init__.py +44 -0
  153. dolphin/lib/skillkits/agent_skillkit.py +155 -0
  154. dolphin/lib/skillkits/cognitive_skillkit.py +82 -0
  155. dolphin/lib/skillkits/env_skillkit.py +250 -0
  156. dolphin/lib/skillkits/mcp_adapter.py +616 -0
  157. dolphin/lib/skillkits/mcp_skillkit.py +771 -0
  158. dolphin/lib/skillkits/memory_skillkit.py +650 -0
  159. dolphin/lib/skillkits/noop_skillkit.py +31 -0
  160. dolphin/lib/skillkits/ontology_skillkit.py +89 -0
  161. dolphin/lib/skillkits/plan_act_skillkit.py +452 -0
  162. dolphin/lib/skillkits/resource/__init__.py +52 -0
  163. dolphin/lib/skillkits/resource/models/__init__.py +6 -0
  164. dolphin/lib/skillkits/resource/models/skill_config.py +109 -0
  165. dolphin/lib/skillkits/resource/models/skill_meta.py +127 -0
  166. dolphin/lib/skillkits/resource/resource_skillkit.py +393 -0
  167. dolphin/lib/skillkits/resource/skill_cache.py +215 -0
  168. dolphin/lib/skillkits/resource/skill_loader.py +395 -0
  169. dolphin/lib/skillkits/resource/skill_validator.py +406 -0
  170. dolphin/lib/skillkits/resource_skillkit.py +11 -0
  171. dolphin/lib/skillkits/search_skillkit.py +163 -0
  172. dolphin/lib/skillkits/sql_skillkit.py +274 -0
  173. dolphin/lib/skillkits/system_skillkit.py +509 -0
  174. dolphin/lib/skillkits/vm_skillkit.py +65 -0
  175. dolphin/lib/utils/__init__.py +9 -0
  176. dolphin/lib/utils/data_process.py +207 -0
  177. dolphin/lib/utils/handle_progress.py +178 -0
  178. dolphin/lib/utils/security.py +139 -0
  179. dolphin/lib/utils/text_retrieval.py +462 -0
  180. dolphin/lib/vm/__init__.py +11 -0
  181. dolphin/lib/vm/env_executor.py +895 -0
  182. dolphin/lib/vm/python_session_manager.py +453 -0
  183. dolphin/lib/vm/vm.py +610 -0
  184. dolphin/sdk/__init__.py +60 -0
  185. dolphin/sdk/agent/__init__.py +12 -0
  186. dolphin/sdk/agent/agent_factory.py +236 -0
  187. dolphin/sdk/agent/dolphin_agent.py +1106 -0
  188. dolphin/sdk/api/__init__.py +4 -0
  189. dolphin/sdk/runtime/__init__.py +8 -0
  190. dolphin/sdk/runtime/env.py +363 -0
  191. dolphin/sdk/skill/__init__.py +10 -0
  192. dolphin/sdk/skill/global_skills.py +706 -0
  193. dolphin/sdk/skill/traditional_toolkit.py +260 -0
  194. kweaver_dolphin-0.1.0.dist-info/METADATA +521 -0
  195. kweaver_dolphin-0.1.0.dist-info/RECORD +199 -0
  196. kweaver_dolphin-0.1.0.dist-info/WHEEL +5 -0
  197. kweaver_dolphin-0.1.0.dist-info/entry_points.txt +27 -0
  198. kweaver_dolphin-0.1.0.dist-info/licenses/LICENSE.txt +201 -0
  199. kweaver_dolphin-0.1.0.dist-info/top_level.txt +2 -0
@@ -0,0 +1,616 @@
+ import asyncio
+ import traceback
+ import time
+ from typing import Dict, List, Any, Optional
+ from dataclasses import dataclass, field
+ from collections import deque
+
+ # Using the official MCP SDK
+ from dolphin.core.logging.logger import get_logger
+ from mcp.client.session import ClientSession
+ from mcp.client.stdio import StdioServerParameters, stdio_client
+
+ logger = get_logger("skill.mcp_skillkit")
+
+
+ @dataclass
+ class MCPServerConfig:
+     """MCP Server Configuration"""
+
+     name: str
+     command: str  # Start command, such as "npx"
+     args: List[str] = field(default_factory=list)  # Parameter List
+     env: Optional[Dict[str, str]] = None  # Environment Variables
+     timeout: int = 30
+     enabled: bool = True
+     auth: Optional[Dict[str, str]] = None  # Authentication Information
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Converts the object to a dictionary."""
+         return {
+             "name": self.name,
+             "command": self.command,
+             "args": self.args,
+             "env": self.env,
+             "timeout": self.timeout,
+             "enabled": self.enabled,
+             "auth": self.auth,
+         }
+
+
+ class MCPConnectionPool:
+     """MCP Connection Pool Manager - Supports Concurrency and Connection Reuse"""
+
+     def __init__(self, max_connections_per_server: int = 5):
+         self.max_connections_per_server = max_connections_per_server
+         self.pool: Dict[str, deque[Dict[str, Any]]] = {}
+         self.conditions: Dict[str, asyncio.Condition] = {}
+         self._pool_lock = asyncio.Lock()
+         # Add health check cache
+         self._health_check_cache: Dict[str, Dict[str, Any]] = {}
+         self._health_check_cache_ttl = 30  # 30-second cache time
+
+     async def _create_connection_object(
+         self, server_params: StdioServerParameters
+     ) -> Dict[str, Any]:
+         """Creates a single connection object."""
+         logger.debug("Creating new connection...")
+
+         client = None
+         session = None
+
+         try:
+             # Create client
+             client = stdio_client(server_params)
+             read_stream, write_stream = await client.__aenter__()
+
+             # Create session
+             session = ClientSession(read_stream, write_stream)
+             await session.__aenter__()
+
+             # Initialize connection
+             await session.initialize()
+
+             return {
+                 "client": client,
+                 "session": session,
+                 "in_use": False,
+                 "created_at": asyncio.get_event_loop().time(),
+                 "invalid": False,
+             }
+
+         except Exception as e:
+             logger.error(f"Failed to create connection: {e}")
+
+             # Clean up the resources created above
+             if session:
+                 try:
+                     await session.__aexit__(None, None, None)
+                 except:
+                     pass
+
+             if client:
+                 try:
+                     await client.__aexit__(None, None, None)
+                 except:
+                     pass
+
+             raise
+
+     async def _cleanup_connection_object(self, conn: Dict[str, Any]):
+         """Cleans up a single connection object."""
+         logger.debug("Cleaning up connection object")
+
+         # Remove from health check cache before cleanup
+         conn_id = id(conn)
+         self._health_check_cache.pop(conn_id, None)
+
+         # Improve cleanup logic to avoid cancel scope errors
+         session = conn.get("session")
+         client = conn.get("client")
+
+         # First clean up the session
+         if session:
+             try:
+                 # Ensure cleanup within the same async context
+                 if hasattr(session, "__aexit__"):
+                     await session.__aexit__(None, None, None)
+                     logger.debug("Successfully cleaned up session")
+             except Exception as e:
+                 logger.warning(f"Error cleaning up session: {e}")
+
+         # Then clean up the client
+         if client:
+             try:
+                 # Avoid cleaning up the client across different tasks
+                 if hasattr(client, "__aexit__"):
+                     await client.__aexit__(None, None, None)
+                     logger.debug("Successfully cleaned up client")
+             except Exception as e:
+                 logger.warning(f"Error cleaning up client: {e}")
+
+         # Clean up the connection dictionary
+         conn.clear()
+
+     async def is_connection_healthy(self, conn: Dict[str, Any]) -> bool:
+         """Checks if a connection is still healthy."""
+         if not conn.get("session"):
+             return False
+
+         # Get the unique identifier of the connection
+         conn_id = id(conn)
+         current_time = (
+             time.time()
+         )  # Use time.time() instead of loop.time() for thread safety
+
+         # Check cache
+         if conn_id in self._health_check_cache:
+             cached_result = self._health_check_cache[conn_id]
+             if current_time - cached_result["timestamp"] < self._health_check_cache_ttl:
+                 return cached_result["healthy"]
+
+         # Perform health check
+         try:
+             session = conn["session"]
+             if hasattr(session, "_read_stream") and hasattr(session, "_write_stream"):
+                 # More robust health check - avoid calling methods that may not exist
+                 read_stream = session._read_stream
+                 write_stream = session._write_stream
+
+                 # Check if write stream is closing
+                 writer_closing = False
+                 if hasattr(write_stream, "is_closing"):
+                     try:
+                         writer_closing = write_stream.is_closing()
+                     except Exception:
+                         # If we can't check, assume it's not closing
+                         writer_closing = False
+
+                 # Check if read stream is at EOF (more safely)
+                 reader_eof = False
+                 if hasattr(read_stream, "at_eof"):
+                     try:
+                         reader_eof = read_stream.at_eof()
+                     except Exception:
+                         # If at_eof doesn't exist or fails, check alternative ways
+                         if hasattr(read_stream, "_closed"):
+                             reader_eof = read_stream._closed
+                         elif hasattr(read_stream, "is_closing"):
+                             try:
+                                 reader_eof = read_stream.is_closing()
+                             except Exception:
+                                 reader_eof = False
+                         else:
+                             reader_eof = False
+
+                 is_healthy = not (reader_eof or writer_closing)
+
+                 # Cache the result
+                 self._health_check_cache[conn_id] = {
+                     "healthy": is_healthy,
+                     "timestamp": current_time,
+                 }
+
+                 if reader_eof or writer_closing:
+                     logger.warning("Connection streams are closed or at eof.")
+                     return False
+                 return True
+             return False
+         except Exception as e:
+             logger.warning(f"Health check failed: {e}")
+             # Cache the failed result
+             self._health_check_cache[conn_id] = {
+                 "healthy": False,
+                 "timestamp": current_time,
+             }
+             return False
+
+     async def acquire(
+         self, server_name: str, server_params: StdioServerParameters
+     ) -> Dict[str, Any]:
+         """Acquire a connection from the pool."""
+         async with self._pool_lock:
+             if server_name not in self.conditions:
+                 self.conditions[server_name] = asyncio.Condition()
+                 self.pool[server_name] = deque()
+
+         condition = self.conditions[server_name]
+
+         async with condition:
+             while True:
+                 # Find an available connection - use list copy to avoid concurrent modification
+                 available_conn = None
+                 stale_connections = []
+
+                 # Create a snapshot of the connection list to avoid concurrent modification
+                 connections_snapshot = list(self.pool[server_name])
+
+                 for conn in connections_snapshot:
+                     if not conn.get("in_use", False) and not conn.get("invalid", False):
+                         if await self.is_connection_healthy(conn):
+                             conn["in_use"] = True
+                             available_conn = conn
+                             # Remove from pool to prevent duplicates
+                             if conn in self.pool[server_name]:
+                                 self.pool[server_name].remove(conn)
+                             logger.debug(
+                                 f"Reusing existing connection for {server_name}"
+                             )
+                             break
+                         else:
+                             logger.warning(
+                                 f"Found stale connection for {server_name}. Marking for cleanup."
+                             )
+                             conn["invalid"] = True
+                             stale_connections.append(conn)
+
+                 # Clean invalid connections - a safer approach
+                 for stale_conn in stale_connections:
+                     if stale_conn in self.pool[server_name]:
+                         self.pool[server_name].remove(stale_conn)
+                     # Asynchronous cleanup to avoid blocking
+                     try:
+                         await self._cleanup_connection_object(stale_conn)
+                     except Exception as e:
+                         logger.warning(
+                             f"Error cleaning up stale connection for {server_name}: {e}"
+                         )
+
+                 if available_conn:
+                     return available_conn
+
+                 # No free connection found, check if we can create one
+                 # Recalculate the number of active connections to prevent race conditions
+                 active_connections = sum(
+                     1
+                     for c in self.pool[server_name]
+                     if c.get("in_use", False) and not c.get("invalid", False)
+                 )
+                 total_connections = len(
+                     [c for c in self.pool[server_name] if not c.get("invalid", False)]
+                 )
+
+                 if total_connections < self.max_connections_per_server:
+                     logger.debug(
+                         f"Creating new connection for {server_name} (active: {active_connections}, total: {total_connections})"
+                     )
+                     try:
+                         conn = await self._create_connection_object(server_params)
+                         conn["in_use"] = True
+                         # Add to pool immediately to prevent duplicate creation
+                         self.pool[server_name].append(conn)
+                         return conn
+                     except Exception as e:
+                         logger.error(
+                             f"Failed to create connection for {server_name}: {e}"
+                         )
+                         # Clean up any invalid connections that may have been created
+                         if "conn" in locals():
+                             try:
+                                 await self._cleanup_connection_object(conn)
+                             except:
+                                 pass
+                         raise
+                 else:
+                     # Pool is full, wait for a connection to be released
+                     logger.debug(
+                         f"Pool for {server_name} is full. Waiting for a connection."
+                     )
+                     await condition.wait()
+
+     async def release(self, server_name: str, conn: Dict[str, Any]):
+         """Release a connection back to the pool."""
+         if server_name not in self.conditions:
+             logger.warning(
+                 f"Attempted to release a connection for a non-existent pool: {server_name}"
+             )
+             await self._cleanup_connection_object(conn)
+             return
+
+         condition = self.conditions[server_name]
+         async with condition:
+             conn["in_use"] = False
+             # Only add back to pool if not already present (prevent duplicates)
+             if conn not in self.pool[server_name]:
+                 self.pool[server_name].appendleft(conn)  # Add to the left of the deque
+                 logger.debug(f"Connection for {server_name} released back to pool.")
+             else:
+                 logger.debug(
+                     f"Connection for {server_name} already in pool, skipping add."
+                 )
+             condition.notify()
+
+     async def cleanup(self):
+         """Cleanup all connections in the pool."""
+         async with self._pool_lock:
+             server_names = list(self.pool.keys())
+
+         for server_name in server_names:
+             connections = self.pool.pop(server_name, deque())
+             logger.debug(
+                 f"Cleaning up {len(connections)} connections for {server_name}"
+             )
+             for conn in connections:
+                 await self._cleanup_connection_object(conn)
+
+         async with self._pool_lock:
+             self.conditions.clear()
+             # Clean health check cache
+             self._health_check_cache.clear()
+
+     def _cleanup_stale_health_cache(self):
+         """Clean up expired health check cache entries."""
+         current_time = time.time()  # Use time.time() for thread safety
+         stale_keys = []
+
+         for conn_id, cached_result in self._health_check_cache.items():
+             if (
+                 current_time - cached_result["timestamp"]
+                 > self._health_check_cache_ttl * 2
+             ):
+                 stale_keys.append(conn_id)
+
+         for key in stale_keys:
+             del self._health_check_cache[key]
+
+     def mark_connection_used(self, server_name: str):
+         """Mark a connection as used."""
+         # Periodically clean up expired health check caches
+         self._cleanup_stale_health_cache()
+         pass  # This is now handled by acquire/release logic
+
+
+ # Global connection pool instance
+ _connection_pool = MCPConnectionPool(max_connections_per_server=5)
+
+
+ class MCPAdapter:
+     """MCP Adapter - Simplified Version"""
+
+     def __init__(self, config: MCPServerConfig):
+         self.config = config
+
+     async def call_tool_with_connection_reuse(
+         self, tool_name: str, arguments: Dict[str, Any]
+     ) -> Any:
+         """Call a tool, reusing pooled connections."""
+         global _connection_pool
+         max_retries = 2
+         last_exception = None
+
+         for attempt in range(max_retries + 1):
+             connection = None
+             try:
+                 server_params = StdioServerParameters(
+                     command=self.config.command,
+                     args=self.config.args,
+                     env=self.config.env,
+                 )
+
+                 connection = await _connection_pool.acquire(
+                     self.config.name, server_params
+                 )
+                 session = connection["session"]
+
+                 logger.debug(f"Calling tool {tool_name} with arguments: {arguments}")
+                 # Add timeout protection to prevent tools from waiting indefinitely
+                 try:
+                     result = await asyncio.wait_for(
+                         session.call_tool(tool_name, arguments),
+                         timeout=self.config.timeout,
+                     )
+                 except asyncio.TimeoutError:
+                     # Mark connection as invalid immediately on timeout
+                     if connection:
+                         connection["invalid"] = True
+                     logger.error(
+                         f"Tool call timeout after {self.config.timeout}s, please check your tool, mcp server config: {self.config.to_dict()}"
+                     )
+                     raise Exception(
+                         f"Tool call timeout after {self.config.timeout}s, please check your tool, mcp server config: {self.config.to_dict()}"
+                     )
+
+                 if hasattr(result, "content") and result.content:
+                     content_texts = []
+                     for content in result.content:
+                         if hasattr(content, "text"):
+                             content_texts.append(content.text)
+                         elif hasattr(content, "data"):
+                             content_texts.append(str(content.data))
+                     final_result = (
+                         "\n".join(content_texts) if content_texts else str(result)
+                     )
+                 else:
+                     final_result = str(result)
+
+                 logger.debug(f"Tool {tool_name} executed successfully")
+                 return final_result
+
+             except Exception as e:
+                 logger.error(
+                     f"Tool call failed (attempt {attempt + 1}/{max_retries + 1}): {tool_name}, error: {e}, mcp server config: {self.config.to_dict()}"
+                 )
+                 logger.error(
+                     f"Full traceback: {traceback.format_exc()}"
+                 )  # Add full traceback
+                 last_exception = e
+
+                 if connection:
+                     # Mark the connection as invalid, allowing the connection pool to clean it up when it is next acquired.
+                     connection["invalid"] = True
+                     # Attempt immediate cleanup for invalid connections
+                     try:
+                         await _connection_pool._cleanup_connection_object(connection)
+                     except Exception as cleanup_e:
+                         logger.warning(
+                             f"Failed to cleanup invalid connection: {cleanup_e}"
+                         )
+                     connection = None  # Ensure it's not released in finally
+
+                 if attempt < max_retries:
+                     # Exponential backoff for retries
+                     delay = 0.5 * (2**attempt)  # 0.5s, 1s, 2s...
+                     logger.debug(f"Retrying in {delay}s...")
+                     await asyncio.sleep(delay)
+                 else:
+                     raise Exception(
+                         f"Tool call failed after {max_retries + 1} attempts: {str(last_exception)}"
+                     ) from last_exception
+
+             finally:
+                 if connection and not connection.get("invalid", False):
+                     await _connection_pool.release(self.config.name, connection)
+
+         raise Exception(
+             f"Tool call failed after all retry attempts: {str(last_exception)}"
+         )
+
+     async def get_available_tools_from_pool(self) -> List[Dict[str, Any]]:
+         """Get the list of available tools using the connection pool."""
+         global _connection_pool
+         connection = None
+         try:
+             server_params = StdioServerParameters(
+                 command=self.config.command, args=self.config.args, env=self.config.env
+             )
+
+             connection = await _connection_pool.acquire(self.config.name, server_params)
+             session = connection["session"]
+
+             logger.debug(
+                 f"Getting available tools for {self.config.name} using connection pool"
+             )
+             tools_response = await session.list_tools()
+             tools = [
+                 {
+                     "name": tool.name,
+                     "description": tool.description or "",
+                     "parameters": tool.inputSchema or {},
+                 }
+                 for tool in tools_response.tools
+             ]
+             logger.debug(f"Successfully got {len(tools)} tools from {self.config.name}")
+             return tools
+
+         except Exception as e:
+             logger.error(f"Failed to get tools from {self.config.name} using pool: {e}")
+             if connection:
+                 # Connection might be stale, clean it up instead of releasing
+                 await _connection_pool._cleanup_connection_object(connection)
+                 connection = None  # Prevent release in finally
+             raise
+
+         finally:
+             if connection:
+                 await _connection_pool.release(self.config.name, connection)
+
+     async def get_available_tools_standalone(self) -> List[Dict[str, Any]]:
+         """Get the list of available tools over a standalone (non-pooled) connection."""
+         try:
+             server_params = StdioServerParameters(
+                 command=self.config.command, args=self.config.args, env=self.config.env
+             )
+
+             async with stdio_client(server_params) as (read_stream, write_stream):
+                 async with ClientSession(read_stream, write_stream) as session:
+                     await session.initialize()
+
+                     tools_response = await session.list_tools()
+                     return [
+                         {
+                             "name": tool.name,
+                             "description": tool.description or "",
+                             "parameters": tool.inputSchema or {},
+                         }
+                         for tool in tools_response.tools
+                     ]
+
+         except Exception as e:
+             logger.error(f"Failed to get tools from {self.config.name}: {e}")
+             raise
+
+     @staticmethod
+     def cleanup_connections():
+         """Clean up all connections - simple version, avoid atexit hanging"""
+         global _connection_pool
+
+         try:
+             logger.debug("Starting connection cleanup")
+             # Use simplified cleanup directly to avoid blocking caused by using asyncio.run() in atexit
+             MCPAdapter._simplified_cleanup()
+             logger.debug("Connection cleanup completed")
+         except Exception as e:
+             logger.error(f"Error during connection cleanup: {e}")
+             # Ensure that program exit is not blocked due to cleanup failures
+
+     @staticmethod
+     def _simplified_cleanup():
+         """Simplified synchronous cleanup method."""
+         global _connection_pool
+
+         try:
+             if hasattr(_connection_pool, "pool"):
+                 # Mark connections as invalid
+                 for server_name, connections in _connection_pool.pool.items():
+                     for conn in connections:
+                         if isinstance(conn, dict):
+                             conn["invalid"] = True
+
+                 # Clear connection pool
+                 _connection_pool.pool.clear()
+                 logger.debug("Cleared connection pool")
+
+         except Exception as e:
+             logger.warning(f"Error in simplified cleanup: {e}")
+             # Force reset connection pool
+             try:
+                 _connection_pool.pool = {}
+             except:
+                 pass
+
+     @staticmethod
+     def shutdown_gracefully():
+         """Gracefully close all connections"""
+         global _connection_pool
+
+         try:
+             # Mark all connections as invalid to prevent new calls
+             for server_name, connections in _connection_pool.pool.items():
+                 for conn in connections:
+                     conn["invalid"] = True
+
+             logger.debug("Marked all connections as invalid")
+
+             # Run the synchronous connection cleanup
+             MCPAdapter.cleanup_connections()
+
+         except Exception as e:
+             logger.error(f"Error during graceful shutdown: {e}")
+
+     @staticmethod
+     def get_connection_status() -> Dict[str, Any]:
+         """Get connection pool status"""
+         global _connection_pool
+         status = {}
+         try:
+             # Use list() to avoid the pool size changing during iteration
+             for server_name, connections in list(_connection_pool.pool.items()):
+                 status[server_name] = {
+                     "pool_size": len(connections),
+                     "in_use": sum(1 for c in connections if c.get("in_use")),
+                     "max_connections": _connection_pool.max_connections_per_server,
+                 }
+         except Exception as e:
+             logger.error(f"Error getting connection status: {e}")
+         return status
+
+     async def test_connection(self) -> bool:
+         """Test whether the connection is working."""
+         try:
+             # Test using a pooled connection for consistency
+             tools = await self.get_available_tools_from_pool()
+             # Check if we actually got tools (empty list is valid)
+             return isinstance(tools, list)  # Valid if we get a list (even empty)
+         except Exception as e:
+             logger.error(f"Connection test failed: {e}")
+             return False
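
For orientation, below is a minimal usage sketch of the new mcp_adapter module added in this diff, assuming a hypothetical stdio-launched MCP server: the command, args, server name, and tool selection are placeholders, not anything shipped in the package. It builds an MCPServerConfig, wraps it in an MCPAdapter, lists tools through the shared connection pool, calls one with the built-in timeout and retry handling, and does a best-effort cleanup at exit.

import asyncio

from dolphin.lib.skillkits.mcp_adapter import MCPAdapter, MCPServerConfig


async def main() -> None:
    # Placeholder server config: any MCP server started over stdio would do.
    config = MCPServerConfig(
        name="example-server",              # key used by the shared connection pool
        command="npx",                      # startup command, as in the config comment
        args=["-y", "example-mcp-server"],  # placeholder args, not a real package
        timeout=30,                         # per-call timeout used by call_tool_with_connection_reuse
    )
    adapter = MCPAdapter(config)

    # List tools through the shared pool (at most 5 connections per server name).
    tools = await adapter.get_available_tools_from_pool()
    print("tools:", [t["name"] for t in tools])

    # Call a tool; the adapter retries up to 2 times with exponential backoff
    # and marks timed-out or failed connections as invalid so the pool drops them.
    if tools:
        result = await adapter.call_tool_with_connection_reuse(tools[0]["name"], {})
        print(result)

    # Optional: inspect the pool.
    print(MCPAdapter.get_connection_status())


if __name__ == "__main__":
    try:
        asyncio.run(main())
    finally:
        # Best-effort synchronous cleanup of pooled connections at exit.
        MCPAdapter.shutdown_gracefully()

Because the module-level pool caps connections at five per server name and caches health checks for 30 seconds, repeated tool calls against the same config reuse live sessions rather than respawning the server process each time.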